Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: promote drift to stable #1311

Merged
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 2 additions & 3 deletions kwok/charts/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,11 +51,10 @@ For full Karpenter documentation please checkout [https://karpenter.sh](https://
| serviceMonitor.additionalLabels | object | `{}` | Additional labels for the ServiceMonitor. |
| serviceMonitor.enabled | bool | `false` | Specifies whether a ServiceMonitor should be created. |
| serviceMonitor.endpointConfig | object | `{}` | Endpoint configuration for the ServiceMonitor. |
| settings | object | `{"batchIdleDuration":"1s","batchMaxDuration":"10s","featureGates":{"drift":true,"spotToSpotConsolidation":false}}` | Global Settings to configure Karpenter |
| settings | object | `{"batchIdleDuration":"1s","batchMaxDuration":"10s","featureGates":{"spotToSpotConsolidation":false}}` | Global Settings to configure Karpenter |
| settings.batchIdleDuration | string | `"1s"` | The maximum amount of time with no new pending pods that if exceeded ends the current batching window. If pods arrive faster than this time, the batching window will be extended up to the maxDuration. If they arrive slower, the pods will be batched separately. |
| settings.batchMaxDuration | string | `"10s"` | The maximum length of a batch window. The longer this is, the more pods we can consider for provisioning at one time which usually results in fewer but larger nodes. |
| settings.featureGates | object | `{"drift":true,"spotToSpotConsolidation":false}` | Feature Gate configuration values. Feature Gates will follow the same graduation process and requirements as feature gates in Kubernetes. More information here https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features |
| settings.featureGates.drift | bool | `true` | drift is in BETA and is enabled by default. Setting drift to false disables the drift disruption method to watch for drift between currently deployed nodes and the desired state of nodes set in nodepools and nodeclasses |
| settings.featureGates | object | `{"spotToSpotConsolidation":false}` | Feature Gate configuration values. Feature Gates will follow the same graduation process and requirements as feature gates in Kubernetes. More information here https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features |
| settings.featureGates.spotToSpotConsolidation | bool | `false` | spotToSpotConsolidation is ALPHA and is disabled by default. Setting this to true will enable spot replacement consolidation for both single and multi-node consolidation. |
| strategy | object | `{"rollingUpdate":{"maxUnavailable":1}}` | Strategy for updating the pod. |
| terminationGracePeriodSeconds | string | `nil` | Override the default termination grace period for the pod. |
Expand Down
2 changes: 1 addition & 1 deletion kwok/charts/templates/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ spec:
divisor: "0"
resource: limits.memory
- name: FEATURE_GATES
value: "Drift={{ .Values.settings.featureGates.drift }},SpotToSpotConsolidation={{ .Values.settings.featureGates.spotToSpotConsolidation }}"
value: "SpotToSpotConsolidation={{ .Values.settings.featureGates.spotToSpotConsolidation }}"
{{- with .Values.settings.batchMaxDuration }}
- name: BATCH_MAX_DURATION
value: "{{ . }}"
Expand Down
4 changes: 0 additions & 4 deletions kwok/charts/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -136,10 +136,6 @@ settings:
# -- Feature Gate configuration values. Feature Gates will follow the same graduation process and requirements as feature gates
# in Kubernetes. More information here https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features
featureGates:
# -- drift is in BETA and is enabled by default.
# Setting drift to false disables the drift disruption method to watch for drift between currently deployed nodes
# and the desired state of nodes set in nodepools and nodeclasses
drift: true
# -- spotToSpotConsolidation is ALPHA and is disabled by default.
# Setting this to true will enable spot replacement consolidation for both single and multi-node consolidation.
spotToSpotConsolidation: false
4 changes: 1 addition & 3 deletions pkg/controllers/disruption/drift.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ import (
"sigs.k8s.io/karpenter/pkg/controllers/state"
"sigs.k8s.io/karpenter/pkg/events"
"sigs.k8s.io/karpenter/pkg/metrics"
"sigs.k8s.io/karpenter/pkg/operator/options"
)

// Drift is a subreconciler that deletes drifted candidates.
Expand All @@ -52,8 +51,7 @@ func NewDrift(kubeClient client.Client, cluster *state.Cluster, provisioner *pro

// ShouldDisrupt is a predicate used to filter candidates
func (d *Drift) ShouldDisrupt(ctx context.Context, c *Candidate) bool {
return options.FromContext(ctx).FeatureGates.Drift &&
c.NodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted).IsTrue()
return c.NodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted).IsTrue()
}

// ComputeCommand generates a disruption command given candidates
Expand Down
19 changes: 0 additions & 19 deletions pkg/controllers/disruption/drift_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@ import (
"sigs.k8s.io/karpenter/pkg/cloudprovider"
"sigs.k8s.io/karpenter/pkg/cloudprovider/fake"
"sigs.k8s.io/karpenter/pkg/controllers/disruption"
"sigs.k8s.io/karpenter/pkg/operator/options"
"sigs.k8s.io/karpenter/pkg/scheduling"
"sigs.k8s.io/karpenter/pkg/test"
. "sigs.k8s.io/karpenter/pkg/test/expectations"
Expand Down Expand Up @@ -471,24 +470,6 @@ var _ = Describe("Drift", func() {
})

Context("Drift", func() {
It("should ignore drifted nodes if the feature flag is disabled", func() {
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(false)}}))
ExpectApplied(ctx, env.Client, nodeClaim, node, nodePool)

// inform cluster state about nodes and nodeclaims
ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx, env.Client, nodeStateController, nodeClaimStateController, []*v1.Node{node}, []*v1beta1.NodeClaim{nodeClaim})

fakeClock.Step(10 * time.Minute)

var wg sync.WaitGroup
ExpectTriggerVerifyAction(&wg)
ExpectSingletonReconciled(ctx, disruptionController)
wg.Wait()

// Expect to not create or delete more nodeclaims
Expect(ExpectNodeClaims(ctx, env.Client)).To(HaveLen(1))
ExpectExists(ctx, env.Client, nodeClaim)
})
It("should continue to the next drifted node if the first cannot reschedule all pods", func() {
pod := test.Pod(test.PodOptions{
ResourceRequirements: v1.ResourceRequirements{
Expand Down
3 changes: 1 addition & 2 deletions pkg/controllers/disruption/orchestration/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
Expand Down Expand Up @@ -73,7 +72,7 @@ func TestAPIs(t *testing.T) {

var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(true)}}))
ctx = options.ToContext(ctx, test.Options())
fakeClock = clock.NewFakeClock(time.Now())
cloudProvider = fake.NewCloudProvider()
cluster = state.NewCluster(fakeClock, env.Client, cloudProvider)
Expand Down
4 changes: 2 additions & 2 deletions pkg/controllers/disruption/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ func TestAPIs(t *testing.T) {

var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(coreapis.CRDs...))
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(true)}}))
ctx = options.ToContext(ctx, test.Options())
cloudProvider = fake.NewCloudProvider()
fakeClock = clock.NewFakeClock(time.Now())
cluster = state.NewCluster(fakeClock, env.Client, cloudProvider)
Expand Down Expand Up @@ -112,7 +112,7 @@ var _ = BeforeEach(func() {
cluster.MarkUnconsolidated()

// Reset Feature Flags to test defaults
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(true)}}))
ctx = options.ToContext(ctx, test.Options())

onDemandInstances = lo.Filter(cloudProvider.InstanceTypes, func(i *cloudprovider.InstanceType, _ int) bool {
for _, o := range i.Offerings.Available() {
Expand Down
3 changes: 1 addition & 2 deletions pkg/controllers/node/termination/terminator/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/samber/lo"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -57,7 +56,7 @@ func TestAPIs(t *testing.T) {

var _ = BeforeSuite(func() {
env = test.NewEnvironment(scheme.Scheme, test.WithCRDs(apis.CRDs...))
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(true)}}))
ctx = options.ToContext(ctx, test.Options())
recorder = test.NewEventRecorder()
queue = terminator.NewQueue(env.Client, recorder)
})
Expand Down
15 changes: 3 additions & 12 deletions pkg/controllers/nodeclaim/disruption/drift.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ import (
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
"sigs.k8s.io/karpenter/pkg/cloudprovider"
"sigs.k8s.io/karpenter/pkg/metrics"
"sigs.k8s.io/karpenter/pkg/operator/options"
"sigs.k8s.io/karpenter/pkg/scheduling"
)

Expand All @@ -48,15 +47,7 @@ func (d *Drift) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeC
hasDriftedCondition := nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted) != nil

// From here there are three scenarios to handle:
// 1. If drift is not enabled but the NodeClaim is drifted, remove the status condition
if !options.FromContext(ctx).FeatureGates.Drift {
_ = nodeClaim.StatusConditions().Clear(v1beta1.ConditionTypeDrifted)
if hasDriftedCondition {
log.FromContext(ctx).V(1).Info("removing drift status condition, drift has been disabled")
}
return reconcile.Result{}, nil
}
// 2. If NodeClaim is not launched, remove the drift status condition
// 1. If NodeClaim is not launched, remove the drift status condition
if !nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeLaunched).IsTrue() {
_ = nodeClaim.StatusConditions().Clear(v1beta1.ConditionTypeDrifted)
if hasDriftedCondition {
Expand All @@ -68,15 +59,15 @@ func (d *Drift) Reconcile(ctx context.Context, nodePool *v1beta1.NodePool, nodeC
if err != nil {
return reconcile.Result{}, cloudprovider.IgnoreNodeClaimNotFoundError(fmt.Errorf("getting drift, %w", err))
}
// 3. Otherwise, if the NodeClaim isn't drifted, but has the status condition, remove it.
// 2. Otherwise, if the NodeClaim isn't drifted, but has the status condition, remove it.
if driftedReason == "" {
_ = nodeClaim.StatusConditions().Clear(v1beta1.ConditionTypeDrifted)
if hasDriftedCondition {
log.FromContext(ctx).V(1).Info("removing drifted status condition, not drifted")
}
return reconcile.Result{RequeueAfter: 5 * time.Minute}, nil
}
// 4. Finally, if the NodeClaim is drifted, but doesn't have status condition, add it.
// 3. Finally, if the NodeClaim is drifted, but doesn't have status condition, add it.
nodeClaim.StatusConditions().SetTrueWithReason(v1beta1.ConditionTypeDrifted, string(driftedReason), string(driftedReason))
if !hasDriftedCondition {
log.FromContext(ctx).V(1).WithValues("reason", string(driftedReason)).Info("marking drifted")
Expand Down
21 changes: 0 additions & 21 deletions pkg/controllers/nodeclaim/disruption/drift_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ import (
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"
"sigs.k8s.io/karpenter/pkg/controllers/nodeclaim/disruption"
"sigs.k8s.io/karpenter/pkg/controllers/nodepool/hash"
"sigs.k8s.io/karpenter/pkg/operator/options"
. "sigs.k8s.io/karpenter/pkg/test/expectations"

"sigs.k8s.io/karpenter/pkg/test"
Expand Down Expand Up @@ -153,26 +152,6 @@ var _ = Describe("Drift", func() {
Expect(nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted).IsTrue()).To(BeTrue())
Expect(nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted).Reason).To(Equal(string(disruption.RequirementsDrifted)))
})
It("should not detect drift if the feature flag is disabled", func() {
cp.Drifted = "drifted"
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(false)}}))
ExpectApplied(ctx, env.Client, nodePool, nodeClaim)
ExpectObjectReconciled(ctx, env.Client, nodeClaimDisruptionController, nodeClaim)

nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
Expect(nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted)).To(BeNil())
})
It("should remove the status condition from the nodeClaim if the feature flag is disabled", func() {
cp.Drifted = "drifted"
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(false)}}))
nodeClaim.StatusConditions().SetTrue(v1beta1.ConditionTypeDrifted)
ExpectApplied(ctx, env.Client, nodePool, nodeClaim)

ExpectObjectReconciled(ctx, env.Client, nodeClaimDisruptionController, nodeClaim)

nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
Expect(nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted)).To(BeNil())
})
It("should remove the status condition from the nodeClaim when the nodeClaim launch condition is unknown", func() {
cp.Drifted = "drifted"
nodeClaim.StatusConditions().SetTrue(v1beta1.ConditionTypeDrifted)
Expand Down
8 changes: 4 additions & 4 deletions pkg/controllers/nodeclaim/disruption/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ var _ = AfterSuite(func() {
})

var _ = BeforeEach(func() {
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(true)}}))
ctx = options.ToContext(ctx, test.Options())
fakeClock.SetTime(time.Now())
})

Expand Down Expand Up @@ -126,12 +126,12 @@ var _ = Describe("Disruption", func() {
ExpectApplied(ctx, env.Client, nodePool, nodeClaim, node)
ExpectMakeNodeClaimsInitialized(ctx, env.Client, nodeClaim)

// Drift, Expiration, and Emptiness are disabled through configuration
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{FeatureGates: test.FeatureGates{Drift: lo.ToPtr(false)}}))
// Expiration and Emptiness are disabled through configuration
rschalo marked this conversation as resolved.
Show resolved Hide resolved
ctx = options.ToContext(ctx, test.Options())
ExpectObjectReconciled(ctx, env.Client, nodeClaimDisruptionController, nodeClaim)

nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
Expect(nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted)).To(BeNil())
Expect(nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeDrifted).IsTrue()).To(BeTrue())
Expect(nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeEmpty)).To(BeNil())
Expect(nodeClaim.StatusConditions().Get(v1beta1.ConditionTypeExpired)).To(BeNil())
})
Expand Down
6 changes: 1 addition & 5 deletions pkg/operator/options/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ type optionsKey struct{}
type FeatureGates struct {
inputStr string

Drift bool
SpotToSpotConsolidation bool
}

Expand Down Expand Up @@ -96,7 +95,7 @@ func (o *Options) AddFlags(fs *FlagSet) {
fs.StringVar(&o.LogLevel, "log-level", env.WithDefaultString("LOG_LEVEL", "info"), "Log verbosity level. Can be one of 'debug', 'info', or 'error'")
fs.DurationVar(&o.BatchMaxDuration, "batch-max-duration", env.WithDefaultDuration("BATCH_MAX_DURATION", 10*time.Second), "The maximum length of a batch window. The longer this is, the more pods we can consider for provisioning at one time which usually results in fewer but larger nodes.")
fs.DurationVar(&o.BatchIdleDuration, "batch-idle-duration", env.WithDefaultDuration("BATCH_IDLE_DURATION", time.Second), "The maximum amount of time with no new pending pods that if exceeded ends the current batching window. If pods arrive faster than this time, the batching window will be extended up to the maxDuration. If they arrive slower, the pods will be batched separately.")
fs.StringVar(&o.FeatureGates.inputStr, "feature-gates", env.WithDefaultString("FEATURE_GATES", "Drift=true,SpotToSpotConsolidation=false"), "Optional features can be enabled / disabled using feature gates. Current options are: Drift,SpotToSpotConsolidation")
fs.StringVar(&o.FeatureGates.inputStr, "feature-gates", env.WithDefaultString("FEATURE_GATES", "SpotToSpotConsolidation=false"), "Optional features can be enabled / disabled using feature gates. Current options are: SpotToSpotConsolidation")
}

func (o *Options) Parse(fs *FlagSet, args ...string) error {
Expand Down Expand Up @@ -131,9 +130,6 @@ func ParseFeatureGates(gateStr string) (FeatureGates, error) {
if err := cliflag.NewMapStringBool(&gateMap).Set(gateStr); err != nil {
return gates, err
}
if val, ok := gateMap["Drift"]; ok {
gates.Drift = val
}
if val, ok := gateMap["SpotToSpotConsolidation"]; ok {
gates.SpotToSpotConsolidation = val
}
Expand Down
28 changes: 14 additions & 14 deletions pkg/operator/options/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,15 +79,15 @@ var _ = Describe("Options", func() {
Context("FeatureGates", func() {
DescribeTable(
"should successfully parse well formed feature gate strings",
func(str string, driftVal bool) {
func(str string, spotToSpotConsolidationVal bool) {
gates, err := options.ParseFeatureGates(str)
Expect(err).To(BeNil())
Expect(gates.Drift).To(Equal(driftVal))
Expect(gates.SpotToSpotConsolidation).To(Equal(spotToSpotConsolidationVal))
},
Entry("basic true", "Drift=true", true),
Entry("basic false", "Drift=false", false),
Entry("with whitespace", "Drift\t= false", false),
Entry("multiple values", "Hello=true,Drift=false,World=true", false),
Entry("basic true", "SpotToSpotConsolidation=true", true),
Entry("basic false", "SpotToSpotConsolidation=false", false),
Entry("with whitespace", "SpotToSpotConsolidation\t= false", false),
Entry("multiple values", "Hello=true,SpotToSpotConsolidation=false,World=true", false),
)
})

Expand All @@ -111,7 +111,7 @@ var _ = Describe("Options", func() {
BatchMaxDuration: lo.ToPtr(10 * time.Second),
BatchIdleDuration: lo.ToPtr(time.Second),
FeatureGates: test.FeatureGates{
Drift: lo.ToPtr(true),
SpotToSpotConsolidation: lo.ToPtr(false),
},
}))
})
Expand All @@ -133,7 +133,7 @@ var _ = Describe("Options", func() {
"--log-level", "debug",
"--batch-max-duration", "5s",
"--batch-idle-duration", "5s",
"--feature-gates", "Drift=true",
"--feature-gates", "SpotToSpotConsolidation=true",
)
Expect(err).To(BeNil())
expectOptionsMatch(opts, test.Options(test.OptionsFields{
Expand All @@ -152,7 +152,7 @@ var _ = Describe("Options", func() {
BatchMaxDuration: lo.ToPtr(5 * time.Second),
BatchIdleDuration: lo.ToPtr(5 * time.Second),
FeatureGates: test.FeatureGates{
Drift: lo.ToPtr(true),
SpotToSpotConsolidation: lo.ToPtr(true),
},
}))
})
Expand All @@ -172,7 +172,7 @@ var _ = Describe("Options", func() {
os.Setenv("LOG_LEVEL", "debug")
os.Setenv("BATCH_MAX_DURATION", "5s")
os.Setenv("BATCH_IDLE_DURATION", "5s")
os.Setenv("FEATURE_GATES", "Drift=true")
os.Setenv("FEATURE_GATES", "SpotToSpotConsolidation=true")
fs = &options.FlagSet{
FlagSet: flag.NewFlagSet("karpenter", flag.ContinueOnError),
}
Expand All @@ -195,7 +195,7 @@ var _ = Describe("Options", func() {
BatchMaxDuration: lo.ToPtr(5 * time.Second),
BatchIdleDuration: lo.ToPtr(5 * time.Second),
FeatureGates: test.FeatureGates{
Drift: lo.ToPtr(true),
SpotToSpotConsolidation: lo.ToPtr(true),
},
}))
})
Expand All @@ -213,7 +213,7 @@ var _ = Describe("Options", func() {
os.Setenv("LOG_LEVEL", "debug")
os.Setenv("BATCH_MAX_DURATION", "5s")
os.Setenv("BATCH_IDLE_DURATION", "5s")
os.Setenv("FEATURE_GATES", "Drift=true")
os.Setenv("FEATURE_GATES", "SpotToSpotConsolidation=true")
fs = &options.FlagSet{
FlagSet: flag.NewFlagSet("karpenter", flag.ContinueOnError),
}
Expand All @@ -240,7 +240,7 @@ var _ = Describe("Options", func() {
BatchMaxDuration: lo.ToPtr(5 * time.Second),
BatchIdleDuration: lo.ToPtr(5 * time.Second),
FeatureGates: test.FeatureGates{
Drift: lo.ToPtr(true),
SpotToSpotConsolidation: lo.ToPtr(true),
},
}))
})
Expand Down Expand Up @@ -299,5 +299,5 @@ func expectOptionsMatch(optsA, optsB *options.Options) {
Expect(optsA.LogLevel).To(Equal(optsB.LogLevel))
Expect(optsA.BatchMaxDuration).To(Equal(optsB.BatchMaxDuration))
Expect(optsA.BatchIdleDuration).To(Equal(optsB.BatchIdleDuration))
Expect(optsA.FeatureGates.Drift).To(Equal(optsB.FeatureGates.Drift))
Expect(optsA.FeatureGates.SpotToSpotConsolidation).To(Equal(optsB.FeatureGates.SpotToSpotConsolidation))
}
Loading
Loading