test: remove timeout tests that weren't working #1309

Merged (1 commit) on Jun 7, 2024
173 changes: 0 additions & 173 deletions pkg/controllers/disruption/consolidation_test.go
@@ -4135,179 +4135,6 @@ var _ = Describe("Consolidation", func() {
		})
	})

	Context("Timeout", func() {
		It("should return the last valid command when multi-nodeclaim consolidation times out", func() {
			numNodes := 20
			nodeClaims, nodes := test.NodeClaimsAndNodes(numNodes, v1beta1.NodeClaim{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						v1beta1.NodePoolLabelKey: nodePool.Name,
						v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
						v1beta1.CapacityTypeLabelKey: leastExpensiveOffering.Requirements.Get(v1beta1.CapacityTypeLabelKey).Any(),
						v1.LabelTopologyZone: leastExpensiveOffering.Requirements.Get(v1.LabelTopologyZone).Any(),
					},
				},
				Status: v1beta1.NodeClaimStatus{
					Allocatable: map[v1.ResourceName]resource.Quantity{
						v1.ResourceCPU: resource.MustParse("32"),
						v1.ResourcePods: resource.MustParse("100"),
					},
				}},
			)
			// create our RS so we can link a pod to it
			rs := test.ReplicaSet()
			ExpectApplied(ctx, env.Client, rs)
			pods := test.Pods(numNodes, test.PodOptions{
				ObjectMeta: metav1.ObjectMeta{Labels: labels,
					OwnerReferences: []metav1.OwnerReference{
						{
							APIVersion: "apps/v1",
							Kind: "ReplicaSet",
							Name: rs.Name,
							UID: rs.UID,
							Controller: lo.ToPtr(true),
							BlockOwnerDeletion: lo.ToPtr(true),
						},
					}},
				ResourceRequirements: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						// Make the resource requests small so that many nodes can be consolidated at once.
						v1.ResourceCPU: resource.MustParse("10m"),
					},
				},
			})

			ExpectApplied(ctx, env.Client, rs, nodePool)
			for _, nodeClaim := range nodeClaims {
				ExpectApplied(ctx, env.Client, nodeClaim)
			}
			for _, node := range nodes {
				ExpectApplied(ctx, env.Client, node)
			}
			for i, pod := range pods {
				ExpectApplied(ctx, env.Client, pod)
				ExpectManualBinding(ctx, env.Client, pod, nodes[i])
			}

			// inform cluster state about nodes and nodeClaims
			ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx, env.Client, nodeStateController, nodeClaimStateController, nodes, nodeClaims)

			var wg sync.WaitGroup
			wg.Add(1)
			finished := atomic.Bool{}
			go func() {
				defer GinkgoRecover()
				defer wg.Done()
				defer finished.Store(true)
				ExpectSingletonReconciled(ctx, disruptionController)
			}()

			// advance the clock so that the timeout expires
			fakeClock.Step(disruption.MultiNodeConsolidationTimeoutDuration)

			// wait for the controller to block on the validation timeout
			Eventually(fakeClock.HasWaiters, time.Second*10).Should(BeTrue())

			ExpectTriggerVerifyAction(&wg)

			// controller should be blocking during the timeout
			Expect(finished.Load()).To(BeFalse())

			// and the node should not be deleted yet
			for i := range nodeClaims {
				ExpectExists(ctx, env.Client, nodeClaims[i])
			}

			// controller should finish
			Eventually(finished.Load, 10*time.Second).Should(BeTrue())
			wg.Wait()

			ExpectSingletonReconciled(ctx, queue)

			// should have at least two nodes deleted from multi nodeClaim consolidation
			Expect(len(ExpectNodeClaims(ctx, env.Client))).To(BeNumerically("<=", numNodes-2))
		})
It("should exit single-nodeclaim consolidation if it times out", func() {
numNodes := 25
nodeClaims, nodes := test.NodeClaimsAndNodes(numNodes, v1beta1.NodeClaim{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
v1beta1.NodePoolLabelKey: nodePool.Name,
v1.LabelInstanceTypeStable: leastExpensiveInstance.Name,
v1beta1.CapacityTypeLabelKey: leastExpensiveOffering.Requirements.Get(v1beta1.CapacityTypeLabelKey).Any(),
v1.LabelTopologyZone: leastExpensiveOffering.Requirements.Get(v1.LabelTopologyZone).Any(),
},
},
Status: v1beta1.NodeClaimStatus{
Allocatable: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: resource.MustParse("32"),
v1.ResourcePods: resource.MustParse("100"),
},
}},
)
// create our RS so we can link a pod to it
rs := test.ReplicaSet()
ExpectApplied(ctx, env.Client, rs)
pods := test.Pods(numNodes, test.PodOptions{
ObjectMeta: metav1.ObjectMeta{Labels: labels,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: "apps/v1",
Kind: "ReplicaSet",
Name: rs.Name,
UID: rs.UID,
Controller: lo.ToPtr(true),
BlockOwnerDeletion: lo.ToPtr(true),
},
}},
ResourceRequirements: v1.ResourceRequirements{
Requests: v1.ResourceList{
// Make the pods more than half of the allocatable so that only one nodeclaim can be done at any time
v1.ResourceCPU: resource.MustParse("20"),
},
},
})

ExpectApplied(ctx, env.Client, rs, nodePool)
for _, nodeClaim := range nodeClaims {
ExpectApplied(ctx, env.Client, nodeClaim)
}
for _, node := range nodes {
ExpectApplied(ctx, env.Client, node)
}
for i, pod := range pods {
ExpectApplied(ctx, env.Client, pod)
ExpectManualBinding(ctx, env.Client, pod, nodes[i])
}

// inform cluster state about nodes and nodeClaims
ExpectMakeNodesAndNodeClaimsInitializedAndStateUpdated(ctx, env.Client, nodeStateController, nodeClaimStateController, nodes, nodeClaims)

var wg sync.WaitGroup
wg.Add(1)
finished := atomic.Bool{}
go func() {
defer GinkgoRecover()
defer wg.Done()
defer finished.Store(true)
ExpectSingletonReconciled(ctx, disruptionController)
}()

// advance the clock so that the timeout expires for multi-nodeClaim
fakeClock.Step(disruption.MultiNodeConsolidationTimeoutDuration)
// advance the clock so that the timeout expires for single-nodeClaim
fakeClock.Step(disruption.SingleNodeConsolidationTimeoutDuration)

ExpectTriggerVerifyAction(&wg)

// controller should finish
Eventually(finished.Load, 10*time.Second).Should(BeTrue())
wg.Wait()

// should have no nodeClaims deleted from single nodeClaim consolidation
Expect(ExpectNodeClaims(ctx, env.Client)).To(HaveLen(numNodes))
})
})
Context("Multi-NodeClaim", func() {
var nodeClaims, spotNodeClaims []*v1beta1.NodeClaim
var nodes, spotNodes []*v1.Node
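
For reference, the removed tests above rely on a common fake-clock pattern for exercising timeouts: run the work in a goroutine, record completion with an atomic.Bool, wait until the worker is parked on the clock (HasWaiters), then advance the clock with Step. The sketch below is not part of this PR; it is a minimal, self-contained illustration of that pattern, assuming k8s.io/utils/clock/testing as the fake clock and a hypothetical placeholder timeout constant in place of the disruption package's real values.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	testingclock "k8s.io/utils/clock/testing"
)

// timeout is a hypothetical placeholder standing in for a consolidation
// timeout such as MultiNodeConsolidationTimeoutDuration.
const timeout = time.Minute

func main() {
	fakeClock := testingclock.NewFakeClock(time.Now())
	finished := atomic.Bool{}
	var wg sync.WaitGroup
	wg.Add(1)

	go func() {
		defer wg.Done()
		defer finished.Store(true)
		// Stand-in for the consolidation pass: block until the fake clock
		// reports that the timeout has elapsed.
		<-fakeClock.After(timeout)
	}()

	// Wait until the goroutine is parked on the fake clock, mirroring the
	// Eventually(fakeClock.HasWaiters) assertion in the removed tests.
	for !fakeClock.HasWaiters() {
		time.Sleep(time.Millisecond)
	}
	fmt.Println("still running before the timeout:", !finished.Load())

	// Advance the fake clock past the timeout so the worker unblocks.
	fakeClock.Step(timeout)
	wg.Wait()
	fmt.Println("finished after the timeout:", finished.Load())
}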