Enforce stricter compatibility for existing nodes/machines (kubernete…
jonathan-innis authored Aug 3, 2023
1 parent 507e5de commit 3aa152d
Showing 3 changed files with 93 additions and 3 deletions.
4 changes: 2 additions & 2 deletions pkg/controllers/provisioning/scheduling/existingnode.go
@@ -89,7 +89,7 @@ func (n *ExistingNode) Add(ctx context.Context, kubeClient client.Client, pod *v
     nodeRequirements := scheduling.NewRequirements(n.requirements.Values()...)
     podRequirements := scheduling.NewPodRequirements(pod)
     // Check Machine Affinity Requirements
-    if err = nodeRequirements.Compatible(podRequirements); err != nil {
+    if err = nodeRequirements.StrictlyCompatible(podRequirements); err != nil {
         return err
     }
     nodeRequirements.Add(podRequirements.Values()...)
@@ -99,7 +99,7 @@ func (n *ExistingNode) Add(ctx context.Context, kubeClient client.Client, pod *v
     if err != nil {
         return err
     }
-    if err = nodeRequirements.Compatible(topologyRequirements); err != nil {
+    if err = nodeRequirements.StrictlyCompatible(topologyRequirements); err != nil {
         return err
     }
     nodeRequirements.Add(topologyRequirements.Values()...)
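
The crux of the change is the difference between the two checks. Below is a minimal sketch of the assumed semantics (simplified types; not karpenter-core's actual Requirements API): the loose check treats a label key that the node's requirements do not define as satisfiable, because a machine that is still launching can take on new labels, while the strict check rejects it, because an existing node's labels are already fixed.

// compat_sketch.go -- an illustrative sketch, NOT karpenter-core's implementation.
package main

import "fmt"

// requirements maps a label key to the set of values it allows.
type requirements map[string]map[string]bool

// compatible checks that every pod requirement overlaps the node's.
// allowUndefined selects loose vs. strict behavior: a key the node does
// not define passes the loose check (a machine being launched can still
// take on that label) but fails the strict one (an existing node's labels
// are fixed, so an undefined key can never be satisfied).
func compatible(node, pod requirements, allowUndefined bool) error {
    for key, podValues := range pod {
        nodeValues, defined := node[key]
        if !defined {
            if allowUndefined {
                continue
            }
            return fmt.Errorf("existing node does not define label %q", key)
        }
        if !overlaps(nodeValues, podValues) {
            return fmt.Errorf("no value overlap for label %q", key)
        }
    }
    return nil
}

func overlaps(a, b map[string]bool) bool {
    for v := range b {
        if a[v] {
            return true
        }
    }
    return false
}

func main() {
    node := requirements{} // existing node with no zone requirement recorded
    pod := requirements{"topology.kubernetes.io/zone": {"test-zone-1": true}}
    fmt.Println(compatible(node, pod, true))  // <nil>: the loose check lets the pod through
    fmt.Println(compatible(node, pod, false)) // error: the strict check rejects it
}

Under this reading, the pod in the first new test below requires a zone that the existing node never recorded, so it schedules only once a Provisioner that can still create such a node is applied.
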
2 changes: 1 addition & 1 deletion pkg/controllers/provisioning/scheduling/scheduler.go
@@ -272,7 +272,7 @@ func (s *Scheduler) calculateExistingMachines(stateNodes []*state.StateNode, dae
         if err := scheduling.Taints(node.Taints()).Tolerates(p); err != nil {
             continue
         }
-        if err := scheduling.NewLabelRequirements(node.Labels()).Compatible(scheduling.NewPodRequirements(p)); err != nil {
+        if err := scheduling.NewLabelRequirements(node.Labels()).StrictlyCompatible(scheduling.NewPodRequirements(p)); err != nil {
             continue
         }
         daemons = append(daemons, p)
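
The same strictness now applies when modeling daemonset overhead for existing machines: a daemon that cannot actually land on a node should not reserve capacity there. Below is a hedged sketch of that filtering idea, with hypothetical names (daemonPod, daemonOverheadMillis) standing in for the scheduler's real types:

// overhead_sketch.go -- illustrative only; not the scheduler's actual types.
package main

import "fmt"

type daemonPod struct {
    requiredLabels map[string]string // node labels the daemon's scheduling rules require
    cpuMillis      int64
}

// daemonOverheadMillis counts a daemon's resources against an existing node
// only when every required label is defined on the node with a matching
// value (the strict check). A loose check would also count daemons whose
// required labels the node simply does not define, inflating the overhead.
func daemonOverheadMillis(nodeLabels map[string]string, daemons []daemonPod) int64 {
    var total int64
daemonLoop:
    for _, d := range daemons {
        for k, v := range d.requiredLabels {
            if nodeLabels[k] != v {
                continue daemonLoop // daemon cannot run here; do not reserve capacity
            }
        }
        total += d.cpuMillis
    }
    return total
}

func main() {
    nodeLabels := map[string]string{"topology.kubernetes.io/zone": "test-zone-2"}
    daemons := []daemonPod{
        {requiredLabels: nil, cpuMillis: 100}, // runs anywhere: counted
        {requiredLabels: map[string]string{"topology.kubernetes.io/zone": "test-zone-1"}, cpuMillis: 100}, // wrong zone: skipped
    }
    fmt.Println(daemonOverheadMillis(nodeLabels, daemons)) // 100
}

The new Daemonsets test below pins this down: a DaemonSet restricted to test-zone-1 no longer subtracts its oversized requests from an incompatible existing node, so a 1-CPU pod still fits on that node.
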
90 changes: 90 additions & 0 deletions pkg/controllers/provisioning/scheduling/suite_test.go
@@ -2391,6 +2391,96 @@ var _ = Describe("Existing Nodes", func() {
         // Expect that the scheduled node is equal to the ready node since it's initialized
         Expect(scheduledNode.Name).To(Equal(nodes[elem].Name))
     })
+    It("should consider a pod incompatible with an existing node but compatible with Provisioner", func() {
+        machine, node := test.MachineAndNode(v1alpha5.Machine{
+            Status: v1alpha5.MachineStatus{
+                Allocatable: v1.ResourceList{
+                    v1.ResourceCPU:    resource.MustParse("10"),
+                    v1.ResourceMemory: resource.MustParse("10Gi"),
+                    v1.ResourcePods:   resource.MustParse("110"),
+                },
+            },
+        })
+        ExpectApplied(ctx, env.Client, machine, node)
+        ExpectMakeMachinesInitialized(ctx, env.Client, machine)
+        ExpectMakeNodesInitialized(ctx, env.Client, node)
+
+        ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(machine))
+        ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
+
+        pod := test.UnschedulablePod(test.PodOptions{
+            NodeRequirements: []v1.NodeSelectorRequirement{
+                {
+                    Key:      v1.LabelTopologyZone,
+                    Operator: v1.NodeSelectorOpIn,
+                    Values:   []string{"test-zone-1"},
+                },
+            },
+        })
+        ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
+        ExpectNotScheduled(ctx, env.Client, pod)
+
+        ExpectApplied(ctx, env.Client, provisioner)
+        ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
+        ExpectScheduled(ctx, env.Client, pod)
+    })
+    Context("Daemonsets", func() {
+        It("should not subtract daemonset overhead that is not strictly compatible with an existing node", func() {
+            machine, node := test.MachineAndNode(v1alpha5.Machine{
+                Status: v1alpha5.MachineStatus{
+                    Allocatable: v1.ResourceList{
+                        v1.ResourceCPU:    resource.MustParse("1"),
+                        v1.ResourceMemory: resource.MustParse("1Gi"),
+                        v1.ResourcePods:   resource.MustParse("110"),
+                    },
+                },
+            })
+            // This DaemonSet is not compatible with the existing Machine/Node
+            ds := test.DaemonSet(
+                test.DaemonSetOptions{PodOptions: test.PodOptions{
+                    ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
+                        v1.ResourceCPU:    resource.MustParse("100"),
+                        v1.ResourceMemory: resource.MustParse("100Gi")},
+                    },
+                    NodeRequirements: []v1.NodeSelectorRequirement{
+                        {
+                            Key:      v1.LabelTopologyZone,
+                            Operator: v1.NodeSelectorOpIn,
+                            Values:   []string{"test-zone-1"},
+                        },
+                    },
+                }},
+            )
+            ExpectApplied(ctx, env.Client, provisioner, machine, node, ds)
+            ExpectMakeMachinesInitialized(ctx, env.Client, machine)
+            ExpectMakeNodesInitialized(ctx, env.Client, node)
+
+            ExpectReconcileSucceeded(ctx, machineStateController, client.ObjectKeyFromObject(machine))
+            ExpectReconcileSucceeded(ctx, nodeStateController, client.ObjectKeyFromObject(node))
+
+            pod := test.UnschedulablePod(test.PodOptions{
+                ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
+                    v1.ResourceCPU:    resource.MustParse("1"),
+                    v1.ResourceMemory: resource.MustParse("1Gi")},
+                },
+            })
+            ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod)
+            scheduledNode := ExpectScheduled(ctx, env.Client, pod)
+            Expect(scheduledNode.Name).To(Equal(node.Name))
+
+            // Add another pod and expect it not to schedule against a Provisioner, since the DS is also modeled against the Provisioner.
+            // In this case, the DS overhead consumes the entire capacity of every "theoretical node", so no new pod can schedule to any new Node.
+            pod2 := test.UnschedulablePod(test.PodOptions{
+                ResourceRequirements: v1.ResourceRequirements{Requests: v1.ResourceList{
+                    v1.ResourceCPU:    resource.MustParse("1"),
+                    v1.ResourceMemory: resource.MustParse("1Gi")},
+                },
+            })
+            ExpectApplied(ctx, env.Client, provisioner)
+            ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod2)
+            ExpectNotScheduled(ctx, env.Client, pod2)
+        })
+    })
 })

 var _ = Describe("No Pre-Binding", func() {
