diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go index 0f19d0d391..9ff2105009 100644 --- a/pkg/descheduler/descheduler.go +++ b/pkg/descheduler/descheduler.go @@ -26,10 +26,10 @@ import ( "sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/client" + "sigs.k8s.io/descheduler/pkg/descheduler/evictions" eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" "sigs.k8s.io/descheduler/pkg/descheduler/strategies" - "sigs.k8s.io/descheduler/pkg/utils" ) func Run(rs *options.DeschedulerServer) error { @@ -77,13 +77,19 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy * return } - nodePodCount := utils.InitializeNodePodCount(nodes) - - strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount) - strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount) - strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount) - strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount) - strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount) + podEvictor := evictions.NewPodEvictor( + rs.Client, + evictionPolicyGroupVersion, + rs.DryRun, + rs.MaxNoOfPodsToEvictPerNode, + nodes, + ) + + strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], nodes, podEvictor) + strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], nodes, podEvictor) + strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], nodes, podEvictor) + strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], nodes, podEvictor) + strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], nodes, podEvictor) // If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration if rs.DeschedulingInterval.Seconds() == 0 { diff --git a/pkg/descheduler/strategies/duplicates.go b/pkg/descheduler/strategies/duplicates.go index c638e65fee..bac654d534 100644 --- a/pkg/descheduler/strategies/duplicates.go +++ b/pkg/descheduler/strategies/duplicates.go @@ -27,7 +27,6 @@ import ( "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" - "sigs.k8s.io/descheduler/pkg/utils" ) //type creator string @@ -36,16 +35,20 @@ type DuplicatePodsMap map[string][]*v1.Pod // RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node. // A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages. 
-func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) { +func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { if !strategy.Enabled { return } - deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods) + deleteDuplicatePods(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor) } // deleteDuplicatePods evicts the pod from node and returns the count of evicted pods. -func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int { - podsEvicted := 0 +func deleteDuplicatePods( + client clientset.Interface, + nodes []*v1.Node, + evictLocalStoragePods bool, + podEvictor *evictions.PodEvictor, +) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v", node.Name) dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods) @@ -54,22 +57,13 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, klog.V(1).Infof("%#v", creator) // i = 0 does not evict the first pod for i := 1; i < len(pods); i++ { - if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict { + if _, err := podEvictor.EvictPod(pods[i], node); err != nil { break } - success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun) - if !success { - klog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err) - } else { - nodepodCount[node]++ - klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err) - } } } } - podsEvicted += nodepodCount[node] } - return podsEvicted } // ListDuplicatePodsOnANode lists duplicate pods on a given node. diff --git a/pkg/descheduler/strategies/duplicates_test.go b/pkg/descheduler/strategies/duplicates_test.go index 987b1a7d3a..7c90672e68 100644 --- a/pkg/descheduler/strategies/duplicates_test.go +++ b/pkg/descheduler/strategies/duplicates_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/test" ) @@ -127,9 +128,6 @@ func TestFindDuplicatePods(t *testing.T) { } for _, testCase := range testCases { - - npe := utils.NodePodEvictedCount{} - npe[node] = 0 fakeClient := &fake.Clientset{} fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { return true, &v1.PodList{Items: testCase.pods}, nil @@ -137,7 +135,16 @@ func TestFindDuplicatePods(t *testing.T) { fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { return true, node, nil }) - podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, testCase.maxPodsToEvict, false) + podEvictor := evictions.NewPodEvictor( + fakeClient, + "v1", + false, + testCase.maxPodsToEvict, + []*v1.Node{node}, + ) + + deleteDuplicatePods(fakeClient, []*v1.Node{node}, false, podEvictor) + podsEvicted := podEvictor.TotalEvicted() if podsEvicted != testCase.expectedEvictedPodCount { t.Errorf("Test error for description: %s. 
Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted) } diff --git a/pkg/descheduler/strategies/lownodeutilization.go b/pkg/descheduler/strategies/lownodeutilization.go index a49253a4ba..2208cb7de8 100644 --- a/pkg/descheduler/strategies/lownodeutilization.go +++ b/pkg/descheduler/strategies/lownodeutilization.go @@ -40,7 +40,7 @@ type NodeUsageMap struct { type NodePodsMap map[*v1.Node][]*v1.Pod -func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) { +func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { if !strategy.Enabled { return } @@ -87,18 +87,14 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods]) klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes)) - totalPodsEvicted := evictPodsFromTargetNodes( - ds.Client, - evictionPolicyGroupVersion, + evictPodsFromTargetNodes( targetNodes, lowNodes, targetThresholds, - ds.DryRun, - ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods, - nodepodCount) - klog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted) + podEvictor) + klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted()) } func validateThresholds(thresholds api.ResourceThresholds) bool { @@ -163,17 +159,11 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr // evicts them based on QoS as fallback option. // TODO: @ravig Break this function into smaller functions. func evictPodsFromTargetNodes( - client clientset.Interface, - evictionPolicyGroupVersion string, - targetNodes, - lowNodes []NodeUsageMap, + targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, - dryRun bool, - maxPodsToEvict int, evictLocalStoragePods bool, - nodepodCount utils.NodePodEvictedCount, -) int { - podsEvicted := 0 + podEvictor *evictions.PodEvictor, +) { SortNodesByUsage(targetNodes) @@ -212,7 +202,6 @@ func evictPodsFromTargetNodes( nodeCapacity = node.node.Status.Allocatable } klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage) - currentPodsEvicted := nodepodCount[node.node] nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods := classifyPods(node.allPods, evictLocalStoragePods) klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bestEffortPods:%v, burstablePods:%v, guaranteedPods:%v", len(node.allPods), len(nonRemovablePods), len(bestEffortPods), len(burstablePods), len(guaranteedPods)) @@ -225,46 +214,36 @@ func evictPodsFromTargetNodes( // sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers. sortPodsBasedOnPriority(evictablePods) - evictPods(evictablePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes) + evictPods(evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) } else { // TODO: Remove this when we support only priority. // Falling back to evicting pods based on priority. 
klog.V(1).Infof("Evicting pods based on QoS") klog.V(1).Infof("There are %v non-evictable pods on the node", len(nonRemovablePods)) // evict best effort pods - evictPods(bestEffortPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes) + evictPods(bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) // evict burstable pods - evictPods(burstablePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes) + evictPods(burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) // evict guaranteed pods - evictPods(guaranteedPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict, taintsOfLowNodes) + evictPods(guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) } - nodepodCount[node.node] = currentPodsEvicted - podsEvicted = podsEvicted + nodepodCount[node.node] - klog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage) + klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage) } - return podsEvicted } func evictPods(inputPods []*v1.Pod, - client clientset.Interface, - evictionPolicyGroupVersion string, targetThresholds api.ResourceThresholds, nodeCapacity v1.ResourceList, nodeUsage api.ResourceThresholds, totalPods *float64, totalCPU *float64, totalMem *float64, - podsEvicted *int, - dryRun bool, - maxPodsToEvict int, - taintsOfLowNodes map[string][]v1.Taint) { + taintsOfLowNodes map[string][]v1.Taint, + podEvictor *evictions.PodEvictor, + node *v1.Node) { if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCPU > 0 || *totalMem > 0) { onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value())) for _, pod := range inputPods { - if maxPodsToEvict > 0 && *podsEvicted+1 > maxPodsToEvict { - break - } - if !utils.PodToleratesTaints(pod, taintsOfLowNodes) { klog.V(3).Infof("Skipping eviction for Pod: %#v, doesn't tolerate node taint", pod.Name) continue @@ -273,13 +252,14 @@ func evictPods(inputPods []*v1.Pod, cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU) mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory) - success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun) - if !success { - klog.Warningf("Error when evicting pod: %#v (%#v)", pod.Name, err) - } else { - klog.V(3).Infof("Evicted pod: %#v (%#v)", pod.Name, err) + success, err := podEvictor.EvictPod(pod, node) + if err != nil { + break + } + + if success { + klog.V(3).Infof("Evicted pod: %#v", pod.Name) // update remaining pods - *podsEvicted++ nodeUsage[v1.ResourcePods] -= onePodPercentage *totalPods-- diff --git a/pkg/descheduler/strategies/lownodeutilization_test.go b/pkg/descheduler/strategies/lownodeutilization_test.go index 60e819acf3..40f6cb4be1 100644 --- a/pkg/descheduler/strategies/lownodeutilization_test.go +++ b/pkg/descheduler/strategies/lownodeutilization_test.go @@ -36,6 +36,7 @@ import ( 
"sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/apis/componentconfig" + "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/test" ) @@ -341,10 +342,8 @@ func TestLowNodeUtilization(t *testing.T) { } var nodes []*v1.Node - npe := utils.NodePodEvictedCount{} for _, node := range test.nodes { nodes = append(nodes, node) - npe[node] = 0 } npm := createNodePodsMap(fakeClient, nodes) @@ -352,7 +351,16 @@ func TestLowNodeUtilization(t *testing.T) { if len(lowNodes) != 1 { t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.") } - podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, test.targetThresholds, false, test.expectedPodsEvicted, false, npe) + podEvictor := evictions.NewPodEvictor( + fakeClient, + "v1", + false, + test.expectedPodsEvicted, + nodes, + ) + + evictPodsFromTargetNodes(targetNodes, lowNodes, test.targetThresholds, false, podEvictor) + podsEvicted := podEvictor.TotalEvicted() if test.expectedPodsEvicted != podsEvicted { t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted) } @@ -622,8 +630,15 @@ func TestWithTaints(t *testing.T) { }, } - nodePodCount := utils.InitializeNodePodCount(item.nodes) - LowNodeUtilization(ds, strategy, "policy/v1", item.nodes, nodePodCount) + podEvictor := evictions.NewPodEvictor( + &fake.Clientset{Fake: *fakePtr}, + "policy/v1", + ds.DryRun, + item.evictionsExpected, + item.nodes, + ) + + LowNodeUtilization(ds, strategy, item.nodes, podEvictor) if item.evictionsExpected != evictionCounter { t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter) diff --git a/pkg/descheduler/strategies/node_affinity.go b/pkg/descheduler/strategies/node_affinity.go index 96c775eddb..246987dbb2 100644 --- a/pkg/descheduler/strategies/node_affinity.go +++ b/pkg/descheduler/strategies/node_affinity.go @@ -19,7 +19,6 @@ package strategies import ( "k8s.io/api/core/v1" "k8s.io/klog" - "sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" @@ -28,16 +27,15 @@ import ( podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" ) -func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) { - removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods) -} - -func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int { - evictedPodCount := 0 +func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { if !strategy.Enabled { - return evictedPodCount + return } + removePodsViolatingNodeAffinityCount(ds, strategy, nodes, ds.EvictLocalStoragePods, podEvictor) +} + +func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { for _, nodeAffinity := range strategy.Params.NodeAffinityType { klog.V(2).Infof("Executing for 
nodeAffinityType: %v", nodeAffinity) @@ -52,25 +50,19 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg } for _, pod := range pods { - if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict { - break - } if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) { klog.V(1).Infof("Evicting pod: %v", pod.Name) - evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, ds.DryRun) - nodepodCount[node]++ + if _, err := podEvictor.EvictPod(pod, node); err != nil { + break + } } } } - evictedPodCount += nodepodCount[node] } default: klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity) - return evictedPodCount } } - klog.V(1).Infof("Evicted %v pods", evictedPodCount) - return evictedPodCount + klog.V(1).Infof("Evicted %v pods", podEvictor.TotalEvicted()) } diff --git a/pkg/descheduler/strategies/node_affinity_test.go b/pkg/descheduler/strategies/node_affinity_test.go index 8fab3de500..796d32de62 100644 --- a/pkg/descheduler/strategies/node_affinity_test.go +++ b/pkg/descheduler/strategies/node_affinity_test.go @@ -25,7 +25,8 @@ import ( core "k8s.io/client-go/testing" "sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" - "sigs.k8s.io/descheduler/pkg/utils" + "sigs.k8s.io/descheduler/pkg/apis/componentconfig" + "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/test" ) @@ -93,7 +94,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { pods []v1.Pod strategy api.DeschedulerStrategy expectedEvictedPodCount int - npe utils.NodePodEvictedCount maxPodsToEvict int }{ { @@ -109,7 +109,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { expectedEvictedPodCount: 0, pods: addPodsToNode(nodeWithoutLabels), nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels}, - npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0}, maxPodsToEvict: 0, }, { @@ -125,7 +124,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { expectedEvictedPodCount: 0, pods: addPodsToNode(nodeWithoutLabels), nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels}, - npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0}, maxPodsToEvict: 0, }, { @@ -134,7 +132,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { expectedEvictedPodCount: 0, pods: addPodsToNode(nodeWithLabels), nodes: []*v1.Node{nodeWithLabels}, - npe: utils.NodePodEvictedCount{nodeWithLabels: 0}, maxPodsToEvict: 0, }, { @@ -143,7 +140,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy, pods: addPodsToNode(nodeWithoutLabels), nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels}, - npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0}, maxPodsToEvict: 0, }, { @@ -152,7 +148,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy, pods: addPodsToNode(nodeWithoutLabels), nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels}, - npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0}, maxPodsToEvict: 1, }, { @@ -161,7 +156,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy, pods: addPodsToNode(nodeWithoutLabels), nodes: []*v1.Node{nodeWithoutLabels, 
unschedulableNodeWithLabels}, - npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0}, maxPodsToEvict: 0, }, } @@ -175,9 +169,21 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { ds := options.DeschedulerServer{ Client: fakeClient, + DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{ + EvictLocalStoragePods: false, + }, } - actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict, false) + podEvictor := evictions.NewPodEvictor( + fakeClient, + "v1", + ds.DryRun, + tc.maxPodsToEvict, + tc.nodes, + ) + + RemovePodsViolatingNodeAffinity(&ds, tc.strategy, tc.nodes, podEvictor) + actualEvictedPodCount := podEvictor.TotalEvicted() if actualEvictedPodCount != tc.expectedEvictedPodCount { t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount) } diff --git a/pkg/descheduler/strategies/node_taint.go b/pkg/descheduler/strategies/node_taint.go index 3fa63518bf..84865b094a 100644 --- a/pkg/descheduler/strategies/node_taint.go +++ b/pkg/descheduler/strategies/node_taint.go @@ -29,44 +29,34 @@ import ( ) // RemovePodsViolatingNodeTaints with elimination strategy -func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) { +func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { if !strategy.Enabled { return } - deletePodsViolatingNodeTaints(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods) + deletePodsViolatingNodeTaints(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor) } // deletePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes -func deletePodsViolatingNodeTaints(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int { - podsEvicted := 0 +func deletePodsViolatingNodeTaints(client clientset.Interface, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) if err != nil { //no pods evicted as error encountered retrieving evictable Pods - return 0 + return } totalPods := len(pods) for i := 0; i < totalPods; i++ { - if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict { - break - } if !utils.TolerationsTolerateTaintsWithFilter( pods[i].Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule }, ) { klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name) - success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun) - if !success { - klog.Errorf("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err) - } else { - nodePodCount[node]++ - klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err) + if _, err := podEvictor.EvictPod(pods[i], node); err != nil { + break } } } - podsEvicted += nodePodCount[node] } - return podsEvicted } diff --git a/pkg/descheduler/strategies/node_taint_test.go 
b/pkg/descheduler/strategies/node_taint_test.go index b76313ed2f..b49ac8af0e 100644 --- a/pkg/descheduler/strategies/node_taint_test.go +++ b/pkg/descheduler/strategies/node_taint_test.go @@ -9,6 +9,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/test" ) @@ -99,7 +100,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { nodes []*v1.Node pods []v1.Pod evictLocalStoragePods bool - npe utils.NodePodEvictedCount maxPodsToEvict int expectedEvictedPodCount int }{ @@ -109,7 +109,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { pods: []v1.Pod{*p1, *p2, *p3}, nodes: []*v1.Node{node1}, evictLocalStoragePods: false, - npe: utils.NodePodEvictedCount{node1: 0}, maxPodsToEvict: 0, expectedEvictedPodCount: 1, //p2 gets evicted }, @@ -118,7 +117,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { pods: []v1.Pod{*p1, *p3, *p4}, nodes: []*v1.Node{node1}, evictLocalStoragePods: false, - npe: utils.NodePodEvictedCount{node1: 0}, maxPodsToEvict: 0, expectedEvictedPodCount: 1, //p4 gets evicted }, @@ -127,7 +125,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { pods: []v1.Pod{*p1, *p5, *p6}, nodes: []*v1.Node{node1}, evictLocalStoragePods: false, - npe: utils.NodePodEvictedCount{node1: 0}, maxPodsToEvict: 1, expectedEvictedPodCount: 1, //p5 or p6 gets evicted }, @@ -136,7 +133,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { pods: []v1.Pod{*p7, *p8, *p9, *p10}, nodes: []*v1.Node{node2}, evictLocalStoragePods: false, - npe: utils.NodePodEvictedCount{node2: 0}, maxPodsToEvict: 0, expectedEvictedPodCount: 0, }, @@ -145,7 +141,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { pods: []v1.Pod{*p7, *p8, *p9, *p10}, nodes: []*v1.Node{node2}, evictLocalStoragePods: true, - npe: utils.NodePodEvictedCount{node2: 0}, maxPodsToEvict: 0, expectedEvictedPodCount: 1, }, @@ -154,7 +149,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { pods: []v1.Pod{*p7, *p8, *p10, *p11}, nodes: []*v1.Node{node2}, evictLocalStoragePods: false, - npe: utils.NodePodEvictedCount{node2: 0}, maxPodsToEvict: 0, expectedEvictedPodCount: 1, }, @@ -168,7 +162,16 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { return true, &v1.PodList{Items: tc.pods}, nil }) - actualEvictedPodCount := deletePodsViolatingNodeTaints(fakeClient, "v1", tc.nodes, false, tc.npe, tc.maxPodsToEvict, tc.evictLocalStoragePods) + podEvictor := evictions.NewPodEvictor( + fakeClient, + "v1", + false, + tc.maxPodsToEvict, + tc.nodes, + ) + + deletePodsViolatingNodeTaints(fakeClient, tc.nodes, tc.evictLocalStoragePods, podEvictor) + actualEvictedPodCount := podEvictor.TotalEvicted() if actualEvictedPodCount != tc.expectedEvictedPodCount { t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount) } diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go index 63ff7e70c7..f46904af8f 100644 --- a/pkg/descheduler/strategies/pod_antiaffinity.go +++ b/pkg/descheduler/strategies/pod_antiaffinity.go @@ -30,34 +30,31 @@ import ( ) // RemovePodsViolatingInterPodAntiAffinity with elimination strategy -func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) 
{ +func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { if !strategy.Enabled { return } - removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods) + removePodsWithAffinityRules(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor) } // removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules. -func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int { - podsEvicted := 0 +func removePodsWithAffinityRules(client clientset.Interface, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) if err != nil { - return 0 + return } totalPods := len(pods) for i := 0; i < totalPods; i++ { - if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict { - break - } if checkPodsWithAntiAffinityExist(pods[i], pods) { - success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun) - if !success { - klog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err) - } else { - nodePodCount[node]++ - klog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err) + success, err := podEvictor.EvictPod(pods[i], node) + if err != nil { + break + } + + if success { + klog.V(1).Infof("Evicted pod: %#v\n because of existing anti-affinity", pods[i].Name) // Since the current pod is evicted all other pods which have anti-affinity with this // pod need not be evicted. // Update pods. @@ -67,9 +64,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion } } } - podsEvicted += nodePodCount[node] } - return podsEvicted } // checkPodsWithAntiAffinityExist checks if there are other pods on the node that the current pod cannot tolerate. 
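Note: every strategy above now delegates to the new `evictions.PodEvictor`, whose implementation is not part of this patch. The following is a minimal sketch of what such a type could look like, reconstructed only from the constructor call `NewPodEvictor(client, policyGroupVersion, dryRun, maxPodsToEvict, nodes)` and the methods the strategies use (`EvictPod`, `TotalEvicted`, `NodeEvicted`). The field names, the error message, and the `evictPodViaAPI` placeholder are assumptions for illustration, not the real implementation in pkg/descheduler/evictions.

```go
package evictions

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// PodEvictor centralizes what every strategy previously handled on its own:
// the dry-run flag, the eviction policy group version, and the per-node
// eviction limit that used to live in utils.NodePodEvictedCount.
type PodEvictor struct {
	client             clientset.Interface
	policyGroupVersion string
	dryRun             bool
	maxPodsToEvict     int
	nodePodCount       map[*v1.Node]int
}

// NewPodEvictor mirrors the constructor invoked in descheduler.go and the tests.
func NewPodEvictor(client clientset.Interface, policyGroupVersion string, dryRun bool, maxPodsToEvict int, nodes []*v1.Node) *PodEvictor {
	nodePodCount := make(map[*v1.Node]int, len(nodes))
	for _, node := range nodes {
		nodePodCount[node] = 0
	}
	return &PodEvictor{
		client:             client,
		policyGroupVersion: policyGroupVersion,
		dryRun:             dryRun,
		maxPodsToEvict:     maxPodsToEvict,
		nodePodCount:       nodePodCount,
	}
}

// EvictPod returns a non-nil error only when the per-node limit has been
// reached, which is why every strategy in this patch breaks out of its loop
// on err != nil. A recoverable eviction failure yields (false, nil) so the
// strategy simply moves on to the next pod.
func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (bool, error) {
	if pe.maxPodsToEvict > 0 && pe.nodePodCount[node]+1 > pe.maxPodsToEvict {
		return false, fmt.Errorf("maximum number of evicted pods per node %q reached", node.Name)
	}
	// evictPodViaAPI is a placeholder for the actual eviction request (the
	// call the old strategy code made directly via evictions.EvictPod),
	// honoring pe.dryRun and pe.policyGroupVersion.
	success, err := evictPodViaAPI(pe.client, pod, pe.policyGroupVersion, pe.dryRun)
	if err != nil || !success {
		return false, nil
	}
	pe.nodePodCount[node]++
	return true, nil
}

// TotalEvicted sums evictions across all nodes, replacing the counters the
// strategy helpers used to return.
func (pe *PodEvictor) TotalEvicted() int {
	total := 0
	for _, count := range pe.nodePodCount {
		total += count
	}
	return total
}

// NodeEvicted reports how many pods have been evicted from a single node.
func (pe *PodEvictor) NodeEvicted(node *v1.Node) int {
	return pe.nodePodCount[node]
}

// evictPodViaAPI stands in for the real eviction API call; it is stubbed here
// only so the sketch stays self-contained.
func evictPodViaAPI(_ clientset.Interface, _ *v1.Pod, _ string, _ bool) (bool, error) {
	return true, nil
}
```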
diff --git a/pkg/descheduler/strategies/pod_antiaffinity_test.go b/pkg/descheduler/strategies/pod_antiaffinity_test.go index d651e2f0e3..d6b8670985 100644 --- a/pkg/descheduler/strategies/pod_antiaffinity_test.go +++ b/pkg/descheduler/strategies/pod_antiaffinity_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" - "sigs.k8s.io/descheduler/pkg/utils" + "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/test" ) @@ -45,26 +45,49 @@ func TestPodAntiAffinity(t *testing.T) { setPodAntiAffinity(p3) setPodAntiAffinity(p4) - // create fake client - fakeClient := &fake.Clientset{} - fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { - return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil - }) - fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { - return true, node, nil - }) - npe := utils.NodePodEvictedCount{} - npe[node] = 0 - expectedEvictedPodCount := 3 - podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0, false) - if podsEvicted != expectedEvictedPodCount { - t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount) + tests := []struct { + description string + maxPodsToEvict int + pods []v1.Pod + expectedEvictedPodCount int + }{ + { + description: "Maximum pods to evict - 0", + maxPodsToEvict: 0, + pods: []v1.Pod{*p1, *p2, *p3, *p4}, + expectedEvictedPodCount: 3, + }, + { + description: "Maximum pods to evict - 3", + maxPodsToEvict: 3, + pods: []v1.Pod{*p1, *p2, *p3, *p4}, + expectedEvictedPodCount: 3, + }, } - npe[node] = 0 - expectedEvictedPodCount = 1 - podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1, false) - if podsEvicted != expectedEvictedPodCount { - t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount) + + for _, test := range tests { + // create fake client + fakeClient := &fake.Clientset{} + fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { + return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil + }) + fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) { + return true, node, nil + }) + + podEvictor := evictions.NewPodEvictor( + fakeClient, + "v1", + false, + test.maxPodsToEvict, + []*v1.Node{node}, + ) + + removePodsWithAffinityRules(fakeClient, []*v1.Node{node}, false, podEvictor) + podsEvicted := podEvictor.TotalEvicted() + if podsEvicted != test.expectedEvictedPodCount { + t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount) + } } } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go deleted file mode 100644 index 29ecce60c8..0000000000 --- a/pkg/utils/utils.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import v1 "k8s.io/api/core/v1" - -// This file contains the datastructures, types & functions needed by all the strategies so that we don't have -// to compute them again in each strategy. - -// NodePodEvictedCount keeps count of pods evicted on node. This is used in conjunction with strategies to -type NodePodEvictedCount map[*v1.Node]int - -// InitializeNodePodCount initializes the nodePodCount. -func InitializeNodePodCount(nodeList []*v1.Node) NodePodEvictedCount { - var nodePodCount = make(NodePodEvictedCount) - for _, node := range nodeList { - // Initialize podsEvicted till now with 0. - nodePodCount[node] = 0 - } - return nodePodCount -} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 4354c71329..3325ad1f8b 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -33,11 +33,11 @@ import ( deschedulerapi "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler" "sigs.k8s.io/descheduler/pkg/descheduler/client" + "sigs.k8s.io/descheduler/pkg/descheduler/evictions" eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" "sigs.k8s.io/descheduler/pkg/descheduler/strategies" - "sigs.k8s.io/descheduler/pkg/utils" ) func MakePodSpec() v1.PodSpec { @@ -114,12 +114,28 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInfor if err != nil { klog.Fatalf("%v", err) } - nodeUtilizationThresholds := deschedulerapi.NodeResourceUtilizationThresholds{Thresholds: thresholds, TargetThresholds: targetThresholds} - nodeUtilizationStrategyParams := deschedulerapi.StrategyParameters{NodeResourceUtilizationThresholds: nodeUtilizationThresholds} - lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{Enabled: true, Params: nodeUtilizationStrategyParams} + + lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{ + Enabled: true, + Params: deschedulerapi.StrategyParameters{ + NodeResourceUtilizationThresholds: deschedulerapi.NodeResourceUtilizationThresholds{ + Thresholds: thresholds, + TargetThresholds: targetThresholds, + }, + }, + } + ds := &options.DeschedulerServer{Client: clientset} - nodePodCount := utils.InitializeNodePodCount(nodes) - strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, evictionPolicyGroupVersion, nodes, nodePodCount) + + podEvictor := evictions.NewPodEvictor( + ds.Client, + evictionPolicyGroupVersion, + ds.DryRun, + ds.MaxNoOfPodsToEvictPerNode, + nodes, + ) + + strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, nodes, podEvictor) time.Sleep(10 * time.Second) }
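Taken together, the refactor gives every strategy the same shape. As a usage note, here is a hedged sketch of what a strategy looks like once it receives the shared `*evictions.PodEvictor`: the strategy name and the `shouldEvict` predicate are hypothetical, but the signature, the `podutil.ListEvictablePodsOnNode` call, and the break-on-error handling follow the patterns visible in this diff.

```go
package strategies

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog"

	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// RemovePodsMatchingSomeCondition is a hypothetical strategy used only to
// illustrate the shape shared by the strategies touched in this patch: no
// policyGroupVersion, dryRun, maxPodsToEvict, or NodePodEvictedCount map is
// threaded through anymore - only the client and the shared PodEvictor.
func RemovePodsMatchingSomeCondition(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
	if !strategy.Enabled {
		return
	}
	for _, node := range nodes {
		klog.V(1).Infof("Processing node: %#v", node.Name)
		pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, ds.EvictLocalStoragePods)
		if err != nil {
			return
		}
		for _, pod := range pods {
			// shouldEvict stands in for the strategy-specific predicate
			// (duplicates, taint violations, affinity violations, ...).
			if !shouldEvict(pod) {
				continue
			}
			// A non-nil error means the per-node eviction limit was hit, so
			// the strategy stops looking at this node - the same
			// break-on-error pattern every strategy in this diff follows.
			if _, err := podEvictor.EvictPod(pod, node); err != nil {
				break
			}
		}
	}
	klog.V(1).Infof("Evicted %v pods", podEvictor.TotalEvicted())
}

// shouldEvict is a placeholder predicate; a real strategy inspects the pod
// (and possibly the other pods on the node) here.
func shouldEvict(pod *v1.Pod) bool {
	return false
}
```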