From cbcefb5d2f03536a7b758ce1b1ae737aef13dbf3 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Mon, 20 Apr 2020 22:48:16 +0200 Subject: [PATCH] Remove options.DeschedulerServer from all strategies --- pkg/descheduler/descheduler.go | 24 +++++++++++---- pkg/descheduler/strategies/duplicates.go | 30 ++++++------------- pkg/descheduler/strategies/duplicates_test.go | 3 +- .../strategies/lownodeutilization.go | 9 +++--- .../strategies/lownodeutilization_test.go | 13 ++------ pkg/descheduler/strategies/node_affinity.go | 14 ++------- .../strategies/node_affinity_test.go | 28 ++--------------- pkg/descheduler/strategies/node_taint.go | 13 ++------ pkg/descheduler/strategies/node_taint_test.go | 3 +- .../strategies/pod_antiaffinity.go | 13 ++------ .../strategies/pod_antiaffinity_test.go | 3 +- pkg/descheduler/strategies/toomanyrestarts.go | 12 +++----- .../strategies/toomanyrestarts_test.go | 26 +++++----------- test/e2e/e2e_test.go | 10 +++---- 14 files changed, 64 insertions(+), 137 deletions(-) diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go index 1f77ceeac8..a755e92ba4 100644 --- a/pkg/descheduler/descheduler.go +++ b/pkg/descheduler/descheduler.go @@ -19,6 +19,8 @@ package descheduler import ( "fmt" + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" "k8s.io/apimachinery/pkg/util/wait" @@ -56,6 +58,8 @@ func Run(rs *options.DeschedulerServer) error { return RunDeschedulerStrategies(rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel) } +type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) + func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error { sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0) nodeInformer := sharedInformerFactory.Core().V1().Nodes() @@ -63,6 +67,15 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy * sharedInformerFactory.Start(stopChannel) sharedInformerFactory.WaitForCacheSync(stopChannel) + strategyFuncs := map[string]strategyFunction{ + "RemoveDuplicates": strategies.RemoveDuplicatePods, + "LowNodeUtilization": strategies.LowNodeUtilization, + "RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity, + "RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity, + "RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints, + "RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts, + } + wait.Until(func() { nodes, err := nodeutil.ReadyNodes(rs.Client, nodeInformer, rs.NodeSelector, stopChannel) if err != nil { @@ -85,12 +98,11 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy * nodes, ) - strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], nodes, podEvictor) - strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], nodes, podEvictor) - strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], nodes, podEvictor) - strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], nodes, podEvictor) - strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], nodes, 
podEvictor)
-		strategies.RemovePodsHavingTooManyRestarts(rs, deschedulerPolicy.Strategies["RemovePodsHavingTooManyRestarts"], nodes, podEvictor)
+		for name, f := range strategyFuncs {
+			if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
+				f(rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
+			}
+		}
 
 		// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
 		if rs.DeschedulingInterval.Seconds() == 0 {
diff --git a/pkg/descheduler/strategies/duplicates.go b/pkg/descheduler/strategies/duplicates.go
index bac654d534..885724dc11 100644
--- a/pkg/descheduler/strategies/duplicates.go
+++ b/pkg/descheduler/strategies/duplicates.go
@@ -23,35 +23,24 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog"
 
-	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 )
 
-//type creator string
-type DuplicatePodsMap map[string][]*v1.Pod
-
 // RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
 // A pod is said to be a duplicate of another if both of them are from the same creator and kind and are within the same
 // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-	if !strategy.Enabled {
-		return
-	}
-	deleteDuplicatePods(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor)
-}
-
-// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
-func deleteDuplicatePods(
+func RemoveDuplicatePods(
 	client clientset.Interface,
+	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
 	evictLocalStoragePods bool,
 	podEvictor *evictions.PodEvictor,
 ) {
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %#v", node.Name)
-		dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
+		dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods)
 		for creator, pods := range dpm {
 			if len(pods) > 1 {
 				klog.V(1).Infof("%#v", creator)
@@ -66,18 +55,17 @@ func deleteDuplicatePods(
 	}
 }
 
-// ListDuplicatePodsOnANode lists duplicate pods on a given node.
-func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
+//type creator string
+type duplicatePodsMap map[string][]*v1.Pod
+
+// listDuplicatePodsOnANode lists duplicate pods on a given node.
+func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
 	pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
 	if err != nil {
 		return nil
 	}
-	return FindDuplicatePods(pods)
-}
 
-// FindDuplicatePods takes a list of pods and returns a duplicatePodsMap.
-func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
-	dpm := DuplicatePodsMap{}
+	dpm := duplicatePodsMap{}
 	// Ignoring the error here since ListEvictablePodsOnNode, called above, already checks for it.
for _, pod := range pods { ownerRefList := podutil.OwnerRef(pod) diff --git a/pkg/descheduler/strategies/duplicates_test.go b/pkg/descheduler/strategies/duplicates_test.go index 7c90672e68..d2ccd92ee7 100644 --- a/pkg/descheduler/strategies/duplicates_test.go +++ b/pkg/descheduler/strategies/duplicates_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/test" @@ -143,7 +144,7 @@ func TestFindDuplicatePods(t *testing.T) { []*v1.Node{node}, ) - deleteDuplicatePods(fakeClient, []*v1.Node{node}, false, podEvictor) + RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor) podsEvicted := podEvictor.TotalEvicted() if podsEvicted != testCase.expectedEvictedPodCount { t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted) diff --git a/pkg/descheduler/strategies/lownodeutilization.go b/pkg/descheduler/strategies/lownodeutilization.go index 1673d57b51..ce3b4dd063 100644 --- a/pkg/descheduler/strategies/lownodeutilization.go +++ b/pkg/descheduler/strategies/lownodeutilization.go @@ -24,7 +24,6 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" - "sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" @@ -40,7 +39,7 @@ type NodeUsageMap struct { type NodePodsMap map[*v1.Node][]*v1.Pod -func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { +func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { if !strategy.Enabled { return } @@ -60,8 +59,8 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS return } - npm := createNodePodsMap(ds.Client, nodes) - lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods) + npm := createNodePodsMap(client, nodes) + lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods) klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v", thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods]) @@ -95,7 +94,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS targetNodes, lowNodes, targetThresholds, - ds.EvictLocalStoragePods, + evictLocalStoragePods, podEvictor) klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted()) diff --git a/pkg/descheduler/strategies/lownodeutilization_test.go b/pkg/descheduler/strategies/lownodeutilization_test.go index 7625f86ffd..a6aa22ac3e 100644 --- a/pkg/descheduler/strategies/lownodeutilization_test.go +++ b/pkg/descheduler/strategies/lownodeutilization_test.go @@ -33,9 +33,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" - "sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" - "sigs.k8s.io/descheduler/pkg/apis/componentconfig" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/utils" 
"sigs.k8s.io/descheduler/test" @@ -623,22 +621,15 @@ func TestWithTaints(t *testing.T) { return true, nil, nil }) - ds := &options.DeschedulerServer{ - Client: &fake.Clientset{Fake: *fakePtr}, - DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{ - EvictLocalStoragePods: false, - }, - } - podEvictor := evictions.NewPodEvictor( &fake.Clientset{Fake: *fakePtr}, "policy/v1", - ds.DryRun, + false, item.evictionsExpected, item.nodes, ) - LowNodeUtilization(ds, strategy, item.nodes, podEvictor) + LowNodeUtilization(&fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor) if item.evictionsExpected != evictionCounter { t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter) diff --git a/pkg/descheduler/strategies/node_affinity.go b/pkg/descheduler/strategies/node_affinity.go index 246987dbb2..b8783e8368 100644 --- a/pkg/descheduler/strategies/node_affinity.go +++ b/pkg/descheduler/strategies/node_affinity.go @@ -18,24 +18,16 @@ package strategies import ( "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" - "sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" ) -func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { - if !strategy.Enabled { - return - } - - removePodsViolatingNodeAffinityCount(ds, strategy, nodes, ds.EvictLocalStoragePods, podEvictor) -} - -func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { +func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { for _, nodeAffinity := range strategy.Params.NodeAffinityType { klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity) @@ -44,7 +36,7 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods) + pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) if err != nil { klog.Errorf("failed to get pods from %v: %v", node.Name, err) } diff --git a/pkg/descheduler/strategies/node_affinity_test.go b/pkg/descheduler/strategies/node_affinity_test.go index 796d32de62..57cabdec85 100644 --- a/pkg/descheduler/strategies/node_affinity_test.go +++ b/pkg/descheduler/strategies/node_affinity_test.go @@ -23,9 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" - "sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" - "sigs.k8s.io/descheduler/pkg/apis/componentconfig" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/test" ) @@ -96,21 +94,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { expectedEvictedPodCount int maxPodsToEvict int }{ - { - description: "Strategy disabled, should not evict any pods", - strategy: api.DeschedulerStrategy{ - Enabled: false, - Params: api.StrategyParameters{ - NodeAffinityType: []string{ - 
"requiredDuringSchedulingIgnoredDuringExecution", - }, - }, - }, - expectedEvictedPodCount: 0, - pods: addPodsToNode(nodeWithoutLabels), - nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels}, - maxPodsToEvict: 0, - }, { description: "Invalid strategy type, should not evict any pods", strategy: api.DeschedulerStrategy{ @@ -167,22 +150,15 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { return true, &v1.PodList{Items: tc.pods}, nil }) - ds := options.DeschedulerServer{ - Client: fakeClient, - DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{ - EvictLocalStoragePods: false, - }, - } - podEvictor := evictions.NewPodEvictor( fakeClient, "v1", - ds.DryRun, + false, tc.maxPodsToEvict, tc.nodes, ) - RemovePodsViolatingNodeAffinity(&ds, tc.strategy, tc.nodes, podEvictor) + RemovePodsViolatingNodeAffinity(fakeClient, tc.strategy, tc.nodes, false, podEvictor) actualEvictedPodCount := podEvictor.TotalEvicted() if actualEvictedPodCount != tc.expectedEvictedPodCount { t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount) diff --git a/pkg/descheduler/strategies/node_taint.go b/pkg/descheduler/strategies/node_taint.go index 84865b094a..955760e280 100644 --- a/pkg/descheduler/strategies/node_taint.go +++ b/pkg/descheduler/strategies/node_taint.go @@ -17,7 +17,6 @@ limitations under the License. package strategies import ( - "sigs.k8s.io/descheduler/cmd/descheduler/app/options" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" @@ -28,16 +27,8 @@ import ( "k8s.io/klog" ) -// RemovePodsViolatingNodeTaints with elimination strategy -func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) { - if !strategy.Enabled { - return - } - deletePodsViolatingNodeTaints(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor) -} - -// deletePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes -func deletePodsViolatingNodeTaints(client clientset.Interface, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { +// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes +func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) diff --git a/pkg/descheduler/strategies/node_taint_test.go b/pkg/descheduler/strategies/node_taint_test.go index b49ac8af0e..d6e1f5e021 100644 --- a/pkg/descheduler/strategies/node_taint_test.go +++ b/pkg/descheduler/strategies/node_taint_test.go @@ -9,6 +9,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/test" @@ -170,7 +171,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { tc.nodes, ) - deletePodsViolatingNodeTaints(fakeClient, tc.nodes, tc.evictLocalStoragePods, podEvictor) + RemovePodsViolatingNodeTaints(fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor) 
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go
index f46904af8f..cb787d940a 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity.go
@@ -17,7 +17,6 @@ limitations under the License.
 package strategies
 
 import (
-	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -29,16 +28,8 @@ import (
 	"k8s.io/klog"
 )
 
-// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
-func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-	if !strategy.Enabled {
-		return
-	}
-	removePodsWithAffinityRules(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor)
-}
-
-// removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
-func removePodsWithAffinityRules(client clientset.Interface, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+// RemovePodsViolatingInterPodAntiAffinity evicts pods on a node that violate inter-pod anti-affinity rules.
+func RemovePodsViolatingInterPodAntiAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %#v\n", node.Name)
 		pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
diff --git a/pkg/descheduler/strategies/pod_antiaffinity_test.go b/pkg/descheduler/strategies/pod_antiaffinity_test.go
index d6b8670985..f3c9c21003 100644
--- a/pkg/descheduler/strategies/pod_antiaffinity_test.go
+++ b/pkg/descheduler/strategies/pod_antiaffinity_test.go
@@ -24,6 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
+	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -83,7 +84,7 @@ func TestPodAntiAffinity(t *testing.T) {
 		[]*v1.Node{node},
 	)
 
-	removePodsWithAffinityRules(fakeClient, []*v1.Node{node}, false, podEvictor)
+	RemovePodsViolatingInterPodAntiAffinity(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
 	podsEvicted := podEvictor.TotalEvicted()
 	if podsEvicted != test.expectedEvictedPodCount {
 		t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
diff --git a/pkg/descheduler/strategies/toomanyrestarts.go b/pkg/descheduler/strategies/toomanyrestarts.go
index ab919c237a..5d0487230c 100644
--- a/pkg/descheduler/strategies/toomanyrestarts.go
+++ b/pkg/descheduler/strategies/toomanyrestarts.go
@@ -18,9 +18,9 @@ package strategies
 
 import (
 	"k8s.io/api/core/v1"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog"
 
-	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -29,17 +29,13 @@ import (
 // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on a node.
 // Many cases can lead to this issue: a failed volume mount, or an application error due to differing node settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-	if !strategy.Enabled || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
+func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+	if strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
 		return
 	}
-	removePodsHavingTooManyRestarts(ds, strategy, nodes, podEvictor, ds.EvictLocalStoragePods)
-}
-
-func removePodsHavingTooManyRestarts(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictLocalStoragePods bool) {
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %s", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
+		pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
 		if err != nil {
 			klog.Errorf("Error when list pods at node %s", node.Name)
 			continue
diff --git a/pkg/descheduler/strategies/toomanyrestarts_test.go b/pkg/descheduler/strategies/toomanyrestarts_test.go
index 9948d9df73..88dbb90cc3 100644
--- a/pkg/descheduler/strategies/toomanyrestarts_test.go
+++ b/pkg/descheduler/strategies/toomanyrestarts_test.go
@@ -20,14 +20,13 @@ import (
 	"testing"
 
 	"fmt"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
-	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
-	"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -92,15 +91,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 		}
 	}
 
-	node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
-	pods := initPods(node)
-
-	fakeClient := &fake.Clientset{}
-
-	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-		return true, &v1.PodList{Items: pods}, nil
-	})
-
 	tests := []struct {
 		description             string
 		pods                    []v1.Pod
@@ -165,13 +155,13 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 	}
 
 	for _, tc := range tests {
+		node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
+		pods := initPods(node)
 
-		ds := options.DeschedulerServer{
-			DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{
-				MaxNoOfPodsToEvictPerNode: tc.maxPodsToEvict,
-			},
-			Client: fakeClient,
-		}
+		fakeClient := &fake.Clientset{}
+		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
+			return true, &v1.PodList{Items: pods}, nil
+		})
 
 		podEvictor := evictions.NewPodEvictor(
 			fakeClient,
 			"v1",
 			false,
 			tc.maxPodsToEvict,
 			[]*v1.Node{node},
 		)
 
-		removePodsHavingTooManyRestarts(&ds, tc.strategy, []*v1.Node{node}, podEvictor, ds.EvictLocalStoragePods)
+		RemovePodsHavingTooManyRestarts(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 92e3e267f0..bff92ef282 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -125,17 +125,15 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInfor
 		},
 	}
 
-	ds := &options.DeschedulerServer{Client: clientset}
-
 	podEvictor := evictions.NewPodEvictor(
-		ds.Client,
+		clientset,
 		evictionPolicyGroupVersion,
-		ds.DryRun,
-		ds.MaxNoOfPodsToEvictPerNode,
+		false,
+		0,
 		nodes,
 	)
 
-	strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, nodes, podEvictor)
+	strategies.LowNodeUtilization(clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor)
 
 	time.Sleep(10 * time.Second)
 }
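After this patch, every strategy satisfies the strategyFunction type added in descheduler.go: a plain client-go Interface, the strategy's api.DeschedulerStrategy, the candidate nodes, the evictLocalStoragePods flag, and a shared *evictions.PodEvictor, with no options.DeschedulerServer anywhere. A minimal sketch of a caller built on that shape — the fake client, the single node, the "policy/v1" eviction group/version, and the one-entry strategy map are illustrative assumptions for this example, not values taken from the patch:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)

// strategyFunction mirrors the type introduced in descheduler.go above.
type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)

func main() {
	// A fake client and a single node stand in for a real cluster here.
	client := fake.NewSimpleClientset()
	nodes := []*v1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}}

	// One PodEvictor is shared by all strategies in a descheduling cycle;
	// dry-run false and a 0 per-node eviction limit match the e2e usage above.
	podEvictor := evictions.NewPodEvictor(client, "policy/v1", false, 0, nodes)

	// With a uniform signature, dispatch reduces to a map iteration, as in the
	// new RunDeschedulerStrategies loop. The Enabled gate now sits with the
	// caller rather than inside each strategy.
	strategyFuncs := map[string]strategyFunction{
		"RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints,
	}

	strategy := api.DeschedulerStrategy{Enabled: true}
	for _, f := range strategyFuncs {
		if strategy.Enabled {
			f(client, strategy, nodes, false, podEvictor)
		}
	}
}

Sharing the one PodEvictor across all strategies is what keeps eviction counts, dry-run behavior, and the per-node eviction limit consistent within a cycle, which is why it stays a constructor-built dependency rather than another field pulled off a server object.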