Update to k8s 1.18.2 dependencies #280

Merged · 3 commits · May 12, 2020
3 changes: 2 additions & 1 deletion .travis.yml
@@ -5,14 +5,15 @@ language: go
 go:
 - 1.13.x
 env:
+- K8S_VERSION=v1.18.2
 - K8S_VERSION=v1.17.0
 - K8S_VERSION=v1.16.4
 - K8S_VERSION=v1.15.7
 services:
 - docker
 before_script:
 - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
-- wget https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64
+- wget https://github.com/kubernetes-sigs/kind/releases/download/v0.8.1/kind-linux-amd64
 - chmod +x kind-linux-amd64
 - mv kind-linux-amd64 kind
 - export PATH=$PATH:$PWD
13 changes: 5 additions & 8 deletions go.mod
@@ -3,15 +3,12 @@ module sigs.k8s.io/descheduler
 go 1.13
 
 require (
-    github.com/gogo/protobuf v1.3.1 // indirect
     github.com/spf13/cobra v0.0.5
     github.com/spf13/pflag v1.0.5
-    k8s.io/api v0.17.0
-    k8s.io/apimachinery v0.17.3-beta.0
-    k8s.io/apiserver v0.17.0
-    k8s.io/client-go v0.17.0
-    k8s.io/component-base v0.17.0
+    k8s.io/api v0.18.2
+    k8s.io/apimachinery v0.18.2
+    k8s.io/apiserver v0.18.2
+    k8s.io/client-go v0.18.2
+    k8s.io/component-base v0.18.2
     k8s.io/klog v1.0.0
-    k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c // indirect
-    sigs.k8s.io/yaml v1.2.0 // indirect
 )
68 changes: 35 additions & 33 deletions go.sum

Large diffs are not rendered by default.
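The go.sum churn simply tracks the go.mod bumps above. The substantive change in moving from the v0.17 to the v0.18 k8s.io libraries is the client-go calling convention: every generated client method now takes a context.Context as its first argument, and write operations take typed option structs. A minimal standalone sketch of the new convention (not part of this PR; it assumes a reachable cluster via the default kubeconfig):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes ~/.kube/config points at a live cluster.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	// v0.17: client.CoreV1().Pods("").List(metav1.ListOptions{})
	// v0.18: the same call, with an explicit context threaded in.
	pods, err := client.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("cluster has %d pods\n", len(pods.Items))
}
```

That one signature change is why nearly every hunk below is a mechanical ctx-threading edit.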

12 changes: 7 additions & 5 deletions pkg/descheduler/descheduler.go
@@ -17,6 +17,7 @@ limitations under the License.
 package descheduler
 
 import (
+    "context"
     "fmt"
 
     "k8s.io/api/core/v1"
@@ -35,6 +36,7 @@ import (
 )
 
 func Run(rs *options.DeschedulerServer) error {
+    ctx := context.Background()
     rsclient, err := client.CreateClient(rs.KubeconfigFile)
     if err != nil {
         return err
@@ -55,12 +57,12 @@ func Run(rs *options.DeschedulerServer) error {
     }
 
     stopChannel := make(chan struct{})
-    return RunDeschedulerStrategies(rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
+    return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
 }
 
-type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
+type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
 
-func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
+func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
     sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
     nodeInformer := sharedInformerFactory.Core().V1().Nodes()
 
@@ -78,7 +80,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
     }
 
     wait.Until(func() {
-        nodes, err := nodeutil.ReadyNodes(rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
+        nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
         if err != nil {
             klog.V(1).Infof("Unable to get ready nodes: %v", err)
             close(stopChannel)
@@ -101,7 +103,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
 
     for name, f := range strategyFuncs {
         if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
-            f(rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
+            f(ctx, rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
         }
     }
 
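Taken together: Run now owns a single context.Background(), and every strategy receives it through the widened strategyFunction type. A hedged sketch of what a strategy conforming to the new signature looks like (the strategy name and body are illustrative, not part of this PR; the helpers are the repo's own, with the post-PR signatures):

```go
package strategies

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// exampleStrategy matches the new strategyFunction type: the context arrives
// first and is forwarded into every API-server call further down.
func exampleStrategy(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
	for _, node := range nodes {
		pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
		if err != nil {
			klog.V(1).Infof("listing pods on %q failed: %v", node.Name, err)
			continue
		}
		for _, pod := range pods {
			if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
				// Per-node eviction budget exhausted; move on to the next node.
				break
			}
		}
	}
}
```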
10 changes: 6 additions & 4 deletions pkg/descheduler/descheduler_test.go
@@ -1,6 +1,7 @@
 package descheduler
 
 import (
+    "context"
     "fmt"
     "strings"
     "testing"
@@ -16,6 +17,7 @@ import (
 )
 
 func TestTaintsUpdated(t *testing.T) {
+    ctx := context.Background()
     n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
 
@@ -40,15 +42,15 @@ func TestTaintsUpdated(t *testing.T) {
     rs.Client = client
     rs.DeschedulingInterval = 100 * time.Millisecond
     go func() {
-        err := RunDeschedulerStrategies(rs, dp, "v1beta1", stopChannel)
+        err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1", stopChannel)
         if err != nil {
             t.Fatalf("Unable to run descheduler strategies: %v", err)
         }
     }()
 
     // Wait for few cycles and then verify the only pod still exists
     time.Sleep(300 * time.Millisecond)
-    pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{})
+    pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
     if err != nil {
         t.Errorf("Unable to list pods: %v", err)
     }
@@ -65,7 +67,7 @@ func TestTaintsUpdated(t *testing.T) {
         },
     }
 
-    if _, err := client.CoreV1().Nodes().Update(n1WithTaint); err != nil {
+    if _, err := client.CoreV1().Nodes().Update(ctx, n1WithTaint, metav1.UpdateOptions{}); err != nil {
         t.Fatalf("Unable to update node: %v\n", err)
     }
 
@@ -74,7 +76,7 @@ func TestTaintsUpdated(t *testing.T) {
     //pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
     // List is better, it does not panic.
    // Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
-    pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{})
+    pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
     if err == nil {
         if len(pods.Items) > 0 {
             return false, nil
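This test touches both flavors of the v0.18 change: List gains a context, and Update gains both a context and a typed metav1.UpdateOptions. A standalone sketch of the same Update pattern against the fake clientset, so it runs without a cluster (node name and taint are illustrative):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.Background()
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}
	client := fake.NewSimpleClientset(node)

	node.Spec.Taints = append(node.Spec.Taints, v1.Taint{
		Key: "testTaint", Value: "test", Effect: v1.TaintEffectNoSchedule,
	})
	// v0.17: Nodes().Update(node); v0.18 adds ctx and metav1.UpdateOptions.
	if _, err := client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	updated, _ := client.CoreV1().Nodes().Get(ctx, "n1", metav1.GetOptions{})
	fmt.Println("taints:", updated.Spec.Taints)
}
```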
9 changes: 5 additions & 4 deletions pkg/descheduler/evictions/evictions.go
@@ -17,6 +17,7 @@ limitations under the License.
 package evictions
 
 import (
+    "context"
     "fmt"
 
     v1 "k8s.io/api/core/v1"
@@ -82,12 +83,12 @@ func (pe *PodEvictor) TotalEvicted() int {
 // EvictPod returns non-nil error only when evicting a pod on a node is not
 // possible (due to maxPodsToEvict constraint). Success is true when the pod
 // is evicted on the server side.
-func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err error) {
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (success bool, err error) {
     if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
         return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
     }
 
-    success, err = EvictPod(pe.client, pod, pe.policyGroupVersion, pe.dryRun)
+    success, err = EvictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
     if success {
         pe.nodepodCount[node]++
         klog.V(1).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
@@ -98,7 +99,7 @@ func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err er
     return false, nil
 }
 
-func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
+func EvictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
     if dryRun {
         return true, nil
     }
@@ -115,7 +116,7 @@ func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string
         },
         DeleteOptions: deleteOptions,
     }
-    err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
+    err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
 
     if err == nil {
         eventBroadcaster := record.NewBroadcaster()
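The eviction path changes only at the call site: the policy/v1beta1 Eviction object is built exactly as before, and Evict now takes the context first. A runnable sketch against the fake clientset (pod name and namespace are illustrative):

```go
package main

import (
	"context"

	policy "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.Background()
	client := fake.NewSimpleClientset()

	eviction := &policy.Eviction{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "policy/v1beta1",
			Kind:       "Eviction",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "p1",      // illustrative pod name
			Namespace: "default", // illustrative namespace
		},
	}
	// v0.17: Evict(eviction); v0.18: Evict(ctx, eviction).
	if err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction); err != nil {
		panic(err)
	}
}
```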
4 changes: 3 additions & 1 deletion pkg/descheduler/evictions/evictions_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package evictions
 
 import (
+    "context"
     "testing"
 
     "k8s.io/api/core/v1"
@@ -27,6 +28,7 @@ import (
 )
 
 func TestEvictPod(t *testing.T) {
+    ctx := context.Background()
     node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
     pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
     tests := []struct {
@@ -57,7 +59,7 @@ func TestEvictPod(t *testing.T) {
         fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
             return true, &v1.PodList{Items: test.pods}, nil
         })
-        got, _ := EvictPod(fakeClient, test.pod, "v1", false)
+        got, _ := EvictPod(ctx, fakeClient, test.pod, "v1", false)
         if got != test.want {
             t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
         }
5 changes: 3 additions & 2 deletions pkg/descheduler/node/node.go
@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+    "context"
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -28,7 +29,7 @@ import (
 
 // ReadyNodes returns ready nodes irrespective of whether they are
 // schedulable or not.
-func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
+func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
     ns, err := labels.Parse(nodeSelector)
     if err != nil {
         return []*v1.Node{}, err
@@ -43,7 +44,7 @@ func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInfor
     if len(nodes) == 0 {
         klog.V(2).Infof("node lister returned empty list, now fetch directly")
 
-        nItems, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
+        nItems, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: nodeSelector})
         if err != nil {
             return []*v1.Node{}, err
         }
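ReadyNodes keeps its lister-first, API-server-fallback shape; only the fallback List call needed the new context. A self-contained sketch of that pattern against the fake clientset (node name and labels are illustrative):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.Background()
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{
		Name:   "node1",
		Labels: map[string]string{"type": "compute"},
	}}
	client := fake.NewSimpleClientset(node)

	factory := informers.NewSharedInformerFactory(client, 0)
	nodeInformer := factory.Core().V1().Nodes()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	selector, _ := labels.Parse("type=compute")
	nodes, err := nodeInformer.Lister().List(selector)
	if err != nil || len(nodes) == 0 {
		// Cache miss: fall back to the API server. This is where the new
		// ctx parameter lands in the v0.18 client.
		list, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
		if err != nil {
			panic(err)
		}
		fmt.Printf("direct list matched %d nodes\n", len(list.Items))
		return
	}
	fmt.Printf("lister matched %d nodes\n", len(nodes))
}
```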
4 changes: 3 additions & 1 deletion pkg/descheduler/node/node_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+    "context"
     "testing"
 
     "k8s.io/api/core/v1"
@@ -56,6 +57,7 @@ func TestReadyNodes(t *testing.T) {
 }
 
 func TestReadyNodesWithNodeSelector(t *testing.T) {
+    ctx := context.Background()
    node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
     node1.Labels = map[string]string{"type": "compute"}
     node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
@@ -72,7 +74,7 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
     sharedInformerFactory.WaitForCacheSync(stopChannel)
     defer close(stopChannel)
 
-    nodes, _ := ReadyNodes(fakeClient, nodeInformer, nodeSelector, nil)
+    nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector, nil)
 
     if nodes[0].Name != "node1" {
         t.Errorf("Expected node1, got %s", nodes[0].Name)
9 changes: 5 additions & 4 deletions pkg/descheduler/pod/pods.go
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+    "context"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
@@ -38,8 +39,8 @@ func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
 }
 
 // ListEvictablePodsOnNode returns the list of evictable pods on node.
-func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
-    pods, err := ListPodsOnANode(client, node)
+func ListEvictablePodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
+    pods, err := ListPodsOnANode(ctx, client, node)
     if err != nil {
         return []*v1.Pod{}, err
     }
@@ -54,13 +55,13 @@ func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLoc
     return evictablePods, nil
 }
 
-func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
+func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
     fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
     if err != nil {
         return []*v1.Pod{}, err
     }
 
-    podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
+    podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
         metav1.ListOptions{FieldSelector: fieldSelector.String()})
     if err != nil {
         return []*v1.Pod{}, err
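ListPodsOnANode filters server-side with a field selector rather than listing everything and filtering locally; only the List call gains the context here. A sketch of the same selector with the v0.18 call (note the fake clientset does not evaluate field selectors, so the filter only takes effect against a real API server):

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.Background()
	client := fake.NewSimpleClientset()

	// The same selector ListPodsOnANode builds: pods bound to the node
	// that have not already finished.
	fieldSelector, err := fields.ParseSelector(
		"spec.nodeName=node1,status.phase!=" + string(v1.PodSucceeded) +
			",status.phase!=" + string(v1.PodFailed))
	if err != nil {
		panic(err)
	}

	// v0.18 List: context first, options struct second.
	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
		metav1.ListOptions{FieldSelector: fieldSelector.String()})
	if err != nil {
		panic(err)
	}
	fmt.Printf("matched %d pods\n", len(podList.Items))
}
```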
10 changes: 6 additions & 4 deletions pkg/descheduler/strategies/duplicates.go
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "strings"
 
     "k8s.io/api/core/v1"
@@ -32,6 +33,7 @@ import (
 // A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
 // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
 func RemoveDuplicatePods(
+    ctx context.Context,
     client clientset.Interface,
     strategy api.DeschedulerStrategy,
     nodes []*v1.Node,
@@ -40,13 +42,13 @@ func RemoveDuplicatePods(
 ) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v", node.Name)
-        dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods)
+        dpm := listDuplicatePodsOnANode(ctx, client, node, evictLocalStoragePods)
         for creator, pods := range dpm {
             if len(pods) > 1 {
                 klog.V(1).Infof("%#v", creator)
                 // i = 0 does not evict the first pod
                 for i := 1; i < len(pods); i++ {
-                    if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
+                    if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
                         break
                     }
                 }
@@ -59,8 +61,8 @@ func RemoveDuplicatePods(
 type duplicatePodsMap map[string][]*v1.Pod
 
 // listDuplicatePodsOnANode lists duplicate pods on a given node.
-func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
-    pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+func listDuplicatePodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
+    pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
     if err != nil {
         return nil
     }
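For readers new to this strategy: listDuplicatePodsOnANode buckets a node's evictable pods by creator, and RemoveDuplicatePods evicts every pod after the first in each bucket. A self-contained sketch of that grouping idea (the key format is illustrative; the real implementation may differ in detail):

```go
package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
)

type duplicatePodsMap map[string][]*v1.Pod

// groupByOwner builds one bucket per owner reference: pods from the same
// controller, in the same namespace, running the same images collide on the
// same key, so everything past index 0 in a bucket is a duplicate.
func groupByOwner(pods []*v1.Pod) duplicatePodsMap {
	dpm := duplicatePodsMap{}
	for _, pod := range pods {
		for _, ref := range pod.ObjectMeta.OwnerReferences {
			images := make([]string, 0, len(pod.Spec.Containers))
			for _, c := range pod.Spec.Containers {
				images = append(images, c.Image)
			}
			key := strings.Join([]string{pod.Namespace, ref.Kind, ref.Name, strings.Join(images, ",")}, "/")
			dpm[key] = append(dpm[key], pod)
		}
	}
	return dpm
}

func main() {
	// With no input this prints an empty map; wire it to
	// ListEvictablePodsOnNode (as in the diff above) for real pods.
	fmt.Println(groupByOwner(nil))
}
```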
4 changes: 3 additions & 1 deletion pkg/descheduler/strategies/duplicates_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "testing"
 
     "k8s.io/api/core/v1"
@@ -31,6 +32,7 @@ import (
 )
 
 func TestFindDuplicatePods(t *testing.T) {
+    ctx := context.Background()
     // first setup pods
     node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
@@ -144,7 +146,7 @@ func TestFindDuplicatePods(t *testing.T) {
             []*v1.Node{node},
         )
 
-        RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
+        RemoveDuplicatePods(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
         podsEvicted := podEvictor.TotalEvicted()
         if podsEvicted != testCase.expectedEvictedPodCount {
             t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)