This repository has been archived by the owner on May 25, 2023. It is now read-only.

Commit

Merge pull request #566 from k82cn/automated-cherry-pick-of-#565-#564-upstream-release-0.4

Automated cherry pick of #565: Removed reclaim&preempt by default. #564: Updated vendors.
k8s-ci-robot authored Jan 25, 2019
2 parents 31e2c89 + 7b58421 commit c466e05
Showing 397 changed files with 15,416 additions and 27,899 deletions.
234 changes: 85 additions & 149 deletions Gopkg.lock

Large diffs are not rendered by default.

45 changes: 25 additions & 20 deletions Gopkg.toml
@@ -24,45 +24,50 @@
 # go-tests = true
 # unused-packages = true


 [[constraint]]
-name = "github.com/spf13/pflag"
-version = "1.0.1"
+branch = "master"
+name = "github.com/golang/glog"

 [[constraint]]
-name = "k8s.io/apimachinery"
-version = "kubernetes-1.13.2"
+name = "github.com/onsi/ginkgo"
+version = "1.7.0"

 [[constraint]]
-name = "k8s.io/api"
-version = "kubernetes-1.13.2"
+name = "github.com/onsi/gomega"
+version = "1.4.3"

 [[constraint]]
-name = "k8s.io/apiserver"
-version = "kubernetes-1.13.2"
+name = "github.com/spf13/pflag"
+version = "1.0.3"

 [[constraint]]
 name = "gopkg.in/yaml.v2"
 version = "2.2.2"

 [[constraint]]
 name = "k8s.io/apiextensions-apiserver"
 version = "kubernetes-1.13.2"

 [[constraint]]
-name = "k8s.io/kubernetes"
-version = "v1.13.2"
+branch = "master"
+name = "k8s.io/gengo"

 [[constraint]]
-name = "k8s.io/client-go"
-version = "v10.0.0"
+name = "k8s.io/kubernetes"
+version = "1.13.2"

 [[constraint]]
-name = "github.com/stretchr/testify"
-version = "1.1.4"
+name = "k8s.io/apimachinery"
+version = "kubernetes-1.13.2"

-[[override]]
-name = "gopkg.in/fsnotify.v1"
-source = "https://github.com/fsnotify/fsnotify.git"
+[[constraint]]
+name = "k8s.io/api"
+version = "kubernetes-1.13.2"

-[[override]]
-name = "github.com/imdario/mergo"
-revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58"
+[[constraint]]
+name = "k8s.io/apiserver"
+version = "kubernetes-1.13.2"

 [prune]
 go-tests = true
9 changes: 9 additions & 0 deletions example/kube-batch-conf.yaml
@@ -0,0 +1,9 @@
actions: "reclaim, allocate, backfill, preempt"
tiers:
- plugins:
- name: priority
- name: gang
- plugins:
- name: drf
- name: predicates
- name: proportion
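The new example/kube-batch-conf.yaml pairs a comma-separated action list with tiers of plugins. As a rough illustration of how a configuration of this shape can be decoded (using gopkg.in/yaml.v2, which Gopkg.toml above pins at 2.2.2, and with hypothetical struct names rather than kube-batch's real configuration types), a minimal sketch:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Illustrative types only; kube-batch's real configuration structs may differ.
type pluginOption struct {
	Name string `yaml:"name"`
}

type tier struct {
	Plugins []pluginOption `yaml:"plugins"`
}

type schedulerConf struct {
	Actions string `yaml:"actions"`
	Tiers   []tier `yaml:"tiers"`
}

func main() {
	data := `
actions: "reclaim, allocate, backfill, preempt"
tiers:
- plugins:
  - name: priority
  - name: gang
- plugins:
  - name: drf
  - name: predicates
  - name: proportion
`
	var conf schedulerConf
	if err := yaml.Unmarshal([]byte(data), &conf); err != nil {
		panic(err)
	}
	// For the config above this prints the action string and 2 tiers.
	fmt.Println(conf.Actions, len(conf.Tiers))
}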
2 changes: 1 addition & 1 deletion hack/run-e2e.sh
@@ -26,7 +26,7 @@ kubectl create -f config/crds/scheduling_v1alpha1_podgroup.yaml
 kubectl create -f config/crds/scheduling_v1alpha1_queue.yaml

 # start kube-batch
-nohup ${KA_BIN}/kube-batch --kubeconfig ${HOME}/.kube/config --enable-namespace-as-queue=${ENABLE_NAMESPACES_AS_QUEUE} --logtostderr --v ${LOG_LEVEL} > scheduler.log 2>&1 &
+nohup ${KA_BIN}/kube-batch --kubeconfig ${HOME}/.kube/config --scheduler-conf=example/kube-batch-conf.yaml --enable-namespace-as-queue=${ENABLE_NAMESPACES_AS_QUEUE} --logtostderr --v ${LOG_LEVEL} > scheduler.log 2>&1 &

 # clean up
 function cleanup {
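The e2e script now starts kube-batch with an explicit --scheduler-conf pointing at example/kube-batch-conf.yaml instead of relying only on the compiled-in default. For readers unfamiliar with how such a flag is typically consumed, here is a minimal sketch of registering a --scheduler-conf string flag with github.com/spf13/pflag (pinned above at 1.0.3); this is an illustration, not kube-batch's actual flag wiring:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Hypothetical stand-in for the scheduler's own flag registration.
	var schedulerConf string
	pflag.StringVar(&schedulerConf, "scheduler-conf", "", "path to a scheduler configuration file")
	pflag.Parse()

	if schedulerConf == "" {
		fmt.Println("no --scheduler-conf given; a built-in default configuration would be used")
		return
	}
	fmt.Println("loading scheduler configuration from", schedulerConf)
}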
File renamed without changes.
2 changes: 1 addition & 1 deletion pkg/scheduler/util.go
@@ -28,7 +28,7 @@ import (
 )

 var defaultSchedulerConf = `
-actions: "reclaim, allocate, backfill, preempt"
+actions: "allocate, backfill"
 tiers:
 - plugins:
   - name: priority
49 changes: 24 additions & 25 deletions test/e2e/util.go
@@ -41,16 +41,15 @@ import (
 "k8s.io/client-go/tools/clientcmd"

 kbv1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
-"github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
-arbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
+kbver "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
+kbapi "github.com/kubernetes-sigs/kube-batch/pkg/scheduler/api"
 )

 var oneMinute = 1 * time.Minute

 var halfCPU = v1.ResourceList{"cpu": resource.MustParse("500m")}
 var oneCPU = v1.ResourceList{"cpu": resource.MustParse("1000m")}
 var twoCPU = v1.ResourceList{"cpu": resource.MustParse("2000m")}
-var threeCPU = v1.ResourceList{"cpu": resource.MustParse("3000m")}

 const (
 workerPriority = "worker-pri"
@@ -66,7 +65,7 @@ func homeDir() string {

 type context struct {
 kubeclient *kubernetes.Clientset
-karclient *versioned.Clientset
+kbclient *kbver.Clientset

 namespace string
 queues []string
@@ -86,7 +85,7 @@ func initTestContext() *context {
 config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(home, ".kube", "config"))
 Expect(err).NotTo(HaveOccurred())

-cxt.karclient = versioned.NewForConfigOrDie(config)
+cxt.kbclient = kbver.NewForConfigOrDie(config)
 cxt.kubeclient = kubernetes.NewForConfigOrDie(config)

 cxt.enableNamespaceAsQueue = enableNamespaceAsQueue
@@ -138,7 +137,7 @@ func queueNotExist(ctx *context) wait.ConditionFunc {
 if ctx.enableNamespaceAsQueue {
 _, err = ctx.kubeclient.CoreV1().Namespaces().Get(q, metav1.GetOptions{})
 } else {
-_, err = ctx.karclient.Scheduling().Queues().Get(q, metav1.GetOptions{})
+_, err = ctx.kbclient.Scheduling().Queues().Get(q, metav1.GetOptions{})
 }

 if !(err != nil && errors.IsNotFound(err)) {
@@ -190,7 +189,7 @@ func createQueues(cxt *context) {
 },
 })
 } else {
-_, err = cxt.karclient.Scheduling().Queues().Create(&kbv1.Queue{
+_, err = cxt.kbclient.Scheduling().Queues().Create(&kbv1.Queue{
 ObjectMeta: metav1.ObjectMeta{
 Name: q,
 },
@@ -204,7 +203,7 @@ }
 }

 if !cxt.enableNamespaceAsQueue {
-_, err := cxt.karclient.Scheduling().Queues().Create(&kbv1.Queue{
+_, err := cxt.kbclient.Scheduling().Queues().Create(&kbv1.Queue{
 ObjectMeta: metav1.ObjectMeta{
 Name: cxt.namespace,
 },
@@ -228,7 +227,7 @@ func deleteQueues(cxt *context) {
 PropagationPolicy: &foreground,
 })
 } else {
-err = cxt.karclient.Scheduling().Queues().Delete(q, &metav1.DeleteOptions{
+err = cxt.kbclient.Scheduling().Queues().Delete(q, &metav1.DeleteOptions{
 PropagationPolicy: &foreground,
 })
 }
@@ -237,7 +236,7 @@ }
 }

 if !cxt.enableNamespaceAsQueue {
-err := cxt.karclient.Scheduling().Queues().Delete(cxt.namespace, &metav1.DeleteOptions{
+err := cxt.kbclient.Scheduling().Queues().Delete(cxt.namespace, &metav1.DeleteOptions{
 PropagationPolicy: &foreground,
 })

@@ -334,15 +333,15 @@ func createJobEx(context *context, job *jobSpec) ([]*batchv1.Job, *kbv1.PodGroup
 pg.Spec.MinMember = *job.minMember
 }

-podgroup, err := context.karclient.Scheduling().PodGroups(pg.Namespace).Create(pg)
+podgroup, err := context.kbclient.Scheduling().PodGroups(pg.Namespace).Create(pg)
 Expect(err).NotTo(HaveOccurred())

 return jobs, podgroup
 }

 func taskPhase(ctx *context, pg *kbv1.PodGroup, phase []v1.PodPhase, taskNum int) wait.ConditionFunc {
 return func() (bool, error) {
-pg, err := ctx.karclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
+pg, err := ctx.kbclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())

 pods, err := ctx.kubeclient.CoreV1().Pods(pg.Namespace).List(metav1.ListOptions{})
@@ -368,7 +367,7 @@

 func taskPhaseEx(ctx *context, pg *kbv1.PodGroup, phase []v1.PodPhase, taskNum map[string]int) wait.ConditionFunc {
 return func() (bool, error) {
-pg, err := ctx.karclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
+pg, err := ctx.kbclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())

 pods, err := ctx.kubeclient.CoreV1().Pods(pg.Namespace).List(metav1.ListOptions{})
@@ -419,7 +418,7 @@ func podGroupUnschedulable(ctx *context, pg *kbv1.PodGroup, time time.Time) wait

 func podGroupEvicted(ctx *context, pg *kbv1.PodGroup, time time.Time) wait.ConditionFunc {
 return func() (bool, error) {
-pg, err := ctx.karclient.SchedulingV1alpha1().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
+pg, err := ctx.kbclient.SchedulingV1alpha1().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())

 events, err := ctx.kubeclient.CoreV1().Events(pg.Namespace).List(metav1.ListOptions{})
@@ -571,7 +570,7 @@ func clusterSize(ctx *context, req v1.ResourceList) int32 {
 pods, err := ctx.kubeclient.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 Expect(err).NotTo(HaveOccurred())

-used := map[string]*arbapi.Resource{}
+used := map[string]*kbapi.Resource{}

 for _, pod := range pods.Items {
 nodeName := pod.Spec.NodeName
@@ -584,11 +583,11 @@ func clusterSize(ctx *context, req v1.ResourceList) int32 {
 }

 if _, found := used[nodeName]; !found {
-used[nodeName] = arbapi.EmptyResource()
+used[nodeName] = kbapi.EmptyResource()
 }

 for _, c := range pod.Spec.Containers {
-req := arbapi.NewResource(c.Resources.Requests)
+req := kbapi.NewResource(c.Resources.Requests)
 used[nodeName].Add(req)
 }
 }
@@ -601,8 +600,8 @@ func clusterSize(ctx *context, req v1.ResourceList) int32 {
 continue
 }

-alloc := arbapi.NewResource(node.Status.Allocatable)
-slot := arbapi.NewResource(req)
+alloc := kbapi.NewResource(node.Status.Allocatable)
+slot := kbapi.NewResource(req)

 // Removed used resources.
 if res, found := used[node.Name]; found {
@@ -640,7 +639,7 @@ func computeNode(ctx *context, req v1.ResourceList) (string, int32) {
 pods, err := ctx.kubeclient.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 Expect(err).NotTo(HaveOccurred())

-used := map[string]*arbapi.Resource{}
+used := map[string]*kbapi.Resource{}

 for _, pod := range pods.Items {
 nodeName := pod.Spec.NodeName
@@ -653,11 +652,11 @@ func computeNode(ctx *context, req v1.ResourceList) (string, int32) {
 }

 if _, found := used[nodeName]; !found {
-used[nodeName] = arbapi.EmptyResource()
+used[nodeName] = kbapi.EmptyResource()
 }

 for _, c := range pod.Spec.Containers {
-req := arbapi.NewResource(c.Resources.Requests)
+req := kbapi.NewResource(c.Resources.Requests)
 used[nodeName].Add(req)
 }
 }
@@ -669,8 +668,8 @@ func computeNode(ctx *context, req v1.ResourceList) (string, int32) {

 res := int32(0)

-alloc := arbapi.NewResource(node.Status.Allocatable)
-slot := arbapi.NewResource(req)
+alloc := kbapi.NewResource(node.Status.Allocatable)
+slot := kbapi.NewResource(req)

 // Removed used resources.
 if res, found := used[node.Name]; found {
@@ -691,7 +690,7 @@ }
 }

 func getPodOfPodGroup(ctx *context, pg *kbv1.PodGroup) []*v1.Pod {
-pg, err := ctx.karclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
+pg, err := ctx.kbclient.Scheduling().PodGroups(pg.Namespace).Get(pg.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())

 pods, err := ctx.kubeclient.CoreV1().Pods(pg.Namespace).List(metav1.ListOptions{})
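Aside from the renamed imports (kbver for the clientset, kbapi for the scheduler API) and the karclient to kbclient field rename, the logic in clusterSize and computeNode is untouched: sum the container requests of running pods per node, subtract that from the node's allocatable resources, and count how many request-sized slots remain. A self-contained, CPU-only sketch of that accounting, using plain k8s.io/api and k8s.io/apimachinery types instead of the kbapi.Resource helper, might look like this:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// slotsOnNode reports how many pods requesting `req` CPU still fit on one node,
// given the node's allocatable CPU and the CPU already requested by its pods.
// CPU-only for brevity; the e2e helpers account for every resource name.
func slotsOnNode(allocatable, used, req v1.ResourceList) int32 {
	remaining := allocatable[v1.ResourceCPU]
	u := used[v1.ResourceCPU]
	remaining.Sub(u)

	slot := req[v1.ResourceCPU]
	if slot.IsZero() {
		return 0
	}

	count := int32(0)
	for remaining.Cmp(slot) >= 0 {
		remaining.Sub(slot)
		count++
	}
	return count
}

func main() {
	allocatable := v1.ResourceList{v1.ResourceCPU: resource.MustParse("4000m")}
	used := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1500m")}
	oneCPU := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1000m")}

	// 2500m of CPU remains, so two 1-CPU slots fit; prints 2.
	fmt.Println(slotsOnNode(allocatable, used, oneCPU))
}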
24 changes: 18 additions & 6 deletions vendor/cloud.google.com/go/compute/metadata/metadata.go

Some generated files are not rendered by default.

1 change: 0 additions & 1 deletion vendor/github.com/Sirupsen/logrus/.gitignore

Some generated files are not rendered by default.


