Skip to content

Commit

Permalink
Update unit tests
Browse files Browse the repository at this point in the history
Signed-off-by: lili <lili_9309@163.com>
  • Loading branch information
Lily922 committed Nov 20, 2023
1 parent 46f306e commit 2a95357
Show file tree
Hide file tree
Showing 13 changed files with 27 additions and 31 deletions.
1 change: 1 addition & 0 deletions cmd/scheduler/app/options/options_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ func TestAddFlags(t *testing.T) {
PercentageOfNodesToFind: defaultPercentageOfNodesToFind,
EnableLeaderElection: true,
LockObjectNamespace: defaultLockObjectNamespace,
NodeWorkerThreads: defaultNodeWorkers,
}

if !reflect.DeepEqual(expected, s) {
Expand Down
4 changes: 2 additions & 2 deletions pkg/scheduler/actions/allocate/allocate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ func TestAllocate(t *testing.T) {
}

for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, pod := range test.pods {
schedulerCache.AddPod(pod)
Expand Down Expand Up @@ -454,7 +454,7 @@ func TestAllocateWithDynamicPVC(t *testing.T) {
schedulerCache.AddPod(pod)
}
for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}

trueValue := true
Expand Down
2 changes: 1 addition & 1 deletion pkg/scheduler/actions/preempt/preempt_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,7 @@ func TestPreempt(t *testing.T) {
Value: 10,
}
for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, pod := range test.pods {
schedulerCache.AddPod(pod)
Expand Down
2 changes: 1 addition & 1 deletion pkg/scheduler/actions/reclaim/reclaim_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ func TestReclaim(t *testing.T) {
Value: 10,
}
for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, pod := range test.pods {
schedulerCache.AddPod(pod)
Expand Down
2 changes: 1 addition & 1 deletion pkg/scheduler/actions/shuffle/shuffle_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ func TestShuffle(t *testing.T) {
}

for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, q := range test.queues {
schedulerCache.AddQueueV1beta1(q)
Expand Down
19 changes: 7 additions & 12 deletions pkg/scheduler/cache/cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"

"volcano.sh/volcano/pkg/scheduler/api"
volumescheduling "volcano.sh/volcano/pkg/scheduler/capabilities/volumebinding"
"volcano.sh/volcano/pkg/scheduler/util"
Expand Down Expand Up @@ -155,7 +154,7 @@ func TestSchedulerCache_Bind_NodeWithSufficientResources(t *testing.T) {
cache.AddPod(pod)

node := buildNode("n1", api.BuildResourceList("2000m", "10G", []api.ScalarResource{{Name: "pods", Value: "10"}}...))
cache.AddNode(node)
cache.AddOrUpdateNode(node)

task := api.NewTaskInfo(pod)
task.Job = "j1"
Expand Down Expand Up @@ -187,7 +186,7 @@ func TestSchedulerCache_Bind_NodeWithInsufficientResources(t *testing.T) {
cache.AddPod(pod)

node := buildNode("n1", api.BuildResourceList("2000m", "10G", []api.ScalarResource{{Name: "pods", Value: "10"}}...))
cache.AddNode(node)
cache.AddOrUpdateNode(node)

task := api.NewTaskInfo(pod)
task.Job = "j1"
Expand Down Expand Up @@ -299,7 +298,7 @@ func TestNodeOperation(t *testing.T) {
}

for _, n := range test.nodes {
cache.AddNode(n)
cache.AddOrUpdateNode(n)
}

if !reflect.DeepEqual(cache, test.expected) {
Expand All @@ -308,7 +307,7 @@ func TestNodeOperation(t *testing.T) {
}

// delete node
cache.DeleteNode(test.deletedNode)
cache.RemoveNode(test.deletedNode.Name)
if !reflect.DeepEqual(cache, test.delExpect) {
t.Errorf("case %d: \n expected %v, \n got %v \n",
i, test.delExpect, cache)
Expand Down Expand Up @@ -336,6 +335,7 @@ func TestBindTasks(t *testing.T) {
pvInformer: informerFactory.Core().V1().PersistentVolumes(),
scInformer: informerFactory.Storage().V1().StorageClasses(),
errTasks: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
nodeQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
}

sc.Binder = &DefaultBinder{}
Expand All @@ -359,12 +359,6 @@ func TestBindTasks(t *testing.T) {
DeleteFunc: sc.DeletePod,
},
)
sc.nodeInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: sc.AddNode,
UpdateFunc: sc.UpdateNode,
},
)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go wait.Until(sc.processBindTask, time.Millisecond*5, ctx.Done())
Expand All @@ -375,9 +369,10 @@ func TestBindTasks(t *testing.T) {

// make sure pod exist when calling fake client binding
fakeKube.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
fakeKube.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
informerFactory.Start(ctx.Done())
informerFactory.WaitForCacheSync(ctx.Done())
// set node in cache directly
sc.AddOrUpdateNode(node)

task := api.NewTaskInfo(pod)
task.NodeName = "n1"
Expand Down
10 changes: 5 additions & 5 deletions pkg/scheduler/cache/event_handlers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ func TestSchedulerCache_updateTask(t *testing.T) {
}

for _, n := range test.Nodes {
cache.AddNode(n)
cache.AddOrUpdateNode(n)
}

cache.AddPod(test.OldPod)
Expand Down Expand Up @@ -129,7 +129,7 @@ func TestSchedulerCache_UpdatePod(t *testing.T) {
}

for _, n := range test.Nodes {
cache.AddNode(n)
cache.AddOrUpdateNode(n)
}

cache.AddPod(test.OldPod)
Expand Down Expand Up @@ -210,7 +210,7 @@ func TestSchedulerCache_AddPodGroupV1beta1(t *testing.T) {
}

for _, n := range test.Nodes {
cache.AddNode(n)
cache.AddOrUpdateNode(n)
}
test.Pod.Annotations = map[string]string{
"scheduling.k8s.io/group-name": "j1",
Expand Down Expand Up @@ -336,7 +336,7 @@ func TestSchedulerCache_UpdatePodGroupV1beta1(t *testing.T) {
}

for _, n := range test.Nodes {
cache.AddNode(n)
cache.AddOrUpdateNode(n)
}
test.Pod.Annotations = map[string]string{
"scheduling.k8s.io/group-name": "j1",
Expand Down Expand Up @@ -431,7 +431,7 @@ func TestSchedulerCache_DeletePodGroupV1beta1(t *testing.T) {
cache.DeletedJobs = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

for _, n := range test.Nodes {
cache.AddNode(n)
cache.AddOrUpdateNode(n)
}
test.Pod.Annotations = map[string]string{
"scheduling.k8s.io/group-name": "j1",
Expand Down
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/binpack/binpack_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -265,7 +265,7 @@ func TestNode(t *testing.T) {
Recorder: record.NewFakeRecorder(100),
}
for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, pod := range test.pods {
schedulerCache.AddPod(pod)
Expand Down
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/drf/hdrf_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ func TestHDRF(t *testing.T) {
Recorder: record.NewFakeRecorder(100),
}
for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, q := range test.queueSpecs {
schedulerCache.AddQueueV1beta1(
Expand Down
4 changes: 2 additions & 2 deletions pkg/scheduler/plugins/predicates/predicates_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ func TestEventHandler(t *testing.T) {
return
}

sc := cache.New(config, option.SchedulerNames, option.DefaultQueue, option.NodeSelector)
sc := cache.New(config, option.SchedulerNames, option.DefaultQueue, option.NodeSelector, option.NodeWorkerThreads)
schedulerCache := sc.(*cache.SchedulerCache)

// pending pods
Expand Down Expand Up @@ -157,7 +157,7 @@ func TestEventHandler(t *testing.T) {
}
}()
for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, pod := range test.pods {
schedulerCache.AddPod(pod)
Expand Down
2 changes: 1 addition & 1 deletion pkg/scheduler/plugins/proportion/proportion_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ func TestProportion(t *testing.T) {
schedulerCache.DeletedJobs = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, pod := range test.pods {
schedulerCache.AddPod(pod)
Expand Down
4 changes: 2 additions & 2 deletions pkg/scheduler/plugins/tdm/tdm_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ func Test_TDM(t *testing.T) {
Recorder: record.NewFakeRecorder(100),
}
for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}

schedulerCache.AddPod(test.pod)
Expand Down Expand Up @@ -713,7 +713,7 @@ func Test_TDM_victimsFn(t *testing.T) {
Recorder: record.NewFakeRecorder(100),
}
for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}

for _, pod := range test.pods {
Expand Down
4 changes: 2 additions & 2 deletions pkg/scheduler/plugins/usage/usage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,7 @@ func TestUsage_predicateFn(t *testing.T) {
}

for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, pod := range test.pods {
schedulerCache.AddPod(pod)
Expand Down Expand Up @@ -513,7 +513,7 @@ func TestUsage_nodeOrderFn(t *testing.T) {
}

for _, node := range test.nodes {
schedulerCache.AddNode(node)
schedulerCache.AddOrUpdateNode(node)
}
for _, pod := range test.pods {
schedulerCache.AddPod(pod)
Expand Down

0 comments on commit 2a95357

Please sign in to comment.