Skip to content

Commit

Permalink
Fix unit tests that failure due to fake-client behavior changes.
Browse files Browse the repository at this point in the history
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
  • Loading branch information
RainbowMango committed Jul 29, 2023
1 parent c8c4abb commit 01e788c
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 147 deletions.
15 changes: 0 additions & 15 deletions pkg/controllers/binding/binding_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ import (
"fmt"
"reflect"
"testing"
"time"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
Expand Down Expand Up @@ -75,7 +74,6 @@ func makeFakeRBCByResource(rs *workv1alpha2.ObjectReference) (*ResourceBindingCo
}

func TestResourceBindingController_Reconcile(t *testing.T) {
preTime := metav1.Date(2023, 0, 0, 0, 0, 0, 0, time.UTC)
tmpReq := controllerruntime.Request{
NamespacedName: types.NamespacedName{
Name: "test-rb",
Expand All @@ -95,19 +93,6 @@ func TestResourceBindingController_Reconcile(t *testing.T) {
wantErr: false,
req: tmpReq,
},
{
name: "RB found with deleting",
want: controllerruntime.Result{},
wantErr: false,
rb: &workv1alpha2.ResourceBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "test-rb",
Namespace: "default",
DeletionTimestamp: &preTime,
},
},
req: tmpReq,
},
{
name: "RB found without deleting",
want: controllerruntime.Result{Requeue: true},
Expand Down
153 changes: 31 additions & 122 deletions pkg/controllers/status/cluster_status_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,19 +46,6 @@ func TestClusterStatusController_Reconcile(t *testing.T) {
expectedResult: controllerruntime.Result{},
expectedError: false,
},
{
name: "Cluster found with finalizer",
clusterName: "test-cluster",
cluster: &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Finalizers: []string{
util.ClusterControllerFinalizer,
},
},
},
expectedResult: controllerruntime.Result{},
expectedError: false,
},
{
name: "Cluster found without finalizer",
clusterName: "test-cluster",
Expand Down Expand Up @@ -156,8 +143,17 @@ func TestClusterStatusController_syncClusterStatus(t *testing.T) {
server := mockServer(http.StatusOK, false)
defer server.Close()
serverAddress = server.URL
cluster := &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: clusterv1alpha1.ClusterSpec{
APIEndpoint: server.URL,
SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
InsecureSkipTLSVerification: true,
ProxyURL: "http://1.1.1.1",
},
}
c := &ClusterStatusController{
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithStatusSubresource(cluster).Build(),
GenericInformerManager: genericmanager.GetInstance(),
TypedInformerManager: typedmanager.GetInstance(),
ClusterSuccessThreshold: metav1.Duration{
Expand All @@ -177,33 +173,29 @@ func TestClusterStatusController_syncClusterStatus(t *testing.T) {
ClusterClientSetFunc: clusterClientSetFuncWithError,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
}

cluster := &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: clusterv1alpha1.ClusterSpec{
APIEndpoint: server.URL,
SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
InsecureSkipTLSVerification: true,
ProxyURL: "http://1.1.1.1",
},
}

if err := c.Client.Create(context.Background(), cluster); err != nil {
t.Fatalf("Failed to create cluster: %v", err)
}

res, err := c.syncClusterStatus(cluster)
expect := controllerruntime.Result{}
assert.Equal(t, expect, res)
assert.Empty(t, err)
})

t.Run("online is false, readyCondition.Status isn't true", func(t *testing.T) {
server := mockServer(http.StatusNotFound, true)
defer server.Close()
serverAddress = server.URL
cluster := &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: clusterv1alpha1.ClusterSpec{
APIEndpoint: server.URL,
SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
InsecureSkipTLSVerification: true,
ProxyURL: "http://1.1.1.1",
},
}
c := &ClusterStatusController{
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithStatusSubresource(cluster).Build(),
GenericInformerManager: genericmanager.GetInstance(),
TypedInformerManager: typedmanager.GetInstance(),
ClusterSuccessThreshold: metav1.Duration{
Expand All @@ -223,33 +215,30 @@ func TestClusterStatusController_syncClusterStatus(t *testing.T) {
ClusterClientSetFunc: clusterClientSetFunc,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
}

cluster := &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: clusterv1alpha1.ClusterSpec{
APIEndpoint: server.URL,
SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
InsecureSkipTLSVerification: true,
ProxyURL: "http://1.1.1.1",
},
}

if err := c.Client.Create(context.Background(), cluster); err != nil {
t.Fatalf("Failed to create cluster: %v", err)
}

res, err := c.syncClusterStatus(cluster)
expect := controllerruntime.Result{}
assert.Equal(t, expect, res)
assert.Empty(t, err)
})

t.Run("online and healthy is true", func(t *testing.T) {
server := mockServer(http.StatusOK, false)
defer server.Close()
serverAddress = server.URL
cluster := &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: clusterv1alpha1.ClusterSpec{
APIEndpoint: server.URL,
SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
InsecureSkipTLSVerification: true,
ProxyURL: "http://1.1.1.1",
SyncMode: clusterv1alpha1.Pull,
},
}
c := &ClusterStatusController{
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithStatusSubresource(cluster).Build(),
GenericInformerManager: genericmanager.GetInstance(),
TypedInformerManager: typedmanager.GetInstance(),
ClusterSuccessThreshold: metav1.Duration{
Expand All @@ -269,22 +258,9 @@ func TestClusterStatusController_syncClusterStatus(t *testing.T) {
ClusterClientSetFunc: clusterClientSetFunc,
ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
}

cluster := &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: clusterv1alpha1.ClusterSpec{
APIEndpoint: server.URL,
SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
InsecureSkipTLSVerification: true,
ProxyURL: "http://1.1.1.1",
SyncMode: clusterv1alpha1.Pull,
},
}

if err := c.Client.Create(context.Background(), cluster); err != nil {
t.Fatalf("Failed to create cluster: %v", err)
}

res, err := c.syncClusterStatus(cluster)
expect := controllerruntime.Result{}
assert.Equal(t, expect, res)
Expand Down Expand Up @@ -814,73 +790,6 @@ func TestGetAllocatableModelings(t *testing.T) {
}

func TestClusterStatusController_updateStatusIfNeeded(t *testing.T) {
t.Run("cluster is in client", func(t *testing.T) {
cluster := &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster1",
Namespace: "karmada",
},
Status: clusterv1alpha1.ClusterStatus{
KubernetesVersion: "v1",
},
Spec: clusterv1alpha1.ClusterSpec{
ResourceModels: []clusterv1alpha1.ResourceModel{
{
Grade: 0,
Ranges: []clusterv1alpha1.ResourceModelRange{
{
Name: clusterv1alpha1.ResourceCPU,
Min: *resource.NewMilliQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(1, resource.DecimalSI),
},
{
Name: clusterv1alpha1.ResourceMemory,
Min: *resource.NewMilliQuantity(0, resource.DecimalSI),
Max: *resource.NewQuantity(1024, resource.DecimalSI),
},
},
},
{
Grade: 1,
Ranges: []clusterv1alpha1.ResourceModelRange{
{
Name: clusterv1alpha1.ResourceCPU,
Min: *resource.NewMilliQuantity(1, resource.DecimalSI),
Max: *resource.NewQuantity(2, resource.DecimalSI),
},
{
Name: clusterv1alpha1.ResourceMemory,
Min: *resource.NewMilliQuantity(1024, resource.DecimalSI),
Max: *resource.NewQuantity(1024*2, resource.DecimalSI),
},
},
},
},
},
}

currentClusterStatus := clusterv1alpha1.ClusterStatus{
KubernetesVersion: "v2",
}

c := &ClusterStatusController{
Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(
cluster,
).Build(),
GenericInformerManager: genericmanager.GetInstance(),
TypedInformerManager: typedmanager.GetInstance(),
ClusterClientOption: &util.ClientOption{
QPS: 5,
Burst: 10,
},
ClusterClientSetFunc: util.NewClusterClientSet,
}

actual, err := c.updateStatusIfNeeded(cluster, currentClusterStatus)
assert.Equal(t, controllerruntime.Result{}, actual)
assert.Empty(t, err, "updateStatusIfNeeded returns error")
})

t.Run("cluster isn't in client", func(t *testing.T) {
cluster := &clusterv1alpha1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Expand Down
10 changes: 0 additions & 10 deletions pkg/controllers/status/work_status_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -611,16 +611,6 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
workWithRigntNS: false,
expectedError: false,
},
{
name: "set DeletionTimestamp in work",
obj: newPodObj("karmada-es-cluster"),
pod: newPod(workNs, workName),
raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
controllerWithoutInformer: true,
workWithRigntNS: true,
workWithDeletionTimestamp: true,
expectedError: false,
},
{
name: "failed to getRawManifest, wrong Manifests in work",
obj: newPodObj("karmada-es-cluster"),
Expand Down

0 comments on commit 01e788c

Please sign in to comment.