diff --git a/pkg/controllers/binding/binding_controller_test.go b/pkg/controllers/binding/binding_controller_test.go
index b66fdf79f575..81d4434a8c60 100644
--- a/pkg/controllers/binding/binding_controller_test.go
+++ b/pkg/controllers/binding/binding_controller_test.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"reflect"
 	"testing"
-	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -75,7 +74,6 @@ func makeFakeRBCByResource(rs *workv1alpha2.ObjectReference) (*ResourceBindingCo
 }
 
 func TestResourceBindingController_Reconcile(t *testing.T) {
-	preTime := metav1.Date(2023, 0, 0, 0, 0, 0, 0, time.UTC)
 	tmpReq := controllerruntime.Request{
 		NamespacedName: types.NamespacedName{
 			Name:      "test-rb",
@@ -95,19 +93,6 @@ func TestResourceBindingController_Reconcile(t *testing.T) {
 			wantErr: false,
 			req:     tmpReq,
 		},
-		{
-			name:    "RB found with deleting",
-			want:    controllerruntime.Result{},
-			wantErr: false,
-			rb: &workv1alpha2.ResourceBinding{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:              "test-rb",
-					Namespace:         "default",
-					DeletionTimestamp: &preTime,
-				},
-			},
-			req: tmpReq,
-		},
 		{
 			name:    "RB found without deleting",
 			want:    controllerruntime.Result{Requeue: true},
diff --git a/pkg/controllers/status/cluster_status_controller_test.go b/pkg/controllers/status/cluster_status_controller_test.go
index e25b8c97ad46..18b0b9e5a2ce 100644
--- a/pkg/controllers/status/cluster_status_controller_test.go
+++ b/pkg/controllers/status/cluster_status_controller_test.go
@@ -46,19 +46,6 @@ func TestClusterStatusController_Reconcile(t *testing.T) {
 			expectedResult: controllerruntime.Result{},
 			expectedError:  false,
 		},
-		{
-			name:        "Cluster found with finalizer",
-			clusterName: "test-cluster",
-			cluster: &clusterv1alpha1.Cluster{
-				ObjectMeta: metav1.ObjectMeta{
-					Finalizers: []string{
-						util.ClusterControllerFinalizer,
-					},
-				},
-			},
-			expectedResult: controllerruntime.Result{},
-			expectedError:  false,
-		},
 		{
 			name:        "Cluster found without finalizer",
 			clusterName: "test-cluster",
@@ -156,8 +143,17 @@ func TestClusterStatusController_syncClusterStatus(t *testing.T) {
 		server := mockServer(http.StatusOK, false)
 		defer server.Close()
 		serverAddress = server.URL
+		cluster := &clusterv1alpha1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{Name: "test"},
+			Spec: clusterv1alpha1.ClusterSpec{
+				APIEndpoint:                 server.URL,
+				SecretRef:                   &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
+				InsecureSkipTLSVerification: true,
+				ProxyURL:                    "http://1.1.1.1",
+			},
+		}
 		c := &ClusterStatusController{
-			Client:                 fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
+			Client:                 fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithStatusSubresource(cluster).Build(),
 			GenericInformerManager: genericmanager.GetInstance(),
 			TypedInformerManager:   typedmanager.GetInstance(),
 			ClusterSuccessThreshold: metav1.Duration{
@@ -177,33 +173,29 @@ func TestClusterStatusController_syncClusterStatus(t *testing.T) {
 			ClusterClientSetFunc:        clusterClientSetFuncWithError,
 			ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
 		}
-
-		cluster := &clusterv1alpha1.Cluster{
-			ObjectMeta: metav1.ObjectMeta{Name: "test"},
-			Spec: clusterv1alpha1.ClusterSpec{
-				APIEndpoint:                 server.URL,
-				SecretRef:                   &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
-				InsecureSkipTLSVerification: true,
-				ProxyURL:                    "http://1.1.1.1",
-			},
-		}
-
 		if err := c.Client.Create(context.Background(), cluster); err != nil {
 			t.Fatalf("Failed to create cluster: %v", err)
 		}
-
 		res, err := c.syncClusterStatus(cluster)
 		expect := controllerruntime.Result{}
 		assert.Equal(t, expect, res)
 		assert.Empty(t, err)
 	})
-
 	t.Run("online is false, readyCondition.Status isn't true", func(t *testing.T) {
 		server := mockServer(http.StatusNotFound, true)
 		defer server.Close()
 		serverAddress = server.URL
+		cluster := &clusterv1alpha1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{Name: "test"},
+			Spec: clusterv1alpha1.ClusterSpec{
+				APIEndpoint:                 server.URL,
+				SecretRef:                   &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
+				InsecureSkipTLSVerification: true,
+				ProxyURL:                    "http://1.1.1.1",
+			},
+		}
 		c := &ClusterStatusController{
-			Client:                 fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
+			Client:                 fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithStatusSubresource(cluster).Build(),
 			GenericInformerManager: genericmanager.GetInstance(),
 			TypedInformerManager:   typedmanager.GetInstance(),
 			ClusterSuccessThreshold: metav1.Duration{
@@ -223,33 +215,30 @@ func TestClusterStatusController_syncClusterStatus(t *testing.T) {
 			ClusterClientSetFunc:        clusterClientSetFunc,
 			ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
 		}
-
-		cluster := &clusterv1alpha1.Cluster{
-			ObjectMeta: metav1.ObjectMeta{Name: "test"},
-			Spec: clusterv1alpha1.ClusterSpec{
-				APIEndpoint:                 server.URL,
-				SecretRef:                   &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
-				InsecureSkipTLSVerification: true,
-				ProxyURL:                    "http://1.1.1.1",
-			},
-		}
-
 		if err := c.Client.Create(context.Background(), cluster); err != nil {
 			t.Fatalf("Failed to create cluster: %v", err)
 		}
-
 		res, err := c.syncClusterStatus(cluster)
 		expect := controllerruntime.Result{}
 		assert.Equal(t, expect, res)
 		assert.Empty(t, err)
 	})
-
 	t.Run("online and healthy is true", func(t *testing.T) {
 		server := mockServer(http.StatusOK, false)
 		defer server.Close()
 		serverAddress = server.URL
+		cluster := &clusterv1alpha1.Cluster{
+			ObjectMeta: metav1.ObjectMeta{Name: "test"},
+			Spec: clusterv1alpha1.ClusterSpec{
+				APIEndpoint:                 server.URL,
+				SecretRef:                   &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
+				InsecureSkipTLSVerification: true,
+				ProxyURL:                    "http://1.1.1.1",
+				SyncMode:                    clusterv1alpha1.Pull,
+			},
+		}
 		c := &ClusterStatusController{
-			Client:                 fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
+			Client:                 fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithStatusSubresource(cluster).Build(),
 			GenericInformerManager: genericmanager.GetInstance(),
 			TypedInformerManager:   typedmanager.GetInstance(),
 			ClusterSuccessThreshold: metav1.Duration{
@@ -269,22 +258,9 @@ func TestClusterStatusController_syncClusterStatus(t *testing.T) {
 			ClusterClientSetFunc:        clusterClientSetFunc,
 			ClusterDynamicClientSetFunc: util.NewClusterDynamicClientSetForAgent,
 		}
-
-		cluster := &clusterv1alpha1.Cluster{
-			ObjectMeta: metav1.ObjectMeta{Name: "test"},
-			Spec: clusterv1alpha1.ClusterSpec{
-				APIEndpoint:                 server.URL,
-				SecretRef:                   &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"},
-				InsecureSkipTLSVerification: true,
-				ProxyURL:                    "http://1.1.1.1",
-				SyncMode:                    clusterv1alpha1.Pull,
-			},
-		}
-
 		if err := c.Client.Create(context.Background(), cluster); err != nil {
 			t.Fatalf("Failed to create cluster: %v", err)
 		}
-
 		res, err := c.syncClusterStatus(cluster)
 		expect := controllerruntime.Result{}
 		assert.Equal(t, expect, res)
@@ -814,73 +790,6 @@ func TestGetAllocatableModelings(t *testing.T) {
 }
 
 func TestClusterStatusController_updateStatusIfNeeded(t *testing.T) {
-	t.Run("cluster is in client", func(t *testing.T) {
-		cluster := &clusterv1alpha1.Cluster{
-			ObjectMeta: metav1.ObjectMeta{
-				Name:      "cluster1",
-				Namespace: "karmada",
-			},
-			Status: clusterv1alpha1.ClusterStatus{
-				KubernetesVersion: "v1",
-			},
-			Spec: clusterv1alpha1.ClusterSpec{
-				ResourceModels: []clusterv1alpha1.ResourceModel{
-					{
-						Grade: 0,
-						Ranges: []clusterv1alpha1.ResourceModelRange{
-							{
-								Name: clusterv1alpha1.ResourceCPU,
-								Min:  *resource.NewMilliQuantity(0, resource.DecimalSI),
-								Max:  *resource.NewQuantity(1, resource.DecimalSI),
-							},
-							{
-								Name: clusterv1alpha1.ResourceMemory,
-								Min:  *resource.NewMilliQuantity(0, resource.DecimalSI),
-								Max:  *resource.NewQuantity(1024, resource.DecimalSI),
-							},
-						},
-					},
-					{
-						Grade: 1,
-						Ranges: []clusterv1alpha1.ResourceModelRange{
-							{
-								Name: clusterv1alpha1.ResourceCPU,
-								Min:  *resource.NewMilliQuantity(1, resource.DecimalSI),
-								Max:  *resource.NewQuantity(2, resource.DecimalSI),
-							},
-							{
-								Name: clusterv1alpha1.ResourceMemory,
-								Min:  *resource.NewMilliQuantity(1024, resource.DecimalSI),
-								Max:  *resource.NewQuantity(1024*2, resource.DecimalSI),
-							},
-						},
-					},
-				},
-			},
-		}
-
-		currentClusterStatus := clusterv1alpha1.ClusterStatus{
-			KubernetesVersion: "v2",
-		}
-
-		c := &ClusterStatusController{
-			Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(
-				cluster,
-			).Build(),
-			GenericInformerManager: genericmanager.GetInstance(),
-			TypedInformerManager:   typedmanager.GetInstance(),
-			ClusterClientOption: &util.ClientOption{
-				QPS:   5,
-				Burst: 10,
-			},
-			ClusterClientSetFunc: util.NewClusterClientSet,
-		}
-
-		actual, err := c.updateStatusIfNeeded(cluster, currentClusterStatus)
-		assert.Equal(t, controllerruntime.Result{}, actual)
-		assert.Empty(t, err, "updateStatusIfNeeded returns error")
-	})
-
 	t.Run("cluster isn't in client", func(t *testing.T) {
 		cluster := &clusterv1alpha1.Cluster{
 			ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/controllers/status/work_status_controller_test.go b/pkg/controllers/status/work_status_controller_test.go
index 116a4506a876..b9e9dc1c1b2a 100644
--- a/pkg/controllers/status/work_status_controller_test.go
+++ b/pkg/controllers/status/work_status_controller_test.go
@@ -611,16 +611,6 @@ func TestWorkStatusController_syncWorkStatus(t *testing.T) {
 			workWithRigntNS:           false,
 			expectedError:             false,
 		},
-		{
-			name:                      "set DeletionTimestamp in work",
-			obj:                       newPodObj("karmada-es-cluster"),
-			pod:                       newPod(workNs, workName),
-			raw:                       []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"pod","namespace":"default"}}`),
-			controllerWithoutInformer: true,
-			workWithRigntNS:           true,
-			workWithDeletionTimestamp: true,
-			expectedError:             false,
-		},
 		{
 			name: "failed to getRawManifest, wrong Manifests in work",
 			obj:  newPodObj("karmada-es-cluster"),
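
Reviewer note (not part of the patch): the switch to WithStatusSubresource(cluster) above reflects the controller-runtime v0.15+ fake client, which, as far as I can tell, only serves Status().Update()/Patch() for objects explicitly registered as having a status subresource; that appears to be why each subtest now builds its cluster object before constructing the client. A minimal standalone sketch of the builder pattern, using a core Pod instead of a Karmada Cluster and purely illustrative names, might look like this:

// sketch.go — hypothetical, self-contained illustration of WithStatusSubresource.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// The object whose status we intend to update through the fake client.
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}

	// Register the object for the status subresource at build time, mirroring
	// the WithStatusSubresource(cluster) calls introduced in the tests above.
	c := fake.NewClientBuilder().
		WithScheme(clientgoscheme.Scheme).
		WithObjects(pod).
		WithStatusSubresource(pod).
		Build()

	// With the registration in place, the status writer accepts a status-only update.
	pod.Status.Phase = corev1.PodRunning
	if err := c.Status().Update(context.Background(), pod); err != nil {
		fmt.Println("status update failed:", err)
		return
	}
	fmt.Println("status update succeeded")
}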