diff --git a/internal/controllers/capiprovider_controller.go b/internal/controllers/capiprovider_controller.go
index 12418c3c..fbc2662a 100644
--- a/internal/controllers/capiprovider_controller.go
+++ b/internal/controllers/capiprovider_controller.go
@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	operatorv1 "sigs.k8s.io/cluster-api-operator/api/v1alpha2"
@@ -96,8 +97,9 @@ func (r *CAPIProviderReconciler) patchStatus(ctx context.Context, capiProvider *
 }
 
 // SetupWithManager sets up the controller with the Manager.
-func (r *CAPIProviderReconciler) SetupWithManager(_ context.Context, mgr ctrl.Manager) (err error) {
+func (r *CAPIProviderReconciler) SetupWithManager(_ context.Context, mgr ctrl.Manager, options controller.Options) (err error) {
 	b := ctrl.NewControllerManagedBy(mgr).
+		WithOptions(options).
 		For(&turtlesv1.CAPIProvider{})
 
 	resources := []client.Object{
diff --git a/internal/controllers/capiprovider_controller_test.go b/internal/controllers/capiprovider_controller_test.go
index aa9cd4cd..c9458fd7 100644
--- a/internal/controllers/capiprovider_controller_test.go
+++ b/internal/controllers/capiprovider_controller_test.go
@@ -27,6 +27,7 @@ import (
 	operatorv1 "sigs.k8s.io/cluster-api-operator/api/v1alpha2"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
 	. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
 )
 
@@ -53,7 +54,7 @@ var _ = Describe("Reconcile CAPIProvider", func() {
 			Scheme: testEnv.GetScheme(),
 		}
 
-		Expect(r.SetupWithManager(ctx, testEnv.Manager)).ToNot(HaveOccurred())
+		Expect(r.SetupWithManager(ctx, testEnv.Manager, controller.Options{})).ToNot(HaveOccurred())
 	})
 
 	It("Should create infrastructure docker provider and secret", func() {
diff --git a/main.go b/main.go
index 669b1263..ccfd2bf3 100644
--- a/main.go
+++ b/main.go
@@ -261,6 +261,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 		RancherClient: rancherClient,
 	}).SetupWithManager(ctx, mgr, controller.Options{
 		MaxConcurrentReconciles: concurrencyNumber,
+		CacheSyncTimeout:        maxDuration,
 	}); err != nil {
 		setupLog.Error(err, "unable to create rancher management v3 cleanup controller")
 		os.Exit(1)
@@ -272,7 +273,10 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 	if err := (&controllers.RancherKubeconfigSecretReconciler{
 		Client:           mgr.GetClient(),
 		WatchFilterValue: watchFilterValue,
-	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: concurrencyNumber}); err != nil {
+	}).SetupWithManager(ctx, mgr, controller.Options{
+		MaxConcurrentReconciles: concurrencyNumber,
+		CacheSyncTimeout:        maxDuration,
+	}); err != nil {
 		setupLog.Error(err, "unable to create Rancher kubeconfig secret controller")
 		os.Exit(1)
 	}
@@ -283,7 +287,10 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 	if err := (&controllers.CAPIProviderReconciler{
 		Client: mgr.GetClient(),
 		Scheme: scheme,
-	}).SetupWithManager(ctx, mgr); err != nil {
+	}).SetupWithManager(ctx, mgr, controller.Options{
+		MaxConcurrentReconciles: concurrencyNumber,
+		CacheSyncTimeout:        maxDuration,
+	}); err != nil {
 		setupLog.Error(err, "unable to create CAPI Provider controller")
 		os.Exit(1)
 	}