diff --git a/api/v1alpha1/lvmcluster_test.go b/api/v1alpha1/lvmcluster_test.go
new file mode 100644
index 000000000..f14c3e0ea
--- /dev/null
+++ b/api/v1alpha1/lvmcluster_test.go
@@ -0,0 +1,137 @@
+package v1alpha1
+
+import (
+    "errors"
+    "fmt"
+    "hash/fnv"
+
+    . "github.com/onsi/ginkgo/v2"
+    . "github.com/onsi/gomega"
+
+    "github.com/openshift/lvm-operator/pkg/cluster"
+
+    v1 "k8s.io/api/core/v1"
+    k8serrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func generateUniqueNameForTestCase(ctx SpecContext) string {
+    GinkgoHelper()
+    hash := fnv.New32()
+    _, err := hash.Write([]byte(ctx.SpecReport().LeafNodeText))
+    Expect(err).ToNot(HaveOccurred())
+    name := fmt.Sprintf("test-%v", hash.Sum32())
+    By(fmt.Sprintf("Test Case %q mapped to Unique Name %q", ctx.SpecReport().LeafNodeText, name))
+    return name
+}
+
+var _ = Describe("webhook acceptance tests", func() {
+    defaultClusterTemplate := &LVMCluster{
+        Spec: LVMClusterSpec{
+            Storage: Storage{
+                DeviceClasses: []DeviceClass{{
+                    Name: "test-device-class",
+                    ThinPoolConfig: &ThinPoolConfig{
+                        Name:               "thin-pool-1",
+                        SizePercent:        90,
+                        OverprovisionRatio: 10,
+                    },
+                    Default:        true,
+                    FilesystemType: "xfs",
+                }},
+            },
+        },
+    }
+
+    It("minimum viable configuration", func(ctx SpecContext) {
+        generatedName := generateUniqueNameForTestCase(ctx)
+        GinkgoT().Setenv(cluster.OperatorNamespaceEnvVar, generatedName)
+        namespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: generatedName}}
+        Expect(k8sClient.Create(ctx, namespace)).To(Succeed())
+        DeferCleanup(func(ctx SpecContext) {
+            Expect(k8sClient.Delete(ctx, namespace)).To(Succeed())
+        })
+
+        resource := defaultClusterTemplate.DeepCopy()
+        resource.SetName(generatedName)
+        resource.SetNamespace(namespace.GetName())
+
+        Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+        Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
+    })
+
+    It("duplicate LVMClusters get rejected", func(ctx SpecContext) {
+        generatedName := generateUniqueNameForTestCase(ctx)
+        GinkgoT().Setenv(cluster.OperatorNamespaceEnvVar, generatedName)
+        namespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: generatedName}}
+        Expect(k8sClient.Create(ctx, namespace)).To(Succeed())
+        DeferCleanup(func(ctx SpecContext) {
+            Expect(k8sClient.Delete(ctx, namespace)).To(Succeed())
+        })
+
+        resource := defaultClusterTemplate.DeepCopy()
+        resource.SetName(generatedName)
+        resource.SetNamespace(namespace.GetName())
+
+        Expect(k8sClient.Create(ctx, resource)).To(Succeed())
+
+        duplicate := resource.DeepCopy()
+        duplicate.SetName(fmt.Sprintf("%s-dupe", duplicate.GetName()))
+
+        err := k8sClient.Create(ctx, duplicate)
+        Expect(err).To(HaveOccurred())
+        Expect(err).To(Satisfy(k8serrors.IsForbidden))
+
+        statusError := &k8serrors.StatusError{}
+        Expect(errors.As(err, &statusError)).To(BeTrue())
+        Expect(statusError.Status().Message).To(ContainSubstring(ErrDuplicateLVMCluster.Error()))
+
+        Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
+    })
+
+    It("namespace cannot be looked up via ENV", func(ctx SpecContext) {
+        generatedName := generateUniqueNameForTestCase(ctx)
+        inacceptableNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: generatedName}}
+        Expect(k8sClient.Create(ctx, inacceptableNamespace)).To(Succeed())
+        DeferCleanup(func(ctx SpecContext) {
+            Expect(k8sClient.Delete(ctx, inacceptableNamespace)).To(Succeed())
+        })
+
+        resource := defaultClusterTemplate.DeepCopy()
+        resource.SetName(generatedName)
+        resource.SetNamespace(inacceptableNamespace.GetName())
+
+        err := k8sClient.Create(ctx, resource)
+        Expect(err).To(HaveOccurred())
+        Expect(err).To(Satisfy(k8serrors.IsForbidden))
+
+        statusError := &k8serrors.StatusError{}
+        Expect(errors.As(err, &statusError)).To(BeTrue())
+        Expect(statusError.Status().Message).To(ContainSubstring(
+            fmt.Sprintf("%s not found", cluster.OperatorNamespaceEnvVar)))
+    })
+
+    It("invalid namespace gets rejected", func(ctx SpecContext) {
+        acceptableNamespace := "openshift-storage"
+        GinkgoT().Setenv(cluster.OperatorNamespaceEnvVar, acceptableNamespace)
+        generatedName := generateUniqueNameForTestCase(ctx)
+        inacceptableNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: generatedName}}
+        Expect(k8sClient.Create(ctx, inacceptableNamespace)).To(Succeed())
+        DeferCleanup(func(ctx SpecContext) {
+            Expect(k8sClient.Delete(ctx, inacceptableNamespace)).To(Succeed())
+        })
+
+        resource := defaultClusterTemplate.DeepCopy()
+        resource.SetName(generatedName)
+        resource.SetNamespace(inacceptableNamespace.GetName())
+
+        err := k8sClient.Create(ctx, resource)
+        Expect(err).To(HaveOccurred())
+        Expect(err).To(Satisfy(k8serrors.IsForbidden))
+
+        statusError := &k8serrors.StatusError{}
+        Expect(errors.As(err, &statusError)).To(BeTrue())
+        Expect(statusError.Status().Message).To(ContainSubstring(ErrInvalidNamespace.Error()))
+    })
+
+})
diff --git a/api/v1alpha1/lvmcluster_webhook.go b/api/v1alpha1/lvmcluster_webhook.go
index ad6a9fd4b..928d4d6c5 100644
--- a/api/v1alpha1/lvmcluster_webhook.go
+++ b/api/v1alpha1/lvmcluster_webhook.go
@@ -17,11 +17,16 @@ limitations under the License.
 package v1alpha1
 
 import (
+    "context"
+    "errors"
     "fmt"
     "strings"
 
+    "github.com/openshift/lvm-operator/pkg/cluster"
+
     "k8s.io/apimachinery/pkg/runtime"
     ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/client"
     logf "sigs.k8s.io/controller-runtime/pkg/log"
     "sigs.k8s.io/controller-runtime/pkg/webhook"
     "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -30,11 +35,18 @@ import (
 // log is for logging in this package.
 var lvmclusterlog = logf.Log.WithName("lvmcluster-webhook")
 
-var _ webhook.Validator = &LVMCluster{}
+type lvmClusterValidator struct {
+    client.Client
+}
+
+var _ webhook.CustomValidator = &lvmClusterValidator{}
 
 var (
-    ErrDeviceClassNotFound  = fmt.Errorf("DeviceClass not found in the LVMCluster")
-    ErrThinPoolConfigNotSet = fmt.Errorf("ThinPoolConfig is not set for the DeviceClass")
+    ErrDeviceClassNotFound  = errors.New("DeviceClass not found in the LVMCluster")
+    ErrThinPoolConfigNotSet = errors.New("ThinPoolConfig is not set for the DeviceClass")
+    ErrInvalidNamespace     = errors.New("invalid namespace was supplied")
+    ErrNoValidLVMCluster    = errors.New("object passed to lvmClusterValidator is not LVMCluster")
+    ErrDuplicateLVMCluster  = errors.New("duplicate LVMClusters are not allowed, remove the old LVMCluster or work with the existing instance")
 )
 
 //+kubebuilder:webhook:path=/validate-lvm-topolvm-io-v1alpha1-lvmcluster,mutating=false,failurePolicy=fail,sideEffects=None,groups=lvm.topolvm.io,resources=lvmclusters,verbs=create;update,versions=v1alpha1,name=vlvmcluster.kb.io,admissionReviewVersions=v1
@@ -42,36 +54,59 @@ var (
 func (l *LVMCluster) SetupWebhookWithManager(mgr ctrl.Manager) error {
     return ctrl.NewWebhookManagedBy(mgr).
         For(l).
+        WithValidator(&lvmClusterValidator{Client: mgr.GetClient()}).
         Complete()
 }
 
 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type
-func (l *LVMCluster) ValidateCreate() (admission.Warnings, error) {
-    lvmclusterlog.Info("validate create", "name", l.Name)
+func (v *lvmClusterValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
+    l, ok := obj.(*LVMCluster)
+    if !ok {
+        return nil, ErrNoValidLVMCluster
+    }
+
     warnings := admission.Warnings{}
+    lvmclusterlog.Info("validate create", "name", l.Name)
+
+    if namespace, err := cluster.GetOperatorNamespace(); err != nil {
+        return warnings, fmt.Errorf("could not verify namespace of lvmcluster: %w", err)
+    } else if namespace != l.GetNamespace() {
+        return warnings, fmt.Errorf(
+            "creating LVMCluster is only supported within namespace %q: %w",
+            namespace, ErrInvalidNamespace,
+        )
+    }
+
+    existing := &LVMClusterList{}
+    if err := v.List(ctx, existing, &client.ListOptions{Limit: 1, Namespace: l.GetNamespace()}); err != nil {
+        return warnings, fmt.Errorf("could not verify that LVMCluster was not already created %w", err)
+    } else if len(existing.Items) > 0 {
+        return warnings, fmt.Errorf("LVMCluster exists at %q: %w",
+            client.ObjectKeyFromObject(&existing.Items[0]), ErrDuplicateLVMCluster)
+    }
 
-    deviceClassWarnings, err := l.verifyDeviceClass()
+    deviceClassWarnings, err := v.verifyDeviceClass(l)
     warnings = append(warnings, deviceClassWarnings...)
     if err != nil {
         return warnings, err
     }
 
-    err = l.verifyPathsAreNotEmpty()
+    err = v.verifyPathsAreNotEmpty(l)
     if err != nil {
         return warnings, err
     }
 
-    err = l.verifyAbsolutePath()
+    err = v.verifyAbsolutePath(l)
     if err != nil {
         return warnings, err
     }
 
-    err = l.verifyNoDeviceOverlap()
+    err = v.verifyNoDeviceOverlap(l)
     if err != nil {
         return warnings, err
     }
 
-    err = l.verifyFstype()
+    err = v.verifyFstype(l)
     if err != nil {
         return warnings, err
     }
@@ -80,32 +115,36 @@ func (l *LVMCluster) ValidateCreate() (admission.Warnings, error) {
 }
 
 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
-func (l *LVMCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+func (v *lvmClusterValidator) ValidateUpdate(ctx context.Context, old, new runtime.Object) (admission.Warnings, error) {
+    l, ok := new.(*LVMCluster)
+    if !ok {
+        return nil, ErrNoValidLVMCluster
+    }
     lvmclusterlog.Info("validate update", "name", l.Name)
     warnings := admission.Warnings{}
 
-    deviceClassWarnings, err := l.verifyDeviceClass()
+    deviceClassWarnings, err := v.verifyDeviceClass(l)
     warnings = append(warnings, deviceClassWarnings...)
     if err != nil {
         return warnings, err
     }
 
-    err = l.verifyPathsAreNotEmpty()
+    err = v.verifyPathsAreNotEmpty(l)
     if err != nil {
         return warnings, err
     }
 
-    err = l.verifyAbsolutePath()
+    err = v.verifyAbsolutePath(l)
     if err != nil {
         return warnings, err
     }
 
-    err = l.verifyNoDeviceOverlap()
+    err = v.verifyNoDeviceOverlap(l)
     if err != nil {
         return warnings, err
     }
 
-    err = l.verifyFstype()
+    err = v.verifyFstype(l)
     if err != nil {
         return warnings, err
     }
@@ -120,7 +159,7 @@ func (l *LVMCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, err
         var newDevices, newOptionalDevices, oldDevices, oldOptionalDevices []string
 
         newThinPoolConfig = deviceClass.ThinPoolConfig
-        oldThinPoolConfig, err = oldLVMCluster.getThinPoolsConfigOfDeviceClass(deviceClass.Name)
+        oldThinPoolConfig, err = v.getThinPoolsConfigOfDeviceClass(oldLVMCluster, deviceClass.Name)
 
         if (newThinPoolConfig != nil && oldThinPoolConfig == nil && err != ErrDeviceClassNotFound) ||
             (newThinPoolConfig == nil && oldThinPoolConfig != nil) {
@@ -142,7 +181,7 @@ func (l *LVMCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, err
             newOptionalDevices = deviceClass.DeviceSelector.OptionalPaths
         }
 
-        oldDevices, oldOptionalDevices, err = oldLVMCluster.getPathsOfDeviceClass(deviceClass.Name)
+        oldDevices, oldOptionalDevices, err = v.getPathsOfDeviceClass(oldLVMCluster, deviceClass.Name)
 
         // Is this a new device class?
         if err == ErrDeviceClassNotFound {
@@ -195,13 +234,18 @@ func validateDevicePathsStillExist(old, new []string) error {
 }
 
 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type
-func (l *LVMCluster) ValidateDelete() (admission.Warnings, error) {
+func (v *lvmClusterValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) {
+    l, ok := obj.(*LVMCluster)
+    if !ok {
+        return nil, ErrNoValidLVMCluster
+    }
+
     lvmclusterlog.Info("validate delete", "name", l.Name)
 
     return []string{}, nil
 }
 
-func (l *LVMCluster) verifyDeviceClass() (admission.Warnings, error) {
+func (v *lvmClusterValidator) verifyDeviceClass(l *LVMCluster) (admission.Warnings, error) {
     deviceClasses := l.Spec.Storage.DeviceClasses
     if len(deviceClasses) < 1 {
         return nil, fmt.Errorf("at least one deviceClass is required")
@@ -222,7 +266,7 @@ func (l *LVMCluster) verifyDeviceClass() (admission.Warnings, error) {
     return nil, nil
 }
 
-func (l *LVMCluster) verifyPathsAreNotEmpty() error {
+func (v *lvmClusterValidator) verifyPathsAreNotEmpty(l *LVMCluster) error {
     var deviceClassesWithoutPaths []string
 
     for _, deviceClass := range l.Spec.Storage.DeviceClasses {
@@ -241,8 +285,7 @@ func (l *LVMCluster) verifyPathsAreNotEmpty() error {
     return nil
 }
 
-func (l *LVMCluster) verifyAbsolutePath() error {
-
+func (v *lvmClusterValidator) verifyAbsolutePath(l *LVMCluster) error {
     for _, deviceClass := range l.Spec.Storage.DeviceClasses {
         if deviceClass.DeviceSelector != nil {
             for _, path := range deviceClass.DeviceSelector.Paths {
@@ -262,7 +305,7 @@ func (l *LVMCluster) verifyAbsolutePath() error {
     return nil
 }
 
-func (l *LVMCluster) verifyNoDeviceOverlap() error {
+func (v *lvmClusterValidator) verifyNoDeviceOverlap(l *LVMCluster) error {
     // make sure no device overlap with another VGs
     // use map to find the duplicate entries for paths
 
@@ -327,7 +370,7 @@ func (l *LVMCluster) verifyNoDeviceOverlap() error {
     return nil
 }
 
-func (l *LVMCluster) getPathsOfDeviceClass(deviceClassName string) (required []string, optional []string, err error) {
+func (v *lvmClusterValidator) getPathsOfDeviceClass(l *LVMCluster, deviceClassName string) (required []string, optional []string, err error) {
     required, optional, err = []string{}, []string{}, nil
     for _, deviceClass := range l.Spec.Storage.DeviceClasses {
         if deviceClass.Name == deviceClassName {
@@ -344,7 +387,7 @@ func (l *LVMCluster) getPathsOfDeviceClass(deviceClassName string) (required []s
     return
 }
 
-func (l *LVMCluster) getThinPoolsConfigOfDeviceClass(deviceClassName string) (*ThinPoolConfig, error) {
+func (v *lvmClusterValidator) getThinPoolsConfigOfDeviceClass(l *LVMCluster, deviceClassName string) (*ThinPoolConfig, error) {
 
     for _, deviceClass := range l.Spec.Storage.DeviceClasses {
         if deviceClass.Name == deviceClassName {
@@ -358,7 +401,7 @@ func (l *LVMCluster) getThinPoolsConfigOfDeviceClass(deviceClassName string) (*T
     return nil, ErrDeviceClassNotFound
 }
 
-func (l *LVMCluster) verifyFstype() error {
+func (v *lvmClusterValidator) verifyFstype(l *LVMCluster) error {
     for _, deviceClass := range l.Spec.Storage.DeviceClasses {
         if deviceClass.FilesystemType != FilesystemTypeExt4 && deviceClass.FilesystemType != FilesystemTypeXFS {
             return fmt.Errorf("fstype '%s' is not a supported filesystem type", deviceClass.FilesystemType)
diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go
index d677650da..0adf9bb92 100644
--- a/api/v1alpha1/webhook_suite_test.go
+++ b/api/v1alpha1/webhook_suite_test.go
@@ -27,12 +27,12 @@ import (
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
 
+    "k8s.io/client-go/kubernetes/scheme"
     metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
     "sigs.k8s.io/controller-runtime/pkg/webhook"
 
     admissionv1beta1 "k8s.io/api/admission/v1beta1"
-    //+kubebuilder:scaffold:imports
-    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/client-go/rest"
     ctrl "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client"
 
@@ -75,23 +75,22 @@ var _ = BeforeSuite(func() {
     Expect(err).NotTo(HaveOccurred())
     Expect(cfg).NotTo(BeNil())
 
-    scheme := runtime.NewScheme()
-    err = AddToScheme(scheme)
+    err = AddToScheme(scheme.Scheme)
     Expect(err).NotTo(HaveOccurred())
 
-    err = admissionv1beta1.AddToScheme(scheme)
+    err = admissionv1beta1.AddToScheme(scheme.Scheme)
     Expect(err).NotTo(HaveOccurred())
 
     //+kubebuilder:scaffold:scheme
 
-    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
+    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
     Expect(err).NotTo(HaveOccurred())
     Expect(k8sClient).NotTo(BeNil())
 
     // start webhook server using Manager
     webhookInstallOptions := &testEnv.WebhookInstallOptions
     mgr, err := ctrl.NewManager(cfg, ctrl.Options{
-        Scheme: scheme,
+        Scheme: scheme.Scheme,
         Metrics: metricsserver.Options{
             BindAddress: "0",
         },
diff --git a/main.go b/main.go
index bc68acddd..ffeb35ce6 100644
--- a/main.go
+++ b/main.go
@@ -19,13 +19,13 @@ package main
 import (
     "context"
     "flag"
-    "fmt"
     "os"
 
     configv1 "github.com/openshift/api/config/v1"
     secv1 "github.com/openshift/api/security/v1"
     "github.com/openshift/lvm-operator/controllers/node"
     "k8s.io/apimachinery/pkg/api/meta"
+    "sigs.k8s.io/controller-runtime/pkg/cache"
 
     // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
     // to ensure that exec-entrypoint and run can make use of them.
@@ -56,9 +56,8 @@ import (
 )
 
 var (
-    scheme                  = runtime.NewScheme()
-    setupLog                = ctrl.Log.WithName("setup")
-    operatorNamespaceEnvVar = "POD_NAMESPACE"
+    scheme   = runtime.NewScheme()
+    setupLog = ctrl.Log.WithName("setup")
 )
 
 func init() {
@@ -90,7 +89,7 @@ func main() {
     ctrl.SetLogger(logr)
     klog.SetLogger(logr)
 
-    operatorNamespace, err := getOperatorNamespace()
+    operatorNamespace, err := cluster.GetOperatorNamespace()
     if err != nil {
         setupLog.Error(err, "unable to get operatorNamespace"+
             "Exiting")
@@ -145,6 +144,9 @@ func main() {
         WebhookServer: &webhook.DefaultServer{Options: webhook.Options{
             Port: 9443,
         }},
+        Cache: cache.Options{
+            DefaultNamespaces: map[string]cache.Config{operatorNamespace: {}},
+        },
         HealthProbeBindAddress:              probeAddr,
         LeaderElectionResourceLockInterface: le.Lock,
         LeaderElection:                      !leaderElectionConfig.Disable,
@@ -217,15 +219,3 @@ func main() {
         os.Exit(1)
     }
 }
-
-// getOperatorNamespace returns the Namespace the operator should be watching for changes
-func getOperatorNamespace() (string, error) {
-    // The env variable POD_NAMESPACE which specifies the Namespace the pod is running in
-    // and hence will watch.
-
-    ns, found := os.LookupEnv(operatorNamespaceEnvVar)
-    if !found {
-        return "", fmt.Errorf("%s not found", operatorNamespaceEnvVar)
-    }
-    return ns, nil
-}
diff --git a/pkg/cluster/namespace.go b/pkg/cluster/namespace.go
new file mode 100644
index 000000000..8a5521452
--- /dev/null
+++ b/pkg/cluster/namespace.go
@@ -0,0 +1,20 @@
+package cluster
+
+import (
+    "fmt"
+    "os"
+)
+
+const OperatorNamespaceEnvVar = "POD_NAMESPACE"
+
+// GetOperatorNamespace returns the Namespace the operator should be watching for changes
+func GetOperatorNamespace() (string, error) {
+    // The env variable POD_NAMESPACE which specifies the Namespace the pod is running in
+    // and hence will watch.
+
+    ns, found := os.LookupEnv(OperatorNamespaceEnvVar)
+    if !found {
+        return "", fmt.Errorf("%s not found", OperatorNamespaceEnvVar)
+    }
+    return ns, nil
+}