Skip to content

Commit

Permalink
fix: unify deletion logic and fix controller refs
Browse files — browse the repository at this point in the history
  • Loading branch information
jakobmoellerdev committed Aug 30, 2023
1 parent 8ddc847 commit 252f146
Show file tree
Hide file tree
Showing 9 changed files with 162 additions and 202 deletions.
52 changes: 29 additions & 23 deletions controllers/lvm_volumegroup.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ package controllers

import (
"context"
"errors"
"fmt"

lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
Expand Down Expand Up @@ -72,40 +71,48 @@ func (c lvmVG) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCl
}

func (c lvmVG) ensureDeleted(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error {
logger := log.FromContext(ctx).WithValues("topolvmNode", c.getName())
logger := log.FromContext(ctx).WithValues("resourceManager", c.getName())
vgcrs := lvmVolumeGroups(r.Namespace, lvmCluster.Spec.Storage.DeviceClasses)
allVGsDeleted := true

var volumeGroupsPendingInStatus []string

for _, volumeGroup := range vgcrs {
if err := r.Client.Get(ctx, client.ObjectKeyFromObject(volumeGroup), volumeGroup); err != nil {
vgName := client.ObjectKeyFromObject(volumeGroup)
logger := logger.WithValues("LVMVolumeGroup", volumeGroup.GetName())

if err := r.Client.Get(ctx, vgName, volumeGroup); err != nil {
return client.IgnoreNotFound(err)
}

// if not deleted, initiate deletion
// if not marked for deletion, mark now.
// The controller reference will usually propagate all deletion timestamps so this will
// only occur if the propagation from the API server was delayed.
if volumeGroup.GetDeletionTimestamp().IsZero() {
if err := r.Client.Delete(ctx, volumeGroup); err != nil {
return fmt.Errorf("failed to delete LVMVolumeGroup %s: %w", volumeGroup.GetName(), err)
}
logger.Info("initiated LVMVolumeGroup deletion", "name", volumeGroup.Name)
allVGsDeleted = false
} else {
// Has the VG been cleaned up on all hosts?
exists := doesVGExistOnHosts(volumeGroup.Name, lvmCluster)
if !exists {
// Remove finalizer
if update := cutil.RemoveFinalizer(volumeGroup, lvmvgFinalizer); update {
if err := r.Client.Update(ctx, volumeGroup); err != nil {
return fmt.Errorf("failed to remove finalizer from LVMVolumeGroup")
}
}
} else {
allVGsDeleted = false
}

// Has the VG been cleaned up on all hosts?
if doesVGExistInDeviceClassStatus(volumeGroup.Name, lvmCluster) {
volumeGroupsPendingInStatus = append(volumeGroupsPendingInStatus, vgName.String())
continue
}

// Remove finalizer
if update := cutil.RemoveFinalizer(volumeGroup, lvmvgFinalizer); update {
if err := r.Client.Update(ctx, volumeGroup); err != nil {
return fmt.Errorf("failed to remove finalizer from LVMVolumeGroup %s: %w", volumeGroup.GetName(), err)
}
}

logger.Info("initiated LVMVolumeGroup deletion")
volumeGroupsPendingInStatus = append(volumeGroupsPendingInStatus, vgName.String())
}

if !allVGsDeleted {
return errors.New("waiting for all VGs to be deleted")
if len(volumeGroupsPendingInStatus) > 0 {
return fmt.Errorf("waiting for LVMVolumeGroup's to be removed from nodestatus of %s: %v",
client.ObjectKeyFromObject(lvmCluster), volumeGroupsPendingInStatus)
}

return nil
Expand Down Expand Up @@ -136,8 +143,7 @@ func lvmVolumeGroups(namespace string, deviceClasses []lvmv1alpha1.DeviceClass)
return lvmVolumeGroups
}

func doesVGExistOnHosts(volumeGroup string, instance *lvmv1alpha1.LVMCluster) bool {

func doesVGExistInDeviceClassStatus(volumeGroup string, instance *lvmv1alpha1.LVMCluster) bool {
dcStatuses := instance.Status.DeviceClassStatuses
for _, dc := range dcStatuses {
if dc.Name == volumeGroup {
Expand Down
174 changes: 89 additions & 85 deletions controllers/lvmcluster_controller_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
)

Expand Down Expand Up @@ -64,6 +65,13 @@ var _ = Describe("LVMCluster controller", func() {
},
}

// this is a custom matcher that verifies for a correct owner-ref set with LVMCluster
containLVMClusterOwnerRefField := ContainElement(SatisfyAll(
HaveField("Name", lvmClusterIn.Name),
HaveField("BlockOwnerDeletion", pointer.Bool(true)),
HaveField("Controller", pointer.Bool(true)),
))

nodeIn := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: testNodeName,
Expand Down Expand Up @@ -114,6 +122,9 @@ var _ = Describe("LVMCluster controller", func() {
scOut := &storagev1.StorageClass{}

Context("Reconciliation on creating an LVMCluster CR", func() {
SetDefaultEventuallyTimeout(timeout)
SetDefaultEventuallyPollingInterval(interval)

It("should reconcile LVMCluster CR creation, ", func() {
By("verifying CR status on reconciliation")
// create node as it should be present
Expand All @@ -129,135 +140,128 @@ var _ = Describe("LVMCluster controller", func() {
// lvmcluster controller expecting this to be present to set the status properly
Expect(k8sClient.Create(ctx, lvmVolumeGroupNodeStatusIn)).Should(Succeed())

// placeholder to check CR status.Ready field to be true
By("verifying LVMCluster .Status.Ready is true")
Eventually(func() bool {
err := k8sClient.Get(ctx, lvmClusterName, lvmClusterOut)
if err != nil {
if err := k8sClient.Get(ctx, lvmClusterName, lvmClusterOut); err != nil {
return false
}
return lvmClusterOut.Status.Ready
}, timeout, interval).Should(Equal(true))
}).Should(BeTrue())

// placeholder to check CR status.State field to be Ready
By("verifying LVMCluster .Status.State is Ready")
Eventually(func() bool {
err := k8sClient.Get(ctx, lvmClusterName, lvmClusterOut)
if err != nil {
if err := k8sClient.Get(ctx, lvmClusterName, lvmClusterOut); err != nil {
return false
}
return lvmClusterOut.Status.State == lvmv1alpha1.LVMStatusReady
}, timeout, interval).Should(BeTrue())
}).Should(BeTrue())

By("confirming presence of CSIDriver resource")
Eventually(func() bool {
err := k8sClient.Get(ctx, csiDriverName, csiDriverOut)
return err == nil
}, timeout, interval).Should(BeTrue())
By("confirming presence of CSIDriver")
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, csiDriverName, csiDriverOut)
}).WithContext(ctx).Should(Succeed())

By("confirming presence of VgManager resource")
Eventually(func() bool {
err := k8sClient.Get(ctx, vgManagerNamespacedName, vgManagerDaemonset)
return err == nil
}, timeout, interval).Should(BeTrue())
By("confirming presence of vg-manager")
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, vgManagerNamespacedName, vgManagerDaemonset)
}).WithContext(ctx).Should(Succeed())

By("confirming presence of Topolvm Controller deployment")
Eventually(func() bool {
err := k8sClient.Get(ctx, controllerName, controllerOut)
return err == nil
}, timeout, interval).Should(BeTrue())
By("confirming presence of TopoLVM Controller Deployment")
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, controllerName, controllerOut)
}).WithContext(ctx).Should(Succeed())

By("confirming the existence of CSI Node resource")
Eventually(func() bool {
err := k8sClient.Get(ctx, csiNodeName, csiNodeOut)
return err == nil
}, timeout, interval).Should(BeTrue())
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, csiNodeName, csiNodeOut)
}).WithContext(ctx).Should(Succeed())

By("confirming the existence of LVMVolumeGroup resource")
Eventually(func() bool {
err := k8sClient.Get(ctx, lvmVolumeGroupName, lvmVolumeGroupOut)
return err == nil
}, timeout, interval).Should(BeTrue())
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, lvmVolumeGroupName, lvmVolumeGroupOut)
}).WithContext(ctx).Should(Succeed())

By("confirming creation of TopolvmStorageClasses")
for _, scName := range scNames {
Eventually(func() bool {
err := k8sClient.Get(ctx, scName, scOut)
return err == nil
}, timeout, interval).Should(BeTrue())
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, scName, scOut)
}).WithContext(ctx).Should(Succeed())
scOut = &storagev1.StorageClass{}
}
})
})

Context("Reconciliation on deleting the LVMCluster CR", func() {
It("should reconcile LVMCluster CR deletion ", func() {
By("confirming absence of lvm cluster CR and deletion of operator created resources")
It("should reconcile LVMCluster CR deletion ", func(ctx context.Context) {

// delete lvmVolumeGroupNodeStatus as it should be deleted by vgmanager
// and if it is present lvmcluster reconciler takes it as vg is present on node

// we will now remove the node which will cause the LVM cluster status to also lose that vg
By("confirming absence of lvm cluster CR and deletion of operator created resources")
Expect(k8sClient.Delete(ctx, nodeIn)).Should(Succeed())
// deletion of LVMCluster CR and thus also the NodeStatus through the removal controller
Eventually(func() bool {
err := k8sClient.Get(ctx, client.ObjectKeyFromObject(lvmVolumeGroupNodeStatusIn),
&lvmv1alpha1.LVMVolumeGroupNodeStatus{})
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, client.ObjectKeyFromObject(lvmVolumeGroupNodeStatusIn), lvmVolumeGroupNodeStatusIn)
}).WithContext(ctx).Should(Satisfy(errors.IsNotFound))

// deletion of LVMCluster CR
Eventually(func() bool {
err := k8sClient.Delete(ctx, lvmClusterOut)
return err != nil
}, timeout, interval).Should(BeTrue())
By("deleting the LVMClusterCR")
Expect(k8sClient.Delete(ctx, lvmClusterOut)).Should(Succeed())

// auto deletion of CSI Driver resource based on CR deletion
By("confirming absence of CSI Driver Resource")
Eventually(func() bool {
err := k8sClient.Get(ctx, csiDriverName, csiDriverOut)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())

// ensure that VgManager has owner reference of LVMCluster. (envTest does not support garbage collection)
By("confirming VgManager resource has owner reference of LVMCluster resource")
Eventually(func() bool {
err := k8sClient.Get(ctx, vgManagerNamespacedName, vgManagerDaemonset)
return err == nil && vgManagerDaemonset.OwnerReferences[0].Name == lvmClusterIn.Name
}, timeout, interval).Should(BeTrue())

// auto deletion of Topolvm Controller deployment based on CR deletion
By("confirming absence of Topolvm Controller Deployment")
Eventually(func() bool {
err := k8sClient.Get(ctx, controllerName, controllerOut)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())

By("confirming absence of CSI Node Resource")
Eventually(func() bool {
err := k8sClient.Get(ctx, csiNodeName, csiNodeOut)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, csiDriverName, csiDriverOut)
}).WithContext(ctx).Should(Satisfy(errors.IsNotFound))

// envtest does not support garbage collection, so we simulate the deletion
// see https://book.kubebuilder.io/reference/envtest.html?highlight=considerations#testing-considerations
By("confirming vg-manager has owner reference of LVMCluster")
Expect(k8sClient.Get(ctx, vgManagerNamespacedName, vgManagerDaemonset)).Should(Succeed())
Expect(vgManagerDaemonset.OwnerReferences).To(containLVMClusterOwnerRefField)
Expect(k8sClient.Delete(ctx, vgManagerDaemonset)).To(Succeed(), "simulated ownerref cleanup should succeed")
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, vgManagerNamespacedName, vgManagerDaemonset)
}).WithContext(ctx).Should(Satisfy(errors.IsNotFound))

// envtest does not support garbage collection, so we simulate the deletion
// see https://book.kubebuilder.io/reference/envtest.html?highlight=considerations#testing-considerations
By("confirming TopoLVM controller resource has owner reference of LVMCluster")
Expect(k8sClient.Get(ctx, controllerName, controllerOut)).Should(Succeed())
Expect(controllerOut.OwnerReferences).To(containLVMClusterOwnerRefField)
Expect(k8sClient.Delete(ctx, controllerOut)).To(Succeed(), "simulated ownerref cleanup should succeed")
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, controllerName, controllerOut)
}).WithContext(ctx).Should(Satisfy(errors.IsNotFound))

// envtest does not support garbage collection, so we simulate the deletion
// see https://book.kubebuilder.io/reference/envtest.html?highlight=considerations#testing-considerations
By("confirming TopoLVM Node DaemonSet has owner reference of LVMCluster")
Expect(k8sClient.Get(ctx, csiNodeName, csiNodeOut)).Should(Succeed())
Expect(csiNodeOut.OwnerReferences).To(containLVMClusterOwnerRefField)
Expect(k8sClient.Delete(ctx, csiNodeOut)).To(Succeed(), "simulated ownerref cleanup should succeed")
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, csiNodeName, csiNodeOut)
}).WithContext(ctx).Should(Satisfy(errors.IsNotFound))

By("confirming absence of LVMVolumeGroup Resource")
Eventually(func() bool {
err := k8sClient.Get(ctx, lvmVolumeGroupName, lvmVolumeGroupOut)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
// technically we also set ownerrefs on volume groups so we would also need to check,
// but our controller still deletes them (in addition to the ownerrefs)
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, lvmVolumeGroupName, lvmVolumeGroupOut)
}).WithContext(ctx).Should(Satisfy(errors.IsNotFound))

By("confirming absence of TopolvmStorageClasses")
for _, scName := range scNames {
Eventually(func() bool {
err := k8sClient.Get(ctx, scName, scOut)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())
scOut = &storagev1.StorageClass{}
Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, scName, scOut)
}).WithContext(ctx).Should(Satisfy(errors.IsNotFound))
}

By("confirming absence of LVMCluster CR")
Eventually(func() bool {
err := k8sClient.Get(ctx, lvmClusterName, lvmClusterOut)
return errors.IsNotFound(err)
}, timeout, interval).Should(BeTrue())

Eventually(func(ctx context.Context) error {
return k8sClient.Get(ctx, lvmClusterName, lvmClusterOut)
}).WithContext(ctx).Should(Satisfy(errors.IsNotFound))
})
})

Expand Down
17 changes: 13 additions & 4 deletions controllers/scc.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
Expand Down Expand Up @@ -71,12 +72,20 @@ func (c openshiftSccs) ensureDeleted(r *LVMClusterReconciler, ctx context.Contex
logger := log.FromContext(ctx).WithValues("resourceManager", c.getName())
sccs := getAllSCCs(r.Namespace)
for _, scc := range sccs {
name := types.NamespacedName{Name: scName}
logger := logger.WithValues("SecurityContextConstraint", scName)
if err := r.Client.Get(ctx, name, scc); err != nil {
return client.IgnoreNotFound(err)
}

if !scc.GetDeletionTimestamp().IsZero() {
return fmt.Errorf("the SecurityContextConstraint %s is still present, waiting for deletion", scName)
}

if err := r.Delete(ctx, scc); err != nil {
if !errors.IsNotFound(err) {
return fmt.Errorf("failed to delete SecurityContextConstraint %s: %w", scc.GetName(), err)
}
logger.Info("SecurityContextConstraint is already deleted", "SecurityContextConstraint", scc.Name)
return fmt.Errorf("failed to delete SecurityContextConstraint %s: %w", scc.GetName(), err)
}
logger.Info("initiated SecurityContextConstraint deletion")
}
return nil
}
Expand Down
Loading

0 comments on commit 252f146

Please sign in to comment.