OCPVE-670: fix ownerref alignment and deletion logic on removal #402

Merged
27 changes: 0 additions & 27 deletions controllers/internal/multierror.go

This file was deleted.
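
The deleted helper provided a custom multi-error type; error aggregation in the controller now uses the standard library's errors.Join (Go 1.20+) instead, as shown in the lvmcluster_controller.go hunk further down. A minimal sketch of that pattern, with hypothetical step functions standing in for the per-resource managers:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Collect errors from several reconcile steps instead of failing fast.
	var errs []error
	for _, step := range []func() error{
		func() error { return nil },
		func() error { return errors.New("csiDriver: apply failed") },
		func() error { return errors.New("lvmVG: deletion still pending") },
	} {
		if err := step(); err != nil {
			errs = append(errs, err)
		}
	}

	// errors.Join returns nil for an empty slice; otherwise it wraps every
	// element so errors.Is / errors.As still match the individual errors.
	if joined := errors.Join(errs...); joined != nil {
		fmt.Println(joined) // prints one wrapped error per line
	}
}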

52 changes: 29 additions & 23 deletions controllers/lvm_volumegroup.go
@@ -18,7 +18,6 @@ package controllers

 import (
 	"context"
-	"errors"
 	"fmt"

 	lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
@@ -72,40 +71,48 @@ func (c lvmVG) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error {
 }

 func (c lvmVG) ensureDeleted(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error {
-	logger := log.FromContext(ctx).WithValues("topolvmNode", c.getName())
+	logger := log.FromContext(ctx).WithValues("resourceManager", c.getName())
 	vgcrs := lvmVolumeGroups(r.Namespace, lvmCluster.Spec.Storage.DeviceClasses)
-	allVGsDeleted := true
+
+	var volumeGroupsPendingInStatus []string

 	for _, volumeGroup := range vgcrs {
-		if err := r.Client.Get(ctx, client.ObjectKeyFromObject(volumeGroup), volumeGroup); err != nil {
+		vgName := client.ObjectKeyFromObject(volumeGroup)
+		logger := logger.WithValues("LVMVolumeGroup", volumeGroup.GetName())
+
+		if err := r.Client.Get(ctx, vgName, volumeGroup); err != nil {
 			return client.IgnoreNotFound(err)
 		}

-		// if not deleted, initiate deletion
+		// if not marked for deletion, mark now.
+		// The controller reference will usually propagate all deletion timestamps so this will
+		// only occur if the propagation from the API server was delayed.
 		if volumeGroup.GetDeletionTimestamp().IsZero() {
 			if err := r.Client.Delete(ctx, volumeGroup); err != nil {
 				return fmt.Errorf("failed to delete LVMVolumeGroup %s: %w", volumeGroup.GetName(), err)
 			}
-			logger.Info("initiated LVMVolumeGroup deletion", "name", volumeGroup.Name)
-			allVGsDeleted = false
-		} else {
-			// Has the VG been cleaned up on all hosts?
-			exists := doesVGExistOnHosts(volumeGroup.Name, lvmCluster)
-			if !exists {
-				// Remove finalizer
-				if update := cutil.RemoveFinalizer(volumeGroup, lvmvgFinalizer); update {
-					if err := r.Client.Update(ctx, volumeGroup); err != nil {
-						return fmt.Errorf("failed to remove finalizer from LVMVolumeGroup")
-					}
-				}
-			} else {
-				allVGsDeleted = false
-			}
 		}
+
+		// Has the VG been cleaned up on all hosts?
+		if doesVGExistInDeviceClassStatus(volumeGroup.Name, lvmCluster) {
+			volumeGroupsPendingInStatus = append(volumeGroupsPendingInStatus, vgName.String())
+			continue
+		}
+
+		// Remove finalizer
+		if update := cutil.RemoveFinalizer(volumeGroup, lvmvgFinalizer); update {
+			if err := r.Client.Update(ctx, volumeGroup); err != nil {
+				return fmt.Errorf("failed to remove finalizer from LVMVolumeGroup %s: %w", volumeGroup.GetName(), err)
+			}
+		}
+
+		logger.Info("initiated LVMVolumeGroup deletion")
+		volumeGroupsPendingInStatus = append(volumeGroupsPendingInStatus, vgName.String())
 	}

-	if !allVGsDeleted {
-		return errors.New("waiting for all VGs to be deleted")
+	if len(volumeGroupsPendingInStatus) > 0 {
+		return fmt.Errorf("waiting for LVMVolumeGroup's to be removed from nodestatus of %s: %v",
+			client.ObjectKeyFromObject(lvmCluster), volumeGroupsPendingInStatus)
 	}

 	return nil
@@ -136,8 +143,7 @@ func lvmVolumeGroups(namespace string, deviceClasses []lvmv1alpha1.DeviceClass)
 	return lvmVolumeGroups
 }

-func doesVGExistOnHosts(volumeGroup string, instance *lvmv1alpha1.LVMCluster) bool {
-
+func doesVGExistInDeviceClassStatus(volumeGroup string, instance *lvmv1alpha1.LVMCluster) bool {
 	dcStatuses := instance.Status.DeviceClassStatuses
 	for _, dc := range dcStatuses {
 		if dc.Name == volumeGroup {
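
For context on the finalizer handling above: cutil here presumably aliases sigs.k8s.io/controller-runtime/pkg/controller/controllerutil, whose RemoveFinalizer reports whether the object was actually modified, so an Update is only issued when something changed. A rough, self-contained sketch of that step under that assumption (the helper name is illustrative, not from the PR):

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"
	cutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// removeFinalizerIfPresent drops the finalizer and persists the change only
// when the finalizer was actually set, avoiding no-op writes to the API server.
func removeFinalizerIfPresent(ctx context.Context, c client.Client, obj client.Object, finalizer string) error {
	if updated := cutil.RemoveFinalizer(obj, finalizer); updated {
		if err := c.Update(ctx, obj); err != nil {
			return fmt.Errorf("failed to remove finalizer %s from %s: %w", finalizer, obj.GetName(), err)
		}
	}
	return nil
}
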
10 changes: 5 additions & 5 deletions controllers/lvmcluster_controller.go
@@ -18,19 +18,19 @@ package controllers

 import (
 	"context"
+	"errors"
 	"fmt"
 	"os"
 	"time"

 	configv1 "github.com/openshift/api/config/v1"
 	lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
-	"github.com/openshift/lvm-operator/controllers/internal"
 	"github.com/openshift/lvm-operator/pkg/cluster"

 	topolvmv1 "github.com/topolvm/topolvm/api/v1"

 	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	corev1helper "k8s.io/component-helpers/scheduling/corev1"
@@ -126,7 +126,7 @@ func (r *LVMClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	// Reconcile errors have higher priority than status update errors
 	if reconcileError != nil {
 		return result, reconcileError
-	} else if statusError != nil && !errors.IsNotFound(statusError) {
+	} else if statusError != nil && !k8serrors.IsNotFound(statusError) {
 		return result, fmt.Errorf("failed to update LVMCluster status: %w", statusError)
 	} else {
 		return result, nil
@@ -168,7 +168,7 @@ func (r *LVMClusterReconciler) reconcile(ctx context.Context, instance *lvmv1alpha1.LVMCluster)

 	resources := []resourceManager{
 		&csiDriver{},
-		&topolvmController{r.TopoLVMLeaderElectionPassthrough},
+		&topolvmController{},
 		&topolvmNode{},
 		&vgManager{},
 		&lvmVG{},
@@ -200,7 +200,7 @@ func (r *LVMClusterReconciler) reconcile(ctx context.Context, instance *lvmv1alpha1.LVMCluster)
 	resourceSyncElapsedTime := time.Since(resourceSyncStart)
 	if len(errs) > 0 {
 		return ctrl.Result{}, fmt.Errorf("failed to reconcile resources managed by LVMCluster within %v: %w",
-			resourceSyncElapsedTime, internal.NewMultiError(errs))
+			resourceSyncElapsedTime, errors.Join(errs...))
 	}

 	logger.Info("successfully reconciled LVMCluster", "resourceSyncElapsedTime", resourceSyncElapsedTime)
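
Renaming the apimachinery import to k8serrors frees the plain errors identifier for the standard library, so NotFound checks and errors.Join can coexist in one file. A small illustrative helper (not part of the PR) that combines the two:

import (
	"errors"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
)

// joinIgnoringNotFound drops nil and NotFound errors and joins whatever remains.
func joinIgnoringNotFound(errs ...error) error {
	var remaining []error
	for _, err := range errs {
		if err == nil || k8serrors.IsNotFound(err) {
			continue // the object is already gone; nothing to report
		}
		remaining = append(remaining, err)
	}
	return errors.Join(remaining...) // nil when remaining is empty
}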