diff --git a/.mockery.yaml b/.mockery.yaml index 7ba027962..2baeb8c8d 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -10,3 +10,9 @@ packages: github.com/openshift/lvm-operator/pkg/lvm: interfaces: LVM: + github.com/openshift/lvm-operator/pkg/wipefs: + interfaces: + Wipefs: + github.com/openshift/lvm-operator/pkg/dmsetup: + interfaces: + Dmsetup: diff --git a/api/v1alpha1/lvmcluster_types.go b/api/v1alpha1/lvmcluster_types.go index a8ea461f5..ba022336a 100644 --- a/api/v1alpha1/lvmcluster_types.go +++ b/api/v1alpha1/lvmcluster_types.go @@ -21,9 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - // LVMClusterSpec defines the desired state of LVMCluster type LVMClusterSpec struct { // Important: Run "make" to regenerate code after modifying this file @@ -98,6 +95,7 @@ type DeviceClass struct { } // DeviceSelector specifies the list of criteria that have to match before a device is assigned +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.forceWipeDevicesAndDestroyAllData) || has(self.forceWipeDevicesAndDestroyAllData)", message="ForceWipeDevicesAndDestroyAllData is required once set" type DeviceSelector struct { // MinSize is the minimum size of the device which needs to be included. Defaults to `1Gi` if empty // +optional @@ -114,15 +112,14 @@ type DeviceSelector struct { // We discourage using the device names as they can change over node restarts. // +optional OptionalPaths []string `json:"optionalPaths,omitempty"` -} - -// type DeviceClassConfig struct { -// LVMConfig *LVMConfig `json:"lvmConfig,omitempty"` -// } -// type LVMConfig struct { -// thinProvision bool `json:"thinProvision,omitempty"` -// } + // ForceWipeDevicesAndDestroyAllData runs wipefs to wipe the devices. + // This can lead to data loss. 
Enable this only when you know that the disk + // does not contain any important data. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ForceWipeDevicesAndDestroyAllData is immutable" + ForceWipeDevicesAndDestroyAllData bool `json:"forceWipeDevicesAndDestroyAllData,omitempty"` +} type LVMStateType string diff --git a/bundle/manifests/lvm.topolvm.io_lvmclusters.yaml b/bundle/manifests/lvm.topolvm.io_lvmclusters.yaml index 081ed4a9b..7c62019d5 100644 --- a/bundle/manifests/lvm.topolvm.io_lvmclusters.yaml +++ b/bundle/manifests/lvm.topolvm.io_lvmclusters.yaml @@ -56,6 +56,15 @@ spec: description: DeviceSelector is a set of rules that should match for a device to be included in the LVMCluster properties: + forceWipeDevicesAndDestroyAllData: + description: ForceWipeDevicesAndDestroyAllData runs + wipefs to wipe the devices. This can lead to data + loss. Enable this only when you know that the disk + does not contain any important data. + type: boolean + x-kubernetes-validations: + - message: ForceWipeDevicesAndDestroyAllData is immutable + rule: self == oldSelf optionalPaths: description: A list of device paths which could be chosen for creating Volume Group. 
For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1" @@ -73,6 +82,11 @@ spec: type: string type: array type: object + x-kubernetes-validations: + - message: ForceWipeDevicesAndDestroyAllData is required + once set + rule: '!has(oldSelf.forceWipeDevicesAndDestroyAllData) + || has(self.forceWipeDevicesAndDestroyAllData)' fstype: default: xfs description: FilesystemType sets the filesystem the device diff --git a/bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml b/bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml index 2849b3cf4..b5a5705df 100644 --- a/bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml +++ b/bundle/manifests/lvm.topolvm.io_lvmvolumegroups.yaml @@ -42,6 +42,14 @@ spec: description: DeviceSelector is a set of rules that should match for a device to be included in this TopoLVMCluster properties: + forceWipeDevicesAndDestroyAllData: + description: ForceWipeDevicesAndDestroyAllData runs wipefs to + wipe the devices. This can lead to data loss. Enable this only + when you know that the disk does not contain any important data. + type: boolean + x-kubernetes-validations: + - message: ForceWipeDevicesAndDestroyAllData is immutable + rule: self == oldSelf optionalPaths: description: A list of device paths which could be chosen for creating Volume Group. 
For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1" @@ -59,6 +67,9 @@ spec: type: string type: array type: object + x-kubernetes-validations: + - message: ForceWipeDevicesAndDestroyAllData is required once set + rule: '!has(oldSelf.forceWipeDevicesAndDestroyAllData) || has(self.forceWipeDevicesAndDestroyAllData)' nodeSelector: description: NodeSelector chooses nodes properties: diff --git a/cmd/vgmanager/vgmanager.go b/cmd/vgmanager/vgmanager.go index e88faa5b5..198dd2f36 100644 --- a/cmd/vgmanager/vgmanager.go +++ b/cmd/vgmanager/vgmanager.go @@ -21,11 +21,13 @@ import ( "os" "github.com/go-logr/logr" + "github.com/openshift/lvm-operator/pkg/dmsetup" "github.com/openshift/lvm-operator/pkg/filter" "github.com/openshift/lvm-operator/pkg/lsblk" "github.com/openshift/lvm-operator/pkg/lvm" "github.com/openshift/lvm-operator/pkg/lvmd" "github.com/openshift/lvm-operator/pkg/vgmanager" + "github.com/openshift/lvm-operator/pkg/wipefs" "github.com/spf13/cobra" "sigs.k8s.io/controller-runtime/pkg/cache" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -106,6 +108,8 @@ func run(_ *cobra.Command, _ []string, opts *Options) error { LVMD: lvmd.DefaultConfigurator(), Scheme: mgr.GetScheme(), LSBLK: lsblk.NewDefaultHostLSBLK(), + Wipefs: wipefs.NewDefaultHostWipefs(), + Dmsetup: dmsetup.NewDefaultHostDmsetup(), LVM: lvm.NewDefaultHostLVM(), NodeName: os.Getenv("NODE_NAME"), Namespace: os.Getenv("POD_NAMESPACE"), diff --git a/config/crd/bases/lvm.topolvm.io_lvmclusters.yaml b/config/crd/bases/lvm.topolvm.io_lvmclusters.yaml index a2c092ce9..b1f50ae73 100644 --- a/config/crd/bases/lvm.topolvm.io_lvmclusters.yaml +++ b/config/crd/bases/lvm.topolvm.io_lvmclusters.yaml @@ -52,6 +52,15 @@ spec: description: DeviceSelector is a set of rules that should match for a device to be included in the LVMCluster properties: + forceWipeDevicesAndDestroyAllData: + description: ForceWipeDevicesAndDestroyAllData runs + wipefs to wipe the devices. This can lead to data + loss. 
Enable this only when you know that the disk + does not contain any important data. + type: boolean + x-kubernetes-validations: + - message: ForceWipeDevicesAndDestroyAllData is immutable + rule: self == oldSelf optionalPaths: description: A list of device paths which could be chosen for creating Volume Group. For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1" @@ -69,6 +78,11 @@ spec: type: string type: array type: object + x-kubernetes-validations: + - message: ForceWipeDevicesAndDestroyAllData is required + once set + rule: '!has(oldSelf.forceWipeDevicesAndDestroyAllData) + || has(self.forceWipeDevicesAndDestroyAllData)' fstype: default: xfs description: FilesystemType sets the filesystem the device diff --git a/config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml b/config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml index 2cc3b6fd7..4562a58f2 100644 --- a/config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml +++ b/config/crd/bases/lvm.topolvm.io_lvmvolumegroups.yaml @@ -42,6 +42,14 @@ spec: description: DeviceSelector is a set of rules that should match for a device to be included in this TopoLVMCluster properties: + forceWipeDevicesAndDestroyAllData: + description: ForceWipeDevicesAndDestroyAllData runs wipefs to + wipe the devices. This can lead to data loss. Enable this only + when you know that the disk does not contain any important data. + type: boolean + x-kubernetes-validations: + - message: ForceWipeDevicesAndDestroyAllData is immutable + rule: self == oldSelf optionalPaths: description: A list of device paths which could be chosen for creating Volume Group. 
For example "/dev/disk/by-path/pci-0000:04:00.0-nvme-1" @@ -59,6 +67,9 @@ spec: type: string type: array type: object + x-kubernetes-validations: + - message: ForceWipeDevicesAndDestroyAllData is required once set + rule: '!has(oldSelf.forceWipeDevicesAndDestroyAllData) || has(self.forceWipeDevicesAndDestroyAllData)' nodeSelector: description: NodeSelector chooses nodes properties: diff --git a/pkg/dmsetup/dmsetup.go b/pkg/dmsetup/dmsetup.go new file mode 100644 index 000000000..b78359e30 --- /dev/null +++ b/pkg/dmsetup/dmsetup.go @@ -0,0 +1,53 @@ +package dmsetup + +import ( + "errors" + "fmt" + "strings" + + "github.com/openshift/lvm-operator/pkg/internal/exec" +) + +var ( + DefaultDMSetup = "/usr/sbin/dmsetup" + ErrReferenceNotFound = errors.New("device-mapper reference not found") +) + +type Dmsetup interface { + Remove(deviceName string) error +} + +type HostDmsetup struct { + exec.Executor + dmsetup string +} + +func NewDefaultHostDmsetup() *HostDmsetup { + return NewHostDmsetup(&exec.CommandExecutor{}, DefaultDMSetup) +} + +func NewHostDmsetup(executor exec.Executor, dmsetup string) *HostDmsetup { + return &HostDmsetup{ + Executor: executor, + dmsetup: dmsetup, + } +} + +// Remove removes the device's reference from the device-mapper +func (dmsetup *HostDmsetup) Remove(deviceName string) error { + if len(deviceName) == 0 { + return errors.New("failed to remove device-mapper reference. Device name is empty") + } + + args := []string{"remove"} + args = append(args, deviceName) + output, err := dmsetup.ExecuteCommandWithOutputAsHost(dmsetup.dmsetup, args...) 
+ if err != nil { + if strings.Contains(output, "not found") { + return ErrReferenceNotFound + } + return fmt.Errorf("failed to remove the reference from device-mapper %q: %v", deviceName, err) + } + + return nil +} diff --git a/pkg/dmsetup/dmsetup_test.go b/pkg/dmsetup/dmsetup_test.go new file mode 100644 index 000000000..f194f13ee --- /dev/null +++ b/pkg/dmsetup/dmsetup_test.go @@ -0,0 +1,47 @@ +package dmsetup + +import ( + "errors" + "fmt" + "testing" + + mockExec "github.com/openshift/lvm-operator/pkg/internal/exec/test" + "github.com/stretchr/testify/assert" +) + +func TestRemove(t *testing.T) { + tests := []struct { + name string + deviceName string + wantErr bool + }{ + {"Empty device name", "", true}, + {"Existing device name", "/dev/loop1", false}, + {"Non-existing device name", "/dev/loop2", true}, + } + + executor := &mockExec.MockExecutor{ + MockExecuteCommandWithOutputAsHost: func(command string, args ...string) (string, error) { + if args[0] != "remove" { + return "", fmt.Errorf("invalid args %q", args[0]) + } + if args[1] == "/dev/loop1" { + return "", nil + } else if args[1] == "/dev/loop2" { + return "device loop2 not found", errors.New("device loop2 not found") + } + return "", fmt.Errorf("invalid args %q", args[1]) + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := NewHostDmsetup(executor, DefaultDMSetup).Remove(tt.deviceName) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/pkg/dmsetup/mocks/mock_dmsetup.go b/pkg/dmsetup/mocks/mock_dmsetup.go new file mode 100644 index 000000000..f36efa7e6 --- /dev/null +++ b/pkg/dmsetup/mocks/mock_dmsetup.go @@ -0,0 +1,74 @@ +// Code generated by mockery v2.33.2. DO NOT EDIT. 
+ +package dmsetup + +import mock "github.com/stretchr/testify/mock" + +// MockDmsetup is an autogenerated mock type for the Dmsetup type +type MockDmsetup struct { + mock.Mock +} + +type MockDmsetup_Expecter struct { + mock *mock.Mock +} + +func (_m *MockDmsetup) EXPECT() *MockDmsetup_Expecter { + return &MockDmsetup_Expecter{mock: &_m.Mock} +} + +// Remove provides a mock function with given fields: deviceName +func (_m *MockDmsetup) Remove(deviceName string) error { + ret := _m.Called(deviceName) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(deviceName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockDmsetup_Remove_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Remove' +type MockDmsetup_Remove_Call struct { + *mock.Call +} + +// Remove is a helper method to define mock.On call +// - deviceName string +func (_e *MockDmsetup_Expecter) Remove(deviceName interface{}) *MockDmsetup_Remove_Call { + return &MockDmsetup_Remove_Call{Call: _e.mock.On("Remove", deviceName)} +} + +func (_c *MockDmsetup_Remove_Call) Run(run func(deviceName string)) *MockDmsetup_Remove_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockDmsetup_Remove_Call) Return(_a0 error) *MockDmsetup_Remove_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockDmsetup_Remove_Call) RunAndReturn(run func(string) error) *MockDmsetup_Remove_Call { + _c.Call.Return(run) + return _c +} + +// NewMockDmsetup creates a new instance of MockDmsetup. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockDmsetup(t interface { + mock.TestingT + Cleanup(func()) +}) *MockDmsetup { + mock := &MockDmsetup{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/internal/exec/exec.go b/pkg/internal/exec/exec.go index 6b42826e6..cde2baa1d 100644 --- a/pkg/internal/exec/exec.go +++ b/pkg/internal/exec/exec.go @@ -26,7 +26,7 @@ var ( nsenterPath = "/usr/bin/nsenter" ) -// Executor is the interface for running exec commands +// Executor is the interface for running exec commands type Executor interface { ExecuteCommandWithOutput(command string, arg ...string) (string, error) ExecuteCommandWithOutputAsHost(command string, arg ...string) (string, error) diff --git a/pkg/vgmanager/devices.go b/pkg/vgmanager/devices.go index 371195704..54925a089 100644 --- a/pkg/vgmanager/devices.go +++ b/pkg/vgmanager/devices.go @@ -138,7 +138,7 @@ func (r *VGReconciler) filterDevices(ctx context.Context, devices []lsblk.BlockD } // getNewDevicesToBeAdded gets all devices that should be added to the volume group -func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices []lsblk.BlockDevice, vgs []lvm.VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]lsblk.BlockDevice, error) { +func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices []lsblk.BlockDevice, nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus, volumeGroup *lvmv1alpha1.LVMVolumeGroup) ([]lsblk.BlockDevice, error) { logger := log.FromContext(ctx) var validBlockDevices []lsblk.BlockDevice @@ -151,7 +151,7 @@ func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices // If Paths is specified, treat it as required paths for _, path := range volumeGroup.Spec.DeviceSelector.Paths { - blockDevice, err := getValidDevice(path, blockDevices, vgs, volumeGroup) + blockDevice, err := getValidDevice(path, blockDevices, nodeStatus, volumeGroup) if err != nil { // An error for required devices is critical return 
nil, fmt.Errorf("unable to validate device %s: %v", path, err) @@ -168,7 +168,7 @@ func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices } for _, path := range volumeGroup.Spec.DeviceSelector.OptionalPaths { - blockDevice, err := getValidDevice(path, blockDevices, vgs, volumeGroup) + blockDevice, err := getValidDevice(path, blockDevices, nodeStatus, volumeGroup) // Check if we should skip this device if err != nil { @@ -200,11 +200,14 @@ func (r *VGReconciler) getNewDevicesToBeAdded(ctx context.Context, blockDevices return validBlockDevices, nil } -func isDeviceAlreadyPartOfVG(vgs []lvm.VolumeGroup, diskName string, volumeGroup *lvmv1alpha1.LVMVolumeGroup) bool { - for _, vg := range vgs { - if vg.Name == volumeGroup.Name { - for _, pv := range vg.PVs { - if pv.PvName == diskName { +func isDeviceAlreadyPartOfVG(nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus, diskName string, volumeGroup *lvmv1alpha1.LVMVolumeGroup) bool { + if nodeStatus == nil { + return false + } + for _, vgStatus := range nodeStatus.Spec.LVMVGStatus { + if vgStatus.Name == volumeGroup.Name { + for _, pv := range vgStatus.Devices { + if pv == diskName { return true } } @@ -232,7 +235,7 @@ func hasExactDisk(blockDevices []lsblk.BlockDevice, deviceName string) (lsblk.Bl // // An error will be returned if the device is invalid // No error and an empty BlockDevice object will be returned if this device should be skipped (ex: duplicate device) -func getValidDevice(devicePath string, blockDevices []lsblk.BlockDevice, vgs []lvm.VolumeGroup, volumeGroup *lvmv1alpha1.LVMVolumeGroup) (lsblk.BlockDevice, error) { +func getValidDevice(devicePath string, blockDevices []lsblk.BlockDevice, nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus, volumeGroup *lvmv1alpha1.LVMVolumeGroup) (lsblk.BlockDevice, error) { // Make sure the symlink exists diskName, err := filepath.EvalSymlinks(devicePath) if err != nil { @@ -240,7 +243,7 @@ func getValidDevice(devicePath string, blockDevices 
[]lsblk.BlockDevice, vgs []l } // Make sure this isn't a duplicate in the VG - if isDeviceAlreadyPartOfVG(vgs, diskName, volumeGroup) { + if isDeviceAlreadyPartOfVG(nodeStatus, diskName, volumeGroup) { return lsblk.BlockDevice{}, nil // No error, we just don't want a duplicate } diff --git a/pkg/vgmanager/devices_test.go b/pkg/vgmanager/devices_test.go index 12ab7b82b..7f7345aa9 100644 --- a/pkg/vgmanager/devices_test.go +++ b/pkg/vgmanager/devices_test.go @@ -10,7 +10,6 @@ import ( "github.com/openshift/lvm-operator/api/v1alpha1" "github.com/openshift/lvm-operator/pkg/filter" "github.com/openshift/lvm-operator/pkg/lsblk" - "github.com/openshift/lvm-operator/pkg/lvm" "github.com/stretchr/testify/assert" "sigs.k8s.io/controller-runtime/pkg/log" @@ -42,7 +41,7 @@ func Test_getNewDevicesToBeAdded(t *testing.T) { description string volumeGroup v1alpha1.LVMVolumeGroup existingBlockDevices []lsblk.BlockDevice - existingVGs []lvm.VolumeGroup + nodeStatus v1alpha1.LVMVolumeGroupNodeStatus numOfAvailableDevices int expectError bool }{ @@ -312,12 +311,16 @@ func Test_getNewDevicesToBeAdded(t *testing.T) { }, }, }, - existingVGs: []lvm.VolumeGroup{ - { - Name: "vg1", - PVs: []lvm.PhysicalVolume{ - {PvName: calculateDevicePath(t, "nvme1n1p1")}, - {PvName: calculateDevicePath(t, "nvme1n1p2")}, + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{ + Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + { + Name: "vg1", + Devices: []string{ + calculateDevicePath(t, "nvme1n1p1"), + calculateDevicePath(t, "nvme1n1p2"), + }, + }, }, }, }, @@ -356,9 +359,13 @@ func Test_getNewDevicesToBeAdded(t *testing.T) { }, }, }, - existingVGs: []lvm.VolumeGroup{ - { - Name: "vg1", + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{ + Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + { + Name: "vg1", + }, + }, }, }, existingBlockDevices: []lsblk.BlockDevice{ @@ -388,9 +395,13 @@ func Test_getNewDevicesToBeAdded(t *testing.T) { }, }, }, - 
existingVGs: []lvm.VolumeGroup{ - { - Name: "vg1", + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{ + Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + { + Name: "vg1", + }, + }, }, }, existingBlockDevices: []lsblk.BlockDevice{ @@ -570,7 +581,7 @@ func Test_getNewDevicesToBeAdded(t *testing.T) { for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { ctx := log.IntoContext(context.Background(), testr.New(t)) - availableDevices, err := r.getNewDevicesToBeAdded(ctx, tc.existingBlockDevices, tc.existingVGs, &tc.volumeGroup) + availableDevices, err := r.getNewDevicesToBeAdded(ctx, tc.existingBlockDevices, &tc.nodeStatus, &tc.volumeGroup) if !tc.expectError { assert.NoError(t, err) } else { diff --git a/pkg/vgmanager/vgmanager_controller.go b/pkg/vgmanager/vgmanager_controller.go index 3d8f3a828..895cf280f 100644 --- a/pkg/vgmanager/vgmanager_controller.go +++ b/pkg/vgmanager/vgmanager_controller.go @@ -25,10 +25,13 @@ import ( "github.com/google/go-cmp/cmp" lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1" "github.com/openshift/lvm-operator/controllers" + "github.com/openshift/lvm-operator/pkg/dmsetup" "github.com/openshift/lvm-operator/pkg/filter" "github.com/openshift/lvm-operator/pkg/lsblk" "github.com/openshift/lvm-operator/pkg/lvm" "github.com/openshift/lvm-operator/pkg/lvmd" + "github.com/openshift/lvm-operator/pkg/wipefs" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -87,6 +90,8 @@ type VGReconciler struct { LVMD lvmd.Configurator lvm.LVM lsblk.LSBLK + wipefs.Wipefs + dmsetup.Dmsetup NodeName string Namespace string Filters func(*lvmv1alpha1.LVMVolumeGroup, lvm.LVM, lsblk.LSBLK) filter.Filters @@ -128,12 +133,13 @@ func (r *VGReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Re } } - return r.reconcile(ctx, volumeGroup) + return r.reconcile(ctx, volumeGroup, nodeStatus) } func (r *VGReconciler) reconcile( ctx 
context.Context, volumeGroup *lvmv1alpha1.LVMVolumeGroup, + nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus, ) (ctrl.Result, error) { logger := log.FromContext(ctx) // Check if the LVMVolumeGroup resource is deleted @@ -165,35 +171,46 @@ func (r *VGReconciler) reconcile( } existingLvmdConfig := *lvmdConfig - vgs, err := r.LVM.ListVGs() + blockDevices, err := r.LSBLK.ListBlockDevices() if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to list volume groups: %w", err) - } - - vgExistsInLVM := false - for _, vg := range vgs { - if volumeGroup.Name == vg.Name { - if len(vg.PVs) > 0 { - vgExistsInLVM = true - } - } + return ctrl.Result{}, fmt.Errorf("failed to list block devices: %w", err) } - blockDevices, err := r.LSBLK.ListBlockDevices() + wiped, err := r.wipeDevicesIfNecessary(ctx, volumeGroup, nodeStatus, blockDevices) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to list block devices: %w", err) + return ctrl.Result{}, fmt.Errorf("failed to wipe devices: %w", err) + } + if wiped { + blockDevices, err = r.LSBLK.ListBlockDevices() + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list block devices: %w", err) + } } // Get the available block devices that can be used for this volume group // valid means that it can be used to create or extend the volume group // devices that are already part of the volume group will not be returned - newDevices, err := r.getNewDevicesToBeAdded(ctx, blockDevices, vgs, volumeGroup) + newDevices, err := r.getNewDevicesToBeAdded(ctx, blockDevices, nodeStatus, volumeGroup) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to get matching available block devices for volumegroup %s: %w", volumeGroup.GetName(), err) } devices := r.filterDevices(ctx, newDevices, r.Filters(volumeGroup, r.LVM, r.LSBLK)) + vgs, err := r.LVM.ListVGs() + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list volume groups: %w", err) + } + + vgExistsInLVM := false + for _, vg := range vgs { + if 
volumeGroup.Name == vg.Name { + if len(vg.PVs) > 0 { + vgExistsInLVM = true + } + } + } + // If there are no available devices, that could mean either // - There is no available devices to attach to the volume group // - All the available devices are already attached diff --git a/pkg/vgmanager/wipe_devices.go b/pkg/vgmanager/wipe_devices.go new file mode 100644 index 000000000..e2d1e352b --- /dev/null +++ b/pkg/vgmanager/wipe_devices.go @@ -0,0 +1,83 @@ +package vgmanager + +import ( + "context" + "errors" + "fmt" + + lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1" + "github.com/openshift/lvm-operator/pkg/dmsetup" + "github.com/openshift/lvm-operator/pkg/lsblk" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +func (r *VGReconciler) wipeDevicesIfNecessary(ctx context.Context, volumeGroup *lvmv1alpha1.LVMVolumeGroup, nodeStatus *lvmv1alpha1.LVMVolumeGroupNodeStatus, blockDevices []lsblk.BlockDevice) (bool, error) { + if volumeGroup.Spec.DeviceSelector == nil || !volumeGroup.Spec.DeviceSelector.ForceWipeDevicesAndDestroyAllData { + return false, nil + } + + allPaths := append(volumeGroup.Spec.DeviceSelector.Paths, volumeGroup.Spec.DeviceSelector.OptionalPaths...) 
+ wiped := false + for _, path := range allPaths { + if isDeviceAlreadyPartOfVG(nodeStatus, path, volumeGroup) { // Do not only check the vg devices, but also node status device paths + continue + } + deviceWiped, err := r.wipeDevice(ctx, path, blockDevices) + if err != nil { + return false, fmt.Errorf("failed to wipe device %s: %w", path, err) + } + if deviceWiped { + wiped = true + } + } + + return wiped, nil +} + +func (r *VGReconciler) wipeDevice(ctx context.Context, deviceName string, blockDevices []lsblk.BlockDevice) (bool, error) { + logger := log.FromContext(ctx).WithValues("deviceName", deviceName) + + wiped := false + for _, device := range blockDevices { + if device.KName == deviceName { + if err := r.Wipefs.Wipe(device.KName); err != nil { + return false, err + } + wiped = true + logger.Info("device wiped successfully") + for _, child := range device.Children { + // If the device was used as a Physical Volume before, wipefs does not remove the child LVs. + // So, a device-mapper reference removal is necessary to further remove the child LV references. 
+ r.removeMapperReference(ctx, child) + } + } else if device.HasChildren() { + childWiped, err := r.wipeDevice(ctx, deviceName, device.Children) + if err != nil { + return false, err + } + if childWiped { + wiped = true + } + } + } + return wiped, nil +} + +// removeMapperReference removes the device-mapper reference of the device starting from the innermost child +func (r *VGReconciler) removeMapperReference(ctx context.Context, device lsblk.BlockDevice) { + logger := log.FromContext(ctx).WithValues("deviceName", device.KName) + if device.HasChildren() { + for _, child := range device.Children { + r.removeMapperReference(ctx, child) + } + } + if err := r.Dmsetup.Remove(device.KName); err != nil { + if errors.Is(err, dmsetup.ErrReferenceNotFound) { + logger.Info("skipping the removal of device-mapper reference as the reference does not exist", "childName", device.KName) + } else { + logger.Info("failed to remove device-mapper reference", "childName", device.KName, "error", err) + } + } else { + logger.Info("device-mapper reference removed successfully", "childName", device.KName) + } +} diff --git a/pkg/vgmanager/wipe_devices_test.go b/pkg/vgmanager/wipe_devices_test.go new file mode 100644 index 000000000..4d0f660de --- /dev/null +++ b/pkg/vgmanager/wipe_devices_test.go @@ -0,0 +1,175 @@ +package vgmanager + +import ( + "context" + "testing" + + "github.com/openshift/lvm-operator/api/v1alpha1" + dmsetupmocks "github.com/openshift/lvm-operator/pkg/dmsetup/mocks" + "github.com/openshift/lvm-operator/pkg/lsblk" + wipefsmocks "github.com/openshift/lvm-operator/pkg/wipefs/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestWipeDevices(t *testing.T) { + tests := []struct { + name string + forceWipeNotEnabled bool + devicePaths []string + blockDevices []lsblk.BlockDevice + nodeStatus v1alpha1.LVMVolumeGroupNodeStatus + wipeCount int + removeReferenceCount int + }{ + { + name: 
"Force wipe feature is not enabled", + forceWipeNotEnabled: true, + devicePaths: []string{"/dev/loop1"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/sdb"}, {KName: "/dev/loop1"}}, + wipeCount: 0, + removeReferenceCount: 0, + }, + { + name: "There is no path specified", + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/sdb"}, {KName: "/dev/loop1"}}, + wipeCount: 0, + removeReferenceCount: 0, + }, + { + name: "Device exist in the device list", + devicePaths: []string{"/dev/loop1"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/sdb"}, {KName: "/dev/loop1"}}, + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + {Name: "vg1", Devices: []string{"/dev/sda"}}}, + }}, + wipeCount: 1, + removeReferenceCount: 0, + }, + { + name: "Device does not exist in the device list", + devicePaths: []string{"/dev/loop1"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/sdb"}}, + wipeCount: 0, + removeReferenceCount: 0, + }, + { + name: "Device exist as a child", + devicePaths: []string{"/dev/loop1"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda", Children: []lsblk.BlockDevice{{KName: "/dev/loop1"}}}, {KName: "/dev/sdb"}}, + wipeCount: 1, + removeReferenceCount: 0, + }, + { + name: "Device has child references", + devicePaths: []string{"/dev/loop1"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/loop1", Children: []lsblk.BlockDevice{{KName: "/dev/loop1p1", Children: []lsblk.BlockDevice{{KName: "/dev/loop1p1p1"}}}, {KName: "/dev/loop1p2"}}}, {KName: "/dev/sda"}}, + wipeCount: 1, + removeReferenceCount: 3, + }, + { + name: "Device exists as a child device that has a child reference", + devicePaths: []string{"/dev/loop1"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda", Children: []lsblk.BlockDevice{{KName: "/dev/loop1", Children: []lsblk.BlockDevice{{KName: "/dev/loop1p1"}}}, {KName: "/dev/loop2", 
Children: []lsblk.BlockDevice{{KName: "/dev/loop2p1"}}}}}, {KName: "/dev/sdb"}}, + wipeCount: 1, + removeReferenceCount: 1, + }, + { + name: "Device exist in the device list and is already part of a vg", + devicePaths: []string{"/dev/loop1"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/sdb"}, {KName: "/dev/loop1"}}, + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + {Name: "vg1", Devices: []string{"/dev/loop1"}}}, + }}, + wipeCount: 0, + removeReferenceCount: 0, + }, + { + name: "Only one device out of two exists in the device list", + devicePaths: []string{"/dev/loop1", "/dev/loop2"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/sdb"}, {KName: "/dev/loop1"}}, + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + {Name: "vg1", Devices: []string{"/dev/sda"}}}, + }}, + wipeCount: 1, + removeReferenceCount: 0, + }, + { + name: "Both devices exist in the device list", + devicePaths: []string{"/dev/loop1", "/dev/loop2"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/loop1"}, {KName: "/dev/loop2", Children: []lsblk.BlockDevice{{KName: "/dev/loop2p1"}}}}, + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + {Name: "vg1", Devices: []string{"/dev/sda"}}}, + }}, + wipeCount: 2, + removeReferenceCount: 1, + }, + { + name: "Both devices, one of them is a child, exist in the device list", + devicePaths: []string{"/dev/loop1", "/dev/loop2p1"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/loop1"}, {KName: "/dev/loop2", Children: []lsblk.BlockDevice{{KName: "/dev/loop2p1"}}}}, + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + {Name: "vg1", Devices: 
[]string{"/dev/sda"}}}, + }}, + wipeCount: 2, + removeReferenceCount: 0, + }, + { + name: "Both devices exist in the device list, one of them is part of the vg", + devicePaths: []string{"/dev/loop1", "/dev/loop2"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/loop1"}, {KName: "/dev/loop2", Children: []lsblk.BlockDevice{{KName: "/dev/loop2p1"}}}}, + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + {Name: "vg1", Devices: []string{"/dev/loop1"}}}, + }}, + wipeCount: 1, + removeReferenceCount: 1, + }, + { + name: "Both devices are part of the vg", + devicePaths: []string{"/dev/loop1", "/dev/loop2"}, + blockDevices: []lsblk.BlockDevice{{KName: "/dev/sda"}, {KName: "/dev/loop1"}, {KName: "/dev/loop2", Children: []lsblk.BlockDevice{{KName: "/dev/loop2p1"}}}}, + nodeStatus: v1alpha1.LVMVolumeGroupNodeStatus{Spec: v1alpha1.LVMVolumeGroupNodeStatusSpec{ + LVMVGStatus: []v1alpha1.VGStatus{ + {Name: "vg1", Devices: []string{"/dev/loop1", "/dev/loop2"}}}, + }}, + wipeCount: 0, + removeReferenceCount: 0, + }, + } + mockWipefs := wipefsmocks.NewMockWipefs(t) + mockDmsetup := dmsetupmocks.NewMockDmsetup(t) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &VGReconciler{Wipefs: mockWipefs, Dmsetup: mockDmsetup} + if tt.wipeCount > 0 { + mockWipefs.EXPECT().Wipe(mock.Anything).Return(nil).Times(tt.wipeCount) + } + if tt.removeReferenceCount > 0 { + mockDmsetup.EXPECT().Remove(mock.Anything).Return(nil).Times(tt.removeReferenceCount) + } + volumeGroup := &v1alpha1.LVMVolumeGroup{ + ObjectMeta: metav1.ObjectMeta{Name: "vg1"}, + Spec: v1alpha1.LVMVolumeGroupSpec{DeviceSelector: &v1alpha1.DeviceSelector{ + Paths: tt.devicePaths, + ForceWipeDevicesAndDestroyAllData: !tt.forceWipeNotEnabled, + }}, + } + + wiped, err := r.wipeDevicesIfNecessary(context.Background(), volumeGroup, &tt.nodeStatus, tt.blockDevices) + if tt.wipeCount > 0 { + assert.True(t, wiped) + } 
else { + assert.False(t, wiped) + } + assert.NoError(t, err) + }) + } +} diff --git a/pkg/wipefs/mocks/mock_wipefs.go b/pkg/wipefs/mocks/mock_wipefs.go new file mode 100644 index 000000000..253cf95c5 --- /dev/null +++ b/pkg/wipefs/mocks/mock_wipefs.go @@ -0,0 +1,74 @@ +// Code generated by mockery v2.33.2. DO NOT EDIT. + +package wipefs + +import mock "github.com/stretchr/testify/mock" + +// MockWipefs is an autogenerated mock type for the Wipefs type +type MockWipefs struct { + mock.Mock +} + +type MockWipefs_Expecter struct { + mock *mock.Mock +} + +func (_m *MockWipefs) EXPECT() *MockWipefs_Expecter { + return &MockWipefs_Expecter{mock: &_m.Mock} +} + +// Wipe provides a mock function with given fields: deviceName +func (_m *MockWipefs) Wipe(deviceName string) error { + ret := _m.Called(deviceName) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(deviceName) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockWipefs_Wipe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Wipe' +type MockWipefs_Wipe_Call struct { + *mock.Call +} + +// Wipe is a helper method to define mock.On call +// - deviceName string +func (_e *MockWipefs_Expecter) Wipe(deviceName interface{}) *MockWipefs_Wipe_Call { + return &MockWipefs_Wipe_Call{Call: _e.mock.On("Wipe", deviceName)} +} + +func (_c *MockWipefs_Wipe_Call) Run(run func(deviceName string)) *MockWipefs_Wipe_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *MockWipefs_Wipe_Call) Return(_a0 error) *MockWipefs_Wipe_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockWipefs_Wipe_Call) RunAndReturn(run func(string) error) *MockWipefs_Wipe_Call { + _c.Call.Return(run) + return _c +} + +// NewMockWipefs creates a new instance of MockWipefs. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewMockWipefs(t interface { + mock.TestingT + Cleanup(func()) +}) *MockWipefs { + mock := &MockWipefs{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/wipefs/wipefs.go b/pkg/wipefs/wipefs.go new file mode 100644 index 000000000..4e005a942 --- /dev/null +++ b/pkg/wipefs/wipefs.go @@ -0,0 +1,47 @@ +package wipefs + +import ( + "fmt" + + "github.com/openshift/lvm-operator/pkg/internal/exec" +) + +var ( + DefaultWipefs = "/usr/sbin/wipefs" +) + +type Wipefs interface { + Wipe(deviceName string) error +} + +type HostWipefs struct { + exec.Executor + wipefs string +} + +func NewDefaultHostWipefs() *HostWipefs { + return NewHostWipefs(&exec.CommandExecutor{}, DefaultWipefs) +} + +func NewHostWipefs(executor exec.Executor, wipefs string) *HostWipefs { + return &HostWipefs{ + Executor: executor, + wipefs: wipefs, + } +} + +// Wipe wipes the device only if force delete flag is set +func (wipefs *HostWipefs) Wipe(deviceName string) error { + if len(deviceName) == 0 { + return fmt.Errorf("failed to wipe the device. Device name is empty") + } + + args := []string{"--all", "--force"} + args = append(args, deviceName) + _, err := wipefs.ExecuteCommandWithOutputAsHost(wipefs.wipefs, args...) + if err != nil { + return fmt.Errorf("failed to wipe the device %q. 
%v", deviceName, err) + } + + return nil +} diff --git a/pkg/wipefs/wipefs_test.go b/pkg/wipefs/wipefs_test.go new file mode 100644 index 000000000..1e663bdcd --- /dev/null +++ b/pkg/wipefs/wipefs_test.go @@ -0,0 +1,47 @@ +package wipefs + +import ( + "errors" + "fmt" + "testing" + + mockExec "github.com/openshift/lvm-operator/pkg/internal/exec/test" + "github.com/stretchr/testify/assert" +) + +func TestWipe(t *testing.T) { + tests := []struct { + name string + deviceName string + wantErr bool + }{ + {"Empty device name", "", true}, + {"Existing device name", "/dev/loop1", false}, + {"Non-existing device name", "/dev/loop2", true}, + } + + executor := &mockExec.MockExecutor{ + MockExecuteCommandWithOutputAsHost: func(command string, args ...string) (string, error) { + if args[0] != "--all" || args[1] != "--force" { + return "", fmt.Errorf("invalid args %q", args[0:2]) + } + if args[2] == "/dev/loop1" { + return "", nil + } else if args[2] == "/dev/loop2" { + return "no such file or directory", errors.New("no such file or directory") + } + return "", fmt.Errorf("invalid args %q", args[2]) + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := NewHostWipefs(executor, DefaultWipefs).Wipe(tt.deviceName) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +}