diff --git a/examples/volumes/vsphere/README.md b/examples/volumes/vsphere/README.md index bc5f7eaa27076..dce01c7b59574 100644 --- a/examples/volumes/vsphere/README.md +++ b/examples/volumes/vsphere/README.md @@ -5,7 +5,9 @@ - [Volumes](#volumes) - [Persistent Volumes](#persistent-volumes) - [Storage Class](#storage-class) - - [Virtual SAN policy support inside Kubernetes](#virtual-san-policy-support-inside-kubernetes) + - [Storage Policy Management inside Kubernetes](#storage-policy-management-inside-kubernetes) + - [Using existing vCenter SPBM policy](#using-existing-vcenter-spbm-policy) + - [Virtual SAN policy support](#virtual-san-policy-support) - [Stateful Set](#stateful-set) ## Prerequisites @@ -374,7 +376,47 @@ pvpod 1/1 Running 0 48m ``` -### Virtual SAN policy support inside Kubernetes +### Storage Policy Management inside Kubernetes +#### Using existing vCenter SPBM policy + Admins can use an existing vCenter Storage Policy Based Management (SPBM) policy to configure a persistent volume. + + __Note: Here you don't need to create a persistent volume; it is created for you.__ + 1. Create a Storage Class. + + Example 1: + + ```yaml + kind: StorageClass + apiVersion: storage.k8s.io/v1beta1 + metadata: + name: fast + provisioner: kubernetes.io/vsphere-volume + parameters: + diskformat: zeroedthick + storagePolicyName: gold + ``` + [Download example](vsphere-volume-spbm-policy.yaml?raw=true) + + The admin specifies the SPBM policy "gold" as part of the storage class definition for dynamic volume provisioning. When a PVC is created, the persistent volume will be provisioned on the compatible datastore with the maximum free space that satisfies the "gold" storage policy requirements. + + Example 2: + + ```yaml + kind: StorageClass + apiVersion: storage.k8s.io/v1beta1 + metadata: + name: fast + provisioner: kubernetes.io/vsphere-volume + parameters: + diskformat: zeroedthick + storagePolicyName: gold + datastore: VSANDatastore + ``` + [Download example](vsphere-volume-spbm-policy-with-datastore.yaml?raw=true) + + Along with the SPBM policy name, the admin can also specify a custom datastore where the volume should be provisioned. When a PVC is created, the vSphere Cloud Provider checks whether the user-specified datastore satisfies the "gold" storage policy requirements. If it does, the persistent volume is provisioned on that datastore. If not, an error is returned stating that the user-specified datastore is not compatible with the "gold" storage policy requirements. +#### Virtual SAN policy support Vsphere Infrastructure(VI) Admins will have the ability to specify custom Virtual SAN Storage Capabilities during dynamic volume provisioning. You can now define storage requirements, such as performance and availability, in the form of storage capabilities during dynamic volume provisioning. The storage capability requirements are converted into a Virtual SAN policy which are then pushed down to the Virtual SAN layer when a persistent volume (virtual disk) is being created. The virtual disk is distributed across the Virtual SAN datastore to meet the requirements.
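To show how such a class is consumed, a claim along these lines would bind against the `fast` class defined above (an illustrative sketch, not part of this change; the claim name and requested size are made up, and newer clusters can use `spec.storageClassName` instead of the beta annotation):

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvcsc-spbm
  annotations:
    volume.beta.kubernetes.io/storage-class: fast
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
```

When such a claim is created, the provisioner resolves the `gold` policy through SPBM and places the volume on a compatible datastore as described above.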
diff --git a/examples/volumes/vsphere/vsphere-volume-spbm-policy-with-datastore.yaml b/examples/volumes/vsphere/vsphere-volume-spbm-policy-with-datastore.yaml new file mode 100644 index 0000000000000..7ebcd88a5b1c8 --- /dev/null +++ b/examples/volumes/vsphere/vsphere-volume-spbm-policy-with-datastore.yaml @@ -0,0 +1,9 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: fast +provisioner: kubernetes.io/vsphere-volume +parameters: + diskformat: zeroedthick + storagePolicyName: gold + datastore: VSANDatastore diff --git a/examples/volumes/vsphere/vsphere-volume-spbm-policy.yaml b/examples/volumes/vsphere/vsphere-volume-spbm-policy.yaml new file mode 100644 index 0000000000000..6d0ce49b85f32 --- /dev/null +++ b/examples/volumes/vsphere/vsphere-volume-spbm-policy.yaml @@ -0,0 +1,8 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: fast +provisioner: kubernetes.io/vsphere-volume +parameters: + diskformat: zeroedthick + storagePolicyName: gold diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index ed9164e80514c..5d45f98195520 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -45,6 +45,7 @@ import ( "github.com/vmware/govmomi/vim25/types" "golang.org/x/net/context" + pbm "github.com/vmware/govmomi/pbm" k8stypes "k8s.io/apimachinery/pkg/types" k8runtime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/kubernetes/pkg/api/v1" @@ -165,7 +166,7 @@ type VSphereConfig struct { type Volumes interface { // AttachDisk attaches given disk to given node. Current node // is used when nodeName is empty string. - AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) + AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) // DetachDisk detaches given disk to given node. Current node // is used when nodeName is empty string. @@ -189,12 +190,14 @@ type Volumes interface { // VolumeOptions specifies capacity, tags, name and diskFormat for a volume. type VolumeOptions struct { - CapacityKB int - Tags map[string]string - Name string - DiskFormat string - Datastore string - StorageProfileData string + CapacityKB int + Tags map[string]string + Name string + DiskFormat string + Datastore string + VSANStorageProfileData string + StoragePolicyName string + StoragePolicyID string } // Generates Valid Options for Diskformat @@ -554,14 +557,12 @@ func (vs *VSphere) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, addressType = v1.NodeInternalIP } for _, ip := range v.IpAddress { - if net.ParseIP(ip).To4() != nil { - v1helper.AddToNodeAddresses(&addrs, - v1.NodeAddress{ - Type: addressType, - Address: ip, - }, - ) - } + v1helper.AddToNodeAddresses(&addrs, + v1.NodeAddress{ + Type: addressType, + Address: ip, + }, + ) } } return addrs, nil @@ -737,7 +738,7 @@ func cleanUpController(ctx context.Context, newSCSIController types.BaseVirtualD } // Attaches given virtual disk volume to the compute running kubelet. 
-func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) { +func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) { var newSCSIController types.BaseVirtualDevice // Create context @@ -785,14 +786,8 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di return "", "", err } - // verify scsi controller in virtual machine - vmDevices, err := vm.Device(ctx) - if err != nil { - return "", "", err - } - // Get VM device list - _, vmDevices, _, err = getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) + _, vmDevices, _, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) if err != nil { glog.Errorf("cannot get vmDevices for VM err=%s", err) return "", "", fmt.Errorf("cannot get vmDevices for VM err=%s", err) @@ -811,9 +806,9 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di // Create a new finder f := find.NewFinder(vs.client.Client, true) - // Set data center f.SetDatacenter(dc) + datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) if !isSuccess { @@ -837,28 +832,54 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent) - // Attach disk to the VM - err = vm.AddDevice(ctx, disk) + virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} + deviceConfigSpec := &types.VirtualDeviceConfigSpec{ + Device: disk, + Operation: types.VirtualDeviceConfigSpecOperationAdd, + } + // Configure the disk with the SPBM profile only if ProfileID is not empty. 
+ if storagePolicyID != "" { + profileSpec := &types.VirtualMachineDefinedProfileSpec{ + ProfileId: storagePolicyID, + } + deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, profileSpec) + } + virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec) + task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec) if err != nil { - glog.Errorf("cannot attach disk to the vm - %v", err) + glog.Errorf("Failed to attach the disk with storagePolicy: %+q with err - %v", storagePolicyID, err) if newSCSICreated { cleanUpController(ctx, newSCSIController, vmDevices, vm) } return "", "", err } - - vmDevices, err = vm.Device(ctx) + err = task.Wait(ctx) if err != nil { + glog.Errorf("Failed to attach the disk with storagePolicy: %+q with err - %v", storagePolicyID, err) if newSCSICreated { cleanUpController(ctx, newSCSIController, vmDevices, vm) } return "", "", err } - devices := vmDevices.SelectByType(disk) - if len(devices) < 1 { + + deviceName, diskUUID, err := getVMDiskInfo(ctx, vm, disk) + if err != nil { if newSCSICreated { cleanUpController(ctx, newSCSIController, vmDevices, vm) } + vs.DetachDisk(deviceName, nodeName) + return "", "", err + } + return deviceName, diskUUID, nil +} + +func getVMDiskInfo(ctx context.Context, vm *object.VirtualMachine, disk *types.VirtualDisk) (string, string, error) { + vmDevices, err := vm.Device(ctx) + if err != nil { + return "", "", err + } + devices := vmDevices.SelectByType(disk) + if len(devices) < 1 { return "", "", ErrNoDevicesFound } @@ -867,18 +888,13 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di deviceName := devices.Name(newDevice) // get device uuid - diskUUID, err = getVirtualDiskUUID(newDevice) + diskUUID, err := getVirtualDiskUUID(newDevice) if err != nil { - if newSCSICreated { - cleanUpController(ctx, newSCSIController, vmDevices, vm) - } - vs.DetachDisk(deviceName, nodeName) return "", "", err } return deviceName, diskUUID, nil } - func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) { // get next available SCSI controller unit number var takenUnitNumbers [SCSIDeviceSlots]bool @@ -1266,19 +1282,43 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter) f.SetDatacenter(dc) + if volumeOptions.StoragePolicyName != "" { + // Get the pbm client + pbmClient, err := pbm.NewClient(ctx, vs.client.Client) + if err != nil { + return "", err + } + volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName) + if err != nil { + return "", err + } + // Get the resource pool for current node. + resourcePool, err := vs.getCurrentNodeResourcePool(ctx, dc) + if err != nil { + return "", err + } + + dsRefs, err := vs.GetCompatibleDatastores(ctx, pbmClient, resourcePool, volumeOptions.StoragePolicyID) + if err != nil { + return "", err + } + + if volumeOptions.Datastore != "" { + if !IsUserSpecifiedDatastoreCompatible(dsRefs, volumeOptions.Datastore) { + return "", fmt.Errorf("User specified datastore: %q is not compatible with the StoragePolicy: %q requirements", volumeOptions.Datastore, volumeOptions.StoragePolicyName) + } + } else { + datastore = GetBestFitCompatibleDatastore(dsRefs) + } + } + ds, err := f.Datastore(ctx, datastore) if err != nil { glog.Errorf("Failed while searching for datastore %+q. 
err %s", datastore, err) return "", err } - // Create a disk with the VSAN storage capabilities specified in the volumeOptions.StorageProfileData. - // This is achieved by following steps: - // 1. Create dummy VM if not already present. - // 2. Add a new disk to the VM by performing VM reconfigure. - // 3. Detach the new disk from the dummy VM. - // 4. Delete the dummy VM. - if volumeOptions.StorageProfileData != "" { + if volumeOptions.VSANStorageProfileData != "" { // Check if the datastore is VSAN if any capability requirements are specified. // VSphere cloud provider now only supports VSAN capabilities requirements ok, err := checkIfDatastoreTypeIsVSAN(vs.client, ds) @@ -1291,7 +1331,14 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string " The policy parameters will work only with VSAN Datastore."+ " So, please specify a valid VSAN datastore in Storage class definition.", datastore) } - + } + // Create a disk with the VSAN storage capabilities specified in the volumeOptions.VSANStorageProfileData. + // This is achieved by following steps: + // 1. Create dummy VM if not already present. + // 2. Add a new disk to the VM by performing VM reconfigure. + // 3. Detach the new disk from the dummy VM. + // 4. Delete the dummy VM. + if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" { // Acquire a read lock to ensure multiple PVC requests can be processed simultaneously. cleanUpDummyVMLock.RLock() defer cleanUpDummyVMLock.RUnlock() @@ -1562,13 +1609,11 @@ func (vs *VSphere) createDummyVM(ctx context.Context, datacenter *object.Datacen if err != nil { return nil, err } - // Get the folder reference for global working directory where the dummy VM needs to be created. vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir) if err != nil { return nil, fmt.Errorf("Failed to get the folder reference for %q with err: %+v", vs.cfg.Global.WorkingDir, err) } - task, err := vmFolder.CreateVM(ctx, virtualMachineConfigSpec, resourcePool, nil) if err != nil { return nil, err @@ -1665,12 +1710,17 @@ func (vs *VSphere) createVirtualDiskWithPolicy(ctx context.Context, datacenter * FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate, } - storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{ - ProfileId: "", - ProfileData: &types.VirtualMachineProfileRawData{ + storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{} + // Is PBM storage policy ID is present, set the storage spec profile ID, + // else, set raw the VSAN policy string. 
+ if volumeOptions.StoragePolicyID != "" { + storageProfileSpec.ProfileId = volumeOptions.StoragePolicyID + } else if volumeOptions.VSANStorageProfileData != "" { + storageProfileSpec.ProfileId = "" + storageProfileSpec.ProfileData = &types.VirtualMachineProfileRawData{ ExtensionKey: "com.vmware.vim.sps", - ObjectData: volumeOptions.StorageProfileData, - }, + ObjectData: volumeOptions.VSANStorageProfileData, + } } deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec) diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_test.go b/pkg/cloudprovider/providers/vsphere/vsphere_test.go index ffb91a6e6f851..7ab128ecc9ddd 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_test.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_test.go @@ -232,7 +232,7 @@ func TestVolumes(t *testing.T) { t.Fatalf("Cannot create a new VMDK volume: %v", err) } - _, _, err = vs.AttachDisk(volPath, "") + _, _, err = vs.AttachDisk(volPath, "", "") if err != nil { t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, nodeName, err) } diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index fb9fc930faa52..247b27b5d54fe 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -18,10 +18,27 @@ package vsphere import ( "context" - "github.com/vmware/govmomi" "os" "runtime" "strings" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/pbm" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + + "fmt" + + pbmtypes "github.com/vmware/govmomi/pbm/types" +) + +const ( + ClusterComputeResource = "ClusterComputeResource" + ParentProperty = "parent" + DatastoreProperty = "datastore" + DatastoreInfoProperty = "info" ) // Reads vSphere configuration from system environment and construct vSphere object @@ -63,3 +80,112 @@ func GetgovmomiClient(cfg *VSphereConfig) (*govmomi.Client, error) { client, err := newClient(context.TODO(), cfg) return client, err } + +// Get list of compatible datastores that satisfies the storage policy requirements. +func (vs *VSphere) GetCompatibleDatastores(ctx context.Context, pbmClient *pbm.Client, resourcePool *object.ResourcePool, storagePolicyID string) ([]mo.Datastore, error) { + datastores, err := vs.getAllAccessibleDatastoresForK8sCluster(ctx, resourcePool) + if err != nil { + return nil, err + } + var hubs []pbmtypes.PbmPlacementHub + for _, ds := range datastores { + hubs = append(hubs, pbmtypes.PbmPlacementHub{ + HubType: ds.Type, + HubId: ds.Value, + }) + } + req := []pbmtypes.BasePbmPlacementRequirement{ + &pbmtypes.PbmPlacementCapabilityProfileRequirement{ + ProfileId: pbmtypes.PbmProfileId{ + UniqueId: storagePolicyID, + }, + }, + } + res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req) + if err != nil { + return nil, err + } + compatibleHubs := res.CompatibleDatastores() + // Return an error if there are no compatible datastores. 
+ if len(compatibleHubs) < 1 { + return nil, fmt.Errorf("There are no compatible datastores: %+v that satisfy the storage policy: %+q requirements", datastores, storagePolicyID) + } + var compatibleDatastoreRefs []types.ManagedObjectReference + for _, hub := range compatibleHubs { + compatibleDatastoreRefs = append(compatibleDatastoreRefs, types.ManagedObjectReference{ + Type: hub.HubType, + Value: hub.HubId, + }) + } + dsMorefs, err := vs.getDatastoreMorefs(ctx, compatibleDatastoreRefs) + if err != nil { + return nil, err + } + return dsMorefs, nil +} + +// Verify if the user specified datastore is in the list of compatible datastores. +func IsUserSpecifiedDatastoreCompatible(dsRefs []mo.Datastore, dsName string) bool { + for _, ds := range dsRefs { + if ds.Info.GetDatastoreInfo().Name == dsName { + return true + } + } + return false +} + +// Get the best fit compatible datastore by free space. +func GetBestFitCompatibleDatastore(dsRefs []mo.Datastore) string { + var curMax int64 + curMax = -1 + var index int + for i, ds := range dsRefs { + dsFreeSpace := ds.Info.GetDatastoreInfo().FreeSpace + if dsFreeSpace > curMax { + curMax = dsFreeSpace + index = i + } + } + return dsRefs[index].Info.GetDatastoreInfo().Name +} + +// Get the datastore morefs. +func (vs *VSphere) getDatastoreMorefs(ctx context.Context, dsRefs []types.ManagedObjectReference) ([]mo.Datastore, error) { + pc := property.DefaultCollector(vs.client.Client) + var datastoreMorefs []mo.Datastore + err := pc.Retrieve(ctx, dsRefs, []string{DatastoreInfoProperty}, &datastoreMorefs) + if err != nil { + return nil, err + } + return datastoreMorefs, nil +} + +// Get all datastores accessible inside the current Kubernetes cluster. +func (vs *VSphere) getAllAccessibleDatastoresForK8sCluster(ctx context.Context, resourcePool *object.ResourcePool) ([]types.ManagedObjectReference, error) { + var resourcePoolMoref mo.ResourcePool + s := object.NewSearchIndex(vs.client.Client) + err := s.Properties(ctx, resourcePool.Reference(), []string{ParentProperty}, &resourcePoolMoref) + if err != nil { + return nil, err + } + + // The K8s cluster might be deployed inside a cluster or a host. + // For a cluster it is ClusterComputeResource object, for others it is a ComputeResource object. + var datastores []types.ManagedObjectReference + if resourcePoolMoref.Parent.Type == ClusterComputeResource { + var cluster mo.ClusterComputeResource + err = s.Properties(ctx, resourcePoolMoref.Parent.Reference(), []string{DatastoreProperty}, &cluster) + if err != nil { + return nil, err + } + datastores = cluster.Datastore + } else { + var host mo.ComputeResource + err = s.Properties(ctx, resourcePoolMoref.Parent.Reference(), []string{DatastoreProperty}, &host) + if err != nil { + return nil, err + } + datastores = host.Datastore + } + return datastores, nil +} diff --git a/pkg/volume/vsphere_volume/attacher.go b/pkg/volume/vsphere_volume/attacher.go index d60619b0aab62..91ebdc183b9e6 100644 --- a/pkg/volume/vsphere_volume/attacher.go +++ b/pkg/volume/vsphere_volume/attacher.go @@ -75,7 +75,7 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No // vsphereCloud.AttachDisk checks if disk is already attached to host and // succeeds in that case, so no need to do that separately. 
- _, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, nodeName) + _, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyID, nodeName) if err != nil { glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err) return "", err diff --git a/pkg/volume/vsphere_volume/vsphere_volume.go b/pkg/volume/vsphere_volume/vsphere_volume.go index c1767f129e30f..e466fc373e198 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/pkg/volume/vsphere_volume/vsphere_volume.go @@ -152,7 +152,7 @@ func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath str // Abstract interface to disk operations. type vdManager interface { // Creates a volume - CreateVolume(provisioner *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeGB int, fstype string, err error) + CreateVolume(provisioner *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) // Deletes a volume DeleteVolume(deleter *vsphereVolumeDeleter) error } @@ -344,13 +344,13 @@ func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeO } func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { - vmDiskPath, sizeKB, fstype, err := v.manager.CreateVolume(v) + volSpec, err := v.manager.CreateVolume(v) if err != nil { return nil, err } - if fstype == "" { - fstype = "ext4" + if volSpec.Fstype == "" { + volSpec.Fstype = "ext4" } pv := &v1.PersistentVolume{ @@ -365,12 +365,14 @@ func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) { PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy, AccessModes: v.options.PVC.Spec.AccessModes, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", volSpec.Size)), }, PersistentVolumeSource: v1.PersistentVolumeSource{ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ - VolumePath: vmDiskPath, - FSType: fstype, + VolumePath: volSpec.Path, + FSType: volSpec.Fstype, + StoragePolicyName: volSpec.StoragePolicyName, + StoragePolicyID: volSpec.StoragePolicyID, }, }, }, diff --git a/pkg/volume/vsphere_volume/vsphere_volume_test.go b/pkg/volume/vsphere_volume/vsphere_volume_test.go index 6608c513f6461..ba16b58187113 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_test.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_test.go @@ -63,8 +63,15 @@ func getFakeDeviceName(host volume.VolumeHost, volPath string) string { return path.Join(host.GetPluginDir(vsphereVolumePluginName), "device", volPath) } -func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, fstype string, err error) { - return "[local] test-volume-name.vmdk", 100, "ext4", nil +func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) { + volSpec = &VolumeSpec{ + Path: "[local] test-volume-name.vmdk", + Size: 100, + Fstype: "ext4", + StoragePolicyName: "gold", + StoragePolicyID: "1234", + } + return volSpec, nil } func (fake *fakePDManager) DeleteVolume(vd *vsphereVolumeDeleter) error { @@ -155,6 +162,10 @@ func TestPlugin(t *testing.T) { t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath) } + if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.StoragePolicyName != "gold" { + t.Errorf("Provision() returned unexpected storagepolicy 
name %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.StoragePolicyName) + } + cap := persistentSpec.Spec.Capacity[v1.ResourceStorage] size := cap.Value() if size != 100*1024 { diff --git a/pkg/volume/vsphere_volume/vsphere_volume_util.go b/pkg/volume/vsphere_volume/vsphere_volume_util.go index 7f97dda56b2ae..62aa33979ea23 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -39,6 +39,7 @@ const ( diskformat = "diskformat" datastore = "datastore" Fstype = "fstype" + StoragePolicyName = "storagepolicyname" HostFailuresToTolerateCapability = "hostfailurestotolerate" ForceProvisioningCapability = "forceprovisioning" @@ -63,6 +64,14 @@ var ErrProbeVolume = errors.New("Error scanning attached volumes") type VsphereDiskUtil struct{} +type VolumeSpec struct { + Path string + Size int + Fstype string + StoragePolicyID string + StoragePolicyName string +} + func verifyDevicePath(path string) (string, error) { if pathExists, err := volumeutil.PathExists(path); err != nil { return "", fmt.Errorf("Error checking if path exists: %v", err) @@ -74,11 +83,11 @@ func verifyDevicePath(path string) (string, error) { } // CreateVolume creates a vSphere volume. -func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, fstype string, err error) { - +func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) { + var fstype string cloud, err := getCloudProvider(v.plugin.host.GetCloudProvider()) if err != nil { - return "", 0, "", err + return nil, err } capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] @@ -103,37 +112,48 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPa case Fstype: fstype = value glog.V(4).Infof("Setting fstype as %q", fstype) + case StoragePolicyName: + volumeOptions.StoragePolicyName = value + glog.V(4).Infof("Setting StoragePolicyName as %q", volumeOptions.StoragePolicyName) case HostFailuresToTolerateCapability, ForceProvisioningCapability, CacheReservationCapability, DiskStripesCapability, ObjectSpaceReservationCapability, IopsLimitCapability: capabilityData, err := validateVSANCapability(strings.ToLower(parameter), value) if err != nil { - return "", 0, "", err - } else { - volumeOptions.StorageProfileData += capabilityData + return nil, err } - + volumeOptions.VSANStorageProfileData += capabilityData default: - return "", 0, "", fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName()) + return nil, fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName()) } } - if volumeOptions.StorageProfileData != "" { - volumeOptions.StorageProfileData = "(" + volumeOptions.StorageProfileData + ")" + if volumeOptions.VSANStorageProfileData != "" { + if volumeOptions.StoragePolicyName != "" { + return nil, fmt.Errorf("Cannot specify storage policy capabilities along with storage policy name. 
Please specify only one.") + } + volumeOptions.VSANStorageProfileData = "(" + volumeOptions.VSANStorageProfileData + ")" } - glog.V(1).Infof("StorageProfileData in vsphere volume %q", volumeOptions.StorageProfileData) + glog.V(4).Infof("VSANStorageProfileData in vsphere volume %q", volumeOptions.VSANStorageProfileData) // TODO: implement PVC.Selector parsing if v.options.PVC.Spec.Selector != nil { - return "", 0, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere") + return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere") } - vmDiskPath, err = cloud.CreateVolume(volumeOptions) + vmDiskPath, err := cloud.CreateVolume(volumeOptions) if err != nil { glog.V(2).Infof("Error creating vsphere volume: %v", err) - return "", 0, "", err + return nil, err + } + volSpec = &VolumeSpec{ + Path: vmDiskPath, + Size: volSizeKB, + Fstype: fstype, + StoragePolicyName: volumeOptions.StoragePolicyName, + StoragePolicyID: volumeOptions.StoragePolicyID, } glog.V(2).Infof("Successfully created vsphere volume %s", name) - return vmDiskPath, volSizeKB, fstype, nil + return volSpec, nil } // DeleteVolume deletes a vSphere volume.
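For reviewers who want to sanity-check the SPBM lookup outside the cloud provider, below is a minimal standalone sketch built on the same govmomi calls used in `CreateVolume` (`pbm.NewClient` and `ProfileIDByName`). The environment variable names and the hard-coded "gold" policy are illustrative assumptions, and TLS verification is disabled purely for brevity:

```go
package main

import (
	"context"
	"fmt"
	"net/url"
	"os"

	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/vim25/soap"
)

func main() {
	ctx := context.Background()

	// vCenter endpoint and credentials; the variable names are illustrative.
	u, err := soap.ParseURL(os.Getenv("VC_URL"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	u.User = url.UserPassword(os.Getenv("VC_USER"), os.Getenv("VC_PASSWORD"))

	// Connect to vCenter. The final argument skips certificate verification,
	// which is acceptable only for a quick manual check.
	c, err := govmomi.NewClient(ctx, u, true)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer c.Logout(ctx)

	// Same calls the cloud provider makes: build a PBM client on top of the
	// vim25 client and resolve the policy name to an SPBM profile ID.
	pbmClient, err := pbm.NewClient(ctx, c.Client)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	profileID, err := pbmClient.ProfileIDByName(ctx, "gold")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("SPBM profile ID for policy %q: %s\n", "gold", profileID)
}
```

A disk provisioned with this profile ID can then be checked against the policy's compliance view in vCenter.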