diff --git a/go.mod b/go.mod
index 698cc06b..676b1530 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,7 @@ require (
 	github.com/dell/dell-csi-extensions/volumeGroupSnapshot v1.2.3-0.20230517135918-9920e636bff1
 	github.com/dell/gocsi v1.7.0
 	github.com/dell/gofsutil v1.12.0
-	github.com/dell/goscaleio v1.10.0
+	github.com/dell/goscaleio v1.11.1-0.20230707063208-b67372e0f8d0
 	github.com/fsnotify/fsnotify v1.5.1
 	github.com/golang/protobuf v1.5.3
 	github.com/google/uuid v1.3.0
diff --git a/go.sum b/go.sum
index 81d45a72..7df67b41 100644
--- a/go.sum
+++ b/go.sum
@@ -114,8 +114,8 @@ github.com/dell/gocsi v1.7.0 h1:fMQO2zwAXCaIsUoPCcnnuPMwfQMoaI1/0aqkQVndlxU=
 github.com/dell/gocsi v1.7.0/go.mod h1:X/8Ll8qqKAKCenmd1gPJMUvUmgY8cK0LiS8Pck12UaU=
 github.com/dell/gofsutil v1.12.0 h1:oo2YHfGFKHvHS1urtqjOIKpaHwcdyqacwKHLXzUg33M=
 github.com/dell/gofsutil v1.12.0/go.mod h1:mGMN5grVDtHv2imNw5+gFr2RmCqeyYgBFBldUbHtV78=
-github.com/dell/goscaleio v1.10.0 h1:XCEI9j+IlbqNPY7uvBjNNlT0Nt2PMXlnyxUavmennVY=
-github.com/dell/goscaleio v1.10.0/go.mod h1:Zh2iQ44Jd8FMqU2h+rT5x1K6mdPMKQ15lkFxojU4z3w=
+github.com/dell/goscaleio v1.11.1-0.20230707063208-b67372e0f8d0 h1:3GqvTjqCAAG6y8FDUtKRR9q/Uj5lKD1hFD6w7FTzhww=
+github.com/dell/goscaleio v1.11.1-0.20230707063208-b67372e0f8d0/go.mod h1:dMTrHnXSsPus+Kd9mrs0JuyrCndoKvFP/bbEdc21Bi8=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
diff --git a/helm/csi-vxflexos/templates/controller.yaml b/helm/csi-vxflexos/templates/controller.yaml
index 8c4b4bef..00cca354 100644
--- a/helm/csi-vxflexos/templates/controller.yaml
+++ b/helm/csi-vxflexos/templates/controller.yaml
@@ -397,6 +397,14 @@ spec:
             value: "{{ .Values.controller.healthMonitor.enabled }}"
           {{- end }}
           {{- end }}
+          {{- if hasKey .Values "nfsAcls" }}
+          - name: X_CSI_NFS_ACLS
+            value: "{{ .Values.nfsAcls }}"
+          {{- end }}
+          {{- if hasKey .Values "externalAccess" }}
+          - name: X_CSI_POWERFLEX_EXTERNAL_ACCESS
+            value: "{{ .Values.externalAccess }}"
+          {{- end }}
           volumeMounts:
           - name: socket-dir
             mountPath: /var/run/csi
diff --git a/helm/csi-vxflexos/values.yaml b/helm/csi-vxflexos/values.yaml
index 3efb8d38..06fbbf13 100644
--- a/helm/csi-vxflexos/values.yaml
+++ b/helm/csi-vxflexos/values.yaml
@@ -38,6 +38,11 @@ kubeletConfigDir: /var/lib/kubelet
 # Default value: none
 defaultFsType: ext4
 
+# externalAccess: specifies additional entries that allow hosts to access NFS volumes. Both a single IP address and a subnet are valid entries.
+# Allowed Values: x.x.x.x/xx or x.x.x.x
+# Default Value: None
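+# Examples: externalAccess: "10.0.0.0/24" (a subnet) or externalAccess: "10.0.0.1" (a single host)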
+externalAccess:
+
 # imagePullPolicy: Policy to determine if the image should be pulled prior to starting the container.
 # Allowed values:
 #  Always: Always pull the image.
@@ -46,6 +51,21 @@ defaultFsType: ext4
 # Default value: None
 imagePullPolicy: IfNotPresent
 
+# nfsAcls: enables setting permissions on the NFS mount directory
+# This value acts as the default NFS ACL (nfsAcls) when one is not specified for an array config in the secret
+# Permissions can be specified in two formats:
+# 1) Unix mode (NFSv3)
+# 2) NFSv4 ACLs (NFSv4)
+# NFSv4 ACLs are supported on NFSv4 shares only.
+# Allowed values:
+# 1) Unix mode: valid octal mode number
+#    Examples: "0777", "777", "0755"
+# 2) NFSv4 acls: valid NFSv4 acls, separated by commas
+#    Examples: "A::OWNER@:RWX,A::GROUP@:RWX", "A::OWNER@:rxtncy"
+# Optional: true
+# Default value: "0777"
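+# For example, "0755" grants the owner full access and gives group/others read and execute only.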
+nfsAcls: "0777"
+
 # "enablesnapshotcgdelete"- a boolean that, when enabled, will delete all snapshots in a consistency group
 # everytime a snap in the group is deleted
 # Allowed values: true, false
diff --git a/samples/secret.yaml b/samples/secret.yaml
index 0edb1e98..f2e422d2 100644
--- a/samples/secret.yaml
+++ b/samples/secret.yaml
@@ -24,6 +24,28 @@
     # defines the MDM(s) that SDC should register with on start.
     # Allowed values: a list of IP addresses or hostnames separated by comma.
     # Default value: none
+
+    # NFS is only supported on arrays >= 4.0
+    # nasName: the NAS server to be used for NFS volumes
+    # Allowed Values: string - (name of NAS server)
+    # Default Value: None
+    nasName: "nas-server"
+
+    # nfsAcls: enables setting permissions on the NFS mount directory
+    # This value will be used if a storage class does not have the NFS ACL (nfsAcls) parameter specified
+    # Permissions can be specified in two formats:
+    # 1) Unix mode (NFSv3)
+    # 2) NFSv4 ACLs (NFSv4)
+    # NFSv4 ACLs are supported on NFSv4 shares only.
+    # Allowed values:
+    # 1) Unix mode: valid octal mode number
+    #    Examples: "0777", "777", "0755"
+    # 2) NFSv4 acls: valid NFSv4 acls, separated by commas
+    #    Examples: "A::OWNER@:RWX,A::GROUP@:RWX", "A::OWNER@:rxtncy"
+    # Optional: true
+    # Default value: "0777"
+    # nfsAcls: "0777"
+
     mdm: "10.0.0.1,10.0.0.2"
   - username: "admin"
     password: "Password123"
diff --git a/samples/storageclass/storageclass-nfs.yaml b/samples/storageclass/storageclass-nfs.yaml
new file mode 100644
index 00000000..179fdc31
--- /dev/null
+++ b/samples/storageclass/storageclass-nfs.yaml
@@ -0,0 +1,97 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: vxflexos-nfs
+provisioner: csi-vxflexos.dellemc.com
+# reclaimPolicy: PVs that are dynamically created by a StorageClass will have the reclaim policy specified here
+# Allowed values:
+#   Retain: retain the PV after PVC deletion
+#   Delete: delete the PV after PVC deletion
+# Optional: true
+# Default value: Delete
+reclaimPolicy: Delete
+# allowVolumeExpansion: allows users to resize the volume by editing the corresponding PVC object
+# Allowed values:
+#   true: allow users to resize the PVC
+#   false: do not allow users to resize the PVC
+# Optional: true
+# Default value: false
+allowVolumeExpansion: true
+parameters:
+  # Storage pool to use on system
+  # Optional: false
+  storagepool: # Insert Storage pool
+  # Protection domain that the storage pool above belongs to
+  # Needed if the array has two storage pools that share the same name but belong to different protection domains
+  # Optional: true
+  # Uncomment the line below if you want to use protectiondomain
+  # protectiondomain: # Insert Protection domain name
+  # System you would like this storage class to use
+  # Allowed values: one string for system ID
+  # Optional: false
+  systemID: # Insert System ID
+  # format options to pass to mkfs
+  # Allowed values: a string dictating the fs options you want passed
+  # Optional: true
+  # Uncomment the line below if you want to use mkfsFormatOption
+  # mkfsFormatOption: "" # Insert file system format option
+  # Filesystem type for volumes created by the storageclass
+  # Default value: None if defaultFsType is not mentioned in values.yaml
+  # Else the defaultFsType value mentioned in values.yaml will be used as the default value
+  csi.storage.k8s.io/fstype: nfs
+  # Limit the volume network bandwidth
+  # Value is a positive number in granularity of 1024 Kbps; 0 = unlimited
+  # Allowed values: one string for bandwidth limit in Kbps
+  # Optional: true
+  # Uncomment the line below if you want to use bandwidthLimitInKbps
+  # bandwidthLimitInKbps: # Insert bandwidth limit in Kbps
+  # Limit the volume IOPS
+  # The number of IOPS must be greater than 10; 0 = unlimited
+  # Allowed values: one string for iops limit
+  # Optional: true
+  # Uncomment the line below if you want to use iopsLimit
+  # iopsLimit: # Insert iops limit
+
+  # nasName: NAS server's name. If not specified, the value from secret.yaml will be used
+  # Allowed values: string
+  # Optional: true
+  # Default value: None
+  nasName: "nas-server"
+
+  # allowRoot: enables or disables root squashing (valid only for NFS)
+  # Allowed values:
+  #   true: will allow root users to use their privileges
+  #   false: will prevent root users on NFS clients from exercising root privileges on the NFS server
+  # Optional: true
+  # Default value: false
+  allowRoot: "false"
+
+  # nfsAcls: enables setting permissions on the NFS mount directory
+  # This value overrides the NFS ACL (nfsAcls) attribute of the corresponding array config in the secret, if present
+  # Permissions can be specified in two formats:
+  # 1) Unix mode (NFSv3)
+  # 2) NFSv4 ACLs (NFSv4)
+  # NFSv4 ACLs are supported on NFSv4 shares only.
+  # Allowed values:
+  # 1) Unix mode: valid octal mode number
+  #    Examples: "0777", "777", "0755"
+  # 2) NFSv4 acls: valid NFSv4 acls, separated by commas
+  #    Examples: "A::OWNER@:RWX,A::GROUP@:RWX", "A::OWNER@:rxtncy"
+  # Optional: true
+  # Default value: "0777"
+  # nfsAcls: "0777"
+
+# volumeBindingMode determines how volume binding and dynamic provisioning should occur
+# Allowed values:
+#   Immediate: volume binding and dynamic provisioning occurs once the PVC is created
+#   WaitForFirstConsumer: delay the binding and provisioning of a PV until a pod using the PVC is created
+# Optional: false
+# Default value: WaitForFirstConsumer (required for the topology section below)
+volumeBindingMode: WaitForFirstConsumer
+# allowedTopologies helps schedule pods on worker nodes that match all of the expressions below.
+allowedTopologies:
+- matchLabelExpressions:
+  - key: csi-vxflexos.dellemc.com/<systemID>-nfs # Insert System ID in place of <systemID>
+    values:
+    - "true"
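+# Example: with system ID 14dbbf5617523654, the key above becomes
+# csi-vxflexos.dellemc.com/14dbbf5617523654-nfs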
diff --git a/scripts/build_ubi_micro.sh b/scripts/build_ubi_micro.sh
index 2ead068f..29eab6d5 100755
--- a/scripts/build_ubi_micro.sh
+++ b/scripts/build_ubi_micro.sh
@@ -14,7 +14,7 @@
 microcontainer=$(buildah from $1)
 micromount=$(buildah mount $microcontainer)
-dnf install --installroot $micromount --releasever=8 --nodocs --setopt install_weak_deps=false --setopt=reposdir=/etc/yum.repos.d/ e4fsprogs xfsprogs libaio kmod numactl util-linux -y
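+# nfs-utils is new here: it provides mount.nfs, which the node image needs to mount NFS exports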
+dnf install --installroot $micromount --releasever=8 --nodocs --setopt install_weak_deps=false --setopt=reposdir=/etc/yum.repos.d/ e4fsprogs xfsprogs libaio kmod numactl util-linux nfs-utils -y
 dnf clean all --installroot $micromount
 buildah umount $microcontainer
 buildah commit $microcontainer csipowerflex-ubimicro
diff --git a/service/controller.go b/service/controller.go
index 9c96f35b..4a4115a5 100644
--- a/service/controller.go
+++ b/service/controller.go
@@ -59,6 +59,24 @@ const (
 	// volume create parameters map
 	KeyMkfsFormatOption = "mkfsFormatOption"
 
+	// KeyNasName is the key used to get the NAS name from the
+	// volume create parameters map
+	KeyNasName = "nasName"
+
+	// KeyNfsACL is the key used to get the NFS ACL from the
+	// volume create parameters map
+	KeyNfsACL = "nfsAcls"
+
+	// KeyFsType is the key used to get the filesystem type from the
+	// volume create parameters map
+	KeyFsType = "fsType"
+
+	// NFSExportLocalPath is the local path for an NFSExport
+	NFSExportLocalPath = "/"
+	// NFSExportNamePrefix is the prefix used for nfs exports created by the
+	// csi-powerflex driver
+	NFSExportNamePrefix = "csishare-"
+
 	// DefaultVolumeSizeKiB is the default volume size to create on a ScaleIO
 	// cluster when no size is given, expressed in KiB
 	DefaultVolumeSizeKiB = 16 * kiBytesInGiB
@@ -83,6 +101,7 @@ const (
 	removeModeOnlyMe                    = "ONLY_ME"
 	sioGatewayNotFound                  = "Not found"
 	sioGatewayVolumeNotFound            = "Could not find the volume"
+	sioGatewayFileSystemNotFound        = "couldn't find filesystem by id"
 	sioVolumeRemovalOperationInProgress = "A volume removal operation is currently in progress"
 	sioGatewayVolumeNameInUse           = "Volume name already in use. Please use a different name."
 	errNoMultiMap                       = "volume not enabled for mapping to multiple hosts"
@@ -136,9 +155,15 @@ func (s *service) CreateVolume(
 	s.logStatistics()
 
 	cr := req.GetCapacityRange()
-	sizeInKiB, err := validateVolSize(cr)
-	if err != nil {
-		return nil, err
+
+	// Check for filesystem type
+	isNFS := false
+	var fsType string
+	if len(req.VolumeCapabilities) != 0 {
+		fsType = req.VolumeCapabilities[0].GetMount().GetFsType()
+		if fsType == "nfs" {
+			isNFS = true
+		}
 	}
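+	// fsType is populated from the storage class's csi.storage.k8s.io/fstype parameter
+	// (set to "nfs" in samples/storageclass/storageclass-nfs.yaml); "nfs" routes the
+	// request down the filesystem path instead of the block path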
 
 	// validate AccessibleTopology
@@ -150,7 +175,6 @@ func (s *service) CreateVolume(
 	var volumeTopology []*csi.Topology
 	systemSegments := map[string]string{} // topology segments matching requested system for a volume
 	if accessibility != nil && len(accessibility.GetPreferred()) > 0 {
-		requestedSystem := ""
 		sID := ""
 		system := s.systems[systemID]
@@ -170,6 +194,20 @@
 				constraint = tokens[1]
 			}
 			Log.Printf("Found topology constraint: VxFlex OS system: %s", constraint)
+
+			// Update the constraint with respect to the topology specified for an NFS volume
+			if isNFS {
+				nfsTokens := strings.Split(constraint, "-")
+				nfsLabel := ""
+				if len(nfsTokens) > 1 {
+					constraint = nfsTokens[0]
+					nfsLabel = nfsTokens[1]
+					if nfsLabel != "nfs" {
+						return nil, status.Errorf(codes.InvalidArgument,
+							"Invalid topology requested for NFS Volume. Please validate your storage class has nfs topology.")
+					}
+				}
+			}
 			if constraint == sID || constraint == sName {
 				if constraint == sID {
 					requestedSystem = sID
@@ -199,28 +237,18 @@
 		}
 	}
 
-	params = mergeStringMaps(params, req.GetSecrets())
-
-	// We require the storagePool name for creation
-	sp, ok := params[KeyStoragePool]
-	if !ok {
-		return nil, status.Errorf(codes.InvalidArgument,
-			"%s is a required parameter", KeyStoragePool)
-	}
-
-	pdID := ""
-	pd, ok := params[KeyProtectionDomain]
-	if !ok {
-		Log.Printf("Protection Domain name not provided; there could be conflicts if two storage pools share a name")
-	} else {
-		pdID, err = s.getProtectionDomainIDFromName(systemID, pd)
-		if err != nil {
-			return nil, err
+	if len(req.VolumeCapabilities) != 0 {
+		if req.VolumeCapabilities[0].GetBlock() != nil {
+			// We need to check if the user requests raw block access from nfs and prevent that
+			fsType, ok := params[KeyFsType]
+			// FsType can be empty
+			if ok && fsType == "nfs" {
+				return nil, status.Errorf(codes.InvalidArgument, "raw block requested from NFS Volume")
+			}
 		}
 	}
-
-	volType := s.getVolProvisionType(params) // Thick or Thin
-
+	// fetch volume name
 	name := req.GetName()
 	if name == "" {
 		return nil, status.Error(codes.InvalidArgument,
@@ -233,121 +261,267 @@
 		req.Name = name
 	}
 
-	contentSource := req.GetVolumeContentSource()
-	if contentSource != nil {
-		volumeSource := contentSource.GetVolume()
-		if volumeSource != nil {
-			Log.Printf("volume %s specified as volume content source", volumeSource.VolumeId)
-			return s.Clone(req, volumeSource, name, sizeInKiB, sp)
+	nfsAcls := s.opts.NfsAcls
+	var arr *ArrayConnectionData
+	sysID := s.opts.defaultSystemID
+	arr = s.opts.arrays[sysID]
+	volName := name
+
+	if isNFS {
+		// fetch NAS server ID
+		nasName, ok := params[KeyNasName]
+		if !ok {
+			return nil, status.Errorf(codes.InvalidArgument, "`%s` is a required parameter", KeyNasName)
 		}
-		snapshotSource := contentSource.GetSnapshot()
-		if snapshotSource != nil {
-			Log.Printf("snapshot %s specified as volume content source", snapshotSource.SnapshotId)
-			return s.createVolumeFromSnapshot(req, snapshotSource, name, sizeInKiB, sp)
+		nasServerID, err := s.getNASServerIDFromName(systemID, nasName)
+		if err != nil {
+			return nil, err
 		}
-	}
 
-	// TODO handle Access mode in volume capability
+		// fetch protection domain ID (optional), then the storage pool ID
+		pdID := ""
+		pd, ok := params[KeyProtectionDomain]
+		if !ok {
+			Log.Printf("Protection Domain name not provided; there could be conflicts if two storage pools share a name")
+		} else {
+			pdID, err = s.getProtectionDomainIDFromName(systemID, pd)
+			if err != nil {
+				return nil, err
+			}
+		}
 
-	fields := map[string]interface{}{
-		"name":                                name,
-		"sizeInKiB":                           sizeInKiB,
-		"storagePool":                         sp,
-		"volType":                             volType,
-		HeaderPersistentVolumeName:            params[CSIPersistentVolumeName],
-		HeaderPersistentVolumeClaimName:       params[CSIPersistentVolumeClaimName],
-		HeaderPersistentVolumeClaimNamespace:  params[CSIPersistentVolumeClaimNamespace],
-	}
-
-	Log.WithFields(fields).Info("creating volume")
-
-	volumeParam := &siotypes.VolumeParam{
-		Name:           name,
-		VolumeSizeInKb: fmt.Sprintf("%d", sizeInKiB),
-		VolumeType:     volType,
-	}
-
-	// If the VolumeParam has a MetaData method, set the values accordingly.
-	if t, ok := interface{}(volumeParam).(interface {
-		MetaData() http.Header
-	}); ok {
-		t.MetaData().Set(HeaderPersistentVolumeName, params[CSIPersistentVolumeName])
-		t.MetaData().Set(HeaderPersistentVolumeClaimName, params[CSIPersistentVolumeClaimName])
-		t.MetaData().Set(HeaderPersistentVolumeClaimNamespace, params[CSIPersistentVolumeClaimNamespace])
-		t.MetaData().Set(HeaderCSIPluginIdentifier, Name)
-		t.MetaData().Set(HeaderSystemIdentifier, systemID)
-	} else {
-		Log.Println("warning: goscaleio.VolumeParam: no MetaData method exists, consider updating goscaleio library.")
-	}
+		storagePoolName, ok := params[KeyStoragePool]
+		if !ok {
+			return nil, status.Errorf(codes.InvalidArgument,
+				"%s is a required parameter", KeyStoragePool)
+		}
+		storagePoolID, err := s.getStoragePoolID(storagePoolName, systemID, pdID)
+		if err != nil {
+			return nil, err
+		}
 
-	createResp, err := s.adminClients[systemID].CreateVolume(volumeParam, sp, pdID)
-	if err != nil {
-		// handle case where volume already exists
-		if !strings.EqualFold(err.Error(), sioGatewayVolumeNameInUse) {
-			Log.Printf("error creating volume: %s pool %s error: %s", name, sp, err.Error())
-			return nil, status.Errorf(codes.Internal,
-				"error when creating volume %s storagepool %s: %s", name, sp, err.Error())
+		// fetch NFS ACL
+		if params[KeyNfsACL] != "" {
+			nfsAcls = params[KeyNfsACL] // Storage class takes precedence
+		} else if arr.NfsAcls != "" {
+			nfsAcls = arr.NfsAcls // Secrets next
 		}
-	}
 
-	var id string
-	if createResp == nil {
-		// volume already exists, look it up by name
-		id, err = s.adminClients[systemID].FindVolumeID(name)
+		// fetch volume size
+		size := cr.GetRequiredBytes()
+
+		// log all parameters used in CreateVolume call
+		fields := map[string]interface{}{
+			"Name":                               volName,
+			"SizeInB":                            size,
+			"StoragePoolID":                      storagePoolID,
+			"NasServerID":                        nasServerID,
+			HeaderPersistentVolumeName:           params[CSIPersistentVolumeName],
+			HeaderPersistentVolumeClaimName:      params[CSIPersistentVolumeClaimName],
+			HeaderPersistentVolumeClaimNamespace: params[CSIPersistentVolumeClaimNamespace],
+		}
+		Log.WithFields(fields).Info("Executing CreateVolume with the following fields")
+
+		volumeParam := &siotypes.FsCreate{
+			Name:          volName,
+			SizeTotal:     int(size),
+			StoragePoolID: storagePoolID,
+			NasServerID:   nasServerID,
+		}
+
+		// Idempotency check
+		system, err := s.adminClients[systemID].FindSystem(systemID, "", "")
+		if err != nil {
+			return nil, err
+		}
+		existingFS, err := 
system.GetFileSystemByIDName("", volName) + + if existingFS != nil { + if existingFS.SizeTotal == int(size) { + vi := s.getCSIVolumeFromFilesystem(existingFS, systemID) + vi.VolumeContext[KeyNasName] = nasName + vi.VolumeContext[KeyNfsACL] = nfsAcls + vi.VolumeContext[KeyFsType] = fsType + nfsTopology := s.GetNfsTopology(systemID) + vi.AccessibleTopology = nfsTopology + csiResp := &csi.CreateVolumeResponse{ + Volume: vi, + } + Log.Info("Volume exists in the requested state with same size") + return csiResp, nil + } + Log.Info("'Volume name' already exists and size is different") + return nil, status.Error(codes.AlreadyExists, "'Volume name' already exists and size is different.") + } + Log.Debug("Volume does not exist, proceeding to create new volume") + fsResp, err := system.CreateFileSystem(volumeParam) if err != nil { - return nil, status.Errorf(codes.Internal, err.Error()) + Log.Debugf("Create volume response error:%v", err) + return nil, status.Errorf(codes.Unknown, "Create Volume %s failed with error: %v", volName, err) + } + + newFs, err := system.GetFileSystemByIDName(fsResp.ID, "") + if err != nil { + Log.Debugf("Find Volume response: %v Error: %v", newFs, err) + } + if newFs != nil { + vi := s.getCSIVolumeFromFilesystem(newFs, systemID) + vi.VolumeContext[KeyNasName] = nasName + vi.VolumeContext[KeyNfsACL] = nfsAcls + vi.VolumeContext[KeyFsType] = fsType + nfsTopology := s.GetNfsTopology(systemID) + vi.AccessibleTopology = nfsTopology + csiResp := &csi.CreateVolumeResponse{ + Volume: vi, + } + return csiResp, nil } } else { - id = createResp.ID - } + size, err := validateVolSize(cr) + if err != nil { + return nil, err + } - vol, err := s.getVolByID(id, systemID) - if err != nil { - return nil, status.Errorf(codes.Unavailable, - "error retrieving volume details: %s", err.Error()) - } - vi := s.getCSIVolume(vol, systemID) - vi.AccessibleTopology = volumeTopology + params = mergeStringMaps(params, req.GetSecrets()) - // since the volume could have already exists, double check that the - // volume has the expected parameters - spID, err := s.getStoragePoolID(sp, systemID, pdID) - if err != nil { - return nil, status.Errorf(codes.Unavailable, - "volume exists, but could not verify parameters: %s", - err.Error()) - } - if vol.StoragePoolID != spID { - return nil, status.Errorf(codes.AlreadyExists, - "volume exists in %s, but in different storage pool than requested %s", vol.StoragePoolID, spID) - } + // We require the storagePool name for creation + sp, ok := params[KeyStoragePool] + if !ok { + return nil, status.Errorf(codes.InvalidArgument, + "%s is a required parameter", KeyStoragePool) + } - if (vi.CapacityBytes / bytesInKiB) != sizeInKiB { - return nil, status.Errorf(codes.AlreadyExists, - "volume exists, but at different size than requested") - } - copyInterestingParameters(req.GetParameters(), vi.VolumeContext) + pdID := "" + pd, ok := params[KeyProtectionDomain] + if !ok { + Log.Printf("Protection Domain name not provided; there could be conflicts if two storage pools share a name") + } else { + pdID, err = s.getProtectionDomainIDFromName(systemID, pd) + if err != nil { + return nil, err + } + } - Log.Printf("volume %s (%s) created %s\n", vi.VolumeContext["Name"], vi.VolumeId, vi.VolumeContext["CreationTime"]) + volType := s.getVolProvisionType(params) // Thick or Thin - csiResp := &csi.CreateVolumeResponse{ - Volume: vi, - } + contentSource := req.GetVolumeContentSource() + if contentSource != nil { + volumeSource := contentSource.GetVolume() + if volumeSource != nil { + 
Log.Printf("volume %s specified as volume content source", volumeSource.VolumeId) + return s.Clone(req, volumeSource, name, size, sp) + } + snapshotSource := contentSource.GetSnapshot() + if snapshotSource != nil { + Log.Printf("snapshot %s specified as volume content source", snapshotSource.SnapshotId) + return s.createVolumeFromSnapshot(req, snapshotSource, name, size, sp) + } + } - s.clearCache() + // TODO handle Access mode in volume capability - volumeID := getVolumeIDFromCsiVolumeID(vi.VolumeId) - vol, err = s.getVolByID(volumeID, systemID) + fields := map[string]interface{}{ + "name": name, + "sizeInKiB": size, + "storagePool": sp, + "volType": volType, + HeaderPersistentVolumeName: params[CSIPersistentVolumeName], + HeaderPersistentVolumeClaimName: params[CSIPersistentVolumeClaimName], + HeaderPersistentVolumeClaimNamespace: params[CSIPersistentVolumeClaimNamespace], + } - counter := 0 + Log.WithFields(fields).Info("Executing CreateVolume with following fields") - for err != nil && counter < 100 { - time.Sleep(3 * time.Millisecond) + volumeParam := &siotypes.VolumeParam{ + Name: name, + VolumeSizeInKb: fmt.Sprintf("%d", size), + VolumeType: volType, + } + + // If the VolumeParam has a MetaData method, set the values accordingly. + if t, ok := interface{}(volumeParam).(interface { + MetaData() http.Header + }); ok { + t.MetaData().Set(HeaderPersistentVolumeName, params[CSIPersistentVolumeName]) + t.MetaData().Set(HeaderPersistentVolumeClaimName, params[CSIPersistentVolumeClaimName]) + t.MetaData().Set(HeaderPersistentVolumeClaimNamespace, params[CSIPersistentVolumeClaimNamespace]) + t.MetaData().Set(HeaderCSIPluginIdentifier, Name) + t.MetaData().Set(HeaderSystemIdentifier, systemID) + } else { + Log.Println("warning: goscaleio.VolumeParam: no MetaData method exists, consider updating goscaleio library.") + } + + createResp, err := s.adminClients[systemID].CreateVolume(volumeParam, sp, pdID) + if err != nil { + // handle case where volume already exists + if !strings.EqualFold(err.Error(), sioGatewayVolumeNameInUse) { + Log.Printf("error creating volume: %s pool %s error: %s", name, sp, err.Error()) + return nil, status.Errorf(codes.Internal, + "error when creating volume %s storagepool %s: %s", name, sp, err.Error()) + } + } + + var id string + if createResp == nil { + // volume already exists, look it up by name + id, err = s.adminClients[systemID].FindVolumeID(name) + if err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + } else { + id = createResp.ID + } + + vol, err := s.getVolByID(id, systemID) + if err != nil { + return nil, status.Errorf(codes.Unavailable, + "error retrieving volume details: %s", err.Error()) + } + vi := s.getCSIVolume(vol, systemID) + vi.AccessibleTopology = volumeTopology + + // since the volume could have already exists, double check that the + // volume has the expected parameters + spID, err := s.getStoragePoolID(sp, systemID, pdID) + if err != nil { + return nil, status.Errorf(codes.Unavailable, + "volume exists, but could not verify parameters: %s", + err.Error()) + } + if vol.StoragePoolID != spID { + return nil, status.Errorf(codes.AlreadyExists, + "volume exists in %s, but in different storage pool than requested %s", vol.StoragePoolID, spID) + } + + if (vi.CapacityBytes / bytesInKiB) != size { + return nil, status.Errorf(codes.AlreadyExists, + "volume exists, but at different size than requested") + } + copyInterestingParameters(req.GetParameters(), vi.VolumeContext) + + Log.Printf("volume %s (%s) created %s\n", 
+
+		// If the VolumeParam has a MetaData method, set the values accordingly.
+		if t, ok := interface{}(volumeParam).(interface {
+			MetaData() http.Header
+		}); ok {
+			t.MetaData().Set(HeaderPersistentVolumeName, params[CSIPersistentVolumeName])
+			t.MetaData().Set(HeaderPersistentVolumeClaimName, params[CSIPersistentVolumeClaimName])
+			t.MetaData().Set(HeaderPersistentVolumeClaimNamespace, params[CSIPersistentVolumeClaimNamespace])
+			t.MetaData().Set(HeaderCSIPluginIdentifier, Name)
+			t.MetaData().Set(HeaderSystemIdentifier, systemID)
+		} else {
+			Log.Println("warning: goscaleio.VolumeParam: no MetaData method exists, consider updating goscaleio library.")
+		}
+
+		createResp, err := s.adminClients[systemID].CreateVolume(volumeParam, sp, pdID)
+		if err != nil {
+			// handle case where volume already exists
+			if !strings.EqualFold(err.Error(), sioGatewayVolumeNameInUse) {
+				Log.Printf("error creating volume: %s pool %s error: %s", name, sp, err.Error())
+				return nil, status.Errorf(codes.Internal,
+					"error when creating volume %s storagepool %s: %s", name, sp, err.Error())
+			}
+		}
+
+		var id string
+		if createResp == nil {
+			// volume already exists, look it up by name
+			id, err = s.adminClients[systemID].FindVolumeID(name)
+			if err != nil {
+				return nil, status.Errorf(codes.Internal, err.Error())
+			}
+		} else {
+			id = createResp.ID
+		}
+
+		vol, err := s.getVolByID(id, systemID)
+		if err != nil {
+			return nil, status.Errorf(codes.Unavailable,
+				"error retrieving volume details: %s", err.Error())
+		}
+		vi := s.getCSIVolume(vol, systemID)
+		vi.AccessibleTopology = volumeTopology
+
+		// since the volume could already exist, double check that the
+		// volume has the expected parameters
+		spID, err := s.getStoragePoolID(sp, systemID, pdID)
+		if err != nil {
+			return nil, status.Errorf(codes.Unavailable,
+				"volume exists, but could not verify parameters: %s",
+				err.Error())
+		}
+		if vol.StoragePoolID != spID {
+			return nil, status.Errorf(codes.AlreadyExists,
+				"volume exists in %s, but in different storage pool than requested %s", vol.StoragePoolID, spID)
+		}
+
+		if (vi.CapacityBytes / bytesInKiB) != size {
+			return nil, status.Errorf(codes.AlreadyExists,
+				"volume exists, but at different size than requested")
+		}
+		copyInterestingParameters(req.GetParameters(), vi.VolumeContext)
+
+		Log.Printf("volume %s (%s) created %s\n", vi.VolumeContext["Name"], vi.VolumeId, vi.VolumeContext["CreationTime"])
+
+		vi.VolumeContext[KeyFsType] = fsType
+		csiResp := &csi.CreateVolumeResponse{
+			Volume: vi,
+		}
+		s.clearCache()
+
+		volumeID := getVolumeIDFromCsiVolumeID(vi.VolumeId)
+		vol, err = s.getVolByID(volumeID, systemID)
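+		// the new volume may not be queryable immediately after creation,
+		// so poll briefly (up to 100 tries, 3ms apart) before returning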
+		counter := 0
+
+		for err != nil && counter < 100 {
+			time.Sleep(3 * time.Millisecond)
+			vol, err = s.getVolByID(volumeID, systemID)
+			counter = counter + 1
+		}
+		return csiResp, err
+	}
+	return nil, status.Errorf(codes.NotFound, "Volume/Filesystem not found after create. %v", err)
 }
 
 // Copies the interesting parameters to the output map.
@@ -508,7 +682,6 @@ func validateVolSize(cr *csi.CapacityRange) (int64, error) {
 	minSize := cr.GetRequiredBytes()
 	maxSize := cr.GetLimitBytes()
-
 	if minSize < 0 || maxSize < 0 {
 		return 0, status.Errorf(
 			codes.OutOfRange,
@@ -562,6 +735,8 @@ func (s *service) DeleteVolume(
 		return nil, status.Error(codes.InvalidArgument,
 			"volume ID is required")
 	}
+
+	isNFS := strings.Contains(csiVolID, "/") // ensure no ambiguity with a legacy volume handle
 
 	err := s.checkVolumesMap(csiVolID)
 	if err != nil {
@@ -570,6 +745,70 @@ func (s *service) DeleteVolume(
 	}
 
+	if isNFS {
+		// get systemID from req
+		systemID := s.getSystemIDFromCsiVolumeID(csiVolID)
+		if systemID == "" {
+			// use default system
+			systemID = s.opts.defaultSystemID
+		}
+
+		if systemID == "" {
+			return nil, status.Error(codes.InvalidArgument,
+				"systemID is not found in the request and there is no default system")
+		}
+
+		if err := s.requireProbe(ctx, systemID); err != nil {
+			return nil, err
+		}
+
+		s.logStatistics()
+		system, err := s.adminClients[systemID].FindSystem(systemID, "", "")
+		if err != nil {
+			return nil, err
+		}
+		fsID := getFilesystemIDFromCsiVolumeID(csiVolID)
+		toBeDeletedFS, err := system.GetFileSystemByIDName(fsID, "")
+		if err != nil {
+			if strings.Contains(err.Error(), sioGatewayFileSystemNotFound) {
+				Log.WithFields(logrus.Fields{"id": fsID}).Debug("File System does not exist", fsID)
+				return &csi.DeleteVolumeResponse{}, nil
+			}
+		}
+
+		fsName := toBeDeletedFS.Name
+
+		// Check if an NFS export exists for the File system
+		client := s.adminClients[systemID]
+
+		nfsExport, err := s.getNFSExport(toBeDeletedFS, client)
+		if err != nil {
+			if !strings.Contains(err.Error(), "not found") {
+				return nil, status.Errorf(codes.Internal,
+					"error getting the NFS Export for the fs: %s", err.Error())
+			}
+		} else {
+			if nfsExport != nil {
+				return nil, status.Errorf(codes.FailedPrecondition, "Filesystem %s can not be deleted as it has associated NFS Export.", fsID)
+			}
+		}
+
+		Log.WithFields(logrus.Fields{"name": fsName, "id": fsID}).Info("Deleting FileSystem")
+		err = system.DeleteFileSystem(fsName)
+
+		if err != nil {
+			if strings.Contains(err.Error(), sioGatewayFileSystemNotFound) {
+				return &csi.DeleteVolumeResponse{}, nil
+			}
+			return nil, status.Errorf(codes.Internal,
+				"error removing filesystem: %s", err.Error())
+		}
+
+		return &csi.DeleteVolumeResponse{}, nil
+	}
+
 	// get systemID from req
 	systemID := s.getSystemIDFromCsiVolumeID(csiVolID)
 	if systemID == "" {
@@ -670,7 +909,14 @@ func (s *service) ControllerPublishVolume(
 		}
 	}
 
+	// create publish context
+	publishContext := make(map[string]string)
+	publishContext[KeyNasName] = volumeContext[KeyNasName]
+	publishContext[KeyNfsACL] = volumeContext[KeyNfsACL]
+
 	csiVolID := req.GetVolumeId()
+	publishContext["volumeContextId"] = csiVolID
+
 	if csiVolID == "" {
 		return nil, status.Error(codes.InvalidArgument,
 			"volume ID is required")
@@ -702,6 +948,57 @@ func (s *service) ControllerPublishVolume(
 	}
 
+	nodeID := req.GetNodeId()
+	if nodeID == "" {
+		return nil, status.Error(codes.InvalidArgument,
+			"node ID is required")
+	}
+
+	// Check for NFS protocol
+	fsType := volumeContext[KeyFsType]
+	isNFS := false
+	if fsType == "nfs" {
+		isNFS = true
+	}
+	if isNFS {
+		fsID := getFilesystemIDFromCsiVolumeID(csiVolID)
+		fs, err := s.getFilesystemByID(fsID, systemID)
+		if err != nil {
+			if strings.EqualFold(err.Error(), sioGatewayFileSystemNotFound) || strings.Contains(err.Error(), "must be a hexadecimal number") {
+				return nil, status.Error(codes.NotFound,
+					"volume not found")
+			}
+			return nil, status.Errorf(codes.Internal,
+				"failure checking volume status before controller publish: %s",
+				err.Error())
+		}
+
+		sdcIP, err := s.getSDCIP(nodeID, systemID)
+		if err != nil {
+			return nil, status.Errorf(codes.NotFound, err.Error())
+		}
+
+		publishContext["host"] = sdcIP
+
+		fsc := req.GetVolumeCapability()
+		if fsc == nil {
+			return nil, status.Error(codes.InvalidArgument,
+				"volume capability is required")
+		}
+
+		am := fsc.GetAccessMode()
+		if am == nil {
+			return nil, status.Error(codes.InvalidArgument,
+				"access mode is required")
+		}
+		if am.Mode == csi.VolumeCapability_AccessMode_UNKNOWN {
+			return nil, status.Error(codes.InvalidArgument,
+				errUnknownAccessMode)
+		}
+		// Export for NFS
+		resp, err := s.exportFilesystem(ctx, req, adminClient, fs, sdcIP, nodeID, publishContext, am)
+		return resp, err
+	}
 
 	volID := getVolumeIDFromCsiVolumeID(csiVolID)
 	vol, err := s.getVolByID(volID, systemID)
@@ -715,12 +1012,6 @@ func (s *service) ControllerPublishVolume(
 			err.Error())
 	}
 
-	nodeID := req.GetNodeId()
-	if nodeID == "" {
-		return nil, status.Error(codes.InvalidArgument,
-			"node ID is required")
-	}
-
 	sdcID, err := s.getSDCID(nodeID, systemID)
 	if err != nil {
 		return nil, status.Errorf(codes.NotFound, err.Error())
@@ -981,6 +1272,42 @@ func (s *service) ControllerUnpublishVolume(
 			"checkVolumesMap for id: %s failed : %s", csiVolID, err.Error())
 	}
 
+	nodeID := req.GetNodeId()
+	if nodeID == "" {
+		return nil, status.Error(codes.InvalidArgument,
+			"Node ID is required")
+	}
+
+	adminClient := s.adminClients[systemID]
+
+	isNFS := strings.Contains(csiVolID, "/")
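+	// NFS volume handles embed a "/" (systemID/filesystemID); legacy block volume handles never contain one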
+
+	if isNFS {
+		fsID := getFilesystemIDFromCsiVolumeID(csiVolID)
+		fs, err := s.getFilesystemByID(fsID, systemID)
+		if err != nil {
+			if strings.EqualFold(err.Error(), sioGatewayFileSystemNotFound) || strings.Contains(err.Error(), "must be a hexadecimal number") {
+				return nil, status.Error(codes.NotFound,
+					"volume not found")
+			}
+			return nil, status.Errorf(codes.Internal,
+				"failure checking volume status before controller unpublish: %s",
+				err.Error())
+		}
+
+		sdcIP, err := s.getSDCIP(nodeID, systemID)
+		if err != nil {
+			return nil, status.Errorf(codes.NotFound, err.Error())
+		}
+
+		// unexport for NFS
+		err = s.unexportFilesystem(ctx, req, adminClient, fs, req.GetVolumeId(), sdcIP, nodeID)
+		if err != nil {
+			return nil, err
+		}
+
+		return &csi.ControllerUnpublishVolumeResponse{}, nil
+	}
 
 	volID := getVolumeIDFromCsiVolumeID(csiVolID)
 	vol, err := s.getVolByID(volID, systemID)
@@ -995,12 +1322,6 @@ func (s *service) ControllerUnpublishVolume(
 			err.Error())
 	}
 
-	nodeID := req.GetNodeId()
-	if nodeID == "" {
-		return nil, status.Error(codes.InvalidArgument,
-			"Node ID is required")
-	}
-
 	sdcID, err := s.getSDCID(nodeID, systemID)
 	if err != nil {
 		return nil, status.Errorf(codes.NotFound, err.Error())
@@ -1019,7 +1340,6 @@ func (s *service) ControllerUnpublishVolume(
 		Log.Debug("volume already unpublished")
 		return &csi.ControllerUnpublishVolumeResponse{}, nil
 	}
-	adminClient := s.adminClients[systemID]
 
 	targetVolume := goscaleio.NewVolume(adminClient)
 	targetVolume.Volume = vol
@@ -2132,6 +2452,80 @@ func (s *service) ControllerExpandVolume(ctx context.Context, req *csi.Controlle
 	}
 
+	isNFS := strings.Contains(csiVolID, "/")
+
+	if isNFS {
+		fsID := getFilesystemIDFromCsiVolumeID(csiVolID)
+		systemID := s.getSystemIDFromCsiVolumeID(csiVolID)
+		if systemID == "" {
+			// use default system
+			systemID = s.opts.defaultSystemID
+		}
+
+		if systemID == "" {
+			return nil, status.Error(codes.InvalidArgument,
+				"systemID is not found in the request and there is no default system")
+		}
+
+		if err := s.requireProbe(ctx, systemID); err != nil {
+			return nil, err
+		}
+		fs, err := s.getFilesystemByID(fsID, systemID)
+		if err != nil {
+			if strings.EqualFold(err.Error(), sioGatewayFileSystemNotFound) || strings.Contains(err.Error(), "must be a hexadecimal number") {
+				return nil, status.Error(codes.NotFound,
+					"volume not found")
+			}
+			return nil, status.Errorf(codes.Internal, "failure to load volume: %s", err.Error())
+		}
+
+		fsName := fs.Name
+		cr := req.GetCapacityRange()
+		Log.Printf("cr: %v", cr)
+		requestedSize := int(cr.GetRequiredBytes())
+
+		Log.Printf("req.size: %d", requestedSize)
+		fields := map[string]interface{}{
+			"RequestID":      reqID,
+			"fileSystemName": fsName,
+			"RequestedSize":  requestedSize,
+		}
+		Log.WithFields(fields).Info("Executing ExpandVolume with the following fields")
+		allocatedSize := fs.SizeTotal
+		Log.Printf("allocatedSize: %d", allocatedSize)
+
+		// nil response returned if a volume shrink operation is attempted
+		if requestedSize < allocatedSize {
+			Log.Printf("volume shrink attempted")
+			return &csi.ControllerExpandVolumeResponse{}, nil
+		}
+
+		// idempotency check
+		if requestedSize == allocatedSize {
+			Log.Infof("Idempotent call detected for volume (%s) with requested size (%d) bytes and allocated size (%d) bytes",
+				fsName, requestedSize, allocatedSize)
+			return &csi.ControllerExpandVolumeResponse{
+				CapacityBytes:         int64(requestedSize),
+				NodeExpansionRequired: false}, nil
+		}
+
+		system, err := s.adminClients[systemID].FindSystem(systemID, "", "")
+		if err != nil {
+			return nil, err
+		}
+
+		if err := system.ModifyFileSystem(&siotypes.FSModify{Size: requestedSize}, fsID); err != nil {
+			Log.Errorf("NFS volume expansion failed with error: %s", err.Error())
+			return nil, status.Error(codes.Internal, err.Error())
+		}
+
+		csiResp := &csi.ControllerExpandVolumeResponse{
+			CapacityBytes:         int64(requestedSize),
+			NodeExpansionRequired: false,
+		}
+		return csiResp, nil
+	}
+
 	volID := getVolumeIDFromCsiVolumeID(csiVolID)
 	systemID := s.getSystemIDFromCsiVolumeID(csiVolID)
 	if systemID == "" {
diff --git a/service/envvars.go b/service/envvars.go
index 9afd1c47..9761066a 100644
--- a/service/envvars.go
+++ b/service/envvars.go
@@ -52,9 +52,15 @@ const (
 	// carried out or not.
 	EnvIsApproveSDCEnabled = "X_CSI_APPROVE_SDC_ENABLED"
 
-	// EnvReplicationContextPrefix enables sidecars to read required information from volume context
+	// EnvReplicationContextPrefix enables sidecars to read required information from volume context.
 	EnvReplicationContextPrefix = "X_CSI_REPLICATION_CONTEXT_PREFIX"
 
-	// EnvReplicationPrefix is used as a prefix to find out if replication is enabled
+	// EnvReplicationPrefix is used as a prefix to find out if replication is enabled.
 	EnvReplicationPrefix = "X_CSI_REPLICATION_PREFIX"
+
+	// EnvNfsAcls enables setting permissions on the NFS mount directory.
+	EnvNfsAcls = "X_CSI_NFS_ACLS"
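+	// populated from the helm chart's nfsAcls value; used when neither the storage class nor the secret sets one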
+
+	// EnvExternalAccess is used to specify additional entries for hosts to access NFS volumes.
+	EnvExternalAccess = "X_CSI_POWERFLEX_EXTERNAL_ACCESS"
 )
diff --git a/service/features/array-config/config b/service/features/array-config/config
index 498242e2..9da7f6f7 100644
--- a/service/features/array-config/config
+++ b/service/features/array-config/config
@@ -5,7 +5,8 @@
     "password": "Password123",
     "insecure": true,
     "isDefault": true,
-    "systemID": "14dbbf5617523654"
+    "systemID": "14dbbf5617523654",
+    "nasName": "dummy-name"
   },
   {
     "endpoint": "http://127.0.0.2",
@@ -14,6 +15,7 @@
     "skipCertificateValidation": true,
     "isDefault": false,
     "systemID": "15dbbf5617523655",
-    "AllSystemNames" : "15dbbf5617523655-previous-name"
+    "AllSystemNames" : "15dbbf5617523655-previous-name",
+    "nasName": "dummy-name"
   }
 ]
diff --git a/service/features/array-config/config.2 b/service/features/array-config/config.2
index 9bdd6d8b..2730c6cf 100644
--- a/service/features/array-config/config.2
+++ b/service/features/array-config/config.2
@@ -6,6 +6,7 @@
     "insecure": true,
     "isDefault": true,
-    "systemID": "14dbbf5617523654"
+    "systemID": "14dbbf5617523654",
+    "nasName": "dummy-name"
   },
   {
     "endpoint": "http://127.0.0.2",
@@ -14,6 +15,7 @@
     "insecure": true,
     "isDefault": false,
-    "systemID": "15dbbf5617523655"
+    "systemID": "15dbbf5617523655",
+    "nasName": "dummy-name"
   },
   {
     "username": "admin",
@@ -22,5 +24,6 @@
     "endpoint": "https://1.2.3.4",
     "insecure": true,
-    "isDefault": false
+    "isDefault": false,
+    "nasName": "dummy-name"
   }
 ]
diff --git a/service/features/controller_publish_unpublish.feature b/service/features/controller_publish_unpublish.feature
index b1701889..ddcc653c 100644
--- a/service/features/controller_publish_unpublish.feature
+++ b/service/features/controller_publish_unpublish.feature
@@ -16,7 +16,254 @@ Feature: VxFlex OS CSI interface
       | "single-writer"             |
       | "single-node-single-writer" |
       | "single-node-multi-writer"  |
-
+
+  Scenario Outline: a Basic NFS controller Publish no error
+    Given a VxFlexOS service
+    When I specify CreateVolumeMountRequest "nfs"
+    And I call CreateVolume "volume1"
+    Then a valid CreateVolumeResponse is returned
+    And I call NFS PublishVolume with <access>
+    Then a valid PublishVolumeResponse is returned
+    Examples:
+      | access                      |
+      | "single-writer"             |
+      | "single-node-single-writer" |
+      | "single-node-multi-writer"  |
+      | "multiple-reader"           |
+      | "multiple-writer"           |
+
+  Scenario: a Basic NFS controller Publish and unpublish no error
+    Given a VxFlexOS service
+    When I specify CreateVolumeMountRequest "nfs"
+    And I call CreateVolume "volume1"
+    Then a valid CreateVolumeResponse is returned
+    And I call NFS PublishVolume with "single-writer"
+    Then a valid PublishVolumeResponse is returned
+    And I call UnpublishVolume nfs
+    And no error was received
+    Then a valid UnpublishVolumeResponse is returned
+
+  Scenario: a Basic NFS controller Publish and unpublish NFS export not found error
+    Given a VxFlexOS service
+    When I specify CreateVolumeMountRequest "nfs"
+    And I call CreateVolume "volume1"
+    Then a valid CreateVolumeResponse is returned
+    And I call NFS PublishVolume with "single-writer"
+    Then a valid PublishVolumeResponse is returned
+    And I induce error "nfsExportNotFoundError"
+    And I call UnpublishVolume nfs
+    Then the error contains "Could not find NFS Export"
+
+  Scenario: a Basic NFS controller Publish and unpublish get NFS exports error
+    Given a VxFlexOS service
+    When I specify CreateVolumeMountRequest "nfs"
+    And I call 
CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I induce error "NFSExportsInstancesError" + And I call UnpublishVolume nfs + Then the error contains "error getting the NFS Exports" + + Scenario: a Basic NFS controller Publish and unpublish modify NFS export error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I induce error "nfsExportModifyError" + And I call UnpublishVolume nfs + Then the error contains "Allocating host access failed" + + Scenario: a Basic NFS controller Publish and unpublish no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I induce error "readHostsIncompatible" + And I call UnpublishVolume nfs + Then the error contains "none" + + Scenario: a Basic NFS controller Publish and unpublish no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I induce error "writeHostsIncompatible" + And I call UnpublishVolume nfs + Then the error contains "none" + + Scenario: a Basic NFS controller Publish and unpublish delete NFS export error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I induce error "deleteNFSExportError" + And I call UnpublishVolume nfs + Then the error contains "delete NFS Export failed" + + Scenario: a Basic NFS controller Publish Idempotent no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + + Scenario: a Basic NFS controller Publish Idempotent no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I call NFS PublishVolume with "single-node-multi-writer" + Then a valid PublishVolumeResponse is returned + + Scenario: a Basic NFS controller Publish incompatible access mode error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I induce error "readHostsIncompatible" + And I call NFS PublishVolume with "single-writer" + Then the error contains "with incompatible access mode" + + Scenario: a Basic NFS controller Publish incompatible access mode error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I 
call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I induce error "writeHostsIncompatible" + And I call NFS PublishVolume with "single-writer" + Then the error contains "with incompatible access mode" + + Scenario: a Basic NFS controller Publish incompatible access mode error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I call NFS PublishVolume with "multiple-reader" + Then the error contains "with incompatible access mode" + + Scenario: a Basic NFS controller Publish Idempotent no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "multiple-reader" + Then a valid PublishVolumeResponse is returned + And I call NFS PublishVolume with "multiple-reader" + Then a valid PublishVolumeResponse is returned + + Scenario: a Basic NFS controller Publish incompatible access mode error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "multiple-reader" + Then a valid PublishVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then the error contains "with incompatible access mode" + + Scenario: a Basic NFS controller Publish and unpublish no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I call UnpublishVolume nfs + And no error was received + Then a valid UnpublishVolumeResponse is returned + + Scenario: a Basic NFS controller Publish and unpublish no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "multiple-reader" + Then a valid PublishVolumeResponse is returned + And I call NFS PublishVolume with "multiple-reader" + Then a valid PublishVolumeResponse is returned + And I call UnpublishVolume nfs + And no error was received + Then a valid UnpublishVolumeResponse is returned + + Scenario: a Basic NFS controller Publish NFS export create error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I induce error "nfsExportError" + And I call NFS PublishVolume with "single-writer" + Then the error contains "create NFS Export failed" + + Scenario: a Basic NFS controller Publish modify NFS export error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I induce error "nfsExportModifyError" + And I call NFS PublishVolume with "single-writer" + Then the error contains "Allocating host access failed" + + Scenario: a Basic NFS controller Publish modify NFS export error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call 
CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I induce error "nfsExportModifyError" + And I call NFS PublishVolume with "multiple-reader" + Then the error contains "Allocating host access failed" + + Scenario: a Basic NFS controller Publish failure to check volume status error + Given a VxFlexOS service + And I call NFS PublishVolume with "single-writer" + Then the error contains "failure checking volume status before controller" + + Scenario: a Basic NFS controller Publish volume not found error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I set bad FileSystem Id + And I call NFS PublishVolume with "single-writer" + Then the error contains "volume not found" + + Scenario: a Basic NFS controller Publish NFS export not found error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I induce error "nfsExportNotFoundError" + And I call NFS PublishVolume with "single-writer" + Then the error contains "Could not find NFS Export" + + Scenario: a Basic NFS controller Publish and unpublish volume not found error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I set bad FileSystem Id + And I call UnpublishVolume nfs + Then the error contains "volume not found" + Scenario: Publish legacy volume that is on non default array Given a VxFlexOS service And I induce error "LegacyVolumeConflictError" @@ -245,6 +492,7 @@ Feature: VxFlex OS CSI interface And I call UnpublishVolume And no error was received Then a valid UnpublishVolumeResponse is returned + Scenario: Unpublish volume with no volume id Given a VxFlexOS service diff --git a/service/features/delete_volume.feature b/service/features/delete_volume.feature index 14d0f032..63c836d3 100644 --- a/service/features/delete_volume.feature +++ b/service/features/delete_volume.feature @@ -31,6 +31,46 @@ Feature: VxFlex OS CSI interface And I call DeleteVolume with "single-writer" And I call DeleteVolume with "single-writer" Then a valid DeleteVolumeResponse is returned + + Scenario: Test Basic nfs delete FileSystem + Given a VxFlexOS service + When I call Probe + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call DeleteVolume nfs with "single-writer" + Then a valid DeleteVolumeResponse is returned + + Scenario: a Basic Nfs delete FileSystem Bad + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And I call DeleteVolume nfs with "single-writer" + Then the error contains "can not be deleted as it has associated NFS Export" + + Scenario: Test Idempotent Basic nfs delete FileSystem + Given a VxFlexOS service + When I call Probe + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call DeleteVolume nfs with "single-writer" + Then a valid DeleteVolumeResponse is returned + And I call DeleteVolume nfs with 
"single-writer" + Then a valid DeleteVolumeResponse is returned + + Scenario: Test Basic nfs delete FileSystem + Given a VxFlexOS service + When I call Probe + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I induce error "NFSExportsInstancesError" + And I call DeleteVolume nfs with "single-writer" + Then the error contains "error getting the NFS Export" Scenario: Delete volume with induced getVolByID error Given a VxFlexOS service @@ -63,3 +103,9 @@ Feature: VxFlex OS CSI interface And I call DeleteVolume with "single-writer" Then a valid DeleteVolumeResponse is returned + Scenario: Delete Volume negative + Given a VxFlexOS service + When I call Probe + And I call DeleteVolume with Bad "single-writer" + Then the error contains "volume ID is required" + diff --git a/service/features/filesystem.json.template b/service/features/filesystem.json.template new file mode 100644 index 00000000..128cf834 --- /dev/null +++ b/service/features/filesystem.json.template @@ -0,0 +1,39 @@ +{ + "id": "__ID__", + "name": "__NAME__", + "description": "FS modified", + "storage_pool_id": "e65f9c2700000000", + "nas_server_id": "63ec8e0d-4551-29a7-e79c-b202f2b914f3", + "parent_id": null, + "type": "PRIMARY", + "size_total": __SIZE_IN_Total__, + "size_used": 1620312064, + "protection_policy_id": null, + "access_policy": "NATIVE", + "locking_policy": "ADVISORY", + "folder_rename_policy": "ALL_FORBIDDEN", + "is_smb_sync_writes_enabled": false, + "is_smb_op_locks_enabled": true, + "is_smb_no_notify_enabled": false, + "is_smb_notify_on_access_enabled": false, + "is_smb_notify_on_write_enabled": false, + "smb_notify_on_change_dir_depth": 512, + "is_async_MTime_enabled": false, + "is_quota_enabled": true, + "grace_period": 86400, + "default_hard_limit": 6442450944, + "default_soft_limit": 3221225472, + "creation_timestamp": null, + "expiration_timestamp": null, + "last_refresh_timestamp": null, + "last_writable_timestamp": null, + "is_modified": null, + "access_type": null, + "creator_type": null, + "type_l10n": null, + "access_policy_l10n": null, + "locking_policy_l10n": null, + "folder_rename_policy_l10n": null, + "access_type_l10n": null, + "creator_type_l10n": null +} \ No newline at end of file diff --git a/service/features/get_file_interface.json b/service/features/get_file_interface.json new file mode 100644 index 00000000..355e39fb --- /dev/null +++ b/service/features/get_file_interface.json @@ -0,0 +1,12 @@ +{ + "id": "63ec8e22-c679-f44e-1fd2-b202f2b914f3", + "nas_server_id": "63ec8e0d-4551-29a7-e79c-b202f2b914f3", + "ip_address": "1.2.3.4", + "prefix_length": 21, + "gateway": "1.2.3.4", + "vlan_id": 0, + "name": "dummy_6d0b7cdc848d_3", + "role": "dummy", + "is_disabled": false, + "role_l10n": null +} \ No newline at end of file diff --git a/service/features/get_file_system_response.json b/service/features/get_file_system_response.json new file mode 100644 index 00000000..39428a97 --- /dev/null +++ b/service/features/get_file_system_response.json @@ -0,0 +1,39 @@ +{ + "id": "64a6fffa-dd25-7a70-7db6-5643ff849351", + "name": "dummy-file-system", + "description": "FS modified", + "storage_pool_id": "e65f9c2700000000", + "nas_server_id": "63ec8e0d-4551-29a7-e79c-b202f2b914f3", + "parent_id": null, + "type": "PRIMARY", + "size_total": 8589934592, + "size_used": 1620312064, + "protection_policy_id": null, + "access_policy": "NATIVE", + "locking_policy": "ADVISORY", + "folder_rename_policy": "ALL_FORBIDDEN", + 
"is_smb_sync_writes_enabled": false, + "is_smb_op_locks_enabled": true, + "is_smb_no_notify_enabled": false, + "is_smb_notify_on_access_enabled": false, + "is_smb_notify_on_write_enabled": false, + "smb_notify_on_change_dir_depth": 512, + "is_async_MTime_enabled": false, + "is_quota_enabled": true, + "grace_period": 86400, + "default_hard_limit": 6442450944, + "default_soft_limit": 3221225472, + "creation_timestamp": null, + "expiration_timestamp": null, + "last_refresh_timestamp": null, + "last_writable_timestamp": null, + "is_modified": null, + "access_type": null, + "creator_type": null, + "type_l10n": null, + "access_policy_l10n": null, + "locking_policy_l10n": null, + "folder_rename_policy_l10n": null, + "access_type_l10n": null, + "creator_type_l10n": null +} \ No newline at end of file diff --git a/service/features/get_nas_server_id.json b/service/features/get_nas_server_id.json new file mode 100644 index 00000000..b93d6d6d --- /dev/null +++ b/service/features/get_nas_server_id.json @@ -0,0 +1,25 @@ + + { + "id": "63ec8e0d-4551-29a7-e79c-b202f2b914f3", + "name": "dummy-nas-server", + "protection_domain_id": "b8b3919900000000", + "storage_pool_id": "e65f9c2700000000", + "description": "", + "operational_status": "Started", + "primary_node_id": "63ec82a8-e065-1fdf-05e0-c4c77e59be30", + "backup_node_id": "63ec82df-d022-5bfa-7877-e8030994cee3", + "default_unix_user": null, + "default_windows_user": null, + "nfs_servers": null, + "current_unix_directory_service": "None", + "is_username_translation_enabled": false, + "is_auto_user_mapping_enabled": false, + "production_IPv4_interface_id": null, + "production_IPv6_interface_id": null, + "backup_IPv4_interface_id": null, + "backup_IPv6_interface_id": null, + "current_preferred_IPv4_interface_id": "63ec8e22-c679-f44e-1fd2-b202f2b914f3", + "current_preferred_IPv6_interface_id": null, + "operational_status_l10n": null, + "current_unix_directory_service_l10n": null + } diff --git a/service/features/get_nas_servers.json b/service/features/get_nas_servers.json new file mode 100644 index 00000000..64e911fa --- /dev/null +++ b/service/features/get_nas_servers.json @@ -0,0 +1,28 @@ + [ + { + "id": "63ec8e0d-4551-29a7-e79c-b202f2b914f3", + "name": "dummy-nas-server", + "protection_domain_id": "b8b3919900000000", + "storage_pool_id": "e65f9c2700000000", + "description": "", + "operational_status": "Started", + "primary_node_id": "63ec82a8-e065-1fdf-05e0-c4c77e59be30", + "backup_node_id": "63ec82df-d022-5bfa-7877-e8030994cee3", + "default_unix_user": null, + "default_windows_user": null, + "nfs_servers": null, + "current_unix_directory_service": "None", + "is_username_translation_enabled": false, + "is_auto_user_mapping_enabled": false, + "production_IPv4_interface_id": null, + "production_IPv6_interface_id": null, + "backup_IPv4_interface_id": null, + "backup_IPv6_interface_id": null, + "current_preferred_IPv4_interface_id": "63ec8e22-c679-f44e-1fd2-b202f2b914f3", + "current_preferred_IPv6_interface_id": null, + "operational_status_l10n": null, + "current_unix_directory_service_l10n": null + } +] + + \ No newline at end of file diff --git a/service/features/nfsexport.json.template b/service/features/nfsexport.json.template new file mode 100644 index 00000000..cf732684 --- /dev/null +++ b/service/features/nfsexport.json.template @@ -0,0 +1,20 @@ +{ + "id": "__ID__", + "file_system_id": "__FS_ID__", + "name": "__NAME__", + "path": "__PATH__", + "description": null, + "default_access": "ROOT", + "min_security": "SYS", + "nfs_owner_username": "root", + 
"no_access_hosts": [], + "read_only_hosts": ["__READ_HOSTS__"], + "read_only_root_hosts": ["__READ_ROOT_HOSTS__"], + "read_write_hosts": ["__WRITE_HOSTS__"], + "read_write_root_hosts": ["__WRITE_ROOT_HOSTS__"], + "anonymous_UID": -2, + "anonymous_GID": -2, + "is_no_SUID": false, + "default_access_l10n": null, + "min_security_l10n": null +} diff --git a/service/features/node_publish_unpublish.feature b/service/features/node_publish_unpublish.feature index f5269a61..54e9cd2c 100644 --- a/service/features/node_publish_unpublish.feature +++ b/service/features/node_publish_unpublish.feature @@ -65,6 +65,224 @@ Feature: VxFlex OS CSI interface Examples: | error | errormsg | | "NodePublishPrivateTargetAlreadyMounted" | "Mount point already in use by device" | + + Scenario: a Basic NFS Node Publish unpublish Volume no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-writer" fstype "nfs" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + Then I call NodeUnpublishVolume "" + Then the error contains "none" + + + Scenario: a Basic NFS Node Publish filesystem not found error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-writer" fstype "nfs" + And I set bad FileSystem Id + Then I call NodePublishVolume NFS "" + Then the error contains "filesystem not found" + + + Scenario: a Basic NFS Node Publish Volume GetFileSystemsById error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-writer" fstype "nfs" + And I induce error "GetFileSystemsByIdError" + Then I call NodePublishVolume NFS "" + Then the error contains "filesystem not found" + + + Scenario: a Basic NFS Node Publish unpublish Volume no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-node-single-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + Then I call NodeUnpublishVolume "" + Then the error contains "none" + + + Scenario: a Basic Idempotent NFS Node Publish Unpublish Volume no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-node-single-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + Then I call 
NodeUnpublishVolume "" + Then the error contains "none" + + + Scenario: a Basic NFS Node Publish Volume NAS server not found error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-node-single-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + And I induce error "NasNotFoundError" + Then I call NodePublishVolume NFS "" + Then the error contains "could not find NAS server by id" + + + Scenario: a Basic NFS Node Publish Volume File interface not found error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-node-single-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + And I induce error "fileInterfaceNotFoundError" + Then I call NodePublishVolume NFS "" + Then the error contains "could not find the File interface using id" + + + Scenario: a Basic NFS Node Publish Volume unknown access mode error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-reader" + Then the error contains "access mode cannot be UNKNOWN" + + + Scenario: a Basic NFS Node Publish Unpublish Volume no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "multiple-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "multiple-writer" fstype "nfs" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + Then I call NodeUnpublishVolume "" + Then the error contains "none" + + + + Scenario: a Basic NFS Node Publish Unpublish Volume filesystem not found error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "multiple-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "multiple-writer" fstype "nfs" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + And I set bad FileSystem Id + Then I call NodeUnpublishVolume "" + Then the error contains "filesystem not found" + + + Scenario: a Basic NFS Node Publish Unpublish Volume GetFileSystemsById error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "multiple-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "multiple-writer" fstype "nfs" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + And I induce error "GetFileSystemsByIdError" + Then I call NodeUnpublishVolume "" + Then the error contains "filesystem not found" + + + Scenario Outline: Node Publish Unpublish mount volumes various induced error use cases from examples NFS volumes + Given a VxFlexOS service + When I specify CreateVolumeMountRequest 
"nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "multiple-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "multiple-writer" fstype "nfs" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + And I induce error + And I call NodeUnpublishVolume "" + Then the error contains + + Examples: + | error | errormsg | + | "NodeUnpublishBadVolume" | "none" | + | "GOFSMockGetMountsError" | "could not reliably determine existing mount status" | + | "NodeUnpublishNoTargetPath" | "target path argument is required" | + | "GOFSMockUnmountError" | "error unmounting target" | + | "PrivateDirectoryNotExistForNodePublish" | "none" | + | "NoCsiVolIDError" | "volume ID is required" | + | "none" | "none" | + + + Scenario: a Basic NFS Node Publish Unpublish Volume no error + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-node-multi-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-node-multi-writer" fstype "nfs" + Then I call NodePublishVolume NFS "" + Then the error contains "none" + Then I call NodeUnpublishVolume "" + Then the error contains "none" + + + Scenario Outline: Node publish mount volumes various induced error use cases from examples NFS volumes + Given a VxFlexOS service + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + And I call NFS PublishVolume with "single-node-multi-writer" + Then a valid PublishVolumeResponse is returned + And a capability with voltype "mount" access "single-node-multi-writer" fstype "nfs" + And get Node Publish Volume Request NFS + And I induce error + And I induce error + When I call Probe + Then I call NodePublishVolume NFS "" + Then the error contains + + Examples: + | error | errorb | errormsg | + | "GOFSMockDevMountsError" | "none" | "none" | + | "GOFSMockMountError" | "none" | "mount induced error" | + | "GOFSMockGetMountsError" | "none" | "could not reliably determine existing mount status" | + | "TargetNotCreatedForNodePublish" | "none" | "none" | + | "NodePublishNoTargetPath" | "none" | "Target Path is required" | + | "NodePublishNoVolumeCapability" | "none" | "Volume Capability is required" | + | "NodePublishNoAccessMode" | "none" | "Volume Access Mode is required" | + | "NodePublishNoAccessType" | "none" | "Invalid access type" | + | "NodePublishPrivateTargetAlreadyMounted" | "GOFSMockGetMountsError" | "could not reliably determine existing mount status" | + | "NodePublishBadTargetPath" | "none" | "cannot find the path specified@@no such file or directory" | + | "NoCsiVolIDError" | "none" | "volume ID is required" | Scenario Outline: Node publish mount volumes various induced error use cases from examples Given a VxFlexOS service @@ -157,7 +375,6 @@ Feature: VxFlex OS CSI interface | "mount" | "multiple-reader" | "ext4" | "none" | | "mount" | "multiple-writer" | "ext4" | "do not support AccessMode MULTI_NODE_MULTI_WRITER" | - Scenario: Node publish but access modes conflicts Given a VxFlexOS service And a controller published volume @@ -191,7 +408,6 @@ Feature: VxFlex OS CSI interface #| "mount" | "single-writer" | "ext4" | "Access mode conflicts with existing mounts" | | "mount" | "multiple-writer" | "ext4" | "do not 
-
  Scenario: Node publish when read-only mount volume already published and I change the target path, access mode conflicts
    Given a VxFlexOS service
    And a controller published volume
@@ -204,9 +420,6 @@ Feature: VxFlex OS CSI interface
    And I call NodePublishVolume "SDC_GUID"
    Then the error contains "Access mode conflicts with existing mounts"
-
-
-
  Scenario: Node publish volume with volume context
    Given a VxFlexOS service
    And a controller published volume
diff --git a/service/features/service.feature b/service/features/service.feature
index 5b407f4d..3a83801d 100644
--- a/service/features/service.feature
+++ b/service/features/service.feature
@@ -75,7 +75,28 @@ Feature: VxFlex OS CSI interface
      | "a:b" | "a:b" |
      | "a:b" | "a:b" |
      | ""    | ""    |
+      | "a/b" | ""    |
+
+  Scenario Outline: multi array getFilesystemIDFromCsiVolumeID for NFS volumes with different examples
+    Given a VxFlexOS service
+    And I call getFilesystemIDFromCsiVolumeID <csiVolID>
+    Then the fileSystemID is <fsID>
+    Examples:
+      | csiVolID      | fsID     |
+      | "abcd/nfs123" | "nfs123" |
+      | "badcsiVolID" | ""       |
+      | ""            | ""       |
+
+  Scenario Outline: multi array getSystemIDFromCsiVolumeID for NFS volumes with different examples
+    Given a VxFlexOS service
+    And I call getSystemIDFromCsiVolumeIDNfs <csiVolID>
+    Then the systemID is <systemID>
+    Examples:
+      | csiVolID      | systemID |
+      | "abcd/nfs123" | "abcd"   |
+      | "badSystemID" | ""       |
+      | ""            | ""       |
+
  Scenario Outline: multi array getSystemIDFromCsiVolumeID good and with errors
    Given a VxFlexOS service
    And I call getSystemIDFromCsiVolumeID <csiVolID>
@@ -152,7 +173,7 @@ Feature: VxFlex OS CSI interface
      | "volume1" |
      | "thisnameiswaytoolongtopossiblybeunder31characters" |
-
+
  Scenario: Create volume with admin error
    Given a VxFlexOS service
    When I call Probe
@@ -246,6 +267,34 @@ Feature: VxFlex OS CSI interface
      | sysID |
      | "f.service.opt.SystemName" |
+
+
+  Scenario Outline: Create volume with Accessibility Requirements NFS volumes Invalid topology error
+    Given a VxFlexOS service
+    When I call Probe
+    And I specify bad NFS AccessibilityRequirements with a SystemID of <sysID>
+    And I call CreateVolume "volume1"
+    Then the error contains "Invalid topology requested for NFS Volume"
+    Examples:
+      | sysID |
+      | "f.service.opt.SystemName" |
+
+
+
+  Scenario Outline: Create volume with Accessibility Requirements for NFS volumes with different examples
+    Given a VxFlexOS service
+    When I call Probe
+    And I specify NFS AccessibilityRequirements with a SystemID of <sysID>
+    And I call CreateVolume "volume1"
+    Then the error contains <errormsg>
+
+    Examples:
+      | sysID                      | errormsg                               |
+      | "f.service.opt.SystemName" | "none"                                 |
+      | ""                         | "is not accessible based on Preferred" |
+      | "Unknown"                  | "is not accessible based on Preferred" |
+      | "badSystem"                | "is not accessible based on Preferred" |
+
  Scenario: Create volume with AccessMode_MULTINODE_WRITER
    Given a VxFlexOS service
    When I call Probe
@@ -279,7 +328,56 @@ Feature: VxFlex OS CSI interface
    When I specify CreateVolumeMountRequest "xfs"
    And I call CreateVolume "volume1"
    Then a valid CreateVolumeResponse is returned
-
+
+
+  Scenario: Create mount volume NFS no error
+    Given a VxFlexOS service
+    When I call Probe
+    When I specify CreateVolumeMountRequest "nfs"
+    And I call CreateVolume "volume1"
+    Then a valid CreateVolumeResponse is returned
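+
+  # Note: the NFS create scenarios above depend on the "-nfs" topology segment
+  # that NodeGetInfo advertises for NFS-capable arrays. A hypothetical storage
+  # class pinned to that segment might carry (illustrative system ID):
+  #   allowedTopologies:
+  #     - matchLabelExpressions:
+  #         - key: csi-vxflexos.dellemc.com/14dbbf5617523654-nfs
+  #           values: ["true"]
+
+
+  Scenario: Create Volume with invalid probe cache, no endpoint, and no admin NFS system ID not found error
+    Given a VxFlexOS service
+    When I induce error "NoAdminError"
+    And I induce error "NoEndpointError"
+    And I invalidate the Probe cache
+    When I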
specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume1" + Then the error contains "No system ID is found in parameters or as default" + + + Scenario: Create mount volume NFS nas server not found error + Given a VxFlexOS service + When I call Probe + When I specify CreateVolumeMountRequest "nfs" + And I induce error "NasNotFoundError" + And I call CreateVolume "volume1" + Then the error contains "nas server not found" + + + Scenario: Idempotent create mount volume NFS storage pool not found error + Given a VxFlexOS service + When I call Probe + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume4" + When I specify CreateVolumeMountRequest "nfs" + And I change the StoragePool "no_storage_pool" + And I call CreateVolume "volume4" + Then the error contains "Couldn't find storage pool" + + + + + Scenario: Create mount volume NFS with NoAdmin + Given a VxFlexOS service + When I call Probe + When I specify CreateVolumeMountRequest "nfs" + And I induce error "NoAdminError" + And I call CreateVolume "volume1" + Then a valid CreateVolumeResponse is returned + Scenario: Create mount volume idempotent test Given a VxFlexOS service When I call Probe @@ -287,6 +385,33 @@ Feature: VxFlex OS CSI interface And I call CreateVolume "volume2" And I call CreateVolume "volume2" Then a valid CreateVolumeResponse is returned + + + Scenario: Create mount volume idempotent NFS no error + Given a VxFlexOS service + When I call Probe + When I specify CreateVolumeMountRequest "nfs" + And I call CreateVolume "volume2" + And I call CreateVolume "volume2" + Then a valid CreateVolumeResponse is returned + + + Scenario: Create mount volume with bad capacity NFS bad capacity error + Given a VxFlexOS service + When I call Probe + When I specify CreateVolumeMountRequest "nfs" + And I specify a BadCapacity + And I induce error "BadCapacityError" + And I call CreateVolume "bad capacity" + Then the error contains "bad capacity" + + + Scenario: Idempotent create mount volume with different sizes NFS different size error + Given a VxFlexOS service + When I call Probe + And I call CreateVolumeSize nfs "volume3" "8" + And I call CreateVolumeSize nfs "volume3" "16" + Then the error contains "'Volume name' already exists and size is different" Scenario: Call NodeGetInfo and validate NodeId Given a VxFlexOS service @@ -987,5 +1112,45 @@ Feature: VxFlex OS CSI interface And I call Probe When I call Node Probe Then the error contains "The given GUID is invalid" - - + + Scenario: Controller expand volume for NFS + Given a VxFlexOS service + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + When I call CreateVolumeSize nfs "vol-inttest-nfs" "8" + And a controller published volume + When I call ControllerExpandVolume set to "10" + Then no error was received + + Scenario: Controller shrink volume for NFS + Given a VxFlexOS service + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + When I call CreateVolumeSize nfs "vol-inttest-nfs" "16" + And a controller published volume + When I call ControllerExpandVolume set to "8" + Then no error was received + + Scenario: Controller expand volume for NFS - idempotent case + Given a VxFlexOS service + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + When I call CreateVolumeSize nfs "vol-inttest-nfs" "10" + And a controller published volume + When I call ControllerExpandVolume set to "10" + Then no error was received + + Scenario: Controller 
expand volume for NFS - incorrect system name + Given a VxFlexOS service + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + When I call CreateVolumeSize nfs "vol-inttest-nfs" "10" + And a controller published volume + And I induce error "WrongSysNameError" + When I call ControllerExpandVolume set to "16" + Then the error contains "failure to load volume" + + Scenario: Call ControllerExpandVolume for NFS - volume ID not found + Given a VxFlexOS service + And a capability with voltype "mount" access "single-node-single-writer" fstype "nfs" + And I call CreateVolumeSize nfs "vol-inttest-nfs" "10" + And a controller published volume + And I induce error "NoVolumeIDError" + Then I call ControllerExpandVolume set to "16" + And the error contains "volume ID is required" \ No newline at end of file diff --git a/service/mount.go b/service/mount.go index 313c4687..5b5add75 100644 --- a/service/mount.go +++ b/service/mount.go @@ -331,6 +331,171 @@ func publishVolume( return nil } +// publishNFS mounts the NFS Volume to the targetpath +func publishNFS(ctx context.Context, req *csi.NodePublishVolumeRequest, nfsExportURL string) error { + volCap := req.GetVolumeCapability() + + if volCap == nil { + return status.Error(codes.InvalidArgument, + "Volume Capability is required") + } + + am := volCap.GetAccessMode() + + if am == nil { + return status.Error(codes.InvalidArgument, + "Volume Access Mode is required") + } + + mountVol := volCap.GetMount() + + if mountVol == nil { + return status.Error(codes.InvalidArgument, "Invalid access type") + } + + var mntOptions []string + mntOptions = mountVol.GetMountFlags() + Log.Infof("The mountOptions received are: %s", mntOptions) + + target := req.GetTargetPath() + if target == "" { + return status.Error(codes.InvalidArgument, + "Target Path is required") + } + + // make sure target is created + _, err := mkdir(target) + + if err != nil { + return status.Error(codes.FailedPrecondition, fmt.Sprintf("Could not create '%s': '%s'", target, err.Error())) + } + roFlag := req.GetReadonly() + rwOption := "rw" + if roFlag { + rwOption = "ro" + } + + mntOptions = append(mntOptions, rwOption) + + fields := map[string]interface{}{ + "ID": req.VolumeId, + "TargetPath": target, + "ExportPath": nfsExportURL, + "AccessMode": am.GetMode(), + } + Log.WithFields(fields).Info("Node publish volume params ") + + mnts, err := gofsutil.GetMounts(ctx) + if err != nil { + return status.Errorf(codes.Internal, + "could not reliably determine existing mount status: '%s'", + err.Error()) + } + + if len(mnts) != 0 { + for _, m := range mnts { + // check for idempotency + //same volume + if m.Device == nfsExportURL { + if m.Path == target { + //as per specs, T1=T2, P1=P2 - return OK + if contains(m.Opts, rwOption) { + Log.WithFields(fields).Debug( + "mount already in place with same options") + return nil + } + //T1=T2, P1!=P2 - return AlreadyExists + Log.WithFields(fields).Error("Mount point already in use by device with different options") + return status.Error(codes.AlreadyExists, "Mount point already in use by device with different options") + } + //T1!=T2, P1==P2 || P1 != P2 - return FailedPrecondition for single node + if am.GetMode() == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER || + am.GetMode() == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY || + am.GetMode() == csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER { + Log.WithFields(fields).Error("Mount point already in use for same device") + return 
status.Error(codes.FailedPrecondition, "Mount point already in use for same device")
+			}
+		}
+	}
+
+	Log.Infof("The mountOptions being used for mount are: %s", mntOptions)
+	if err := gofsutil.Mount(context.Background(), nfsExportURL, target, "nfs", mntOptions...); err != nil {
+		count := 0
+		errmsg := err.Error()
+		// The two substring checks cover the NFSv3 and NFSv4 error messages respectively.
+		for (strings.Contains(strings.ToLower(errmsg), "access denied by server while mounting") || (strings.Contains(strings.ToLower(errmsg), "no such file or directory"))) && count < 5 {
+			time.Sleep(2 * time.Second)
+			Log.Infof("Mount retry attempt-%d", count)
+			err = gofsutil.Mount(context.Background(), nfsExportURL, target, "nfs", mntOptions...)
+			if err != nil {
+				errmsg = err.Error()
+			} else {
+				break
+			}
+			count++
+		}
+		if err != nil {
+			Log.Errorf("%v", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func unpublishNFS(ctx context.Context, req *csi.NodeUnpublishVolumeRequest, filterStr string) error {
+	target := req.GetTargetPath()
+
+	Log.Debugf("attempting to unmount '%s'", target)
+	isMounted, err := isVolumeMounted(ctx, filterStr, target)
+	if err != nil {
+		return err
+	}
+	if !isMounted {
+		return nil
+	}
+	if err := gofsutil.Unmount(context.Background(), target); err != nil {
+		return status.Errorf(codes.Internal,
+			"error unmounting target '%s': '%s'", target, err.Error())
+	}
+	Log.Debugf("unmounting '%s' succeeded", target)
+
+	return nil
+}
+
+func isVolumeMounted(ctx context.Context, filterStr string, target string) (bool, error) {
+
+	mnts, err := gofsutil.GetMounts(ctx)
+	if err != nil {
+		return false, status.Errorf(codes.Internal,
+			"could not reliably determine existing mount status: '%s'",
+			err.Error())
+	}
+
+	if len(mnts) != 0 {
+		// Idempotence check not to return error if not published
+		mounted := false
+		for _, m := range mnts {
+			if strings.Contains(m.Device, filterStr) {
+				if m.Path == target {
+					mounted = true
+					return mounted, nil
+				}
+			}
+		}
+		if !mounted {
+			Log.Debugf("target '%s' does not exist", target)
+			return mounted, nil
+		}
+	} else {
+		// No mount exists also means not published
+		Log.Debugf("target '%s' does not exist", target)
+		return false, nil
+	}
+	return false, nil
+}
+
 func handlePrivFSMount(
 	ctx context.Context,
 	accMode *csi.VolumeCapability_AccessMode,
diff --git a/service/node.go b/service/node.go
index dcf6512d..c490cfa6 100644
--- a/service/node.go
+++ b/service/node.go
@@ -142,6 +142,13 @@ func (s *service) NodePublishVolume(
 	}
 	Log.Printf("[NodePublishVolume] csiVolID: %s", csiVolID)
 
+	// Check for NFS protocol
+	fsType := volumeContext[KeyFsType]
+	isNFS := fsType == "nfs"
+
 	volID := getVolumeIDFromCsiVolumeID(csiVolID)
 	Log.Printf("[NodePublishVolume] volumeID: %s", volID)
 
@@ -169,6 +176,41 @@ func (s *service) NodePublishVolume(
 			"checkVolumesMap for id: %s failed : %s", csiVolID, err.Error())
 	}
 
+	// handle NFS nodePublish separately
+	if isNFS {
+		fsID := getFilesystemIDFromCsiVolumeID(csiVolID)
+
+		fs, err := s.getFilesystemByID(fsID, systemID)
+		if err != nil {
+			if strings.EqualFold(err.Error(), sioGatewayFileSystemNotFound) || strings.Contains(err.Error(), "must be a hexadecimal number") {
+				return nil, status.Error(codes.NotFound,
+					"filesystem not found")
+			}
+			// any other lookup failure must also stop the publish, otherwise fs would be nil below
+			return nil, err
+		}
+
+		client := s.adminClients[systemID]
+
+		NFSExport, err := s.getNFSExport(fs, client)
+		if err != nil {
+			return nil, err
+		}
+
+		fileInterface, err := s.getFileInterface(systemID, fs, client)
+		if err != nil {
+			return nil, err
+		}
+		// Formulate the NFS export URL as "nas_server_ip:NFSExport_Path",
+		// e.g. NFSExportURL = 10.1.1.1:/nfs-volume
+		path := fmt.Sprintf("%s:%s", fileInterface.IPAddress, NFSExport.Path)
+
+		if err := publishNFS(ctx, req, path); err != nil {
+			return nil, err
+		}
+
+		return &csi.NodePublishVolumeResponse{}, nil
+	}
 
 	sdcMappedVol, err := s.getSDCMappedVol(volID, systemID, publishGetMappedVolMaxRetry)
 	if err != nil {
@@ -209,6 +251,7 @@ func (s *service) NodeUnpublishVolume(
 			"volume ID is required")
 	}
 
+	isNFS := strings.Contains(csiVolID, "/")
 	var ephemeralVolume bool
 	//For ephemeral volumes, kubernetes gives us an internal ID, so we need to use the lockfile to find the Powerflex ID this is mapped to.
 	lockFile := ephemeralStagingMountPath + csiVolID + "/id"
@@ -227,6 +270,51 @@ func (s *service) NodeUnpublishVolume(
 	}
 
+	if isNFS {
+		fsID := getFilesystemIDFromCsiVolumeID(csiVolID)
+		Log.Printf("NodeUnpublishVolume fileSystemID: %s", fsID)
+
+		systemID := s.getSystemIDFromCsiVolumeID(csiVolID)
+		if systemID == "" {
+			// use default system
+			systemID = s.opts.defaultSystemID
+		}
+		Log.Printf("NodeUnpublishVolume systemID: %s", systemID)
+		if systemID == "" {
+			return nil, status.Error(codes.InvalidArgument,
+				"systemID is not found in the request and there is no default system")
+		}
+
+		fs, err := s.getFilesystemByID(fsID, systemID)
+		if err != nil {
+			if strings.EqualFold(err.Error(), sioGatewayFileSystemNotFound) || strings.Contains(err.Error(), "must be a hexadecimal number") {
+				return nil, status.Error(codes.NotFound,
+					"filesystem not found")
+			}
+			// propagate any other lookup failure instead of continuing with a nil fs
+			return nil, err
+		}
+
+		// Probe the system to make sure it is managed by driver
+		if err := s.requireProbe(ctx, systemID); err != nil {
+			return nil, err
+		}
+
+		// ensure no ambiguity if legacy vol
+		err = s.checkVolumesMap(csiVolID)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal,
+				"checkVolumesMap for id: %s failed : %s", csiVolID, err.Error())
+		}
+
+		if err := unpublishNFS(ctx, req, fs.Name); err != nil {
+			return nil, err
+		}
+
+		return &csi.NodeUnpublishVolumeResponse{}, nil
+	}
+
 	volID := getVolumeIDFromCsiVolumeID(csiVolID)
 	Log.Printf("NodeUnpublishVolume volumeID: %s", volID)
 
@@ -651,6 +739,13 @@ func (s *service) NodeGetInfo(
 	// csi-vxflexos.dellemc.com/<systemID>: <SystemTopologySystemValue>
 	topology := map[string]string{}
 	for _, sysID := range connectedSystemID {
+		isNFS, err := s.checkNFS(ctx, sysID)
+		if err != nil {
+			return nil, err
+		}
+		if isNFS {
+			topology[Name+"/"+sysID+"-nfs"] = "true"
+		}
 		topology[Name+"/"+sysID] = SystemTopologySystemValue
 	}
 
diff --git a/service/service.go b/service/service.go
index 84ca5688..34f62dc7 100644
--- a/service/service.go
+++ b/service/service.go
@@ -22,6 +22,7 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -46,6 +47,9 @@ import (
 	"sigs.k8s.io/yaml"
 
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -92,14 +96,16 @@ var Log = logrus.New()
 
 // ArrayConnectionData contains data required to connect to array
type ArrayConnectionData struct {
-	SystemID                  string `json:"systemID"`
-	Username                  string `json:"username"`
-	Password                  string `json:"password"`
-	Endpoint                  string `json:"endpoint"`
-	SkipCertificateValidation bool   `json:"skipCertificateValidation,omitempty"`
-	Insecure                  bool   `json:"insecure,omitempty"`
-	IsDefault                 bool   `json:"isDefault,omitempty"`
-	AllSystemNames            string `json:"allSystemNames"`
+	SystemID                  string  `json:"systemID"`
+	Username                  string  `json:"username"`
+	Password                  string  `json:"password"`
+
Endpoint string `json:"endpoint"` + SkipCertificateValidation bool `json:"skipCertificateValidation,omitempty"` + Insecure bool `json:"insecure,omitempty"` + IsDefault bool `json:"isDefault,omitempty"` + AllSystemNames string `json:"allSystemNames"` + NasName *string `json:"nasName"` + NfsAcls string `json:"nfsAcls"` } // Manifest is the SP's manifest. @@ -140,6 +146,8 @@ type Opts struct { IsApproveSDCEnabled bool replicationContextPrefix string replicationPrefix string + NfsAcls string + ExternalAccess string } type service struct { @@ -331,6 +339,8 @@ func (s *service) BeforeServe( "IsSdcRenameEnabled": s.opts.IsSdcRenameEnabled, "sdcPrefix": s.opts.SdcPrefix, "IsApproveSDCEnabled": s.opts.IsApproveSDCEnabled, + "nfsAcls": s.opts.NfsAcls, + "externalAccess": s.opts.ExternalAccess, } Log.WithFields(fields).Infof("configured %s", Name) @@ -381,15 +391,11 @@ func (s *service) BeforeServe( if healthMonitor == "true" { opts.IsHealthMonitorEnabled = true } - } else { - opts.IsHealthMonitorEnabled = false } if renameSDC, ok := csictx.LookupEnv(ctx, EnvIsSDCRenameEnabled); ok { if renameSDC == "true" { opts.IsSdcRenameEnabled = true } - } else { - opts.IsSdcRenameEnabled = false } if sdcPrefix, ok := csictx.LookupEnv(ctx, EnvSDCPrefix); ok { opts.SdcPrefix = sdcPrefix @@ -412,6 +418,13 @@ func (s *service) BeforeServe( opts.replicationPrefix = replicationPrefix } + if nfsAcls, ok := csictx.LookupEnv(ctx, EnvNfsAcls); ok { + opts.NfsAcls = nfsAcls + } + if externalAccess, ok := csictx.LookupEnv(ctx, EnvExternalAccess); ok { + opts.ExternalAccess = externalAccess + } + // log csiNode topology keys if err = s.logCsiNodeTopologyKeys(); err != nil { Log.WithError(err).Error("unable to log csiNode topology keys") @@ -445,6 +458,39 @@ func (s *service) BeforeServe( return nil } +func (s *service) checkNFS(ctx context.Context, systemID string) (bool, error) { + + err := s.systemProbeAll(ctx) + if err != nil { + return false, err + } + + c := s.adminClients[systemID] + if c == nil { + return false, nil + } + version, err := c.GetVersion() + if err != nil { + return false, err + } + ver, err := strconv.ParseFloat(version, 64) + if err != nil { + return false, err + } + if ver >= 4.0 { + arrayConData, err := getArrayConfig(ctx) + if err != nil { + return false, err + } + array := arrayConData[systemID] + if array.NasName == nil || *(array.NasName) == "" { + return false, nil + } + return true, nil + } + return false, nil +} + // Probe all systems managed by driver func (s *service) doProbe(ctx context.Context) error { @@ -520,6 +566,26 @@ func (s *service) getVolByID(id string, systemID string) (*siotypes.Volume, erro return vols[0], nil } +// getFilesystemByID returns the PowerFlex filesystem from the given Powerflex filesystem ID +func (s *service) getFilesystemByID(id string, systemID string) (*siotypes.FileSystem, error) { + + adminClient := s.adminClients[systemID] + if adminClient == nil { + return nil, fmt.Errorf("can't find adminClient by id %s", systemID) + } + system, err := adminClient.FindSystem(systemID, "", "") + if err != nil { + return nil, fmt.Errorf("can't find system by id %s", systemID) + } + // The GetFileSystemByIDName API returns a filesystem, but when only passing + // in a filesystem ID or name, the response will be just the one filesystem + fs, err := system.GetFileSystemByIDName(id, "") + if err != nil { + return nil, err + } + return fs, nil +} + // getSDCID returns SDC ID from the given sdc GUID and system ID. 
func (s *service) getSDCID(sdcGUID string, systemID string) (string, error) {
	sdcGUID = strings.ToUpper(sdcGUID)
@@ -537,6 +603,23 @@ func (s *service) getSDCID(sdcGUID string, systemID string) (string, error) {
 	return id.Sdc.ID, nil
 }
 
+// getSDCIP returns the SDC IP address for the given SDC GUID and system ID.
+func (s *service) getSDCIP(sdcGUID string, systemID string) (string, error) {
+	sdcGUID = strings.ToUpper(sdcGUID)
+
+	// Need to translate the SDC GUID to the SDC IP address via FindSdc.
+	if s.systems[systemID] == nil {
+		return "", fmt.Errorf("getSDCIP error systemID not found: %s", systemID)
+	}
+	id, err := s.systems[systemID].FindSdc("SdcGUID", sdcGUID)
+	if err != nil {
+		return "", fmt.Errorf("error finding SDC from GUID: %s, err: %s",
+			sdcGUID, err.Error())
+	}
+
+	return id.Sdc.SdcIP, nil
+}
+
 // getStoragePoolID returns pool ID from the given name, system ID, and protectionDomain name
 func (s *service) getStoragePoolID(name, systemID, pdID string) (string, error) {
@@ -578,6 +661,39 @@ func (s *service) getCSIVolume(vol *siotypes.Volume, systemID string) *csi.Volum
 	return vi
 }
 
+// getCSIVolumeFromFilesystem converts the given siotypes.FileSystem to a CSI volume
+func (s *service) getCSIVolumeFromFilesystem(fs *siotypes.FileSystem, systemID string) *csi.Volume {
+
+	// Get storage pool name; add to cache of ID to Name if not present
+	storagePoolName := s.getStoragePoolNameFromID(systemID, fs.StoragePoolID)
+	installationID, err := s.getArrayInstallationID(systemID)
+	if err != nil {
+		Log.Printf("getCSIVolumeFromFilesystem error system not found: %s with error: %v\n", systemID, err)
+	}
+
+	// Make the additional volume attributes
+	creationTime, _ := strconv.Atoi(fs.CreationTimestamp)
+	attributes := map[string]string{
+		"Name":            fs.Name,
+		"StoragePoolID":   fs.StoragePoolID,
+		"StoragePoolName": storagePoolName,
+		"StorageSystem":   systemID,
+		"CreationTime":    time.Unix(int64(creationTime), 0).String(),
+		"InstallationID":  installationID,
+		"NasServerID":     fs.NasServerID,
+		"fsType":          "nfs",
+	}
+
+	vi := &csi.Volume{
+		VolumeId:      systemID + "/" + fs.ID,
+		CapacityBytes: int64(fs.SizeTotal),
+		VolumeContext: attributes,
+	}
+
+	return vi
+}
+
 // getArryaInstallationID returns installation ID for the given system ID
 func (s *service) getArrayInstallationID(systemID string) (string, error) {
 	system, err := s.adminClients[systemID].FindSystem(systemID, "", "")
@@ -688,6 +804,15 @@ func getArrayConfig(ctx context.Context) (map[string]*ArrayConnectionData, error
 		Log.Printf("Powerflex systemID %s AllSytemNames given %#v\n", systemID, names)
 	}
 
+	// for PowerFlex v4.0
+	str := ""
+	if c.NasName == nil || *(c.NasName) == "" {
+		c.NasName = &str
+	}
+	if c.NfsAcls == "" {
+		c.NfsAcls = str
+	}
+
 	skipCertificateValidation := c.SkipCertificateValidation || c.Insecure
 
 	fields := map[string]interface{}{
@@ -698,6 +823,8 @@ func getArrayConfig(ctx context.Context) (map[string]*ArrayConnectionData, error
 		"isDefault":      c.IsDefault,
 		"systemID":       c.SystemID,
 		"allSystemNames": c.AllSystemNames,
+		"nasName":        c.NasName,
+		"nfsAcls":        c.NfsAcls,
 	}
 
 	Log.WithFields(fields).Infof("configured %s", c.SystemID)
@@ -738,29 +865,350 @@ func getVolumeIDFromCsiVolumeID(csiVolID string) string {
 	}
 	err := errors.New("csiVolID unexpected string")
 	Log.WithError(err).Errorf("%s format error", csiVolID)
-
 	return ""
 }
 
-// getSystemIDFromCsiVolumeId returns PowerFlex volume ID from CSI volume ID
-func (s *service) getSystemIDFromCsiVolumeID(csiVolID string) string {
-	i := strings.LastIndex(csiVolID, "-")
-	if i == -1 {
-		return ""
-	}
-	tokens := strings.Split(csiVolID, "-")
-	if len(tokens) > 1 {
-		sys := csiVolID[:i]
-		if id, ok := s.connectedSystemNameToID[sys]; ok {
-			return id
-		}
-		return sys
-	}
+// getFilesystemIDFromCsiVolumeID returns PowerFlex filesystem ID from CSI volume ID
+func getFilesystemIDFromCsiVolumeID(csiVolID string) string {
+	if csiVolID == "" {
+		return ""
+	}
+	containsSlash := strings.Contains(csiVolID, "/")
+	if containsSlash {
+		i := strings.LastIndex(csiVolID, "/")
+		if i == -1 {
+			return csiVolID
+		}
+		tokens := strings.Split(csiVolID, "/")
+		index := len(tokens)
+		if index > 0 {
+			return tokens[index-1]
+		}
+	}
+	err := errors.New("csiVolID unexpected string")
+	Log.WithError(err).Errorf("%s format error", csiVolID)
+	return ""
+}
+
+// getNFSExport returns the NFSExport for a given filesystem
+// and returns a not found error if the NFSExport does not exist for the filesystem.
+func (s *service) getNFSExport(fs *siotypes.FileSystem, client *goscaleio.Client) (*siotypes.NFSExport, error) {
+	nfsExportList, err := client.GetNFSExport()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, nfsExport := range nfsExportList {
+		if nfsExport.FileSystemID == fs.ID {
+			return &nfsExport, nil
+		}
+	}
+
+	return nil, status.Errorf(codes.NotFound, "NFS Export for the file system: %s not found", fs.Name)
+}
+
+// getFileInterface returns the FileInterface for the given filesystem.
+func (s *service) getFileInterface(systemID string, fs *siotypes.FileSystem, client *goscaleio.Client) (*siotypes.FileInterface, error) {
+	system, err := client.FindSystem(systemID, "", "")
+	if err != nil {
+		return nil, err
+	}
+
+	nas, err := system.GetNASByIDName(fs.NasServerID, "")
+	if err != nil {
+		return nil, err
+	}
+
+	fileInterface, err := system.GetFileInterface(nas.CurrentPreferredIPv4InterfaceID)
+	if err != nil {
+		return nil, err
+	}
+	return fileInterface, nil
+}
+
+// getSystemIDFromCsiVolumeID returns the PowerFlex system ID from the CSI volume ID
+func (s *service) getSystemIDFromCsiVolumeID(csiVolID string) string {
+	containsSlash := strings.Contains(csiVolID, "/")
+	if containsSlash {
+		i := strings.LastIndex(csiVolID, "/")
+		if i == -1 {
+			return ""
+		}
+		tokens := strings.Split(csiVolID, "/")
+		if len(tokens) > 1 {
+			sys := csiVolID[:i]
+			if id, ok := s.connectedSystemNameToID[sys]; ok {
+				return id
+			}
+			return sys
+		}
+	} else {
+		i := strings.LastIndex(csiVolID, "-")
+		if i == -1 {
+			return ""
+		}
+		tokens := strings.Split(csiVolID, "-")
+		if len(tokens) > 1 {
+			sys := csiVolID[:i]
+			if id, ok := s.connectedSystemNameToID[sys]; ok {
+				return id
+			}
+			return sys
+		}
+	}
 	// There is only volume ID in csi volume ID
 	return ""
 }
 
+// Contains checks if a string is present in a slice of strings
+func Contains(slice []string, element string) bool {
+	for _, a := range slice {
+		if a == element {
+			return true
+		}
+	}
+	return false
+}
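+
+// For reference, the two CSI volume ID layouts handled by the helpers above,
+// with hypothetical IDs (the filesystem ID mirrors the test fixture JSON):
+//
+//	block volume: "14dbbf5617523654-456afc567000000a"
+//	  -> systemID "14dbbf5617523654", volume ID "456afc567000000a"
+//	NFS volume:   "14dbbf5617523654/64a6fffa-dd25-7a70-7db6-5643ff849351"
+//	  -> systemID "14dbbf5617523654", filesystem ID "64a6fffa-dd25-7a70-7db6-5643ff849351"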
+
+func (s *service) unexportFilesystem(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest, client *goscaleio.Client, fs *siotypes.FileSystem, volumeContextID, nodeIP, nodeID string) error {
+	nfsExportName := NFSExportNamePrefix + fs.Name
+	nfsExportExists := false
+	var nfsExportID string
+	deleteExport := true
+	// Check if nfs export exists for the File system
+	nfsExportList, err := client.GetNFSExport()
+	if err != nil {
+		return err
+	}
+
+	for _, nfsExport := range nfsExportList {
+		if nfsExport.FileSystemID == fs.ID {
+			nfsExportExists = true
+			if nfsExport.Name != nfsExportName {
+				// This means the share was created manually on the array, so don't delete it via the driver
+				deleteExport = false
+			}
+			nfsExportID = nfsExport.ID
+			nfsExportName = nfsExport.Name
+		}
+	}
+
+	if !nfsExportExists {
+		Log.Infof("NFS Share: %s not found on array.", nfsExportName)
+		return nil
+	}
+
+	// remove host access from NFS Export
+	nfsExportResp, err := client.GetNFSExportByIDName(nfsExportID, "")
+	if err != nil {
+		return status.Errorf(codes.NotFound, "Could not find NFS Export: %s", err)
+	}
+	Log.Debugf("NFS Export found: %#v", nfsExportResp)
+
+	modifyParam := &siotypes.NFSExportModify{}
+
+	sort.Strings(nfsExportResp.ReadOnlyHosts)
+	index := sort.SearchStrings(nfsExportResp.ReadOnlyHosts, nodeIP)
+	// sort.SearchStrings returns an insertion index, not -1, so confirm the
+	// host is actually present before scheduling its removal
+	if index < len(nfsExportResp.ReadOnlyHosts) && strings.HasPrefix(nfsExportResp.ReadOnlyHosts[index], nodeIP) {
+		modifyParam.RemoveReadOnlyHosts = []string{nodeIP + "/255.255.255.255"} // we can't remove without netmask
+		Log.Debug("Going to remove IP from ROHosts: ", modifyParam.RemoveReadOnlyHosts[0])
+	}
+
+	sort.Strings(nfsExportResp.ReadOnlyRootHosts)
+	index = sort.SearchStrings(nfsExportResp.ReadOnlyRootHosts, nodeIP)
+	if index < len(nfsExportResp.ReadOnlyRootHosts) && strings.HasPrefix(nfsExportResp.ReadOnlyRootHosts[index], nodeIP) {
+		modifyParam.RemoveReadOnlyRootHosts = []string{nodeIP + "/255.255.255.255"} // we can't remove without netmask
+		Log.Debug("Going to remove IP from RORootHosts: ", modifyParam.RemoveReadOnlyRootHosts[0])
+	}
+
+	if Contains(nfsExportResp.ReadWriteHosts, nodeIP+"/255.255.255.255") {
+		modifyParam.RemoveReadWriteHosts = []string{nodeIP + "/255.255.255.255"} // we can't remove without netmask
+		Log.Debug("Going to remove IP from RWHosts: ", modifyParam.RemoveReadWriteHosts[0])
+	}
+
+	if Contains(nfsExportResp.ReadWriteRootHosts, nodeIP+"/255.255.255.255") {
+		modifyParam.RemoveReadWriteRootHosts = []string{nodeIP + "/255.255.255.255"} // we can't remove without netmask
+		Log.Debug("Going to remove IP from RWRootHosts: ", modifyParam.RemoveReadWriteRootHosts[0])
+	}
+
+	err = client.ModifyNFSExport(modifyParam, nfsExportID)
+	if err != nil {
+		return status.Errorf(codes.NotFound, "Removing host %s access from NFS Export failed. Error: %v", nodeID, err)
+	}
+	Log.Debugf("Host: %s access is removed from NFS Share: %s", nodeID, nfsExportID)
+
+	if deleteExport {
+		err = client.DeleteNFSExport(nfsExportID)
+		if err != nil {
+			return status.Errorf(codes.NotFound, "delete NFS Export failed. Error: %v", err)
+		}
+		Log.Printf("NFS export %s deleted successfully", nfsExportID)
+	}
+
+	Log.Debugf("ControllerUnpublishVolume successful for volid: [%s]", volumeContextID)
+
+	return nil
+}
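+
+// A note on the sort.SearchStrings usage above: it returns the insertion
+// index rather than -1 on a miss, so a hit must be confirmed by inspecting
+// the element at that index. A minimal sketch with illustrative values:
+//
+//	hosts := []string{"10.0.0.1/255.255.255.255", "10.0.0.2/255.255.255.255"}
+//	sort.Strings(hosts)
+//	i := sort.SearchStrings(hosts, "10.0.0.2")
+//	found := i < len(hosts) && strings.HasPrefix(hosts[i], "10.0.0.2")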
Error:%v", err) + } + + Log.Printf("NFS export %s deleted successfully", nfsExportID) + } + + Log.Debugf("ControllerUnpublishVolume successful for volid: [%s]", volumeContextID) + + return nil + +} + +// exportFilesystem - Method to export filesystem with idempotency +func (s *service) exportFilesystem(ctx context.Context, req *csi.ControllerPublishVolumeRequest, client *goscaleio.Client, fs *siotypes.FileSystem, nodeIP, nodeID string, pContext map[string]string, am *csi.VolumeCapability_AccessMode) (*csi.ControllerPublishVolumeResponse, error) { + hostURL := nodeIP + "/" + "255.255.255.255" + var nfsExportName string + nfsExportName = NFSExportNamePrefix + fs.Name + + nfsExportExists := false + var nfsExportID string + + // Check if nfs export exists for the File system + nfsExportList, err := client.GetNFSExport() + + if err != nil { + return nil, err + } + + for _, nfsExport := range nfsExportList { + if nfsExport.FileSystemID == fs.ID { + nfsExportExists = true + nfsExportID = nfsExport.ID + nfsExportName = nfsExport.Name + } + } + + // Create NFS export if it doesn't exist + if !nfsExportExists { + Log.Debugf("NFS Export does not exist for fs: %s ,proceeding to create NFS Export", fs.Name) + resp, err := client.CreateNFSExport(&siotypes.NFSExportCreate{ + Name: nfsExportName, + FileSystemID: fs.ID, + Path: NFSExportLocalPath + fs.Name, + }) + + if err != nil { + return nil, status.Errorf(codes.Internal, "create NFS Export failed. Error:%v", err) + } + + nfsExportID = resp.ID + } + + nfsExportResp, err := client.GetNFSExportByIDName(nfsExportID, "") + + if err != nil { + return nil, status.Errorf(codes.NotFound, "Could not find NFS Export: %s", err) + } + + readOnlyHosts := nfsExportResp.ReadOnlyHosts + readWriteHosts := nfsExportResp.ReadWriteHosts + readOnlyRootHosts := nfsExportResp.ReadOnlyRootHosts + readWriteRootHosts := nfsExportResp.ReadWriteRootHosts + + foundIncompatible := false + foundIdempotent := false + otherHostsWithAccess := len(readOnlyHosts) + + var readHostList, readWriteHostList []string + + for _, host := range readOnlyHosts { + if host == hostURL { + foundIncompatible = true + break + } + } + + otherHostsWithAccess += len(readWriteHosts) + if !foundIncompatible { + for _, host := range readWriteHosts { + if host == hostURL { + foundIncompatible = true + break + } + } + } + + otherHostsWithAccess += len(readOnlyRootHosts) + if !foundIncompatible { + for _, host := range readOnlyRootHosts { + readHostList = append(readHostList, host) + if host == hostURL { + if am.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { + foundIdempotent = true + } else { + foundIncompatible = true + } + } + } + } + otherHostsWithAccess += len(readWriteRootHosts) + + if !foundIncompatible && !foundIdempotent { + for _, host := range readWriteRootHosts { + readWriteHostList = append(readWriteHostList, hostURL) + if host == hostURL { + if am.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { + foundIncompatible = true + } else { + foundIdempotent = true + otherHostsWithAccess-- + } + } + } + } + + if foundIncompatible { + return nil, status.Errorf(codes.NotFound, "Host: %s has access on NFS Export: %s with incompatible access mode.", nodeID, nfsExportID) + } + + if (am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER || am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER || am.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER) && otherHostsWithAccess > 0 { + return nil, status.Errorf(codes.NotFound, "Other hosts have 
access on NFS Share: %s", nfsExportID) + } + + //Idempotent case + if foundIdempotent { + Log.Info("Host has access to the given host and exists in the required state.") + return &csi.ControllerPublishVolumeResponse{PublishContext: pContext}, nil + } + //Allocate host access to NFS Share with appropriate access mode + if am.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY { + readHostList = append(readHostList, hostURL) + err := client.ModifyNFSExport(&siotypes.NFSExportModify{AddReadOnlyRootHosts: readHostList}, nfsExportID) + if err != nil { + return nil, status.Errorf(codes.Internal, "Allocating host access failed with the error: %v", err) + } + } else { + readWriteHostList = append(readWriteHostList, hostURL) + err := client.ModifyNFSExport(&siotypes.NFSExportModify{AddReadWriteRootHosts: readWriteHostList}, nfsExportID) + if err != nil { + return nil, status.Errorf(codes.Internal, "Allocating host access failed with the error: %v", err) + } + } + + Log.Debugf("NFS Export: %s is accessible to host: %s with access mode: %s", nfsExportID, nodeID, am.Mode) + Log.Debugf("ControllerPublishVolume successful for volid: [%s]", pContext["volumeContextId"]) + + return &csi.ControllerPublishVolumeResponse{PublishContext: pContext}, nil +} + // this function updates volumePrefixToSystems, a map of volume ID prefixes -> system IDs // this is needed for checkSystemVolumes, a function that verifies that any legacy vol ID // is found on the default system, only @@ -1039,3 +1487,25 @@ func (s *service) expandReplicationPair(ctx context.Context, req *csi.Controller return nil } + +func (s *service) getNASServerIDFromName(systemID, nasName string) (string, error) { + if nasName == "" { + Log.Printf("NAS server not provided.") + return "", nil + } + system, err := s.adminClients[systemID].FindSystem(systemID, "", "") + if err != nil { + return "", err + } + nas, err := system.GetNASByIDName("", nasName) + if err != nil { + return "", err + } + return nas.ID, nil +} + +func (s *service) GetNfsTopology(systemID string) []*csi.Topology { + nfsTopology := new(csi.Topology) + nfsTopology.Segments = map[string]string{Name + "/" + systemID + "-nfs": "true"} + return []*csi.Topology{nfsTopology} +} diff --git a/service/service_unit_test.go b/service/service_unit_test.go index f322372f..66eea2e9 100644 --- a/service/service_unit_test.go +++ b/service/service_unit_test.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" + csi "github.com/container-storage-interface/spec/lib/go/csi" siotypes "github.com/dell/goscaleio/types/v1" "github.com/stretchr/testify/assert" diff --git a/service/step_defs_test.go b/service/step_defs_test.go index 773433af..44ac1bbb 100644 --- a/service/step_defs_test.go +++ b/service/step_defs_test.go @@ -141,6 +141,8 @@ type feature struct { clusterUID string createStorageProtectionGroupResponse *replication.CreateStorageProtectionGroupResponse deleteStorageProtectionGroupResponse *replication.DeleteStorageProtectionGroupResponse + fileSystemID string + systemID string } func (f *feature) checkGoRoutines(tag string) { @@ -635,6 +637,34 @@ func getTypicalCreateVolumeRequest() *csi.CreateVolumeRequest { return req } +func getTypicalNFSCreateVolumeRequest() *csi.CreateVolumeRequest { + req := new(csi.CreateVolumeRequest) + params := make(map[string]string) + params["storagepool"] = "viki_pool_HDD_20181031" + req.Parameters = params + req.Name = "mount1" + capacityRange := new(csi.CapacityRange) + capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 + req.CapacityRange = capacityRange + 
capability := new(csi.VolumeCapability) + mountVolume := new(csi.VolumeCapability_MountVolume) + mountVolume.FsType = "nfs" + if mountVolume.FsType == "nfs" { + req.Parameters["nasName"] = "dummy-nas-server" + } + mountVolume.MountFlags = make([]string, 0) + mount := new(csi.VolumeCapability_Mount) + mount.Mount = mountVolume + capability.AccessType = mount + accessMode := new(csi.VolumeCapability_AccessMode) + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + capability.AccessMode = accessMode + capabilities := make([]*csi.VolumeCapability, 0) + capabilities = append(capabilities, capability) + req.VolumeCapabilities = capabilities + return req +} + func (f *feature) iSpecifyCreateVolumeMountRequest(fstype string) error { req := new(csi.CreateVolumeRequest) params := make(map[string]string) @@ -647,6 +677,9 @@ func (f *feature) iSpecifyCreateVolumeMountRequest(fstype string) error { capability := new(csi.VolumeCapability) mountVolume := new(csi.VolumeCapability_MountVolume) mountVolume.FsType = fstype + if mountVolume.FsType == "nfs" { + req.Parameters["nasName"] = "dummy-nas-server" + } mountVolume.MountFlags = make([]string, 0) mount := new(csi.VolumeCapability_Mount) mount.Mount = mountVolume @@ -671,9 +704,12 @@ func (f *feature) iCallCreateVolume(name string) error { req.Name = name if stepHandlersErrors.NoAdminError { + fmt.Println("I am in Noadmin error.....") f.service.adminClients[arrayID] = nil } + fmt.Println("I am in iCallCreateVolume fn.....") + f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) if f.err != nil { log.Printf("CreateVolume called failed: %s\n", f.err.Error()) @@ -788,10 +824,97 @@ func (f *feature) iSpecifyAccessibilityRequirementsWithASystemIDOf(requestedSyst return nil } +func (f *feature) iSpecifyAccessibilityRequirementsNFSWithASystemIDOf(requestedSystem string) error { + if requestedSystem == "f.service.opt.SystemName" { + requestedSystem = f.service.opts.defaultSystemID + } + req := new(csi.CreateVolumeRequest) + params := make(map[string]string) + params["storagepool"] = "viki_pool_HDD_20181031" + req.Parameters = params + req.Name = "accessability" + capacityRange := new(csi.CapacityRange) + capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 + req.CapacityRange = capacityRange + req.AccessibilityRequirements = new(csi.TopologyRequirement) + top := new(csi.Topology) + top.Segments = map[string]string{ + "csi-vxflexos.dellemc.com/" + requestedSystem + "-nfs": "powerflex.dellemc.com", + } + req.AccessibilityRequirements.Preferred = append(req.AccessibilityRequirements.Preferred, top) + req.AccessibilityRequirements.Preferred = append(req.AccessibilityRequirements.Preferred, top) + capability := new(csi.VolumeCapability) + mountVolume := new(csi.VolumeCapability_MountVolume) + mountVolume.FsType = "nfs" + if mountVolume.FsType == "nfs" { + req.Parameters["nasName"] = "dummy-nas-server" + } + mountVolume.MountFlags = make([]string, 0) + mount := new(csi.VolumeCapability_Mount) + mount.Mount = mountVolume + capability.AccessType = mount + accessMode := new(csi.VolumeCapability_AccessMode) + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + capability.AccessMode = accessMode + capabilities := make([]*csi.VolumeCapability, 0) + capabilities = append(capabilities, capability) + req.VolumeCapabilities = capabilities + f.createVolumeRequest = req + return nil +} + +func (f *feature) iSpecifyBadAccessibilityRequirementsNFSWithASystemIDOf(requestedSystem string) error { + if requestedSystem == 
"f.service.opt.SystemName" { + requestedSystem = f.service.opts.defaultSystemID + } + req := new(csi.CreateVolumeRequest) + params := make(map[string]string) + params["storagepool"] = "viki_pool_HDD_20181031" + req.Parameters = params + req.Name = "accessability" + capacityRange := new(csi.CapacityRange) + capacityRange.RequiredBytes = 32 * 1024 * 1024 * 1024 + req.CapacityRange = capacityRange + req.AccessibilityRequirements = new(csi.TopologyRequirement) + top := new(csi.Topology) + top.Segments = map[string]string{ + "csi-vxflexos.dellemc.com/" + requestedSystem + "-abc": "powerflex.dellemc.com", + } + req.AccessibilityRequirements.Preferred = append(req.AccessibilityRequirements.Preferred, top) + capability := new(csi.VolumeCapability) + mountVolume := new(csi.VolumeCapability_MountVolume) + mountVolume.FsType = "nfs" + if mountVolume.FsType == "nfs" { + req.Parameters["nasName"] = "dummy-nas-server" + } + mountVolume.MountFlags = make([]string, 0) + mount := new(csi.VolumeCapability_Mount) + mount.Mount = mountVolume + capability.AccessType = mount + accessMode := new(csi.VolumeCapability_AccessMode) + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + capability.AccessMode = accessMode + capabilities := make([]*csi.VolumeCapability, 0) + capabilities = append(capabilities, capability) + req.VolumeCapabilities = capabilities + f.createVolumeRequest = req + return nil +} + func (f *feature) aValidCreateVolumeResponseWithTopologyIsReturned() error { if f.err != nil { return f.err } + + isNFS := false + var fsType string + if len(f.createVolumeRequest.VolumeCapabilities) != 0 { + fsType = f.createVolumeRequest.VolumeCapabilities[0].GetMount().GetFsType() + if fsType == "nfs" { + isNFS = true + } + } + f.volumeIDList = append(f.volumeIDList, f.createVolumeResponse.Volume.VolumeId) topology := f.createVolumeResponse.Volume.AccessibleTopology if len(topology) != 1 { @@ -815,7 +938,20 @@ func (f *feature) aValidCreateVolumeResponseWithTopologyIsReturned() error { if len(tokens) > 1 { constraint = tokens[1] } + log.Printf("Found topology constraint: VxFlex OS system: %s", constraint) + if isNFS { + nfsTokens := strings.Split(constraint, "-") + nfsLabel := "" + if len(nfsTokens) > 1 { + constraint = nfsTokens[0] + nfsLabel = nfsTokens[1] + if nfsLabel != "nfs" { + return status.Errorf(codes.InvalidArgument, + "Invalid topology requested for NFS Volume. Please validate your storage class has nfs topology.") + } + } + } if constraint != requestedSystem { fmt.Printf("Volume topology segement should have system %s. 
Found %s.", requestedSystem, constraint) return errors.New("wrong systemID in AccessibleTopology") @@ -862,7 +998,13 @@ func (f *feature) iSpecifyMULTINODEWRITER() error { } func (f *feature) iSpecifyABadCapacity() error { - req := getTypicalCreateVolumeRequest() + var req *csi.CreateVolumeRequest + if f.createVolumeRequest == nil { + req = getTypicalCreateVolumeRequest() + } else { + req = f.createVolumeRequest + } + capacityRange := new(csi.CapacityRange) capacityRange.RequiredBytes = -32 * 1024 * 1024 * 1024 req.CapacityRange = capacityRange @@ -881,7 +1023,39 @@ func (f *feature) iSpecifyNoStoragePool() error { func (f *feature) iCallCreateVolumeSize(name string, size int64) error { ctx := new(context.Context) - req := getTypicalCreateVolumeRequest() + var req *csi.CreateVolumeRequest + if f.createVolumeRequest == nil { + req = getTypicalCreateVolumeRequest() + } else { + req = f.createVolumeRequest + } + + capacityRange := new(csi.CapacityRange) + capacityRange.RequiredBytes = size * 1024 * 1024 * 1024 + req.CapacityRange = capacityRange + req.Name = name + f.createVolumeRequest = req + + f.createVolumeResponse, f.err = f.service.CreateVolume(*ctx, req) + if f.err != nil { + log.Printf("CreateVolumeSize called failed: %s\n", f.err.Error()) + } + if f.createVolumeResponse != nil { + log.Printf("vol id %s\n", f.createVolumeResponse.GetVolume().VolumeId) + } + + return nil +} + +func (f *feature) iCallCreateVolumeSizeNFS(name string, size int64) error { + ctx := new(context.Context) + var req *csi.CreateVolumeRequest + if f.createVolumeRequest == nil { + req = getTypicalNFSCreateVolumeRequest() + } else { + req = f.createVolumeRequest + } + capacityRange := new(csi.CapacityRange) capacityRange.RequiredBytes = size * 1024 * 1024 * 1024 req.CapacityRange = capacityRange @@ -900,9 +1074,9 @@ func (f *feature) iCallCreateVolumeSize(name string, size int64) error { } func (f *feature) iChangeTheStoragePool(storagePoolName string) error { - params := make(map[string]string) - params["storagepool"] = storagePoolName - f.createVolumeRequest.Parameters = params + // params := make(map[string]string) + // params["storagepool"] = storagePoolName + f.createVolumeRequest.Parameters["storagepool"] = storagePoolName return nil } @@ -912,6 +1086,8 @@ func (f *feature) iInduceError(errtype string) error { switch errtype { case "WrongSysNameError": stepHandlersErrors.WrongSysNameError = true + case "BadCapacityError": + stepHandlersErrors.BadCapacityError = true case "NoAdminError": stepHandlersErrors.NoAdminError = true case "NoUserError": @@ -926,6 +1102,8 @@ func (f *feature) iInduceError(errtype string) error { stepHandlersErrors.WrongVolIDError = true case "WrongSystemError": stepHandlersErrors.WrongSystemError = true + case "NFSExportsInstancesError": + stepHandlersErrors.NFSExportInstancesError = true case "BadVolIDError": stepHandlersErrors.BadVolIDError = true case "NoCsiVolIDError": @@ -1002,6 +1180,14 @@ func (f *feature) iInduceError(errtype string) error { stepHandlersErrors.RemoveVolumeError = true case "VolumeInstancesError": stepHandlersErrors.VolumeInstancesError = true + case "FileSystemInstancesError": + stepHandlersErrors.FileSystemInstancesError = true + case "GetFileSystemsByIdError": + stepHandlersErrors.GetFileSystemsByIDError = true + case "NasNotFoundError": + stepHandlersErrors.NasServerNotFoundError = true + case "fileInterfaceNotFoundError": + stepHandlersErrors.FileInterfaceNotFoundError = true case "NoVolumeIDError": stepHandlersErrors.NoVolumeIDError = true case 
"BadVolIDJSON": @@ -1213,6 +1399,67 @@ func (f *feature) getControllerPublishVolumeRequest(accessType string) *csi.Cont return req } +func (f *feature) getControllerPublishVolumeRequestNFS(accessType string) *csi.ControllerPublishVolumeRequest { + capability := new(csi.VolumeCapability) + block := new(csi.VolumeCapability_Block) + block.Block = new(csi.VolumeCapability_BlockVolume) + if f.useAccessTypeMount { + mountVolume := new(csi.VolumeCapability_MountVolume) + mountVolume.FsType = "nfs" + mountVolume.MountFlags = make([]string, 0) + mount := new(csi.VolumeCapability_Mount) + mount.Mount = mountVolume + capability.AccessType = mount + } else { + capability.AccessType = block + } + accessMode := new(csi.VolumeCapability_AccessMode) + switch accessType { + case "multi-single-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER + case "single-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + case "multiple-reader": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY + case "multiple-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER + case "single-node-single-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER + case "single-node-multi-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER + case "unknown": + accessMode.Mode = csi.VolumeCapability_AccessMode_UNKNOWN + } + if !f.omitAccessMode { + capability.AccessMode = accessMode + } + fmt.Printf("capability.AccessType %v\n", capability.AccessType) + fmt.Printf("capability.AccessMode %v\n", capability.AccessMode) + req := new(csi.ControllerPublishVolumeRequest) + if !f.noVolumeID { + if f.invalidVolumeID { + req.VolumeId = badVolumeID2 + } else { + req.VolumeId = "14dbbf5617523654" + "/" + fileSystemNameToID["volume1"] + } + } + + if stepHandlersErrors.VolumeIDTooShortError { + req.VolumeId = badVolumeID3 + } + + if !f.noNodeID { + req.NodeId = goodNodeID + } + req.Readonly = false + if !f.omitVolumeCapability { + req.VolumeCapability = capability + } + req.VolumeContext = make(map[string]string) + req.VolumeContext[KeyFsType] = "nfs" + return req +} + func (f *feature) getControllerListVolumesRequest(maxEntries int32, startingToken string) *csi.ListVolumesRequest { return &csi.ListVolumesRequest{ MaxEntries: maxEntries, @@ -1261,6 +1508,84 @@ func (f *feature) getControllerDeleteVolumeRequest(accessType string) *csi.Delet return req } +func (f *feature) getControllerDeleteVolumeRequestBad(accessType string) *csi.DeleteVolumeRequest { + capability := new(csi.VolumeCapability) + block := new(csi.VolumeCapability_Block) + block.Block = new(csi.VolumeCapability_BlockVolume) + if f.useAccessTypeMount { + mountVolume := new(csi.VolumeCapability_MountVolume) + mountVolume.FsType = "xfs" + mountVolume.MountFlags = make([]string, 0) + mount := new(csi.VolumeCapability_Mount) + mount.Mount = mountVolume + capability.AccessType = mount + } else { + capability.AccessType = block + } + accessMode := new(csi.VolumeCapability_AccessMode) + switch accessType { + case "single-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + case "multiple-reader": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY + case "multiple-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER + case "unknown": + accessMode.Mode = csi.VolumeCapability_AccessMode_UNKNOWN + } + if !f.omitAccessMode { + 
capability.AccessMode = accessMode + } + fmt.Printf("capability.AccessType %v\n", capability.AccessType) + fmt.Printf("capability.AccessMode %v\n", capability.AccessMode) + req := new(csi.DeleteVolumeRequest) + if !f.noVolumeID { + if f.invalidVolumeID { + req.VolumeId = badVolumeID2 + } else { + req.VolumeId = "" + } + } + return req +} + +func (f *feature) getControllerDeleteVolumeRequestNFS(accessType string) *csi.DeleteVolumeRequest { + capability := new(csi.VolumeCapability) + + mountVolume := new(csi.VolumeCapability_MountVolume) + mountVolume.FsType = "nfs" + mountVolume.MountFlags = make([]string, 0) + mount := new(csi.VolumeCapability_Mount) + mount.Mount = mountVolume + capability.AccessType = mount + + accessMode := new(csi.VolumeCapability_AccessMode) + switch accessType { + case "single-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + case "multiple-reader": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY + case "multiple-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER + case "unknown": + accessMode.Mode = csi.VolumeCapability_AccessMode_UNKNOWN + } + if !f.omitAccessMode { + capability.AccessMode = accessMode + } + fmt.Printf("capability.AccessType %v\n", capability.AccessType) + fmt.Printf("capability.AccessMode %v\n", capability.AccessMode) + req := new(csi.DeleteVolumeRequest) + if !f.noVolumeID { + if f.invalidVolumeID { + req.VolumeId = badVolumeID2 + } else { + req.VolumeId = "14dbbf5617523654" + "/" + fileSystemNameToID["volume1"] + } + } + return req +} + func (f *feature) iCallPublishVolumeWith(arg1 string) error { ctx := new(context.Context) req := f.publishVolumeRequest @@ -1277,6 +1602,45 @@ func (f *feature) iCallPublishVolumeWith(arg1 string) error { return nil } +func (f *feature) iCallPublishVolumeWithNFS(arg1 string) error { + ctx := new(context.Context) + req := f.publishVolumeRequest + if f.publishVolumeRequest == nil { + req = f.getControllerPublishVolumeRequestNFS(arg1) + f.publishVolumeRequest = req + } else { + capability := new(csi.VolumeCapability) + accessMode := new(csi.VolumeCapability_AccessMode) + switch arg1 { + case "multi-single-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER + case "single-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER + case "multiple-reader": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY + case "multiple-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER + case "single-node-single-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER + case "single-node-multi-writer": + accessMode.Mode = csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER + case "unknown": + accessMode.Mode = csi.VolumeCapability_AccessMode_UNKNOWN + } + if !f.omitAccessMode { + capability.AccessMode = accessMode + } + req.VolumeCapability.AccessMode = accessMode + } + + log.Printf("Calling controllerPublishVolume") + f.publishVolumeResponse, f.err = f.service.ControllerPublishVolume(*ctx, req) + if f.err != nil { + log.Printf("PublishVolume call failed: %s\n", f.err.Error()) + } + return nil +} + func (f *feature) aValidPublishVolumeResponseIsReturned() error { if f.err != nil { return errors.New("PublishVolume returned error: " + f.err.Error()) @@ -1303,6 +1667,20 @@ func (f *feature) aValidVolume() error { return nil } +func (f *feature) aBadFileSystem() error { + for key := range 
fileSystemIDName { + fileSystemIDName[key] = "" + } + return nil +} + +func (f *feature) aBadNFSExport() error { + for key := range nfsExportIDName { + nfsExportIDName[key] = "" + } + return nil +} + func (f *feature) anInvalidVolume() error { f.invalidVolumeID = true return nil @@ -1363,6 +1741,21 @@ func (f *feature) getControllerUnpublishVolumeRequest() *csi.ControllerUnpublish return req } +func (f *feature) getControllerUnpublishVolumeRequestNFS() *csi.ControllerUnpublishVolumeRequest { + req := new(csi.ControllerUnpublishVolumeRequest) + if !f.noVolumeID { + if f.invalidVolumeID { + req.VolumeId = badVolumeID2 + } else { + req.VolumeId = "14dbbf5617523654" + "/" + fileSystemNameToID["volume1"] + } + } + if !f.noNodeID { + req.NodeId = goodNodeID + } + return req +} + func (f *feature) iCallUnpublishVolume() error { ctx := new(context.Context) req := f.unpublishVolumeRequest @@ -1378,6 +1771,21 @@ func (f *feature) iCallUnpublishVolume() error { return nil } +func (f *feature) iCallUnpublishVolumeNFS() error { + ctx := new(context.Context) + req := f.unpublishVolumeRequest + if f.unpublishVolumeRequest == nil { + req = f.getControllerUnpublishVolumeRequestNFS() + f.unpublishVolumeRequest = req + } + log.Printf("Calling controllerUnpublishVolume: %s", req.VolumeId) + f.unpublishVolumeResponse, f.err = f.service.ControllerUnpublishVolume(*ctx, req) + if f.err != nil { + log.Printf("UnpublishVolume call failed: %s\n", f.err.Error()) + } + return nil +} + func (f *feature) aValidUnpublishVolumeResponseIsReturned() error { if f.unpublishVolumeResponse == nil { return errors.New("expected unpublishVolumeResponse (with no contents) but did not get one") @@ -1440,6 +1848,36 @@ func (f *feature) iCallDeleteVolumeWith(arg1 string) error { return nil } +func (f *feature) iCallDeleteVolumeWithBad(arg1 string) error { + ctx := new(context.Context) + req := f.deleteVolumeRequest + if f.deleteVolumeRequest == nil { + req = f.getControllerDeleteVolumeRequestBad(arg1) + f.deleteVolumeRequest = req + } + log.Printf("Calling DeleteVolume") + f.deleteVolumeResponse, f.err = f.service.DeleteVolume(*ctx, req) + if f.err != nil { + log.Printf("DeleteVolume call failed: %s\n", f.err.Error()) + } + return nil +} + +func (f *feature) iCallDeleteVolumeNFSWith(arg1 string) error { + ctx := new(context.Context) + req := f.deleteVolumeRequest + if f.deleteVolumeRequest == nil { + req = f.getControllerDeleteVolumeRequestNFS(arg1) + f.deleteVolumeRequest = req + } + log.Printf("Calling DeleteVolume") + f.deleteVolumeResponse, f.err = f.service.DeleteVolume(*ctx, req) + if f.err != nil { + log.Printf("DeleteVolume call failed: %s\n", f.err.Error()) + } + return nil +} + func (f *feature) aValidDeleteVolumeResponseIsReturned() error { if f.deleteVolumeResponse == nil { return errors.New("expected deleteVolumeResponse (with no contents) but did not get one") @@ -2066,6 +2504,22 @@ func (f *feature) getNodePublishVolumeRequest() error { return nil } +func (f *feature) getNodePublishVolumeRequestNFS() error { + req := new(csi.NodePublishVolumeRequest) + req.VolumeId = "14dbbf5617523654" + "/" + fileSystemNameToID["volume1"] + req.Readonly = false + req.VolumeCapability = f.capability + + mount := f.capability.GetMount() + if mount != nil { + req.TargetPath = datadir + } + req.VolumeContext = make(map[string]string) + req.VolumeContext[KeyFsType] = "nfs" + f.nodePublishVolumeRequest = req + return nil +} + func (f *feature) iGiveRequestVolumeContext() error { volContext := map[string]string{ @@ -2137,6 +2591,29 @@ func 
(f *feature) iCallNodePublishVolume(arg1 string) error { return nil } +func (f *feature) iCallNodePublishVolumeNFS(arg1 string) error { + header := metadata.New(map[string]string{"csi.requestid": "1"}) + ctx := metadata.NewIncomingContext(context.Background(), header) + req := f.nodePublishVolumeRequest + if req == nil { + fmt.Printf("Request was nil\n") + _ = f.getNodePublishVolumeRequestNFS() + req = f.nodePublishVolumeRequest + } + fmt.Printf("Calling NodePublishVolume\n") + fmt.Printf("nodePV req is: %v \n", req) + _, err := f.service.NodePublishVolume(ctx, req) + if err != nil { + fmt.Printf("NodePublishVolume failed: %s\n", err.Error()) + if f.err == nil { + f.err = err + } + } else { + fmt.Printf("NodePublishVolume completed successfully\n") + } + return nil +} + func (f *feature) iCallUnmountPrivMount() error { gofsutil.GOFSMock.InduceGetMountsError = true ctx := new(context.Context) @@ -2426,6 +2903,16 @@ func (f *feature) iCallBeforeServe() error { stringSlice = append(stringSlice, "X_CSI_PRIVATE_MOUNT_DIR=/csi") stringSlice = append(stringSlice, "X_CSI_VXFLEXOS_ENABLESNAPSHOTCGDELETE=true") stringSlice = append(stringSlice, "X_CSI_VXFLEXOS_ENABLELISTVOLUMESNAPSHOTS=true") + stringSlice = append(stringSlice, "X_CSI_HEALTH_MONITOR_ENABLED=true") + stringSlice = append(stringSlice, "X_CSI_RENAME_SDC_ENABLED=true") + stringSlice = append(stringSlice, "X_CSI_RENAME_SDC_PREFIX=test") + stringSlice = append(stringSlice, "X_CSI_NFS_ACLS=777") + stringSlice = append(stringSlice, "X_CSI_APPROVE_SDC_ENABLED=true") + stringSlice = append(stringSlice, "X_CSI_REPLICATION_CONTEXT_PREFIX=test") + stringSlice = append(stringSlice, "X_CSI_REPLICATION_PREFIX=test") + stringSlice = append(stringSlice, "X_CSI_POWERFLEX_EXTERNAL_ACCESS=test") + stringSlice = append(stringSlice, "X_CSI_VXFLEXOS_THICKPROVISIONING=dummy") + if os.Getenv("ALLOW_RWO_MULTI_POD") == "true" { fmt.Printf("debug set ALLOW_RWO_MULTI_POD\n") stringSlice = append(stringSlice, "X_CSI_ALLOW_RWO_MULTI_POD_ACCESS=true") @@ -3182,6 +3669,21 @@ func (f *feature) iCallGetVolumeIDFromCsiVolumeID(csiVolID string) error { return nil } +func (f *feature) iCallGetFileSystemIDFromCsiVolumeID(csiVolID string) error { + fmt.Println("csiVolID", csiVolID) + v := getFilesystemIDFromCsiVolumeID(csiVolID) + fmt.Println("got fileSystemID", v) + f.fileSystemID = v + return nil +} + +func (f *feature) theFileSystemIDIs(fsID string) error { + if fsID == f.fileSystemID { + return nil + } + return fmt.Errorf("expected %s but got %s", fsID, f.fileSystemID) +} + func (f *feature) iCallGetSystemIDFromCsiVolumeID(csiVolID string) error { s := f.service.getSystemIDFromCsiVolumeID(csiVolID) fmt.Printf("DEBUG getSystem %s\n", s) @@ -3190,6 +3692,19 @@ func (f *feature) iCallGetSystemIDFromCsiVolumeID(csiVolID string) error { return nil } +func (f *feature) iCallGetSystemIDFromCsiVolumeIDNfs(csiVolID string) error { + s := f.service.getSystemIDFromCsiVolumeID(csiVolID) + f.systemID = s + return nil +} + +func (f *feature) theSystemIDIs(systemID string) error { + if systemID == f.systemID { + return nil + } + return fmt.Errorf("expected %s but got %s", systemID, f.systemID) +} + func (f *feature) iCallGetSystemIDFromParameters(option string) error { params := make(map[string]string) saveID := f.service.opts.defaultSystemID @@ -3523,7 +4038,7 @@ func (f *feature) iCallCreateStorageProtectionGroup() error { req.VolumeHandle = "" } if stepHandlersErrors.BadVolIDError { - req.VolumeHandle = "0/" + req.VolumeHandle = "0%0" } f.createStorageProtectionGroupResponse, f.err = 
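// Editor's note: the X_CSI_* entries assembled in iCallBeforeServe above
// exercise the driver's env-driven option parsing; X_CSI_NFS_ACLS and
// X_CSI_POWERFLEX_EXTERNAL_ACCESS are the two new NFS-related settings.
// A minimal sketch of the pattern, with os.Getenv standing in for the
// driver's env plumbing and opts.NfsAcls an assumed field name:
//
//	if acls := os.Getenv("X_CSI_NFS_ACLS"); acls != "" {
//		opts.NfsAcls = acls // e.g. "777", or NFSv4 ACLs such as "A::OWNER@:RWX"
//	}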
f.service.CreateStorageProtectionGroup(*ctx, req) return nil @@ -3677,6 +4192,8 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call Probe$`, f.iCallProbe) s.Step(`^a valid ProbeResponse is returned$`, f.aValidProbeResponseIsReturned) s.Step(`^the error contains "([^"]*)"$`, f.theErrorContains) + s.Step(`^the fileSystemID is "([^"]*)"$`, f.theFileSystemIDIs) + s.Step(`^the systemID is "([^"]*)"$`, f.theSystemIDIs) s.Step(`^the possible error contains "([^"]*)"$`, f.thePossibleErrorContains) s.Step(`^the Controller has no connection$`, f.theControllerHasNoConnection) s.Step(`^there is a Node Probe Lsmod error$`, f.thereIsANodeProbeLsmodError) @@ -3686,19 +4203,26 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call ValidateConnectivity$`, f.iCallValidateVolumeHostConnectivity) s.Step(`^a valid CreateVolumeResponse is returned$`, f.aValidCreateVolumeResponseIsReturned) s.Step(`^I specify AccessibilityRequirements with a SystemID of "([^"]*)"$`, f.iSpecifyAccessibilityRequirementsWithASystemIDOf) + s.Step(`^I specify NFS AccessibilityRequirements with a SystemID of "([^"]*)"$`, f.iSpecifyAccessibilityRequirementsNFSWithASystemIDOf) + s.Step(`^I specify bad NFS AccessibilityRequirements with a SystemID of "([^"]*)"$`, f.iSpecifyBadAccessibilityRequirementsNFSWithASystemIDOf) s.Step(`^a valid CreateVolumeResponse with topology is returned$`, f.aValidCreateVolumeResponseWithTopologyIsReturned) s.Step(`^I specify MULTINODE_WRITER$`, f.iSpecifyMULTINODEWRITER) s.Step(`^I specify a BadCapacity$`, f.iSpecifyABadCapacity) s.Step(`^I specify NoStoragePool$`, f.iSpecifyNoStoragePool) s.Step(`^I call CreateVolumeSize "([^"]*)" "(\d+)"$`, f.iCallCreateVolumeSize) + s.Step(`^I call CreateVolumeSize nfs "([^"]*)" "(\d+)"$`, f.iCallCreateVolumeSizeNFS) + s.Step(`^I change the StoragePool "([^"]*)"$`, f.iChangeTheStoragePool) s.Step(`^I induce error "([^"]*)"$`, f.iInduceError) s.Step(`^I specify VolumeContentSource$`, f.iSpecifyVolumeContentSource) s.Step(`^I specify CreateVolumeMountRequest "([^"]*)"$`, f.iSpecifyCreateVolumeMountRequest) s.Step(`^I call PublishVolume with "([^"]*)"$`, f.iCallPublishVolumeWith) + s.Step(`^I call NFS PublishVolume with "([^"]*)"$`, f.iCallPublishVolumeWithNFS) s.Step(`^a valid PublishVolumeResponse is returned$`, f.aValidPublishVolumeResponseIsReturned) s.Step(`^a valid volume$`, f.aValidVolume) s.Step(`^an invalid volume$`, f.anInvalidVolume) + s.Step(`^I set bad FileSystem Id`, f.aBadFileSystem) + s.Step(`^I set bad NFSExport Id`, f.aBadNFSExport) s.Step(`^no volume$`, f.noVolume) s.Step(`^no node$`, f.noNode) s.Step(`^no volume capability$`, f.noVolumeCapability) @@ -3707,12 +4231,15 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I use AccessType Mount$`, f.iUseAccessTypeMount) s.Step(`^no error was received$`, f.noErrorWasReceived) s.Step(`^I call UnpublishVolume$`, f.iCallUnpublishVolume) + s.Step(`^I call UnpublishVolume nfs`, f.iCallUnpublishVolumeNFS) s.Step(`^a valid UnpublishVolumeResponse is returned$`, f.aValidUnpublishVolumeResponseIsReturned) s.Step(`^the number of SDC mappings is (\d+)$`, f.theNumberOfSDCMappingsIs) s.Step(`^I call NodeGetInfo$`, f.iCallNodeGetInfo) s.Step(`^I call Node Probe$`, f.iCallNodeProbe) s.Step(`^a valid NodeGetInfoResponse is returned$`, f.aValidNodeGetInfoResponseIsReturned) s.Step(`^I call DeleteVolume with "([^"]*)"$`, f.iCallDeleteVolumeWith) + s.Step(`^I call DeleteVolume with Bad "([^"]*)"$`, f.iCallDeleteVolumeWithBad) + s.Step(`^I call DeleteVolume nfs with "([^"]*)"$`, 
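// Editor's note: each s.Step call in this FeatureContext binds a Gherkin regex
// to one of the methods above. For instance the feature line
//	And I call DeleteVolume nfs with "single-writer"
// matches the pattern on this line, and godog invokes
// iCallDeleteVolumeNFSWith("single-writer"), passing each quoted capture group
// as a string argument.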
f.iCallDeleteVolumeNFSWith) s.Step(`^a valid DeleteVolumeResponse is returned$`, f.aValidDeleteVolumeResponseIsReturned) s.Step(`^the volume is already mapped to an SDC$`, f.theVolumeIsAlreadyMappedToAnSDC) s.Step(`^I call GetCapacity with storage pool "([^"]*)"$`, f.iCallGetCapacityWithStoragePool) @@ -3731,6 +4258,7 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^a capability with voltype "([^"]*)" access "([^"]*)" fstype "([^"]*)"$`, f.aCapabilityWithVoltypeAccessFstype) s.Step(`^a controller published volume$`, f.aControllerPublishedVolume) s.Step(`^I call NodePublishVolume "([^"]*)"$`, f.iCallNodePublishVolume) + s.Step(`^I call NodePublishVolume NFS "([^"]*)"$`, f.iCallNodePublishVolumeNFS) s.Step(`^I call CleanupPrivateTarget$`, f.iCallCleanupPrivateTarget) s.Step(`^I call removeWithRetry$`, f.iCallRemoveWithRetry) s.Step(`^I call evalSymlinks$`, f.iCallEvalSymlinks) @@ -3742,6 +4270,7 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call blockValidateMountVolCapabilities$`, f.iCallBlockValidateVolCapabilities) s.Step(`^I call UnmountAndDeleteTarget$`, f.iCallUnmountAndDeleteTarget) s.Step(`^get Node Publish Volume Request$`, f.getNodePublishVolumeRequest) + s.Step(`^get Node Publish Volume Request NFS$`, f.getNodePublishVolumeRequestNFS) s.Step(`^get Node Publish Ephemeral Volume Request with name "([^"]*)" size "([^"]*)" storagepool "([^"]*)" and systemName "([^"]*)"$`, f.getNodeEphemeralVolumePublishRequest) s.Step(`^I mark request read only$`, f.iMarkRequestReadOnly) s.Step(`^I call NodeUnpublishVolume "([^"]*)"$`, f.iCallNodeUnpublishVolume) @@ -3768,6 +4297,7 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^the snapshot ID is "([^"]*)"$`, f.theSnapshotIDIs) s.Step(`^I invalidate the Probe cache$`, f.iInvalidateTheProbeCache) s.Step(`^I call ControllerExpandVolume set to (\d+)$`, f.iCallControllerExpandVolume) + s.Step(`^I call ControllerExpandVolume set to "([^"]*)"$`, f.iCallControllerExpandVolume) s.Step(`^I call NodeExpandVolume with volumePath as "([^"]*)"$`, f.iCallNodeExpandVolume) s.Step(`^I call NodeGetVolumeStats$`, f.iCallNodeGetVolumeStats) s.Step(`^a correct NodeGetVolumeStats Response is returned$`, f.aCorrectNodeGetVolumeStatsResponse) @@ -3790,7 +4320,9 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^I call EphemeralNodeUnpublish$`, f.iCallEphemeralNodeUnpublish) s.Step(`^I call EphemeralNodePublish$`, f.iCallEphemeralNodePublish) s.Step(`^I call getVolumeIDFromCsiVolumeID "([^"]*)"$`, f.iCallGetVolumeIDFromCsiVolumeID) + s.Step(`^I call getFilesystemIDFromCsiVolumeID "([^"]*)"$`, f.iCallGetFileSystemIDFromCsiVolumeID) s.Step(`^I call getSystemIDFromCsiVolumeID "([^"]*)"$`, f.iCallGetSystemIDFromCsiVolumeID) + s.Step(`^I call getSystemIDFromCsiVolumeIDNfs "([^"]*)"$`, f.iCallGetSystemIDFromCsiVolumeIDNfs) s.Step(`^I call GetSystemIDFromParameters with bad params "([^"]*)"$`, f.iCallGetSystemIDFromParameters) s.Step(`^I call getSystemName$`, f.iCallGetSystemName) s.Step(`^I call mount publishVolume$`, f.iCallMountPublishVolume) diff --git a/service/step_handlers_test.go b/service/step_handlers_test.go index fa708fcd..ac2a0aa0 100644 --- a/service/step_handlers_test.go +++ b/service/step_handlers_test.go @@ -61,6 +61,11 @@ var ( CreateSnapshotError bool RemoveVolumeError bool VolumeInstancesError bool + FileSystemInstancesError bool + GetFileSystemsByIDError bool + NFSExportInstancesError bool + NasServerNotFoundError bool + FileInterfaceNotFoundError bool BadVolIDError bool NoCsiVolIDError bool WrongVolIDError bool 
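The induced-error booleans added above all follow one pattern in the mock HTTP handlers further down: a scenario flips the flag through iInduceError, and the matching handler short-circuits with writeError before touching any state, for example (taken verbatim from handleFileSystems below):

	if stepHandlersErrors.FileSystemInstancesError {
		writeError(w, "induced error", http.StatusRequestTimeout, codes.Internal)
		return
	}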
@@ -71,6 +76,7 @@ var ( NoSysNameError bool NoAdminError bool WrongSysNameError bool + BadCapacityError bool NoVolumeIDError bool SetVolumeSizeError bool systemNameMatchingError bool @@ -124,11 +130,22 @@ func getHandler() http.Handler { }) log.Printf("Clearing volume caches\n") volumeIDToName = make(map[string]string) + fileSystemIDName = make(map[string]string) + fileSystemIDToSizeTotal = make(map[string]string) + nfsExportIDName = make(map[string]string) + fileSystemNameToID = make(map[string]string) + nfsExportNameID = make(map[string]string) + nfsExportIDtoFsID = make(map[string]string) + nfsExportIDPath = make(map[string]string) volumeIDToAncestorID = make(map[string]string) volumeNameToID = make(map[string]string) volumeIDToConsistencyGroupID = make(map[string]string) volumeIDToReplicationState = make(map[string]string) volumeIDToSizeInKB = make(map[string]string) + nfsExportIDReadOnlyRootHosts = make(map[string][]string) + nfsExportIDReadWriteRootHosts = make(map[string][]string) + nfsExportIDReadWriteHosts = make(map[string][]string) + nfsExportIDReadOnlyHosts = make(map[string][]string) debug = false stepHandlersErrors.FindVolumeIDError = false stepHandlersErrors.GetVolByIDError = false @@ -151,7 +168,13 @@ func getHandler() http.Handler { stepHandlersErrors.CreateSnapshotError = false stepHandlersErrors.RemoveVolumeError = false stepHandlersErrors.VolumeInstancesError = false + stepHandlersErrors.NasServerNotFoundError = false + stepHandlersErrors.FileInterfaceNotFoundError = false + stepHandlersErrors.FileSystemInstancesError = false + stepHandlersErrors.NFSExportInstancesError = false + stepHandlersErrors.BadCapacityError = false stepHandlersErrors.BadVolIDError = false + stepHandlersErrors.GetFileSystemsByIDError = false stepHandlersErrors.NoCsiVolIDError = false stepHandlersErrors.WrongVolIDError = false stepHandlersErrors.WrongSystemError = false @@ -200,6 +224,13 @@ func getRouter() http.Handler { scaleioRouter.HandleFunc("/api/login", handleLogin) scaleioRouter.HandleFunc("/api/version", handleVersion) scaleioRouter.HandleFunc("/api/types/System/instances", handleSystemInstances) + scaleioRouter.HandleFunc("/rest/v1/nas-servers", handleNasInstances) + scaleioRouter.HandleFunc("/rest/v1/nas-servers/{id}", handleGetNasInstances) + scaleioRouter.HandleFunc("/rest/v1/file-systems", handleFileSystems) + scaleioRouter.HandleFunc("/rest/v1/nfs-exports", handleNFSExports) + scaleioRouter.HandleFunc("/rest/v1/file-systems/{id}", handleGetFileSystems) + scaleioRouter.HandleFunc("/rest/v1/nfs-exports/{id}", handleGetNFSExports) + scaleioRouter.HandleFunc("/rest/v1/file-interfaces/{id}", handleGetFileInterface) scaleioRouter.HandleFunc("/api/types/Volume/instances", handleVolumeInstances) scaleioRouter.HandleFunc("/api/types/StoragePool/instances", handleStoragePoolInstances) scaleioRouter.HandleFunc("{Volume}/relationship/Statistics", handleVolumeStatistics) @@ -250,7 +281,7 @@ func handleVersion(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusRequestTimeout) return } - w.Write([]byte("2.5")) + w.Write([]byte("4.0")) } // handleSystemInstances implements GET /api/types/System/instances @@ -279,6 +310,524 @@ func handleSystemInstances(w http.ResponseWriter, r *http.Request) { } } +// handleNasInstances implements GET /rest/v1/nas-servers +func handleNasInstances(w http.ResponseWriter, r *http.Request) { + + if stepHandlersErrors.NasServerNotFoundError { + writeError(w, "nas server not found", 
http.StatusNotFound, codes.NotFound) + return + } + + returnJSONFile("features", "get_nas_servers.json", w, nil) +} + +func handleGetNasInstances(w http.ResponseWriter, r *http.Request) { + + if stepHandlersErrors.NasServerNotFoundError { + writeError(w, "nas server not found", http.StatusNotFound, codes.NotFound) + return + } + + returnJSONFile("features", "get_nas_server_id.json", w, nil) +} + +func handleGetFileInterface(w http.ResponseWriter, r *http.Request) { + + if stepHandlersErrors.FileInterfaceNotFoundError { + writeError(w, "file interace not found", http.StatusNotFound, codes.NotFound) + return + } + + returnJSONFile("features", "get_file_interface.json", w, nil) +} + +func handleNFSExports(w http.ResponseWriter, r *http.Request) { + if nfsExportIDName == nil { + nfsExportIDName = make(map[string]string) + nfsExportNameID = make(map[string]string) + } + + switch r.Method { + + // Post is CreateVolume; here just return a volume id encoded from the name + case http.MethodPost: + if inducedError.Error() == "nfsExportError" { + writeError(w, "create NFS Export failed", http.StatusRequestTimeout, codes.Internal) + return + } + + req := types.NFSExportCreate{} + decoder := json.NewDecoder(r.Body) + err := decoder.Decode(&req) + if err != nil { + log.Printf("error decoding json: %s\n", err.Error()) + } + + // good response + resp := new(types.NFSExportCreateResponse) + resp.ID = hex.EncodeToString([]byte(req.Name)) + nfsExportIDName[resp.ID] = req.Name + nfsExportIDtoFsID[req.Name] = resp.ID + nfsExportIDtoFsID[resp.ID] = req.FileSystemID + nfsExportIDPath[resp.ID] = req.Path + + if array, ok := systemArrays[r.Host]; ok { + fmt.Printf("Host Endpoint %s\n", r.Host) + array.nfsExports[resp.ID] = make(map[string]string) + array.nfsExports[resp.ID]["name"] = req.Name + array.nfsExports[resp.ID]["id"] = resp.ID + array.nfsExports[resp.ID]["path"] = req.Path + array.nfsExports[resp.ID]["file_system_id"] = req.FileSystemID + + if len(req.ReadOnlyRootHosts) != 0 { + array.nfsExports[resp.ID]["read_root_hosts"] = req.ReadOnlyRootHosts[0] + } else if len(req.ReadWriteRootHosts) != 0 { + array.nfsExports[resp.ID]["write_root_hosts"] = req.ReadWriteRootHosts[0] + } else if len(req.ReadOnlyHosts) != 0 { + array.nfsExports[resp.ID]["read_hosts"] = req.ReadOnlyHosts[0] + } else if len(req.ReadWriteHosts) != 0 { + array.nfsExports[resp.ID]["write_hosts"] = req.ReadWriteHosts[0] + } else { + array.nfsExports[resp.ID]["read_hosts"] = "" + array.nfsExports[resp.ID]["write_hosts"] = "" + array.nfsExports[resp.ID]["read_root_hosts"] = "" + array.nfsExports[resp.ID]["write_root_hosts"] = "" + } + + } + + if debug { + log.Printf("request name: %s id: %s\n", req.Name, resp.ID) + } + encoder := json.NewEncoder(w) + err = encoder.Encode(resp) + if err != nil { + log.Printf("error encoding json: %s\n", err.Error()) + } + + log.Printf("end make nfsExports") + // Read all the Volumes + case http.MethodGet: + if stepHandlersErrors.NFSExportInstancesError { + writeError(w, "error getting the NFS Exports", http.StatusInternalServerError, codes.Internal) + return + } + instances := make([]*types.NFSExport, 0) + nfsExports := make(map[string]map[string]string) + + if array, ok := systemArrays[r.Host]; ok { + nfsExports = array.nfsExports + + for _, nfsExp := range nfsExports { + + replacementMap := make(map[string]string) + replacementMap["__ID__"] = nfsExp["id"] + replacementMap["__NAME__"] = nfsExp["name"] + replacementMap["__PATH__"] = nfsExp["path"] + replacementMap["__FS_ID__"] = nfsExp["file_system_id"] + 
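// Editor's note: these __PLACEHOLDER__ keys are substituted into
// features/nfsexport.json.template by returnJSONFile (modified near the end of
// this file), which runs a plain strings.Replace over the template text, e.g.:
//
//	jsonString = strings.Replace(jsonString, "__ID__", nfsExp["id"], -1)
//
// and, when a host value is empty, strips the whole JSON attribute (such as
// "read_only_hosts": ["__READ_HOSTS__"],) instead of emitting an empty element.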
replacementMap["__READ_HOSTS__"] = nfsExp["read_hosts"] + replacementMap["__WRITE_HOSTS__"] = nfsExp["write_hosts"] + replacementMap["__READ_ROOT_HOSTS__"] = nfsExp["read_root_hosts"] + replacementMap["__WRITE_ROOT_HOSTS__"] = nfsExp["write_root_hosts"] + data := returnJSONFile("features", "nfsexport.json.template", nil, replacementMap) + nfsExp := new(types.NFSExport) + err := json.Unmarshal(data, nfsExp) + if err != nil { + log.Printf("error unmarshalling json: %s\n", string(data)) + } + instances = append(instances, nfsExp) + } + } + + // Add none-created volumes (old) + for id, name := range nfsExportIDName { + if _, ok := nfsExports[id]; ok { + continue + } + + name = id + replacementMap := make(map[string]string) + replacementMap["__ID__"] = id + replacementMap["__NAME__"] = name + replacementMap["__PATH__"] = nfsExportIDPath[id] + replacementMap["__FS_ID__"] = nfsExportIDtoFsID[id] + if len(nfsExportIDReadOnlyRootHosts[id]) != 0 { + replacementMap["__READ_HOSTS__"] = nfsExportIDReadOnlyHosts[id][0] + } else if len(nfsExportIDReadWriteRootHosts[id]) != 0 { + replacementMap["__WRITE_HOSTS__"] = nfsExportIDReadWriteHosts[id][0] + } else if len(nfsExportIDReadWriteRootHosts[id]) != 0 { + replacementMap["__WRITE_ROOT_HOSTS__"] = nfsExportIDReadWriteRootHosts[id][0] + } else if len(nfsExportIDReadOnlyRootHosts[id]) != 0 { + replacementMap["__READ_ROOT_HOSTS__"] = nfsExportIDReadOnlyRootHosts[id][0] + } else { + replacementMap["__READ_HOSTS__"] = "" + replacementMap["__WRITE_HOSTS__"] = "" + replacementMap["__READ_ROOT_HOSTS__"] = "" + replacementMap["__WRITE_ROOT_HOSTS__"] = "" + } + data := returnJSONFile("features", "nfsexport.json.template", nil, replacementMap) + nfsExp := new(types.NFSExport) + err := json.Unmarshal(data, nfsExp) + if err != nil { + log.Printf("error unmarshalling json: %s\n", string(data)) + } + instances = append(instances, nfsExp) + } + + encoder := json.NewEncoder(w) + err := encoder.Encode(instances) + if err != nil { + log.Printf("error encoding json: %s\n", err) + } + } + +} + +func handleGetNFSExports(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + + vars := mux.Vars(r) + id := vars["id"] + + fmt.Println("id:", id) + fmt.Println("fsidname", nfsExportIDName[id]) + + if inducedError.Error() == "nfsExportNotFoundError" { + writeError(w, "Could not find NFS Export", http.StatusNotFound, codes.NotFound) + return + } + + // Insert to map if it doesn't exist. 
+ if nfsExportIDName[id] == "" { + log.Printf("Did not find id %s \n", id) + writeError(w, "could not find nfsExport ", http.StatusNotFound, codes.NotFound) + return + } + + replacementMap := make(map[string]string) + nfsExp := make(map[string]string) + if array, ok := systemArrays[r.Host]; ok { + nfsExp = array.nfsExports[id] + } + + log.Printf("Get id %s\n", id) + if nfsExp != nil { + replacementMap["__ID__"] = nfsExp["id"] + replacementMap["__NAME__"] = nfsExp["name"] + replacementMap["__PATH__"] = nfsExp["path"] + replacementMap["__FS_ID__"] = nfsExp["file_system_id"] + replacementMap["__READ_HOSTS__"] = nfsExp["read_hosts"] + replacementMap["__WRITE_HOSTS__"] = nfsExp["write_hosts"] + replacementMap["__READ_ROOT_HOSTS__"] = nfsExp["read_root_hosts"] + replacementMap["__WRITE_ROOT_HOSTS__"] = nfsExp["write_root_hosts"] + } else { + replacementMap["__ID__"] = id + replacementMap["__NAME__"] = nfsExportIDName[id] + replacementMap["__PATH__"] = nfsExportIDPath[id] + replacementMap["__FS_ID__"] = nfsExportIDtoFsID[id] + if len(nfsExportIDReadOnlyRootHosts["id"]) != 0 { + replacementMap["__READ_HOSTS__"] = nfsExportIDReadOnlyHosts["id"][0] + } else if len(nfsExportIDReadWriteRootHosts["id"]) != 0 { + replacementMap["__WRITE_HOSTS__"] = nfsExportIDReadWriteHosts["id"][0] + } else if len(nfsExportIDReadWriteRootHosts["id"]) != 0 { + replacementMap["__WRITE_ROOT_HOSTS__"] = nfsExportIDReadWriteRootHosts["id"][0] + } else if len(nfsExportIDReadOnlyRootHosts["id"]) != 0 { + replacementMap["__READ_ROOT_HOSTS__"] = nfsExportIDReadOnlyRootHosts["id"][0] + } else { + replacementMap["__READ_HOSTS__"] = "" + replacementMap["__WRITE_HOSTS__"] = "" + replacementMap["__READ_ROOT_HOSTS__"] = "" + replacementMap["__WRITE_ROOT_HOSTS__"] = "" + } + } + + if inducedError.Error() == "readHostsIncompatible" { + replacementMap["__READ_HOSTS__"] = "127.1.1.11/255.255.255.255" + } + + if inducedError.Error() == "writeHostsIncompatible" { + replacementMap["__WRITE_HOSTS__"] = "127.1.1.11/255.255.255.255" + } + + data := returnJSONFile("features", "nfsexport.json.template", nil, replacementMap) + nfsExp1 := new(types.NFSExport) + err := json.Unmarshal(data, nfsExp1) + if err != nil { + log.Printf("error unmarshalling json: %s\n", string(data)) + } + + encoder := json.NewEncoder(w) + err = encoder.Encode(nfsExp1) + if err != nil { + log.Printf("error encoding json: %s\n", err) + } + case http.MethodDelete: + vars := mux.Vars(r) + id := vars["id"] + + // Insert to map if it doesn't exist. 
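// Editor's note: the DELETE and PATCH branches below both look the export up
// first and return 404 for an unknown id. The PATCH body decodes into
// goscaleio's types.NFSExportModify, of which this mock honors only the Add*
// host fields, roughly (host value illustrative):
//
//	req := types.NFSExportModify{AddReadWriteRootHosts: []string{"10.0.0.5"}}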
+ if nfsExportIDName[id] == "" { + log.Printf("Did not find id %s \n", id) + writeError(w, "could not find nfsExport ", http.StatusNotFound, codes.NotFound) + return + } + + if inducedError.Error() == "deleteNFSExportError" { + writeError(w, "delete NFS Export failed", http.StatusNotFound, codes.NotFound) + return + } + + nfsExp := make(map[string]string) + + if array, ok := systemArrays[r.Host]; ok { + nfsExp = array.nfsExports[id] + delete(array.nfsExports, id) + } + nfsExportIDName[id] = "" + nfsExportNameID[nfsExp["name"]] = "" + nfsExportIDPath[id] = "" + nfsExportIDtoFsID[id] = "" + case http.MethodPatch: + vars := mux.Vars(r) + id := vars["id"] + fmt.Println("id:", id) + fmt.Println("fsidname", nfsExportIDName[id]) + + if nfsExportIDName[id] == "" { + log.Printf("Did not find id %s \n", id) + writeError(w, "could not find nfsExport ", http.StatusNotFound, codes.NotFound) + return + } + + if inducedError.Error() == "nfsExportModifyError" { + writeError(w, "Allocating host access failed", http.StatusGatewayTimeout, codes.Internal) + return + } + + req := types.NFSExportModify{} + decoder := json.NewDecoder(r.Body) + err := decoder.Decode(&req) + + if err != nil { + log.Printf("error decoding json: %s\n", err.Error()) + } + fmt.Printf("patchReq:%#v\n", req) + if len(req.AddReadOnlyRootHosts) != 0 { + nfsExportIDReadOnlyRootHosts[id] = req.AddReadOnlyRootHosts + } else if len(req.AddReadWriteRootHosts) != 0 { + nfsExportIDReadWriteRootHosts[id] = req.AddReadWriteRootHosts + } + if array, ok := systemArrays[r.Host]; ok { + if len(req.AddReadOnlyRootHosts) != 0 { + array.nfsExports[id]["read_root_hosts"] = req.AddReadOnlyRootHosts[0] + } else if len(req.AddReadWriteRootHosts) != 0 { + array.nfsExports[id]["write_root_hosts"] = req.AddReadWriteRootHosts[0] + } else if len(req.AddReadOnlyHosts) != 0 { + array.nfsExports[id]["read_hosts"] = req.AddReadOnlyHosts[0] + } else if len(req.AddReadWriteHosts) != 0 { + array.nfsExports[id]["write_hosts"] = req.AddReadWriteHosts[0] + } else { + array.nfsExports[id]["read_hosts"] = "" + array.nfsExports[id]["write_hosts"] = "" + array.nfsExports[id]["read_root_hosts"] = "" + array.nfsExports[id]["write_root_hosts"] = "" + } + + } + + w.WriteHeader(http.StatusNoContent) + + } + +} + +func handleFileSystems(w http.ResponseWriter, r *http.Request) { + + if fileSystemIDName == nil { + fileSystemIDName = make(map[string]string) + fileSystemNameToID = make(map[string]string) + fileSystemIDToSizeTotal = make(map[string]string) + } + + if stepHandlersErrors.FileSystemInstancesError { + writeError(w, "induced error", http.StatusRequestTimeout, codes.Internal) + return + } + + if stepHandlersErrors.BadCapacityError { + writeError(w, "bad capacity error", http.StatusBadRequest, codes.InvalidArgument) + return + } + + switch r.Method { + + // Post is CreateVolume; here just return a volume id encoded from the name + case http.MethodPost: + if inducedError.Error() == "CreateVolumeError" { + writeError(w, "create volume induced error", http.StatusRequestTimeout, codes.Internal) + return + } + + req := types.FsCreate{} + decoder := json.NewDecoder(r.Body) + err := decoder.Decode(&req) + if err != nil { + log.Printf("error decoding json: %s\n", err.Error()) + } + + // good response + resp := new(types.FileSystemResp) + resp.ID = hex.EncodeToString([]byte(req.Name)) + fileSystemIDName[resp.ID] = req.Name + fileSystemNameToID[req.Name] = resp.ID + fileSystemIDToSizeTotal[resp.ID] = strconv.Itoa(req.SizeTotal) + + if array, ok := systemArrays[r.Host]; ok { + 
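// Editor's note: systemArrays is keyed by the request's Host header, so each
// mocked PowerFlex endpoint keeps its own volumes, fileSystems and nfsExports
// maps (see the systemArray struct near the end of this file). A hypothetical
// two-array setup, with illustrative addresses and the second ID invented:
//
//	systemArrays["127.0.0.1:6060"] = &systemArray{ID: "14dbbf5617523654"}
//	systemArrays["127.0.0.1:6061"] = &systemArray{ID: "15dbbf5617523655"}
//	for _, a := range systemArrays { a.Init() } // Init allocates the per-array maps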
fmt.Printf("Host Endpoint %s\n", r.Host) + //array.fileSystems = make(map[string]map[string]string) + array.fileSystems[resp.ID] = make(map[string]string) + array.fileSystems[resp.ID]["name"] = req.Name + array.fileSystems[resp.ID]["id"] = resp.ID + array.fileSystems[resp.ID]["size_total"] = strconv.Itoa(req.SizeTotal) + } + + if debug { + log.Printf("request name: %s id: %s\n", req.Name, resp.ID) + } + encoder := json.NewEncoder(w) + err = encoder.Encode(resp) + if err != nil { + log.Printf("error encoding json: %s\n", err.Error()) + } + + log.Printf("end make fileSystems") + // Read all the Volumes + case http.MethodGet: + instances := make([]*types.FileSystem, 0) + fileSystems := make(map[string]map[string]string) + + if array, ok := systemArrays[r.Host]; ok { + fileSystems = array.fileSystems + + for _, fs := range fileSystems { + replacementMap := make(map[string]string) + replacementMap["__ID__"] = fs["id"] + replacementMap["__NAME__"] = fs["name"] + replacementMap["__SIZE_IN_Total__"] = fs["size_total"] + data := returnJSONFile("features", "filesystem.json.template", nil, replacementMap) + fs := new(types.FileSystem) + err := json.Unmarshal(data, fs) + if err != nil { + log.Printf("error unmarshalling json: %s\n", string(data)) + } + instances = append(instances, fs) + } + } + + // Add none-created volumes (old) + for id, name := range fileSystemIDName { + if _, ok := fileSystems[id]; ok { + continue + } + + name = id + replacementMap := make(map[string]string) + replacementMap["__ID__"] = id + replacementMap["__NAME__"] = name + replacementMap["__SIZE_IN_Total__"] = fileSystemIDToSizeTotal[id] + data := returnJSONFile("features", "filesystem.json.template", nil, replacementMap) + fs := new(types.FileSystem) + err := json.Unmarshal(data, fs) + if err != nil { + log.Printf("error unmarshalling json: %s\n", string(data)) + } + instances = append(instances, fs) + } + + encoder := json.NewEncoder(w) + err := encoder.Encode(instances) + if err != nil { + log.Printf("error encoding json: %s\n", err) + } + } +} + +func handleGetFileSystems(w http.ResponseWriter, r *http.Request) { + switch r.Method { + + // Post is CreateVolume; here just return a volume id encoded from the name + case http.MethodGet: + vars := mux.Vars(r) + id := vars["id"] + + if stepHandlersErrors.GetFileSystemsByIDError { + writeError(w, "induced error", http.StatusRequestTimeout, codes.Internal) + return + } + + // Insert to map if it doesn't exist. 
+ if fileSystemIDName[id] == "" { + log.Printf("Did not find id %s \n", id) + writeError(w, "could not find filesystem ", http.StatusNotFound, codes.NotFound) + return + } + + replacementMap := make(map[string]string) + fs := make(map[string]string) + if array, ok := systemArrays[r.Host]; ok { + fs = array.fileSystems[id] + } + + log.Printf("Get id %s\n", id) + if fs != nil { + replacementMap["__ID__"] = fs["id"] + replacementMap["__NAME__"] = fs["name"] + replacementMap["__SIZE_IN_Total__"] = fs["size_total"] + } else { + replacementMap["__ID__"] = id + replacementMap["__NAME__"] = fileSystemIDName[id] + replacementMap["__SIZE_IN_Total__"] = fileSystemIDToSizeTotal[id] + } + + data := returnJSONFile("features", "filesystem.json.template", nil, replacementMap) + fs1 := new(types.FileSystem) + err := json.Unmarshal(data, fs1) + if err != nil { + log.Printf("error unmarshalling json: %s\n", string(data)) + } + + encoder := json.NewEncoder(w) + err = encoder.Encode(fs1) + if err != nil { + log.Printf("error encoding json: %s\n", err) + } + case http.MethodDelete: + vars := mux.Vars(r) + id := vars["id"] + + // Insert to map if it doesn't exist. + if fileSystemIDName[id] == "" { + log.Printf("Did not find id %s \n", id) + writeError(w, "could not find filesystem ", http.StatusNotFound, codes.NotFound) + return + } + + fs := make(map[string]string) + + if array, ok := systemArrays[r.Host]; ok { + fs = array.fileSystems[id] + delete(array.fileSystems, id) + } + fileSystemIDName[id] = "" + fileSystemNameToID[fs["name"]] = "" + fileSystemIDToSizeTotal[id] = "" + + } + + // returnJSONFile("features", "get_file_system_response.json", w, nil) +} + // handleStoragePoolInstances implements GET /api/types/StoragePool/instances func handleStoragePoolInstances(w http.ResponseWriter, r *http.Request) { if stepHandlersErrors.GetStoragePoolsError { @@ -308,8 +857,24 @@ func returnJSONFile(directory, filename string, w http.ResponseWriter, replaceme if replacements != nil { jsonString := string(jsonBytes) for key, value := range replacements { + if value == "" { + if key == "__READ_HOSTS__" { + jsonString = strings.ReplaceAll(jsonString, `"read_only_hosts": ["__READ_HOSTS__"],`, "") + continue + } else if key == "__READ_ROOT_HOSTS__" { + jsonString = strings.ReplaceAll(jsonString, `"read_only_root_hosts": ["__READ_ROOT_HOSTS__"],`, "") + continue + } else if key == "__WRITE_HOSTS__" { + jsonString = strings.ReplaceAll(jsonString, `"read_write_hosts": ["__WRITE_HOSTS__"],`, "") + continue + } else if key == "__WRITE_ROOT_HOSTS__" { + jsonString = strings.ReplaceAll(jsonString, `"read_write_root_hosts": ["__WRITE_ROOT_HOSTS__"],`, "") + continue + } + } jsonString = strings.Replace(jsonString, key, value, -1) } + if debug { log.Printf("Edited payload:\n%s\n", jsonString) } @@ -332,9 +897,32 @@ func returnJSONFile(directory, filename string, w http.ResponseWriter, replaceme // Map of volume ID to name var volumeIDToName map[string]string +// Map of FileSystem ID to name +var fileSystemIDName map[string]string + +// Map of NFSExport ID to name +var nfsExportIDName map[string]string + // Map of volume name to ID var volumeNameToID map[string]string +// Map of FileSystem Name to ID +var fileSystemNameToID map[string]string + +// Map of NFSExport Name to ID +var nfsExportNameID map[string]string + +// Map of NFSExport ID to FilesystemID +var nfsExportIDtoFsID map[string]string + +// Map of NFSExport ID to path +var nfsExportIDPath map[string]string + +var nfsExportIDReadWriteRootHosts map[string][]string +var 
nfsExportIDReadOnlyRootHosts map[string][]string +var nfsExportIDReadOnlyHosts map[string][]string +var nfsExportIDReadWriteHosts map[string][]string + // Map of volume ID to ancestor ID var volumeIDToAncestorID map[string]string @@ -350,6 +938,9 @@ var volumeIDToReplicationState map[string]string // Map of volume ID to size in KB var volumeIDToSizeInKB map[string]string +// Map of FileSystem ID to size Total +var fileSystemIDToSizeTotal map[string]string + // Replication group state to replace for. var replicationGroupState string @@ -358,12 +949,16 @@ type systemArray struct { ID string replicationSystem *systemArray volumes map[string]map[string]string + fileSystems map[string]map[string]string + nfsExports map[string]map[string]string replicationConsistencyGroups map[string]map[string]string replicationPairs map[string]map[string]string } func (s *systemArray) Init() { s.volumes = make(map[string]map[string]string) + s.fileSystems = make(map[string]map[string]string) + s.nfsExports = make(map[string]map[string]string) s.replicationConsistencyGroups = make(map[string]map[string]string) s.replicationPairs = make(map[string]map[string]string) } diff --git a/test/helm/1vol-nfs/Chart.yaml b/test/helm/1vol-nfs/Chart.yaml new file mode 100644 index 00000000..f6d27f5d --- /dev/null +++ b/test/helm/1vol-nfs/Chart.yaml @@ -0,0 +1,10 @@ +name: 1vol-nfs +version: 1.0.0 +apiVersion: v1 +description: | + Tests VxFlexOS CSI deployments. +icon: https://avatars1.githubusercontent.com/u/20958494?s=200&v=4 +keywords: +- vxflexos-csi +- storage +engine: gotpl diff --git a/test/helm/1vol-nfs/templates/pvc0.yaml b/test/helm/1vol-nfs/templates/pvc0.yaml new file mode 100644 index 00000000..4864ae01 --- /dev/null +++ b/test/helm/1vol-nfs/templates/pvc0.yaml @@ -0,0 +1,13 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol0 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: vxflexos-nfs diff --git a/test/helm/1vol-nfs/templates/test.yaml b/test/helm/1vol-nfs/templates/test.yaml new file mode 100644 index 00000000..146eb4ed --- /dev/null +++ b/test/helm/1vol-nfs/templates/test.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vxflextest + namespace: helmtest-vxflexos +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: vxflextest + namespace: helmtest-vxflexos +spec: + selector: + matchLabels: + app: vxflextest + serviceName: 2vols + template: + metadata: + labels: + app: vxflextest + spec: + serviceAccount: vxflextest + containers: + - name: test + image: docker.io/centos:latest + command: [ "/bin/sleep", "3600" ] + volumeMounts: + - mountPath: "/data0" + name: pvol0 + volumes: + - name: pvol0 + persistentVolumeClaim: + claimName: pvol0 diff --git a/test/helm/3vols-nfs/Chart.yaml b/test/helm/3vols-nfs/Chart.yaml new file mode 100644 index 00000000..f83ccc6b --- /dev/null +++ b/test/helm/3vols-nfs/Chart.yaml @@ -0,0 +1,11 @@ +name: 3vols-nfs +version: 1.0.0 +appVersion: 1.0.0 +apiVersion: v1 +description: | + Tests VxFlex OS CSI deployments. 
+icon: https://avatars1.githubusercontent.com/u/20958494?s=200&v=4 +keywords: +- vxflexos-csi +- storage +engine: gotpl diff --git a/test/helm/3vols-nfs/templates/pvc.yaml b/test/helm/3vols-nfs/templates/pvc.yaml new file mode 100644 index 00000000..3a47028c --- /dev/null +++ b/test/helm/3vols-nfs/templates/pvc.yaml @@ -0,0 +1,42 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol0 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: vxflexos-nfs +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol1 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 12Gi + storageClassName: vxflexos-nfs +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol2 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 16Gi + storageClassName: vxflexos-nfs + diff --git a/test/helm/3vols-nfs/templates/test.yaml b/test/helm/3vols-nfs/templates/test.yaml new file mode 100644 index 00000000..58c28f3a --- /dev/null +++ b/test/helm/3vols-nfs/templates/test.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vxflextest + namespace: helmtest-vxflexos +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: vxflextest + namespace: helmtest-vxflexos +spec: + selector: + matchLabels: + app: vxflextest + serviceName: 3vols-nfs + template: + metadata: + labels: + app: vxflextest + spec: + serviceAccount: vxflextest + containers: + - name: test + image: docker.io/centos:latest + command: [ "/bin/sleep", "3600" ] + volumeMounts: + - mountPath: "/data0" + name: pvol0 + - mountPath: "/data1" + name: pvol1 + - mountPath: "/data2" + name: pvol2 + volumes: + - name: pvol0 + persistentVolumeClaim: + claimName: pvol0 + - name: pvol1 + persistentVolumeClaim: + claimName: pvol1 + - name: pvol2 + persistentVolumeClaim: + claimName: pvol2 \ No newline at end of file diff --git a/test/helm/5vols-nfs/Chart.yaml b/test/helm/5vols-nfs/Chart.yaml new file mode 100644 index 00000000..3581a247 --- /dev/null +++ b/test/helm/5vols-nfs/Chart.yaml @@ -0,0 +1,12 @@ +name: 5vols-nfs +version: 1.0.0 +apiVersion: v1 +appVersion: 1.0.0 + +description: | + Tests VxFlexOS CSI deployments. 
+icon: https://avatars1.githubusercontent.com/u/20958494?s=200&v=4 +keywords: +- vxflexos-csi +- storage +engine: gotpl diff --git a/test/helm/5vols-nfs/templates/pvc.yaml b/test/helm/5vols-nfs/templates/pvc.yaml new file mode 100644 index 00000000..d8a4212e --- /dev/null +++ b/test/helm/5vols-nfs/templates/pvc.yaml @@ -0,0 +1,70 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol0 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: vxflexos-nfs +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol1 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: vxflexos-nfs +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol2 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: vxflexos-nfs +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol3 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: vxflexos-nfs +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvol4 + namespace: helmtest-vxflexos +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 8Gi + storageClassName: vxflexos-nfs +--- diff --git a/test/helm/5vols-nfs/templates/test.yaml b/test/helm/5vols-nfs/templates/test.yaml new file mode 100644 index 00000000..54e012b6 --- /dev/null +++ b/test/helm/5vols-nfs/templates/test.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vxflextest + namespace: helmtest-vxflexos +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: vxflextest + namespace: helmtest-vxflexos +spec: + selector: + matchLabels: + app: vxflextest + serviceName: 10vols + template: + metadata: + labels: + app: vxflextest + spec: + serviceAccount: vxflextest + containers: + - name: test + image: docker.io/centos:latest + command: [ "/bin/sleep", "3600" ] + volumeMounts: + - mountPath: "/data0" + name: pvol0 + - mountPath: "/data1" + name: pvol1 + - mountPath: "/data2" + name: pvol2 + - mountPath: "/data3" + name: pvol3 + - mountPath: "/data4" + name: pvol4 + volumes: + - name: pvol0 + persistentVolumeClaim: + claimName: pvol0 + - name: pvol1 + persistentVolumeClaim: + claimName: pvol1 + - name: pvol2 + persistentVolumeClaim: + claimName: pvol2 + - name: pvol3 + persistentVolumeClaim: + claimName: pvol3 + - name: pvol4 + persistentVolumeClaim: + claimName: pvol4 diff --git a/test/integration/features/integration.feature b/test/integration/features/integration.feature index 4eba8c01..7b949330 100644 --- a/test/integration/features/integration.feature +++ b/test/integration/features/integration.feature @@ -505,6 +505,51 @@ Feature: VxFlex OS CSI interface And when I call DeleteVolume Then there are no errors + Scenario: Create and delete basic nfs volume + Given a VxFlexOS service + And a basic nfs volume request "nfsvolume1" "8" + When I call CreateVolume + When I call ListVolume + Then a valid ListVolumeResponse is returned + And when I call DeleteVolume + Then there are no errors + + Scenario: Idempotent create and delete basic nfs volume + Given a VxFlexOS service + And a basic nfs volume request "nfsvolume2" "8" + When I call CreateVolume + 
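+ # Editor's note: CSI requires CreateVolume and DeleteVolume to be idempotent,
+ # so the repeated calls in this scenario must succeed (the second CreateVolume
+ # returning the same volume, the second DeleteVolume a no-op) rather than error.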
And I call CreateVolume + And when I call DeleteVolume + And when I call DeleteVolume + Then there are no errors + + Scenario: Create and delete 100000G NFS volume + Given a VxFlexOS service + And max retries 1 + And a basic nfs volume request "nfsvolume2" "100000" + When I call CreateVolume + And when I call DeleteVolume + Then the error message should contain "Requested volume size exceeds the volume allocation limit" + + Scenario Outline: Create an NFS volume with wrong NasName + Given a VxFlexOS service + And a basic nfs volume request "nfsvolume3" "8" + And I set wrongNasName + When I call CreateVolume + Then the error message should contain <errormsg> + Examples: + | errormsg | + | "error_msg" | + + Scenario Outline: Create an NFS volume with wrong FileSystemName + Given a VxFlexOS service + And a basic nfs volume request "nfsvolume3" "8" + And I set wrongFileSystemName + When I call CreateVolume + Then the error message should contain <errormsg> + Examples: + | errormsg | + | "error_msg" | Scenario Outline: Publish and Unpublish Ephemeral Volume Given a VxFlexOS service diff --git a/test/integration/step_defs_test.go b/test/integration/step_defs_test.go index 327a3ef4..4663f199 100644 --- a/test/integration/step_defs_test.go +++ b/test/integration/step_defs_test.go @@ -51,13 +51,15 @@ const ( // ArrayConnectionData contains data required to connect to array type ArrayConnectionData struct { - SystemID string `json:"systemID"` - Username string `json:"username"` - Password string `json:"password"` - Endpoint string `json:"endpoint"` - Insecure bool `json:"insecure,omitempty"` - IsDefault bool `json:"isDefault,omitempty"` - AllSystemNames string `json:"allSystemNames"` + SystemID string `json:"systemID"` + Username string `json:"username"` + Password string `json:"password"` + Endpoint string `json:"endpoint"` + Insecure bool `json:"insecure,omitempty"` + IsDefault bool `json:"isDefault,omitempty"` + AllSystemNames string `json:"allSystemNames"` + NasName *string `json:"nasname"` + NfsAcls string `json:"nfsAcls"` } type feature struct { @@ -1733,6 +1735,19 @@ func (f *feature) restCallToSetName(auth string, url string, name string) (strin return "", nil } +func (f *feature) iSetBadNasName() error { + for _, a := range f.arrays { + if a.NasName != nil { + badNasName := "badNas" + a.NasName = &badNasName + fmt.Printf("set bad NasName done\n") + return nil + } + } + return fmt.Errorf("error setting bad NasName: no array in the config has a nasName") +} + func FeatureContext(s *godog.ScenarioContext) { f := &feature{} s.Step(`^a VxFlexOS service$`, f.aVxFlexOSService) @@ -1793,4 +1808,5 @@ func FeatureContext(s *godog.ScenarioContext) { s.Step(`^the volumecondition is "([^"]*)"$`, f.theVolumeconditionIs) s.Step(`^I call NodeGetVolumeStats$`, f.iCallNodeGetVolumeStats) s.Step(`^the VolumeCondition is "([^"]*)"$`, f.theVolumeConditionIs) + s.Step(`^I set wrongNasName$`, f.iSetBadNasName) }
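For reference, a hedged sketch of an integration-test array-config entry exercising the two new fields; the keys come from the ArrayConnectionData JSON tags above, and all values are illustrative:

[
  {
    "systemID": "14dbbf5617523654",
    "username": "admin",
    "password": "Password123",
    "endpoint": "https://127.0.0.1",
    "insecure": true,
    "isDefault": true,
    "nasname": "nas-server",
    "nfsAcls": "0777"
  }
]

Note that NasName is a *string, so iSetBadNasName only takes effect when "nasname" is actually present in the config; omitting it leaves the pointer nil and the step reports an error.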