diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
index d52c656f6dbc..2011b1f08fbf 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
@@ -24,6 +24,7 @@
import org.apache.cloudstack.storage.command.CommandResult;
import com.cloud.host.Host;
+import com.cloud.offering.DiskOffering;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.Storage.StoragePoolType;
@@ -199,4 +200,9 @@ default boolean zoneWideVolumesAvailableWithoutClusterMotion() {
default long getVolumeSizeRequiredOnPool(long volumeSize, Long templateSize, boolean isEncryptionRequired) {
return volumeSize;
}
+ default boolean informStorageForDiskOfferingChange() {
+ return false;
+ }
+
+ default void updateStorageWithTheNewDiskOffering(Volume volume, DiskOffering newDiskOffering) {}
}
diff --git a/plugins/storage/volume/storpool/README.md b/plugins/storage/volume/storpool/README.md
index e5b84786c7cd..5cad0069621e 100644
--- a/plugins/storage/volume/storpool/README.md
+++ b/plugins/storage/volume/storpool/README.md
@@ -345,6 +345,46 @@ corresponding system disk offering.
CloudStack has no way to specify max BW. Do they want to be able to specify max BW only is sufficient.
+================================================================================
+
+StorPool provides the `storpool_qos` service ([QoS user guide](https://kb.storpool.com/storpool_misc/qos.html#storpool-qos-user-guide)) that tracks and configures the storage tier for all volumes based on a specifically provided `qc` tag specifying the storage tier for each volume.
+
+To manage the QoS limits with a `qc` tag, you have to add a `qc` tag resource detail to each disk offering to which a tier should be applied, with a key `SP_QOSCLASS` and the value from the configuration file for the `storpool_qos` service:
+
+ add resourcedetail resourceid={diskofferingid} details[0].key=SP_QOSCLASS details[0].value={the name of the tier from the config} resourcetype=DiskOffering
+
+To change the tier via CloudStack, you can use the CloudStack API call `changeOfferingForVolume`. The size is required, but the user could use the current volume size. Example:
+
+ change offeringforvolume id={The UUID of the Volume} diskofferingid={The UUID of the disk offering} size={The current or a new size for the volume}
+
+Users who were using the offerings to change the StorPool template via the `SP_TEMPLATE` detail will continue to have this functionality, but should use the `changeOfferingForVolume` API call instead of:
+ - `resizeVolume` API call for DATA disk
+ - `scaleVirtualMachine` API call for ROOT disk
+
+
+If the disk offering has both `SP_TEMPLATE` and `SP_QOSCLASS` defined, the `SP_QOSCLASS` detail will be prioritised, setting the volume's QoS using the respective `qc` tag value. In case the QoS for a volume is changed manually, the `storpool_qos` service will automatically reset the QoS limits following the `qc` tag value once per minute.
+
+
Usage
+
+Creating Disk Offering for each tier.
+
+Go to Service Offerings > Disk Offering > Add disk offering.
+
+Add disk offering detail with API call in CloudStack CLI.
+
+    add resourcedetail resourcetype=diskoffering resourceid=$UUID details[0].key=SP_QOSCLASS details[0].value="$Tier Name"
+
+
+Creating VM with QoS
+
+Deploy virtual machine: Go to Compute > Instances > Add Instances.
+ - For the ROOT volume, choose the option `Override disk offering`. This will set the required `qc` tag from the disk offering (DO) detail.
+
+Creating DATA disk with QoS
+ - Create volume via GUI/CLI and choose a disk offering which has the required `SP_QOSCLASS` detail
+
+To update the tier of a ROOT/DATA volume, go to Storage > Volumes, select the volume, and click on "Change disk offering for the volume" in the upper right corner.
+
## Supported operations for Volume encryption
Supported Virtual machine operations - live migration of VM to another host, virtual machine snapshots (group snapshot without memory), revert VM snapshot, delete VM snapshot
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolVolumeDef.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolVolumeDef.java
new file mode 100644
index 000000000000..456f5b90639c
--- /dev/null
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/api/StorPoolVolumeDef.java
@@ -0,0 +1,109 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.datastore.api;
+
+import java.io.Serializable;
+import java.util.Map;
+
+public class StorPoolVolumeDef implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+ private transient String name;
+ private Long size;
+ private Map tags;
+ private String parent;
+ private Long iops;
+ private String template;
+ private String baseOn;
+ private String rename;
+ private Boolean shrinkOk;
+
+ public StorPoolVolumeDef() {
+ }
+
+ public StorPoolVolumeDef(String name, Long size, Map tags, String parent, Long iops, String template,
+ String baseOn, String rename, Boolean shrinkOk) {
+ super();
+ this.name = name;
+ this.size = size;
+ this.tags = tags;
+ this.parent = parent;
+ this.iops = iops;
+ this.template = template;
+ this.baseOn = baseOn;
+ this.rename = rename;
+ this.shrinkOk = shrinkOk;
+ }
+
+ public String getName() {
+ return name;
+ }
+ public void setName(String name) {
+ this.name = name;
+ }
+ public Long getSize() {
+ return size;
+ }
+ public void setSize(Long size) {
+ this.size = size;
+ }
+ public Map getTags() {
+ return tags;
+ }
+ public void setTags(Map tags) {
+ this.tags = tags;
+ }
+ public String getParent() {
+ return parent;
+ }
+ public void setParent(String parent) {
+ this.parent = parent;
+ }
+ public Long getIops() {
+ return iops;
+ }
+ public void setIops(Long iops) {
+ this.iops = iops;
+ }
+ public String getTemplate() {
+ return template;
+ }
+ public void setTemplate(String template) {
+ this.template = template;
+ }
+ public String getBaseOn() {
+ return baseOn;
+ }
+ public void setBaseOn(String baseOn) {
+ this.baseOn = baseOn;
+ }
+ public String getRename() {
+ return rename;
+ }
+ public void setRename(String rename) {
+ this.rename = rename;
+ }
+
+ public Boolean getShrinkOk() {
+ return shrinkOk;
+ }
+
+ public void setShrinkOk(Boolean shrinkOk) {
+ this.shrinkOk = shrinkOk;
+ }
+}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java
index b9c0b73f78d5..631186636ca2 100644
--- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/driver/StorPoolPrimaryDataStoreDriver.java
@@ -39,12 +39,15 @@
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO;
+import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCmdAnswer;
import org.apache.cloudstack.storage.command.CreateObjectAnswer;
import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
import org.apache.cloudstack.storage.datastore.api.StorPoolSnapshotDef;
+import org.apache.cloudstack.storage.datastore.api.StorPoolVolumeDef;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
@@ -81,12 +84,18 @@
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.StorageFilerTO;
import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.StorageUnavailableException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.kvm.storage.StorPoolStorageAdaptor;
+import com.cloud.offering.DiskOffering;
import com.cloud.server.ResourceTag;
import com.cloud.server.ResourceTag.ResourceObjectType;
+import com.cloud.service.ServiceOfferingDetailsVO;
+import com.cloud.service.ServiceOfferingVO;
+import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.service.dao.ServiceOfferingDetailsDao;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ResizeVolumePayload;
import com.cloud.storage.Snapshot;
@@ -156,6 +165,12 @@ public class StorPoolPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
private StoragePoolHostDao storagePoolHostDao;
@Inject
DataStoreManager dataStoreManager;
+ @Inject
+ private DiskOfferingDetailsDao diskOfferingDetailsDao;
+ @Inject
+ private ServiceOfferingDetailsDao serviceOfferingDetailDao;
+ @Inject
+ private ServiceOfferingDao serviceOfferingDao;
private SnapshotDataStoreVO getSnapshotImageStoreRef(long snapshotId, long zoneId) {
List snaps = snapshotDataStoreDao.listReadyBySnapshot(snapshotId, DataStoreRole.Image);
@@ -259,15 +274,25 @@ public ChapInfo getChapInfo(DataObject dataObject) {
public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) {
String path = null;
Answer answer;
+ String tier = null;
+ String template = null;
if (data.getType() == DataObjectType.VOLUME) {
try {
VolumeInfo vinfo = (VolumeInfo)data;
String name = vinfo.getUuid();
Long size = vinfo.getPassphraseId() == null ? vinfo.getSize() : vinfo.getSize() + 2097152;
+ Long vmId = vinfo.getInstanceId();
+
SpConnectionDesc conn = StorPoolUtil.getSpConnection(dataStore.getUuid(), dataStore.getId(), storagePoolDetailsDao, primaryStoreDao);
- StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s", vinfo.getName(), vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(), vinfo.getpayload(), conn.getTemplateName());
- SpApiResponse resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vinfo.getInstanceId()), null, "volume", vinfo.getMaxIops(), conn);
+ if (vinfo.getDiskOfferingId() != null) {
+ tier = getTierFromOfferingDetail(vinfo.getDiskOfferingId());
+ if (tier == null) {
+ template = getTemplateFromOfferingDetail(vinfo.getDiskOfferingId());
+ }
+ }
+
+ SpApiResponse resp = createStorPoolVolume(template, tier, vinfo, name, size, vmId, conn);
if (resp.getError() == null) {
String volumeName = StorPoolUtil.getNameFromResponse(resp, false);
path = StorPoolUtil.devPath(volumeName);
@@ -298,6 +323,26 @@ public void createAsync(DataStore dataStore, DataObject data, AsyncCompletionCal
}
}
+ private SpApiResponse createStorPoolVolume(String template, String tier, VolumeInfo vinfo, String name, Long size,
+ Long vmId, SpConnectionDesc conn) {
+ SpApiResponse resp = new SpApiResponse();
+ Map tags = StorPoolHelper.addStorPoolTags(name, getVMInstanceUUID(vmId), "volume", getVcPolicyTag(vmId), tier);
+ if (tier != null || template != null) {
+ StorPoolUtil.spLog(
+ "Creating volume [%s] with template [%s] or tier tags [%s] described in disk/service offerings details",
+ vinfo.getUuid(), template, tier);
+ resp = StorPoolUtil.volumeCreate(size, null, template, tags, conn);
+ } else {
+ StorPoolUtil.spLog(
+ "StorpoolPrimaryDataStoreDriver.createAsync volume: name=%s, uuid=%s, isAttached=%s vm=%s, payload=%s, template: %s",
+ vinfo.getName(), vinfo.getUuid(), vinfo.isAttachedVM(), vinfo.getAttachedVmName(),
+ vinfo.getpayload(), conn.getTemplateName());
+ resp = StorPoolUtil.volumeCreate(name, null, size, getVMInstanceUUID(vinfo.getInstanceId()), null,
+ "volume", vinfo.getMaxIops(), conn);
+ }
+ return resp;
+ }
+
private void updateVolume(DataStore dataStore, String path, VolumeInfo vinfo) {
VolumeVO volume = volumeDao.findById(vinfo.getId());
volume.setPoolId(dataStore.getId());
@@ -336,66 +381,109 @@ private StorPoolSetVolumeEncryptionAnswer createEncryptedVolume(DataStore dataSt
public void resize(DataObject data, AsyncCompletionCallback callback) {
String path = null;
String err = null;
- ResizeVolumeAnswer answer = null;
if (data.getType() == DataObjectType.VOLUME) {
VolumeObject vol = (VolumeObject)data;
- StoragePool pool = (StoragePool)data.getDataStore();
- ResizeVolumePayload payload = (ResizeVolumePayload)vol.getpayload();
+ path = vol.getPath();
- final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true);
- final long oldSize = vol.getSize();
- Long oldMaxIops = vol.getMaxIops();
+ err = resizeVolume(data, path, vol);
+ } else {
+ err = String.format("Invalid object type \"%s\" passed to resize", data.getType());
+ }
- try {
- SpConnectionDesc conn = StorPoolUtil.getSpConnection(data.getDataStore().getUuid(), data.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
+ CreateCmdResult res = new CreateCmdResult(path, new Answer(null, err != null, err));
+ res.setResult(err);
+ callback.complete(res);
+ }
- long maxIops = payload.newMaxIops == null ? Long.valueOf(0) : payload.newMaxIops;
+ private String resizeVolume(DataObject data, String path, VolumeObject vol) {
+ String err = null;
+ ResizeVolumePayload payload = (ResizeVolumePayload)vol.getpayload();
+        boolean needResize = vol.getSize().longValue() != payload.newSize;
- StorPoolUtil.spLog("StorpoolPrimaryDataStoreDriverImpl.resize: name=%s, uuid=%s, oldSize=%d, newSize=%s, shrinkOk=%s, maxIops=%s", name, vol.getUuid(), oldSize, payload.newSize, payload.shrinkOk, maxIops);
+ final String name = StorPoolStorageAdaptor.getVolumeNameFromPath(path, true);
+ final long oldSize = vol.getSize();
+ Long oldMaxIops = vol.getMaxIops();
- SpApiResponse resp = StorPoolUtil.volumeUpdate(name, payload.newSize, payload.shrinkOk, maxIops, conn);
- if (resp.getError() != null) {
- err = String.format("Could not resize StorPool volume %s. Error: %s", name, resp.getError());
- } else {
- StorPoolResizeVolumeCommand resizeCmd = new StorPoolResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), payload.newSize, payload.shrinkOk,
- payload.instanceName, payload.hosts == null ? false : true);
- answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, payload.hosts, resizeCmd);
+ try {
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(data.getDataStore().getUuid(), data.getDataStore().getId(), storagePoolDetailsDao, primaryStoreDao);
- if (answer == null || !answer.getResult()) {
- err = answer != null ? answer.getDetails() : "return a null answer, resize failed for unknown reason";
- } else {
- path = StorPoolUtil.devPath(StorPoolUtil.getNameFromResponse(resp, false));
-
- vol.setSize(payload.newSize);
- vol.update();
- if (payload.newMaxIops != null) {
- VolumeVO volume = volumeDao.findById(vol.getId());
- volume.setMaxIops(payload.newMaxIops);
- volumeDao.update(volume.getId(), volume);
- }
+ err = updateStorPoolVolume(vol, payload, conn);
+ if (err == null && needResize) {
+ err = notifyQemuForTheNewSize(data, err, vol, payload);
+ }
- updateStoragePool(vol.getPoolId(), payload.newSize - oldSize);
- }
- }
- if (err != null) {
- // try restoring volume to its initial size
- resp = StorPoolUtil.volumeUpdate(name, oldSize, true, oldMaxIops, conn);
- if (resp.getError() != null) {
- logger.debug(String.format("Could not resize StorPool volume %s back to its original size. Error: %s", name, resp.getError()));
- }
+ if (err != null) {
+ // try restoring volume to its initial size
+ SpApiResponse response = StorPoolUtil.volumeUpdate(name, oldSize, true, oldMaxIops, conn);
+ if (response.getError() != null) {
+ logger.debug(String.format("Could not resize StorPool volume %s back to its original size. Error: %s", name, response.getError()));
}
- } catch (Exception e) {
- logger.debug("sending resize command failed", e);
- err = e.toString();
}
+ } catch (Exception e) {
+ logger.debug("sending resize command failed", e);
+ err = e.toString();
+ }
+ return err;
+ }
+
+ private String notifyQemuForTheNewSize(DataObject data, String err, VolumeObject vol, ResizeVolumePayload payload)
+ throws StorageUnavailableException {
+ StoragePool pool = (StoragePool)data.getDataStore();
+
+ StorPoolResizeVolumeCommand resizeCmd = new StorPoolResizeVolumeCommand(vol.getPath(), new StorageFilerTO(pool), vol.getSize(), payload.newSize, payload.shrinkOk,
+ payload.instanceName, payload.hosts == null ? false : true);
+ ResizeVolumeAnswer answer = (ResizeVolumeAnswer) storageMgr.sendToPool(pool, payload.hosts, resizeCmd);
+
+ if (answer == null || !answer.getResult()) {
+ err = answer != null ? answer.getDetails() : "return a null answer, resize failed for unknown reason";
+ }
+ return err;
+ }
+
+ private String updateStorPoolVolume(VolumeObject vol, ResizeVolumePayload payload, SpConnectionDesc conn) {
+ String err = null;
+ String name = StorPoolStorageAdaptor.getVolumeNameFromPath(vol.getPath(), true);
+ Long newDiskOfferingId = payload.getNewDiskOfferingId();
+ String tier = null;
+ String template = null;
+ if (newDiskOfferingId != null) {
+ tier = getTierFromOfferingDetail(newDiskOfferingId);
+ if (tier == null) {
+ template = getTemplateFromOfferingDetail(newDiskOfferingId);
+ }
+ }
+ SpApiResponse resp = new SpApiResponse();
+ if (tier != null || template != null) {
+ Map tags = StorPoolHelper.addStorPoolTags(null, null, null, null, tier);
+ StorPoolVolumeDef spVolume = new StorPoolVolumeDef(name, payload.newSize, tags, null, null, template, null, null,
+ payload.shrinkOk);
+ resp = StorPoolUtil.volumeUpdate(spVolume, conn);
} else {
- err = String.format("Invalid object type \"%s\" passed to resize", data.getType());
+ long maxIops = payload.newMaxIops == null ? Long.valueOf(0) : payload.newMaxIops;
+
+ StorPoolVolumeDef spVolume = new StorPoolVolumeDef(name, payload.newSize, null, null, maxIops, null, null, null,
+ payload.shrinkOk);
+ StorPoolUtil.spLog(
+ "StorpoolPrimaryDataStoreDriverImpl.resize: name=%s, uuid=%s, oldSize=%d, newSize=%s, shrinkOk=%s, maxIops=%s",
+ name, vol.getUuid(), vol.getSize(), payload.newSize, payload.shrinkOk, maxIops);
+
+ resp = StorPoolUtil.volumeUpdate(spVolume, conn);
}
+ if (resp.getError() != null) {
+ err = String.format("Could not resize StorPool volume %s. Error: %s", name, resp.getError());
+ } else {
+            updateStoragePool(vol.getPoolId(), payload.newSize - vol.getSize());
+            vol.setSize(payload.newSize);
+            vol.update();
+            if (payload.newMaxIops != null) {
+                VolumeVO volume = volumeDao.findById(vol.getId());
+                volume.setMaxIops(payload.newMaxIops);
+                volumeDao.update(volume.getId(), volume);
+            }
-        CreateCmdResult res = new CreateCmdResult(path, answer);
-        res.setResult(err);
-        callback.complete(res);
+ }
+ return err;
}
@Override
@@ -772,8 +860,30 @@ public void copyAsync(DataObject srcData, DataObject dstData, AsyncCompletionCal
}
StorPoolUtil.spLog(String.format("volume size is: %d", size));
Long vmId = vinfo.getInstanceId();
- SpApiResponse resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId), getVcPolicyTag(vmId),
- "volume", vinfo.getMaxIops(), conn);
+
+ String template = null;
+ String tier = null;
+ SpApiResponse resp = new SpApiResponse();
+
+ if (vinfo.getDiskOfferingId() != null) {
+ tier = getTierFromOfferingDetail(vinfo.getDiskOfferingId());
+ if (tier == null) {
+ template = getTemplateFromOfferingDetail(vinfo.getDiskOfferingId());
+ }
+ }
+
+ if (tier != null || template != null) {
+ Map tags = StorPoolHelper.addStorPoolTags(name, getVMInstanceUUID(vmId), "volume", getVcPolicyTag(vmId), tier);
+
+ StorPoolUtil.spLog(
+ "Creating volume [%s] with template [%s] or tier tags [%s] described in disk/service offerings details",
+ vinfo.getUuid(), template, tier);
+ resp = StorPoolUtil.volumeCreate(size, parentName, template, tags, conn);
+ } else {
+ resp = StorPoolUtil.volumeCreate(name, parentName, size, getVMInstanceUUID(vmId),
+ getVcPolicyTag(vmId), "volume", vinfo.getMaxIops(), conn);
+ }
+
if (resp.getError() == null) {
updateStoragePool(dstData.getDataStore().getId(), vinfo.getSize());
updateVolumePoolType(vinfo);
@@ -1255,4 +1365,67 @@ public void detachVolumeFromAllStorageNodes(Volume volume) {
StorPoolUtil.spLog("The volume [%s] is detach from all clusters [%s]", volName, resp);
}
}
+
+ @Override
+ public boolean informStorageForDiskOfferingChange() {
+ return true;
+ }
+
+ @Override
+ public void updateStorageWithTheNewDiskOffering(Volume volume, DiskOffering newDiskOffering) {
+ if (newDiskOffering == null) {
+ return;
+ }
+
+ StoragePoolVO pool = primaryStoreDao.findById(volume.getPoolId());
+ if (pool == null) {
+ return;
+ }
+
+ String tier = getTierFromOfferingDetail(newDiskOffering.getId());
+ String template = null;
+ if (tier == null) {
+ template = getTemplateFromOfferingDetail(newDiskOffering.getId());
+ }
+ if (tier == null && template == null) {
+ return;
+ }
+ SpConnectionDesc conn = StorPoolUtil.getSpConnection(pool.getUuid(), pool.getId(), storagePoolDetailsDao, primaryStoreDao);
+ StorPoolUtil.spLog("Updating volume [%s] with tier tag [%s] or template [%s] from Disk offering", volume.getId(), tier, template);
+ String volumeName = StorPoolStorageAdaptor.getVolumeNameFromPath(volume.getPath(), true);
+ Map tags = StorPoolHelper.addStorPoolTags(null, null, null, null, tier);
+ StorPoolVolumeDef spVolume = new StorPoolVolumeDef(volumeName, null, tags, null, null, template, null, null, null);
+ SpApiResponse response = StorPoolUtil.volumeUpdate(spVolume, conn);
+ if (response.getError() != null) {
+ StorPoolUtil.spLog("Could not update volume [%s] with tier tag [%s] or template [%s] from Disk offering due to [%s]", volume.getId(), tier, template, response.getError());
+ }
+ }
+
+ private String getTemplateFromOfferingDetail(Long diskOfferingId) {
+ String template = null;
+ DiskOfferingDetailVO diskOfferingDetail = diskOfferingDetailsDao.findDetail(diskOfferingId, StorPoolUtil.SP_TEMPLATE);
+ if (diskOfferingDetail == null ) {
+ ServiceOfferingVO serviceOffering = serviceOfferingDao.findServiceOfferingByComputeOnlyDiskOffering(diskOfferingId, true);
+ if (serviceOffering != null) {
+ ServiceOfferingDetailsVO serviceOfferingDetail = serviceOfferingDetailDao.findDetail(serviceOffering.getId(), StorPoolUtil.SP_TEMPLATE);
+ if (serviceOfferingDetail != null) {
+ template = serviceOfferingDetail.getValue();
+ }
+ }
+ } else {
+ template = diskOfferingDetail.getValue();
+ }
+ return template;
+ }
+
+ private String getTierFromOfferingDetail(Long diskOfferingId) {
+ String tier = null;
+ DiskOfferingDetailVO diskOfferingDetail = diskOfferingDetailsDao.findDetail(diskOfferingId, StorPoolUtil.SP_TIER);
+ if (diskOfferingDetail == null ) {
+ return tier;
+ } else {
+ tier = diskOfferingDetail.getValue();
+ }
+ return tier;
+ }
}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java
index 5a84e699f52e..3113ae8fdaaf 100644
--- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolHelper.java
@@ -163,11 +163,12 @@ public static String getVMInstanceUUID(Long id, VMInstanceDao vmInstanceDao) {
return null;
}
- public static Map addStorPoolTags(String name, String vmUuid, String csTag, String vcPolicy) {
+ public static Map addStorPoolTags(String name, String vmUuid, String csTag, String vcPolicy, String qcTier) {
Map tags = new HashMap<>();
tags.put("uuid", name);
tags.put("cvm", vmUuid);
tags.put(StorPoolUtil.SP_VC_POLICY, vcPolicy);
+ tags.put("qc", qcTier);
if (csTag != null) {
tags.put("cs", csTag);
}
diff --git a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java
index 42809daec575..97f4e2fe155a 100644
--- a/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java
+++ b/plugins/storage/volume/storpool/src/main/java/org/apache/cloudstack/storage/datastore/util/StorPoolUtil.java
@@ -30,6 +30,7 @@
import com.google.gson.JsonPrimitive;
import org.apache.cloudstack.storage.datastore.api.StorPoolSnapshotDef;
+import org.apache.cloudstack.storage.datastore.api.StorPoolVolumeDef;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
@@ -135,6 +136,7 @@ public static void spLog(String fmt, Object... args) {
public static final String DELAY_DELETE = "delayDelete";
+ public static final String SP_TIER = "SP_QOSCLASS";
public static enum StorpoolRights {
RO("ro"), RW("rw"), DETACH("detach");
@@ -499,7 +501,19 @@ public static SpApiResponse volumeCreate(final String name, final String parentN
json.put("parent", parentName);
json.put("size", size);
json.put("template", conn.getTemplateName());
- Map tags = StorPoolHelper.addStorPoolTags(name, vmUuid, csTag, vcPolicy);
+ Map tags = StorPoolHelper.addStorPoolTags(name, vmUuid, csTag, vcPolicy, null);
+ json.put("tags", tags);
+ return POST("MultiCluster/VolumeCreate", json, conn);
+ }
+
+ public static SpApiResponse volumeCreate(Long size, String parentName, String template, Map tags, SpConnectionDesc conn) {
+ template = template != null ? template : conn.getTemplateName();
+
+ Map json = new LinkedHashMap<>();
+ json.put("name", "");
+ json.put("parent", parentName);
+ json.put("size", size);
+ json.put("template", template);
json.put("tags", tags);
return POST("MultiCluster/VolumeCreate", json, conn);
}
@@ -523,7 +537,7 @@ public static SpApiResponse volumeCopy(final String name, final String baseOn, S
json.put("iops", iops);
}
json.put("template", conn.getTemplateName());
- Map tags = StorPoolHelper.addStorPoolTags(name, cvmTag, csTag, vcPolicyTag);
+ Map tags = StorPoolHelper.addStorPoolTags(name, cvmTag, csTag, vcPolicyTag, null);
json.put("tags", tags);
return POST("MultiCluster/VolumeCreate", json, conn);
}
@@ -551,7 +565,7 @@ public static SpApiResponse volumeUpdate(final String name, final Long newSize,
public static SpApiResponse volumeRemoveTags(String name, SpConnectionDesc conn) {
Map json = new HashMap<>();
- Map tags = StorPoolHelper.addStorPoolTags(null, "", null, "");
+ Map tags = StorPoolHelper.addStorPoolTags(null, "", null, "", null);
json.put("tags", tags);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
@@ -559,7 +573,7 @@ public static SpApiResponse volumeRemoveTags(String name, SpConnectionDesc conn)
public static SpApiResponse volumeUpdateIopsAndTags(final String name, final String uuid, Long iops,
SpConnectionDesc conn, String vcPolicy) {
Map json = new HashMap<>();
- Map tags = StorPoolHelper.addStorPoolTags(null, uuid, null, vcPolicy);
+ Map tags = StorPoolHelper.addStorPoolTags(null, uuid, null, vcPolicy, null);
json.put("iops", iops);
json.put("tags", tags);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
@@ -567,14 +581,14 @@ public static SpApiResponse volumeUpdateIopsAndTags(final String name, final Str
public static SpApiResponse volumeUpdateCvmTags(final String name, final String uuid, SpConnectionDesc conn) {
Map json = new HashMap<>();
- Map tags = StorPoolHelper.addStorPoolTags(null, uuid, null, null);
+ Map tags = StorPoolHelper.addStorPoolTags(null, uuid, null, null, null);
json.put("tags", tags);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
public static SpApiResponse volumeUpdateVCTags(final String name, SpConnectionDesc conn, String vcPolicy) {
Map json = new HashMap<>();
- Map tags = StorPoolHelper.addStorPoolTags(null, null, null, vcPolicy);
+ Map tags = StorPoolHelper.addStorPoolTags(null, null, null, vcPolicy, null);
json.put("tags", tags);
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
@@ -585,10 +599,14 @@ public static SpApiResponse volumeUpdateTemplate(final String name, SpConnection
return POST("MultiCluster/VolumeUpdate/" + name, json, conn);
}
+ public static SpApiResponse volumeUpdate(StorPoolVolumeDef volume, SpConnectionDesc conn) {
+ return POST("MultiCluster/VolumeUpdate/" + volume.getName(), volume, conn);
+ }
+
public static SpApiResponse volumeSnapshot(final String volumeName, final String snapshotName, String vmUuid,
String csTag, String vcPolicy, SpConnectionDesc conn) {
Map json = new HashMap<>();
- Map tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, vcPolicy);
+ Map tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, vcPolicy, null);
json.put("name", "");
json.put("tags", tags);
@@ -602,7 +620,7 @@ public static SpApiResponse volumeSnapshot(StorPoolSnapshotDef snapshot, SpConne
public static SpApiResponse volumesGroupSnapshot(final List volumeTOs, final String vmUuid,
final String snapshotName, String csTag, SpConnectionDesc conn) {
Map json = new LinkedHashMap<>();
- Map tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, null);
+ Map tags = StorPoolHelper.addStorPoolTags(snapshotName, vmUuid, csTag, null, null);
List