This is an automated email from the ASF dual-hosted git repository.

rohit pushed a commit to branch 4.19
in repository https://gitbox.apache.org/repos/asf/cloudstack.git


The following commit(s) were added to refs/heads/4.19 by this push:
     new 730cc5d5b8f Change iops on offering change (#8872)
730cc5d5b8f is described below

commit 730cc5d5b8fc2ff6ba889e946b361abed3cc88ad
Author: Vishesh <[email protected]>
AuthorDate: Thu Apr 11 17:01:55 2024 +0530

    Change iops on offering change (#8872)
    
    * Change IOPS on disk offering change
    
    * Remove iops & bandwidth limits before copying template
    
    * minor refactor
    
    * Handle diskOfferingDetails
    
    * Fixup
---
 .../service/VolumeOrchestrationService.java        |   2 +
 .../engine/orchestration/VolumeOrchestrator.java   |  39 ++++---
 .../java/com/cloud/storage/VolumeDetailVO.java     |   3 +
 .../driver/ScaleIOPrimaryDataStoreDriver.java      | 112 ++++++++++++++-------
 .../com/cloud/storage/ResizeVolumePayload.java     |   8 ++
 .../com/cloud/storage/VolumeApiServiceImpl.java    |  12 ++-
 6 files changed, 129 insertions(+), 47 deletions(-)
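
The heart of the ScaleIO/PowerFlex driver change below is how offering-level limits are
normalised before mapVolumeToSdcWithLimits() is called: the bandwidth limit stored on the
disk offering in MB/s is converted to kbps, and a non-zero IOPS limit is raised to the
backend minimum. The following standalone sketch is illustrative only, not CloudStack code;
the class name and the minimum-IOPS constant are placeholders (the real constant is
ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT), but the conversion and clamp mirror the patch.

    // Illustrative sketch only; names and constants are placeholders, not CloudStack APIs.
    public class LimitNormalisationSketch {
        // Placeholder value standing in for ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT.
        private static final long MINIMUM_ALLOWED_IOPS_LIMIT = 10;

        static long bandwidthKbps(Long bandwidthLimitInMbps) {
            // The offering detail is stored in MB/s; the SDC mapping expects kbps (0 = unlimited).
            return bandwidthLimitInMbps == null ? 0L : bandwidthLimitInMbps * 1024;
        }

        static long iopsLimit(Long requestedIops) {
            // 0 means unlimited; a positive value below the backend minimum is raised to it.
            long iops = requestedIops == null ? 0L : requestedIops;
            if (iops > 0 && iops < MINIMUM_ALLOWED_IOPS_LIMIT) {
                iops = MINIMUM_ALLOWED_IOPS_LIMIT;
            }
            return iops;
        }

        public static void main(String[] args) {
            System.out.println(bandwidthKbps(100L)); // 102400 kbps
            System.out.println(iopsLimit(5L));       // raised to the placeholder minimum
            System.out.println(iopsLimit(null));     // 0, i.e. unlimited
        }
    }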

diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
index c4fbc2505aa..8a9d5fed028 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
@@ -130,6 +130,8 @@ public interface VolumeOrchestrationService {
 
     boolean canVmRestartOnAnotherServer(long vmId);
 
+    void saveVolumeDetails(Long diskOfferingId, Long volumeId);
+
     /**
     * Allocate a volume or multiple volumes in case of template is registered with the 'deploy-as-is' option, allowing multiple disks
      */
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index 409b5388d72..3a5b342b6e8 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -863,18 +863,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType()));
         vol = _volsDao.persist(vol);
 
-        List<VolumeDetailVO> volumeDetailsVO = new ArrayList<VolumeDetailVO>();
-        DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
-        if (bandwidthLimitDetail != null) {
-            volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false));
-        }
-        DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.IOPS_LIMIT);
-        if (iopsLimitDetail != null) {
-            volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false));
-        }
-        }
-        if (!volumeDetailsVO.isEmpty()) {
-            _volDetailDao.saveDetails(volumeDetailsVO);
-        }
+        saveVolumeDetails(offering.getId(), vol.getId());
 
         // Save usage event and update resource count for user vm volumes
         if (vm.getType() == VirtualMachine.Type.User) {
@@ -891,6 +880,32 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         return diskProfile;
     }
 
+    @Override
+    public void saveVolumeDetails(Long diskOfferingId, Long volumeId) {
+        List<VolumeDetailVO> volumeDetailsVO = new ArrayList<>();
+        DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(diskOfferingId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
+        if (bandwidthLimitDetail != null) {
+            volumeDetailsVO.add(new VolumeDetailVO(volumeId, Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false));
+        } else {
+            VolumeDetailVO bandwidthLimit = _volDetailDao.findDetail(volumeId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
+            if (bandwidthLimit != null) {
+                _volDetailDao.remove(bandwidthLimit.getId());
+            }
+        }
+        DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(diskOfferingId, Volume.IOPS_LIMIT);
+        if (iopsLimitDetail != null) {
+            volumeDetailsVO.add(new VolumeDetailVO(volumeId, Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false));
+        } else {
+            VolumeDetailVO iopsLimit = _volDetailDao.findDetail(volumeId, Volume.IOPS_LIMIT);
+            if (iopsLimit != null) {
+                _volDetailDao.remove(iopsLimit.getId());
+            }
+        }
+        if (!volumeDetailsVO.isEmpty()) {
+            _volDetailDao.saveDetails(volumeDetailsVO);
+        }
+    }
+
     private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm,
                                                 Account owner, long deviceId, String configurationId) {
         assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template.";
diff --git a/engine/schema/src/main/java/com/cloud/storage/VolumeDetailVO.java b/engine/schema/src/main/java/com/cloud/storage/VolumeDetailVO.java
index 6723f0b8bc1..42980e07b2b 100644
--- a/engine/schema/src/main/java/com/cloud/storage/VolumeDetailVO.java
+++ b/engine/schema/src/main/java/com/cloud/storage/VolumeDetailVO.java
@@ -80,4 +80,7 @@ public class VolumeDetailVO implements ResourceDetail {
         return display;
     }
 
+    public void setValue(String value) {
+        this.value = value;
+    }
 }
diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
index 4cce6c6d075..1d2cace8d21 100644
--- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import javax.inject.Inject;
 
+import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
 import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
@@ -38,6 +39,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO;
+import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao;
 import org.apache.cloudstack.storage.RemoteHostEndPoint;
 import org.apache.cloudstack.storage.command.CommandResult;
 import org.apache.cloudstack.storage.command.CopyCommand;
@@ -127,11 +130,15 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     @Inject
     private ConfigurationDao configDao;
     @Inject
+    private DiskOfferingDetailsDao diskOfferingDetailsDao;
+    @Inject
     private HostDao hostDao;
     @Inject
     private VMInstanceDao vmInstanceDao;
     @Inject
     private VolumeService volumeService;
+    @Inject
+    private VolumeOrchestrationService volumeMgr;
 
     public ScaleIOPrimaryDataStoreDriver() {
 
@@ -141,40 +148,47 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao);
     }
 
+    private boolean setVolumeLimitsOnSDC(VolumeVO volume, Host host, DataStore dataStore, Long iopsLimit, Long bandwidthLimitInKbps) throws Exception {
+        final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
+        if (StringUtils.isBlank(sdcId)) {
+            alertHostSdcDisconnection(host);
+            throw new CloudRuntimeException("Unable to grant access to volume: " + volume.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
+        }
+
+        final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
+        return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps);
+    }
+
+    private boolean setVolumeLimitsFromDetails(VolumeVO volume, Host host, DataStore dataStore) throws Exception {
+        Long bandwidthLimitInKbps = 0L; // Unlimited
+        // Check Bandwidth Limit parameter in volume details
+        final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
+        if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) {
+            bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024;
+        }
+
+        Long iopsLimit = 0L; // Unlimited
+        // Check IOPS Limit parameter in volume details, else try MaxIOPS
+        final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT);
+        if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) {
+            iopsLimit = Long.parseLong(iopsVolumeDetail.getValue());
+        } else if (volume.getMaxIops() != null) {
+            iopsLimit = volume.getMaxIops();
+        }
+        if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) {
+            iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT;
+        }
+
+        return setVolumeLimitsOnSDC(volume, host, dataStore, iopsLimit, bandwidthLimitInKbps);
+    }
+
     @Override
     public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
         try {
             if (DataObjectType.VOLUME.equals(dataObject.getType())) {
                 final VolumeVO volume = volumeDao.findById(dataObject.getId());
                 LOGGER.debug("Granting access for PowerFlex volume: " + volume.getPath());
-
-                Long bandwidthLimitInKbps = Long.valueOf(0); // Unlimited
-                // Check Bandwidht Limit parameter in volume details
-                final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
-                if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) {
-                    bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024;
-                }
-
-                Long iopsLimit = Long.valueOf(0); // Unlimited
-                // Check IOPS Limit parameter in volume details, else try MaxIOPS
-                final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT);
-                if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) {
-                    iopsLimit = Long.parseLong(iopsVolumeDetail.getValue());
-                } else if (volume.getMaxIops() != null) {
-                    iopsLimit = volume.getMaxIops();
-                }
-                if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) {
-                    iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT;
-                }
-
-                final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
-                if (StringUtils.isBlank(sdcId)) {
-                    alertHostSdcDisconnection(host);
-                    throw new CloudRuntimeException("Unable to grant access to volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
-                }
-
-                final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
-                return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps);
+                return setVolumeLimitsFromDetails(volume, host, dataStore);
             } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
                 final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null);
                 LOGGER.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath());
@@ -791,7 +805,15 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             LOGGER.error(errorMsg);
             answer = new Answer(cmd, false, errorMsg);
         } else {
-            answer = ep.sendMessage(cmd);
+            VolumeVO volume = volumeDao.findById(destData.getId());
+            Host host = destHost != null ? destHost : hostDao.findById(ep.getId());
+            try {
+                setVolumeLimitsOnSDC(volume, host, destData.getDataStore(), 0L, 0L);
+                answer = ep.sendMessage(cmd);
+            } catch (Exception e) {
+                LOGGER.error("Failed to copy template to volume due to: " + e.getMessage(), e);
+                answer = new Answer(cmd, false, e.getMessage());
+            }
         }
 
         return answer;
@@ -1181,7 +1203,7 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload();
             long newSizeInBytes = payload.newSize != null ? payload.newSize : volumeInfo.getSize();
             // Only increase size is allowed and size should be specified in granularity of 8 GB
-            if (newSizeInBytes <= volumeInfo.getSize()) {
+            if (newSizeInBytes < volumeInfo.getSize()) {
                 throw new CloudRuntimeException("Only increase size is allowed for volume: " + volumeInfo.getName());
             }
 
@@ -1210,6 +1232,20 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
                 }
             }
 
+            Long newMaxIops = payload.newMaxIops != null ? payload.newMaxIops : volumeInfo.getMaxIops();
+            long newBandwidthLimit = 0L;
+            Long newDiskOfferingId = payload.newDiskOfferingId != null ? payload.newDiskOfferingId : volumeInfo.getDiskOfferingId();
+            if (newDiskOfferingId != null) {
+                DiskOfferingDetailVO bandwidthLimitDetail = diskOfferingDetailsDao.findDetail(newDiskOfferingId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
+                if (bandwidthLimitDetail != null) {
+                    newBandwidthLimit = Long.parseLong(bandwidthLimitDetail.getValue()) * 1024;
+                }
+                DiskOfferingDetailVO iopsLimitDetail = diskOfferingDetailsDao.findDetail(newDiskOfferingId, Volume.IOPS_LIMIT);
+                if (iopsLimitDetail != null) {
+                    newMaxIops = Long.parseLong(iopsLimitDetail.getValue());
+                }
+            }
+
             if (volumeInfo.getFormat().equals(Storage.ImageFormat.QCOW2) || attachedRunning) {
                 LOGGER.debug("Volume needs to be resized at the hypervisor host");
 
@@ -1229,9 +1265,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
                         volumeInfo.getPassphrase(), volumeInfo.getEncryptFormat());
 
                 try {
-                    if (!attachedRunning) {
-                        grantAccess(volumeInfo, ep, volumeInfo.getDataStore());
-                    }
+                    VolumeVO volume = volumeDao.findById(volumeInfo.getId());
+                    setVolumeLimitsOnSDC(volume, host, volumeInfo.getDataStore(), newMaxIops != null ? newMaxIops : 0L, newBandwidthLimit);
                     Answer answer = ep.sendMessage(resizeVolumeCommand);
 
                     if (!answer.getResult() && volumeInfo.getFormat().equals(Storage.ImageFormat.QCOW2)) {
@@ -1253,14 +1288,23 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             VolumeVO volume = volumeDao.findById(volumeInfo.getId());
             long oldVolumeSize = volume.getSize();
             volume.setSize(scaleIOVolume.getSizeInKb() * 1024);
+            if (payload.newMinIops != null) {
+                volume.setMinIops(payload.newMinIops);
+            }
+            if (payload.newMaxIops != null) {
+                volume.setMaxIops(payload.newMaxIops);
+            }
             volumeDao.update(volume.getId(), volume);
+            if (payload.newDiskOfferingId != null) {
+                volumeMgr.saveVolumeDetails(payload.newDiskOfferingId, volume.getId());
+            }
 
             long capacityBytes = storagePool.getCapacityBytes();
             long usedBytes = storagePool.getUsedBytes();
 
             long newVolumeSize = volume.getSize();
             usedBytes += newVolumeSize - oldVolumeSize;
-            storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
+            storagePool.setUsedBytes(Math.min(usedBytes, capacityBytes));
             storagePoolDao.update(storagePoolId, storagePool);
         } catch (Exception e) {
             String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage();
diff --git a/server/src/main/java/com/cloud/storage/ResizeVolumePayload.java b/server/src/main/java/com/cloud/storage/ResizeVolumePayload.java
index 9e4c3ec528c..84dcd302bdd 100644
--- a/server/src/main/java/com/cloud/storage/ResizeVolumePayload.java
+++ b/server/src/main/java/com/cloud/storage/ResizeVolumePayload.java
@@ -21,6 +21,7 @@ public class ResizeVolumePayload {
     public final Long newSize;
     public final Long newMinIops;
     public final Long newMaxIops;
+    public Long newDiskOfferingId;
     public final Integer newHypervisorSnapshotReserve;
     public final boolean shrinkOk;
     public final String instanceName;
@@ -37,5 +38,12 @@ public class ResizeVolumePayload {
         this.instanceName = instanceName;
         this.hosts = hosts;
         this.isManaged = isManaged;
+        this.newDiskOfferingId = null;
+    }
+
+    public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, Long newDiskOfferingId, Integer newHypervisorSnapshotReserve, boolean shrinkOk,
+            String instanceName, long[] hosts, boolean isManaged) {
+        this(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
+        this.newDiskOfferingId = newDiskOfferingId;
     }
 }
diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
index 88d45d54aa5..e5a33a22859 100644
--- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
@@ -1475,7 +1475,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             }
         }
 
-        ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
+        ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newDiskOfferingId, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
 
         try {
             VolumeInfo vol = volFactory.getVolume(volume.getId());
@@ -1514,6 +1514,15 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
 
             if (newDiskOfferingId != null) {
                 volume.setDiskOfferingId(newDiskOfferingId);
+                _volumeMgr.saveVolumeDetails(newDiskOfferingId, volume.getId());
+            }
+
+            if (newMinIops != null) {
+                volume.setMinIops(newMinIops);
+            }
+
+            if (newMaxIops != null) {
+                volume.setMaxIops(newMaxIops);
             }
 
             // Update size if volume has same size as before, else it is already updated
@@ -2033,6 +2042,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
 
             if (newDiskOffering != null) {
                 volume.setDiskOfferingId(newDiskOfferingId);
+                _volumeMgr.saveVolumeDetails(newDiskOfferingId, volume.getId());
             }
 
             _volsDao.update(volume.getId(), volume);
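
The orchestration-side counterpart is the new VolumeOrchestrationService.saveVolumeDetails(),
which both copies limit details from the new disk offering onto the volume and removes stale
details left behind by the previous offering. The sketch below is illustrative only: plain
maps stand in for the disk-offering-detail and volume-detail DAOs, and the string keys are
simplified stand-ins for Volume.IOPS_LIMIT and Volume.BANDWIDTH_LIMIT_IN_MBPS, but the
copy-or-remove rule is the one introduced by this commit.

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative sketch only; maps and keys are stand-ins, not CloudStack DAOs.
    public class SaveVolumeDetailsSketch {
        static void syncDetail(String key, Map<String, String> offeringDetails, Map<String, String> volumeDetails) {
            String offeringValue = offeringDetails.get(key);
            if (offeringValue != null) {
                // The new offering defines the limit: copy it onto the volume.
                volumeDetails.put(key, offeringValue);
            } else {
                // The new offering has no such limit: drop any stale value on the volume.
                volumeDetails.remove(key);
            }
        }

        public static void main(String[] args) {
            Map<String, String> offeringDetails = new HashMap<>();
            offeringDetails.put("iopsLimit", "2000"); // offering sets IOPS but no bandwidth limit

            Map<String, String> volumeDetails = new HashMap<>();
            volumeDetails.put("bandwidthLimitInMbps", "50"); // stale value from the old offering

            syncDetail("iopsLimit", offeringDetails, volumeDetails);
            syncDetail("bandwidthLimitInMbps", offeringDetails, volumeDetails);

            System.out.println(volumeDetails); // prints {iopsLimit=2000}; the stale bandwidth limit was removed
        }
    }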
