This is an automated email from the ASF dual-hosted git repository.
rohit pushed a commit to branch 4.18
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
The following commit(s) were added to refs/heads/4.18 by this push:
new 7eb36367c90 Add lock mechanism considering template id, pool id, host
id in PowerFlex Storage (#8233)
7eb36367c90 is described below
commit 7eb36367c905848d290c9a1871df8bfe400628fe
Author: Harikrishna <[email protected]>
AuthorDate: Fri Dec 8 13:21:16 2023 +0530
Add lock mechanism considering template id, pool id, host id in PowerFlex
Storage (#8233)
Observed a failure to start a new virtual machine with PowerFlex storage.
Traced it to concurrent VM starts that used the same template and the same host
for the copy: the second mapping attempt failed.
While creating the volume clone from the seeded template in primary
storage, a global lock keyed on a string containing the IDs of the template,
storage pool and destination host is now acquired, which prevents concurrent
mapping attempts against the same host.
---
.../storage/volume/VolumeServiceImpl.java | 30 ++++++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)
diff --git
a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index ffc12b98c84..47577cc52b2 100644
---
a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++
b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -1478,8 +1478,8 @@ public class VolumeServiceImpl implements VolumeService {
createManagedVolumeCloneTemplateAsync(volumeInfo,
templateOnPrimary, destPrimaryDataStore, future);
} else {
// We have a template on PowerFlex primary storage. Create new
volume and copy to it.
- s_logger.debug("Copying the template to the volume on primary
storage");
- createManagedVolumeCopyManagedTemplateAsync(volumeInfo,
destPrimaryDataStore, templateOnPrimary, destHost, future);
+
createManagedVolumeCopyManagedTemplateAsyncWithLock(volumeInfo,
destPrimaryDataStore, templateOnPrimary,
+ destHost, future, destDataStoreId,
srcTemplateInfo.getId());
}
} else {
s_logger.debug("Primary storage does not support cloning or no
support for UUID resigning on the host side; copying the template normally");
@@ -1490,6 +1490,32 @@ public class VolumeServiceImpl implements VolumeService {
return future;
}
+ private void
createManagedVolumeCopyManagedTemplateAsyncWithLock(VolumeInfo volumeInfo,
PrimaryDataStore destPrimaryDataStore, TemplateInfo templateOnPrimary,
+ Host
destHost, AsyncCallFuture<VolumeApiResult> future, long destDataStoreId, long
srcTemplateId) {
+ GlobalLock lock = null;
+ try {
+ String tmplIdManagedPoolIdDestinationHostLockString = "tmplId:" +
srcTemplateId + "managedPoolId:" + destDataStoreId + "destinationHostId:" +
destHost.getId();
+ lock =
GlobalLock.getInternLock(tmplIdManagedPoolIdDestinationHostLockString);
+ if (lock == null) {
+ throw new CloudRuntimeException("Unable to create volume from
template, couldn't get global lock on " +
tmplIdManagedPoolIdDestinationHostLockString);
+ }
+
+ int storagePoolMaxWaitSeconds =
NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()),
3600);
+ if (!lock.lock(storagePoolMaxWaitSeconds)) {
+ s_logger.debug("Unable to create volume from template,
couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString);
+ throw new CloudRuntimeException("Unable to create volume from
template, couldn't lock on " + tmplIdManagedPoolIdDestinationHostLockString);
+ }
+
+ s_logger.debug("Copying the template to the volume on primary
storage");
+ createManagedVolumeCopyManagedTemplateAsync(volumeInfo,
destPrimaryDataStore, templateOnPrimary, destHost, future);
+ } finally {
+ if (lock != null) {
+ lock.unlock();
+ lock.releaseRef();
+ }
+ }
+ }
+
private boolean computeSupportsVolumeClone(long zoneId, HypervisorType
hypervisorType) {
if (HypervisorType.VMware.equals(hypervisorType) ||
HypervisorType.KVM.equals(hypervisorType)) {
return true;