This is an automated email from the ASF dual-hosted git repository.
nvazquez pushed a commit to branch 4.17
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
The following commit(s) were added to refs/heads/4.17 by this push:
new 90a0ee0b6c fix pseudo random behaviour in pool selection (#6307)
90a0ee0b6c is described below
commit 90a0ee0b6c054cf772526070a032992a36fa7cbf
Author: dahn <[email protected]>
AuthorDate: Fri Jun 10 13:06:23 2022 +0200
fix pseudo random behaviour in pool selection (#6307)
* refactor and log trace
* tracelogs
* shuffle pools with real randomiser
* single retrieval of async job context
* some review comments addressed
* Apply suggestions from code review
Co-authored-by: Daniel Augusto Veronezi Salvador <[email protected]>
* log formatting
* integration test for distribution of volumes over storages
* move test to smoke tests
* imports
* sonarcloud issue # AYCOmVntKzsfKlhz0HDh
* spellos
* review comments
* review comments
* sonarcloud issues
* unittest
* import
* Update AbstractStoragePoolAllocatorTest.java
Co-authored-by: Daan Hoogland <[email protected]>
Co-authored-by: Daniel Augusto Veronezi Salvador <[email protected]>
---
.../api/command/user/network/CreateNetworkCmd.java | 3 +-
.../main/java/com/cloud/storage/StorageUtil.java | 14 +
.../engine/orchestration/VolumeOrchestrator.java | 40 +--
.../allocator/AbstractStoragePoolAllocator.java | 53 +++-
.../AbstractStoragePoolAllocatorTest.java | 113 +++++++
.../com/cloud/storage/VolumeApiServiceImpl.java | 323 +++++++++++++--------
.../cloud/storage/VolumeApiServiceImplTest.java | 3 -
.../test_attach_multiple_volumes.py} | 161 ++++++++--
8 files changed, 531 insertions(+), 179 deletions(-)
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java
index 10aadee40e..903793b6e9 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java
@@ -346,8 +346,7 @@ public class CreateNetworkCmd extends BaseCmd implements UserCmd {
@Override
// an exception thrown by createNetwork() will be caught by the dispatcher.
-    public
-    void execute() throws InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException {
+    public void execute() throws InsufficientCapacityException, ConcurrentOperationException, ResourceAllocationException {
Network result = _networkService.createGuestNetwork(this);
if (result != null) {
             NetworkResponse response = _responseGenerator.createNetworkResponse(getResponseView(), result);
diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java b/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java
index 044ae3c3fd..a7b5a64ec3 100644
--- a/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java
+++ b/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java
@@ -33,6 +33,8 @@ import com.cloud.storage.dao.VolumeDao;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
+import org.apache.log4j.Logger;
+
public class StorageUtil {
@Inject private ClusterDao clusterDao;
@Inject private HostDao hostDao;
@@ -40,6 +42,18 @@ public class StorageUtil {
@Inject private VMInstanceDao vmInstanceDao;
@Inject private VolumeDao volumeDao;
+    public static void traceLogStoragePools(List<StoragePool> poolList, Logger logger, String initialMessage) {
+        if (logger.isTraceEnabled()) {
+            StringBuilder pooltable = new StringBuilder();
+            pooltable.append(initialMessage);
+            int i = 1;
+            for (StoragePool pool : poolList) {
+                pooltable.append("\nno ").append(i++).append(": ").append(pool.getName()).append("/").append(pool.getUuid());
+            }
+            logger.trace(pooltable.toString());
+        }
+    }
+
private Long getClusterId(Long hostId) {
if (hostId == null) {
return null;
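
A hedged usage sketch for the new helper, matching the call sites added further down in this diff; with TRACE enabled it emits one numbered name/uuid line per pool, and at other log levels it is a no-op, so callers need no isTraceEnabled() guard of their own:

    StorageUtil.traceLogStoragePools(poolList, s_logger, "pools to choose from: ");
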
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index 3848a2b977..476ecf23cb 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -37,6 +37,7 @@ import java.util.stream.Collectors;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
+import com.cloud.storage.StorageUtil;
import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin;
import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
@@ -161,6 +162,7 @@ import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.UserVmDetailsDao;
import org.apache.cloudstack.snapshot.SnapshotHelper;
+import org.jetbrains.annotations.Nullable;
 public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable {
@@ -335,15 +337,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     @Override
     public StoragePool findStoragePool(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set<StoragePool> avoid) {
- Long podId = null;
- if (pod != null) {
- podId = pod.getId();
- } else if (clusterId != null) {
- Cluster cluster = _entityMgr.findById(Cluster.class, clusterId);
- if (cluster != null) {
- podId = cluster.getPodId();
- }
- }
+ Long podId = retrievePod(pod, clusterId);
VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
for (StoragePoolAllocator allocator : _storagePoolAllocators) {
@@ -356,8 +350,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             final List<StoragePool> poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, StoragePoolAllocator.RETURN_UPTO_ALL);
             if (poolList != null && !poolList.isEmpty()) {
+                StorageUtil.traceLogStoragePools(poolList, s_logger, "pools to choose from: ");
                 // Check if the preferred storage pool can be used. If yes, use it.
                 Optional<StoragePool> storagePool = getPreferredStoragePool(poolList, vm);
+                if (s_logger.isTraceEnabled()) {
+                    s_logger.trace(String.format("we have a preferred pool: %b", storagePool.isPresent()));
+                }
                 return (storagePool.isPresent()) ? (StoragePool) this.dataStoreMgr.getDataStore(storagePool.get().getId(), DataStoreRole.Primary) : (StoragePool)dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
@@ -366,7 +364,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
return null;
}
-    public List<StoragePool> findStoragePoolsForVolumeWithNewDiskOffering(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set<StoragePool> avoid) {
+ @Nullable
+ private Long retrievePod(Pod pod, Long clusterId) {
Long podId = null;
if (pod != null) {
podId = pod.getId();
@@ -376,6 +375,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
podId = cluster.getPodId();
}
}
+ return podId;
+ }
+
+    public List<StoragePool> findStoragePoolsForVolumeWithNewDiskOffering(DiskProfile dskCh, DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, final Set<StoragePool> avoid) {
+ Long podId = retrievePod(pod, clusterId);
VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
List<StoragePool> suitablePools = new ArrayList<>();
@@ -398,15 +402,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
@Override
     public StoragePool findChildDataStoreInDataStoreCluster(DataCenter dc, Pod pod, Long clusterId, Long hostId, VirtualMachine vm, Long datastoreClusterId) {
- Long podId = null;
- if (pod != null) {
- podId = pod.getId();
- } else if (clusterId != null) {
- Cluster cluster = _entityMgr.findById(Cluster.class, clusterId);
- if (cluster != null) {
- podId = cluster.getPodId();
- }
- }
+ Long podId = retrievePod(pod, clusterId);
         List<StoragePoolVO> childDatastores = _storagePoolDao.listChildStoragePoolsInDatastoreCluster(datastoreClusterId);
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
@@ -1011,12 +1007,18 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException {
         VirtualMachineTemplate rootDiskTmplt = _entityMgr.findById(VirtualMachineTemplate.class, vm.getTemplateId());
         DataCenter dcVO = _entityMgr.findById(DataCenter.class, vm.getDataCenterId());
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace(String.format("storage-pool %s/%s is associated with pod %d",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId()));
+        }
         Long podId = storagePool.getPodId() != null ? storagePool.getPodId() : vm.getPodIdToDeployIn();
         Pod pod = _entityMgr.findById(Pod.class, podId);
         ServiceOffering svo = _entityMgr.findById(ServiceOffering.class, vm.getServiceOfferingId());
         DiskOffering diskVO = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
         Long clusterId = storagePool.getClusterId();
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace(String.format("storage-pool %s/%s is associated with cluster %d",storagePool.getName(), storagePool.getUuid(), clusterId));
+        }
VolumeInfo vol = null;
if (volume.getState() == Volume.State.Allocated) {
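
The three duplicated pod-lookup blocks above collapse into one extracted helper, retrievePod(pod, clusterId), annotated @Nullable from org.jetbrains.annotations (import added in this diff; the annotation is advisory for IDEs and static analysis only). A hedged, self-contained sketch of that helper's contract, with stand-in types rather than CloudStack's entities:

    import org.jetbrains.annotations.Nullable;

    public class PodLookupSketch {
        // hedged stand-ins for CloudStack's Pod and Cluster entities
        static class Pod { final long id; Pod(long id) { this.id = id; } }
        static class Cluster { final Long podId; Cluster(Long podId) { this.podId = podId; } }

        // mirrors the extracted retrievePod(...): pod wins, else the cluster's pod, else null
        @Nullable
        static Long retrievePod(Pod pod, Cluster cluster) {
            if (pod != null) {
                return pod.id;
            }
            if (cluster != null) {
                return cluster.podId;
            }
            return null;
        }

        public static void main(String[] args) {
            System.out.println(retrievePod(null, new Cluster(42L))); // 42
            System.out.println(retrievePod(null, null));             // null
        }
    }
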
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
index 41a8325558..dbe612b32c 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
@@ -17,6 +17,7 @@
package org.apache.cloudstack.storage.allocator;
import java.math.BigDecimal;
+import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -31,6 +32,7 @@ import com.cloud.storage.StoragePoolStatus;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import com.cloud.utils.Pair;
@@ -74,6 +76,11 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
@Inject private StorageUtil storageUtil;
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+ /**
+ * make sure shuffled lists of Pools are really shuffled
+ */
+ private SecureRandom secureRandom = new SecureRandom();
+
@Override
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
@@ -169,25 +176,26 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
@Override
     public List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh) {
+ if (s_logger.isTraceEnabled()) {
+ s_logger.trace("reordering pools");
+ }
if (pools == null) {
return null;
}
+ if (s_logger.isTraceEnabled()) {
+ s_logger.trace(String.format("reordering %d pools", pools.size()));
+ }
Account account = null;
if (vmProfile.getVirtualMachine() != null) {
account = vmProfile.getOwner();
}
-        if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
- // Shuffle this so that we don't check the pools in the same order.
- Collections.shuffle(pools);
- } else if (allocationAlgorithm.equals("userdispersing")) {
- pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
- } else if(allocationAlgorithm.equals("firstfitleastconsumed")){
- pools = reorderPoolsByCapacity(plan, pools);
- }
+ pools = reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
if (vmProfile.getVirtualMachine() == null) {
-            s_logger.trace("The VM is null, skipping pools reordering by disk provisioning type.");
+            if (s_logger.isTraceEnabled()) {
+                s_logger.trace("The VM is null, skipping pools reordering by disk provisioning type.");
+ }
return pools;
}
@@ -199,6 +207,33 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
return pools;
}
+    List<StoragePool> reorderStoragePoolsBasedOnAlgorithm(List<StoragePool> pools, DeploymentPlan plan, Account account) {
+        if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
+            reorderRandomPools(pools);
+        } else if (StringUtils.equalsAny(allocationAlgorithm, "userdispersing", "firstfitleastconsumed")) {
+            if (s_logger.isTraceEnabled()) {
+                s_logger.trace(String.format("Using reordering algorithm [%s]", allocationAlgorithm));
+            }
+
+            if (allocationAlgorithm.equals("userdispersing")) {
+                pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
+            } else {
+                pools = reorderPoolsByCapacity(plan, pools);
+            }
+        }
+        return pools;
+    }
+
+    void reorderRandomPools(List<StoragePool> pools) {
+        StorageUtil.traceLogStoragePools(pools, s_logger, "pools to choose from: ");
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace(String.format("Shuffle this so that we don't check the pools in the same order. Algorithm == '%s' (or no account?)", allocationAlgorithm));
+        }
+        StorageUtil.traceLogStoragePools(pools, s_logger, "pools to shuffle: ");
+        Collections.shuffle(pools, secureRandom);
+        StorageUtil.traceLogStoragePools(pools, s_logger, "shuffled list of pools to choose from: ");
+    }
+
     private List<StoragePool> reorderPoolsByDiskProvisioningType(List<StoragePool> pools, DiskProfile diskProfile) {
         if (diskProfile != null && diskProfile.getProvisioningType() != null && !diskProfile.getProvisioningType().equals(Storage.ProvisioningType.THIN)) {
List<StoragePool> reorderedPools = new ArrayList<>();
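
A hedged, self-contained sketch of what the SecureRandom-backed shuffle buys (it mirrors the reorderRandomPools unit test added in the next file): repeated reorderings should put different pools first, not the same one every time.

    import java.security.SecureRandom;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class ShuffleDistributionCheck {
        public static void main(String[] args) {
            SecureRandom rng = new SecureRandom();
            List<String> pools = new ArrayList<>(
                    List.of("pool-0", "pool-1", "pool-2", "pool-3", "pool-4"));
            Set<String> firstChoices = new HashSet<>();
            for (int i = 0; i <= 20; ++i) {   // same loop bound as the unit test
                Collections.shuffle(pools, rng);
                firstChoices.add(pools.get(0));
            }
            // a healthy shuffle should surface more than 2 distinct heads
            System.out.println("distinct first picks: " + firstChoices.size());
        }
    }
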
diff --git a/engine/storage/src/test/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocatorTest.java b/engine/storage/src/test/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocatorTest.java
new file mode 100644
index 0000000000..58b6fc99a5
--- /dev/null
+++ b/engine/storage/src/test/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocatorTest.java
@@ -0,0 +1,113 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StoragePool;
+import com.cloud.user.Account;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachineProfile;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+@RunWith(MockitoJUnitRunner.class)
+public class AbstractStoragePoolAllocatorTest {
+
+    AbstractStoragePoolAllocator allocator = Mockito.spy(MockStorapoolAllocater.class);
+
+ @Mock
+ DeploymentPlan plan;
+
+ @Mock
+ Account account;
+ private List<StoragePool> pools;
+
+ @Before
+ public void setUp() {
+ pools = new ArrayList<>();
+ for (int i = 0 ; i < 10 ; ++i) {
+            pools.add(new StoragePoolVO(i, "pool-"+i, "pool-"+i+"-uuid", Storage.StoragePoolType.NetworkFilesystem,
+ 1, 1l, 10000000000l, 10000000000l, "10.10.10.10",
+ 1000, "/spool/share-" + i));
+ }
+ }
+
+ @After
+ public void tearDown() {
+ }
+
+ @Test
+ public void reorderStoragePoolsBasedOnAlgorithm_random() {
+ allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
+        Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByCapacity(plan, pools);
+        Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByNumberOfVolumes(plan, pools, account);
+ Mockito.verify(allocator, Mockito.times(1)).reorderRandomPools(pools);
+ }
+
+ @Test
+ public void reorderStoragePoolsBasedOnAlgorithm_userdispersing() {
+ allocator.allocationAlgorithm = "userdispersing";
+        Mockito.doReturn(pools).when(allocator).reorderPoolsByNumberOfVolumes(plan, pools, account);
+        allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
+        Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByCapacity(plan, pools);
+        Mockito.verify(allocator, Mockito.times(1)).reorderPoolsByNumberOfVolumes(plan, pools, account);
+ Mockito.verify(allocator, Mockito.times(0)).reorderRandomPools(pools);
+ }
+
+ @Test
+ public void reorderStoragePoolsBasedOnAlgorithm_firstfitleastconsumed() {
+ allocator.allocationAlgorithm = "firstfitleastconsumed";
+        Mockito.doReturn(pools).when(allocator).reorderPoolsByCapacity(plan, pools);
+        allocator.reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);
+        Mockito.verify(allocator, Mockito.times(1)).reorderPoolsByCapacity(plan, pools);
+        Mockito.verify(allocator, Mockito.times(0)).reorderPoolsByNumberOfVolumes(plan, pools, account);
+ Mockito.verify(allocator, Mockito.times(0)).reorderRandomPools(pools);
+ }
+
+ @Test
+ public void reorderRandomPools() {
+ Set<Long> firstchoice = new HashSet<>();
+ for (int i = 0; i <= 20; ++i) {
+ allocator.reorderRandomPools(pools);
+ firstchoice.add(pools.get(0).getId());
+ }
+ Assert.assertTrue(firstchoice.size() > 2);
+ }
+}
+
+class MockStorapoolAllocater extends AbstractStoragePoolAllocator {
+
+ @Override
+    protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, DeploymentPlanner.ExcludeList avoid, int returnUpTo, boolean bypassStorageTypeCheck) {
+ return null;
+ }
+}
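
The test above relies on Mockito.spy(Class), which can instantiate an abstract class so that real method implementations run unless explicitly stubbed with doReturn(...).when(...). A hedged, self-contained sketch of that technique (illustrative names, not CloudStack's):

    import org.mockito.Mockito;

    public class AbstractSpySketch {
        abstract static class Greeter {
            abstract String name();
            String greet() { return "hello " + name(); }
        }

        public static void main(String[] args) {
            // spy(Class) instantiates the abstract class; real methods run unless stubbed
            Greeter greeter = Mockito.spy(Greeter.class);
            Mockito.doReturn("pool").when(greeter).name();
            System.out.println(greeter.greet()); // "hello pool" via the real greet()
        }
    }
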
diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
index 7e666c5e14..a14cb373d1 100644
--- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
@@ -92,6 +92,7 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
import org.apache.cloudstack.utils.imagestore.ImageStoreUtil;
+import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
@@ -99,6 +100,8 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.log4j.Logger;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@@ -339,6 +342,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
private static final long GiB_TO_BYTES = 1024 * 1024 * 1024;
     private static final String CUSTOM_DISK_OFFERING_UNIQUE_NAME = "Cloud.com-Custom";
+    private static final List<Volume.State> validAttachStates = Arrays.asList(Volume.State.Allocated, Volume.State.Ready, Volume.State.Uploaded);
protected VolumeApiServiceImpl() {
_volStateMachine = Volume.State.getStateMachine();
@@ -2082,6 +2086,16 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
}
}
+        if (s_logger.isTraceEnabled()) {
+            String msg = "attaching volume %s/%s to a VM (%s/%s) with an existing volume %s/%s on primary storage %s";
+            if (existingVolumeOfVm != null) {
+                s_logger.trace(String.format(msg,
+                        volumeToAttach.getName(), volumeToAttach.getUuid(),
+                        vm.getName(), vm.getUuid(),
+                        existingVolumeOfVm.getName(), existingVolumeOfVm.getUuid(),
+                        existingVolumeOfVm.getPoolId()));
+            }
+        }
HypervisorType rootDiskHyperType = vm.getHypervisorType();
         HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
@@ -2092,6 +2106,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         StoragePoolVO destPrimaryStorage = null;
         if (existingVolumeOfVm != null && !existingVolumeOfVm.getState().equals(Volume.State.Allocated)) {
             destPrimaryStorage = _storagePoolDao.findById(existingVolumeOfVm.getPoolId());
+            if (s_logger.isTraceEnabled() && destPrimaryStorage != null) {
+                s_logger.trace(String.format("decided on target storage: %s/%s", destPrimaryStorage.getName(), destPrimaryStorage.getUuid()));
+            }
         }
         boolean volumeOnSecondary = volumeToAttach.getState() == Volume.State.Uploaded;
@@ -2111,6 +2128,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             // reload the volume from db
             newVolumeOnPrimaryStorage = volFactory.getVolume(newVolumeOnPrimaryStorage.getId());
             boolean moveVolumeNeeded = needMoveVolume(existingVolumeOfVm, newVolumeOnPrimaryStorage);
+            if (s_logger.isTraceEnabled()) {
+                s_logger.trace(String.format("is this a new volume: %s == %s ?", volumeToAttach, newVolumeOnPrimaryStorage));
+                s_logger.trace(String.format("is it needed to move the volume: %b?", moveVolumeNeeded));
+            }
             if (moveVolumeNeeded) {
                 PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)newVolumeOnPrimaryStorage.getDataStore();
@@ -2146,97 +2167,108 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId) {
Account caller = CallContext.current().getCallingAccount();
- // Check that the volume ID is valid
- VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
- // Check that the volume is a data volume
-        if (volumeToAttach == null || !(volumeToAttach.getVolumeType() == Volume.Type.DATADISK || volumeToAttach.getVolumeType() == Volume.Type.ROOT)) {
-            throw new InvalidParameterValueException("Please specify a volume with the valid type: " + Volume.Type.ROOT.toString() + " or " + Volume.Type.DATADISK.toString());
- }
+ VolumeInfo volumeToAttach = getAndCheckVolumeInfo(volumeId);
- // Check that the volume is not currently attached to any VM
- if (volumeToAttach.getInstanceId() != null) {
-            throw new InvalidParameterValueException("Please specify a volume that is not attached to any VM.");
- }
+ UserVmVO vm = getAndCheckUserVmVO(vmId, volumeToAttach);
- // Check that the volume is not destroyed
- if (volumeToAttach.getState() == Volume.State.Destroy) {
-            throw new InvalidParameterValueException("Please specify a volume that is not destroyed.");
- }
+ checkDeviceId(deviceId, volumeToAttach, vm);
- // Check that the virtual machine ID is valid and it's a user vm
- UserVmVO vm = _userVmDao.findById(vmId);
- if (vm == null || vm.getType() != VirtualMachine.Type.User) {
-            throw new InvalidParameterValueException("Please specify a valid User VM.");
- }
+ checkNumberOfAttachedVolumes(deviceId, vm);
- // Check that the VM is in the correct state
- if (vm.getState() != State.Running && vm.getState() != State.Stopped) {
-            throw new InvalidParameterValueException("Please specify a VM that is either running or stopped.");
+ excludeLocalStorageIfNeeded(volumeToAttach);
+
+ checkForDevicesInCopies(vmId, vm);
+
+ checkRightsToAttach(caller, volumeToAttach, vm);
+
+ HypervisorType rootDiskHyperType = vm.getHypervisorType();
+        HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
+
+        StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
+        if (s_logger.isTraceEnabled() && volumeToAttachStoragePool != null) {
+            s_logger.trace(String.format("volume to attach (%s/%s) has a primary storage assigned to begin with (%s/%s)",
+                    volumeToAttach.getName(), volumeToAttach.getUuid(), volumeToAttachStoragePool.getName(), volumeToAttachStoragePool.getUuid()));
}
- // Check that the VM and the volume are in the same zone
- if (vm.getDataCenterId() != volumeToAttach.getDataCenterId()) {
-            throw new InvalidParameterValueException("Please specify a VM that is in the same zone as the volume.");
+        checkForMatchingHypervisorTypesIf(volumeToAttachStoragePool != null && !volumeToAttachStoragePool.isManaged(), rootDiskHyperType, volumeToAttachHyperType);
+
+        AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
+
+ AsyncJob job = asyncExecutionContext.getJob();
+
+ if (s_logger.isInfoEnabled()) {
+            s_logger.info(String.format("Trying to attach volume [%s/%s] to VM instance [%s/%s], update async job-%s progress status",
+ volumeToAttach.getName(),
+ volumeToAttach.getUuid(),
+ vm.getName(),
+ vm.getUuid(),
+ job.getId()));
}
- // Check that the device ID is valid
- if (deviceId != null) {
- // validate ROOT volume type
- if (deviceId.longValue() == 0) {
-                validateRootVolumeDetachAttach(_volsDao.findById(volumeToAttach.getId()), vm);
-                // vm shouldn't have any volume with deviceId 0
-                if (!_volsDao.findByInstanceAndDeviceId(vm.getId(), 0).isEmpty()) {
-                    throw new InvalidParameterValueException("Vm already has root volume attached to it");
- }
- // volume can't be in Uploaded state
- if (volumeToAttach.getState() == Volume.State.Uploaded) {
-                    throw new InvalidParameterValueException("No support for Root volume attach in state " + Volume.State.Uploaded);
- }
- }
+ _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
+
+        if (asyncExecutionContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
+ return safelyOrchestrateAttachVolume(vmId, volumeId, deviceId);
+ } else {
+ return getVolumeAttachJobResult(vmId, volumeId, deviceId);
}
+ }
- // Check that the number of data volumes attached to VM is less than
- // that supported by hypervisor
- if (deviceId == null || deviceId.longValue() != 0) {
-            List<VolumeVO> existingDataVolumes = _volsDao.findByInstanceAndType(vmId, Volume.Type.DATADISK);
-            int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm);
-            if (existingDataVolumes.size() >= maxAttachableDataVolumesSupported) {
-                throw new InvalidParameterValueException(
-                        "The specified VM already has the maximum number of data disks (" + maxAttachableDataVolumesSupported + ") attached. Please specify another VM.");
- }
+    @Nullable private Volume getVolumeAttachJobResult(Long vmId, Long volumeId, Long deviceId) {
+        Outcome<Volume> outcome = attachVolumeToVmThroughJobQueue(vmId, volumeId, deviceId);
+
+ Volume vol = null;
+ try {
+ outcome.get();
+ } catch (InterruptedException | ExecutionException e) {
+            throw new CloudRuntimeException(String.format("Could not get attach volume job result for VM [%s], volume[%s] and device [%s], due to [%s].", vmId, volumeId, deviceId, e.getMessage()), e);
}
- // If local storage is disabled then attaching a volume with local disk
- // offering not allowed
-        DataCenterVO dataCenter = _dcDao.findById(volumeToAttach.getDataCenterId());
-        if (!dataCenter.isLocalStorageEnabled()) {
-            DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId());
-            if (diskOffering.isUseLocalStorage()) {
-                throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it");
+ Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
+ if (jobResult != null) {
+ if (jobResult instanceof ConcurrentOperationException) {
+ throw (ConcurrentOperationException)jobResult;
+ } else if (jobResult instanceof InvalidParameterValueException) {
+ throw (InvalidParameterValueException)jobResult;
+ } else if (jobResult instanceof RuntimeException) {
+ throw (RuntimeException)jobResult;
+ } else if (jobResult instanceof Throwable) {
+                throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
+ } else if (jobResult instanceof Long) {
+ vol = _volsDao.findById((Long)jobResult);
}
}
+ return vol;
+ }
- // if target VM has associated VM snapshots
- List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vmId);
- if (vmSnapshots.size() > 0) {
-            throw new InvalidParameterValueException("Unable to attach volume, please specify a VM that does not have VM snapshots");
+    private Volume safelyOrchestrateAttachVolume(Long vmId, Long volumeId, Long deviceId) {
+ // avoid re-entrance
+
+ VmWorkJobVO placeHolder = null;
+ placeHolder = createPlaceHolderWork(vmId);
+ try {
+ return orchestrateAttachVolumeToVM(vmId, volumeId, deviceId);
+ } finally {
+ _workJobDao.expunge(placeHolder.getId());
}
+ }
- // if target VM has backups
-        if (vm.getBackupOfferingId() != null || vm.getBackupVolumeList().size() > 0) {
-            throw new InvalidParameterValueException("Unable to attach volume, please specify a VM that does not have any backups");
+    /**
+     * managed storage can be used for different types of hypervisors
+     * only perform this check if the volume's storage pool is not null and not managed
+     */
+    private void checkForMatchingHypervisorTypesIf(boolean checkNeeded, HypervisorType rootDiskHyperType, HypervisorType volumeToAttachHyperType) {
+        if (checkNeeded && volumeToAttachHyperType != HypervisorType.None && rootDiskHyperType != volumeToAttachHyperType) {
+            throw new InvalidParameterValueException("Can't attach a volume created by: " + volumeToAttachHyperType + " to a " + rootDiskHyperType + " vm");
}
+ }
- // permission check
+    private void checkRightsToAttach(Account caller, VolumeInfo volumeToAttach, UserVmVO vm) {
         _accountMgr.checkAccess(caller, null, true, volumeToAttach, vm);
-        if (!(Volume.State.Allocated.equals(volumeToAttach.getState()) || Volume.State.Ready.equals(volumeToAttach.getState()) || Volume.State.Uploaded.equals(volumeToAttach.getState()))) {
-            throw new InvalidParameterValueException("Volume state must be in Allocated, Ready or in Uploaded state");
-        }
-
         Account owner = _accountDao.findById(volumeToAttach.getAccountId());
-        if (!(volumeToAttach.getState() == Volume.State.Allocated || volumeToAttach.getState() == Volume.State.Ready)) {
+        if (!Arrays.asList(Volume.State.Allocated, Volume.State.Ready).contains(volumeToAttach.getState())) {
             try {
                 _resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, volumeToAttach.getSize());
             } catch (ResourceAllocationException e) {
@@ -2244,72 +2276,119 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
throw new InvalidParameterValueException(e.getMessage());
}
}
+ }
- HypervisorType rootDiskHyperType = vm.getHypervisorType();
-        HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
+ private void checkForDevicesInCopies(Long vmId, UserVmVO vm) {
+ // if target VM has associated VM snapshots
+ List<VMSnapshotVO> vmSnapshots = _vmSnapshotDao.findByVm(vmId);
+ if (vmSnapshots.size() > 0) {
+            throw new InvalidParameterValueException(String.format("Unable to attach volume to VM %s/%s, please specify a VM that does not have VM snapshots", vm.getName(), vm.getUuid()));
+ }
-        StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
+        // if target VM has backups
+        if (vm.getBackupOfferingId() != null || vm.getBackupVolumeList().size() > 0) {
+            throw new InvalidParameterValueException(String.format("Unable to attach volume to VM %s/%s, please specify a VM that does not have any backups", vm.getName(), vm.getUuid()));
+ }
+ }
- // managed storage can be used for different types of hypervisors
-        // only perform this check if the volume's storage pool is not null and not managed
-        if (volumeToAttachStoragePool != null && !volumeToAttachStoragePool.isManaged()) {
-            if (volumeToAttachHyperType != HypervisorType.None && rootDiskHyperType != volumeToAttachHyperType) {
-                throw new InvalidParameterValueException("Can't attach a volume created by: " + volumeToAttachHyperType + " to a " + rootDiskHyperType + " vm");
+ /**
+     * If local storage is disabled then attaching a volume with a local disk offering is not allowed
+     */
+    private void excludeLocalStorageIfNeeded(VolumeInfo volumeToAttach) {
+        DataCenterVO dataCenter = _dcDao.findById(volumeToAttach.getDataCenterId());
+        if (!dataCenter.isLocalStorageEnabled()) {
+            DiskOfferingVO diskOffering = _diskOfferingDao.findById(volumeToAttach.getDiskOfferingId());
+            if (diskOffering.isUseLocalStorage()) {
+                throw new InvalidParameterValueException("Zone is not configured to use local storage but volume's disk offering " + diskOffering.getName() + " uses it");
}
}
+ }
-        AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext();
-
- if (asyncExecutionContext != null) {
- AsyncJob job = asyncExecutionContext.getJob();
+    /**
+     * Check that the number of data volumes attached to the VM is less than the number supported by the hypervisor
+ */
+ private void checkNumberOfAttachedVolumes(Long deviceId, UserVmVO vm) {
+ if (deviceId == null || deviceId.longValue() != 0) {
+            List<VolumeVO> existingDataVolumes = _volsDao.findByInstanceAndType(vm.getId(), Volume.Type.DATADISK);
+            int maxAttachableDataVolumesSupported = getMaxDataVolumesSupported(vm);
+            if (existingDataVolumes.size() >= maxAttachableDataVolumesSupported) {
+                throw new InvalidParameterValueException(
+                        "The specified VM already has the maximum number of data disks (" + maxAttachableDataVolumesSupported + ") attached. Please specify another VM.");
+ }
+ }
+ }
- if (s_logger.isInfoEnabled()) {
-            s_logger.info("Trying to attaching volume " + volumeId + " to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status");
+ /**
+ * validate ROOT volume type;
+ * 1. vm shouldn't have any volume with deviceId 0
+ * 2. volume can't be in Uploaded state
+ *
+ * @param deviceId requested device number to attach as
+ * @param volumeToAttach
+ * @param vm
+ */
+    private void checkDeviceId(Long deviceId, VolumeInfo volumeToAttach, UserVmVO vm) {
+        if (deviceId != null && deviceId.longValue() == 0) {
+            validateRootVolumeDetachAttach(_volsDao.findById(volumeToAttach.getId()), vm);
+            if (!_volsDao.findByInstanceAndDeviceId(vm.getId(), 0).isEmpty()) {
+                throw new InvalidParameterValueException("Vm already has root volume attached to it");
}
+ if (volumeToAttach.getState() == Volume.State.Uploaded) {
+                throw new InvalidParameterValueException("No support for Root volume attach in state " + Volume.State.Uploaded);
+ }
+ }
+ }
- _jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
+ /**
+ * Check that the virtual machine ID is valid and it's a user vm
+ *
+     * @return the user vm vo object corresponding to the vmId to attach to
+ */
+    @NotNull private UserVmVO getAndCheckUserVmVO(Long vmId, VolumeInfo volumeToAttach) {
+        UserVmVO vm = _userVmDao.findById(vmId);
+        if (vm == null || vm.getType() != VirtualMachine.Type.User) {
+            throw new InvalidParameterValueException("Please specify a valid User VM.");
}
-        AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
-        if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
- // avoid re-entrance
+ // Check that the VM is in the correct state
+ if (vm.getState() != State.Running && vm.getState() != State.Stopped) {
+            throw new InvalidParameterValueException("Please specify a VM that is either running or stopped.");
+ }
- VmWorkJobVO placeHolder = null;
- placeHolder = createPlaceHolderWork(vmId);
- try {
- return orchestrateAttachVolumeToVM(vmId, volumeId, deviceId);
- } finally {
- _workJobDao.expunge(placeHolder.getId());
- }
+ // Check that the VM and the volume are in the same zone
+ if (vm.getDataCenterId() != volumeToAttach.getDataCenterId()) {
+            throw new InvalidParameterValueException("Please specify a VM that is in the same zone as the volume.");
+ }
+ return vm;
+ }
- } else {
-            Outcome<Volume> outcome = attachVolumeToVmThroughJobQueue(vmId, volumeId, deviceId);
+ /**
+ * Check that the volume ID is valid
+ * Check that the volume is a data volume
+ * Check that the volume is not currently attached to any VM
+ * Check that the volume is not destroyed
+ *
+ * @param volumeId the id of the volume to attach
+ * @return the volume info object representing the volume to attach
+ */
+ @NotNull private VolumeInfo getAndCheckVolumeInfo(Long volumeId) {
+ VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
+        if (volumeToAttach == null || !(volumeToAttach.getVolumeType() == Volume.Type.DATADISK || volumeToAttach.getVolumeType() == Volume.Type.ROOT)) {
+            throw new InvalidParameterValueException("Please specify a volume with the valid type: " + Volume.Type.ROOT.toString() + " or " + Volume.Type.DATADISK.toString());
+ }
- Volume vol = null;
- try {
- outcome.get();
- } catch (InterruptedException e) {
- throw new RuntimeException("Operation is interrupted", e);
- } catch (java.util.concurrent.ExecutionException e) {
- throw new RuntimeException("Execution excetion", e);
- }
+ if (volumeToAttach.getInstanceId() != null) {
+            throw new InvalidParameterValueException("Please specify a volume that is not attached to any VM.");
+ }
-            Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
- if (jobResult != null) {
- if (jobResult instanceof ConcurrentOperationException) {
- throw (ConcurrentOperationException)jobResult;
-                } else if (jobResult instanceof InvalidParameterValueException) {
- throw (InvalidParameterValueException)jobResult;
- } else if (jobResult instanceof RuntimeException) {
- throw (RuntimeException)jobResult;
- } else if (jobResult instanceof Throwable) {
-                    throw new RuntimeException("Unexpected exception", (Throwable)jobResult);
- } else if (jobResult instanceof Long) {
- vol = _volsDao.findById((Long)jobResult);
- }
- }
- return vol;
+ if (volumeToAttach.getState() == Volume.State.Destroy) {
+            throw new InvalidParameterValueException("Please specify a volume that is not destroyed.");
}
+
+ if (!validAttachStates.contains(volumeToAttach.getState())) {
+            throw new InvalidParameterValueException("Volume state must be in Allocated, Ready or in Uploaded state");
+ }
+ return volumeToAttach;
}
@Override
@@ -2497,7 +2576,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
AsyncJob job = asyncExecutionContext.getJob();
if (s_logger.isInfoEnabled()) {
-                s_logger.info("Trying to attaching volume " + volumeId + "to vm instance:" + vm.getId() + ", update async job-" + job.getId() + " progress status");
+                s_logger.info(String.format("Trying to attach volume %s to VM instance %s, update async job-%s progress status",
+                        ReflectionToStringBuilderUtils.reflectOnlySelectedFields(volume, "name", "uuid"),
+                        ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "name", "uuid"),
+                        job.getId()));
}
_jobMgr.updateAsyncJobAttachment(job.getId(), "Volume", volumeId);
@@ -3757,6 +3839,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
boolean sendCommand = vm.getState() == State.Running;
AttachAnswer answer = null;
         StoragePoolVO volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
+ if (s_logger.isTraceEnabled() && volumeToAttachStoragePool != null) {
+            s_logger.trace(String.format("storage is gotten from volume to attach: %s/%s",volumeToAttachStoragePool.getName(),volumeToAttachStoragePool.getUuid()));
+ }
HostVO host = getHostForVmVolumeAttach(vm, volumeToAttachStoragePool);
Long hostId = host == null ? null : host.getId();
         if (host != null && host.getHypervisorType() == HypervisorType.VMware) {
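
The attachVolumeToVM refactor above turns one long inline validation block into small named checks that each either pass or throw. A hedged, self-contained sketch of that pattern (illustrative names only, not CloudStack's API):

    public class ValidationChainSketch {
        static class InvalidParameterValueException extends RuntimeException {
            InvalidParameterValueException(String msg) { super(msg); }
        }

        static class Volume {
            Long instanceId;   // null when not attached
            String state = "Ready";
        }

        // each named check either passes or throws, like getAndCheckVolumeInfo(...)
        static Volume getAndCheckVolume(Volume volume) {
            if (volume == null) {
                throw new InvalidParameterValueException("Please specify a valid volume.");
            }
            if (volume.instanceId != null) {
                throw new InvalidParameterValueException("Volume is already attached to a VM.");
            }
            return volume;
        }

        public static void main(String[] args) {
            Volume volume = getAndCheckVolume(new Volume());
            // further checks would chain here: checkDeviceId(...), checkRightsToAttach(...)
            System.out.println("validation passed, state=" + volume.state);
        }
    }
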
diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java
index 4320e5f6a8..5b9875bc61 100644
--- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java
+++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java
@@ -276,8 +276,6 @@ public class VolumeApiServiceImplTest {
// managed root volume
VolumeInfo managedVolume = Mockito.mock(VolumeInfo.class);
- when(managedVolume.getId()).thenReturn(7L);
- when(managedVolume.getDataCenterId()).thenReturn(1L);
when(managedVolume.getVolumeType()).thenReturn(Volume.Type.ROOT);
when(managedVolume.getInstanceId()).thenReturn(null);
lenient().when(managedVolume.getPoolId()).thenReturn(2L);
@@ -286,7 +284,6 @@ public class VolumeApiServiceImplTest {
         VolumeVO managedVolume1 = new VolumeVO("root", 1L, 1L, 1L, 1L, 2L, "root", "root", Storage.ProvisioningType.THIN, 1, null, null, "root", Volume.Type.ROOT);
managedVolume1.setPoolId(2L);
managedVolume1.setDataCenterId(1L);
- when(volumeDaoMock.findById(7L)).thenReturn(managedVolume1);
// vm having root volume
         UserVmVO vmHavingRootVolume = new UserVmVO(4L, "vm", "vm", 1, HypervisorType.XenServer, 1L, false, false, 1L, 1L, 1, 1L, null, "vm");
diff --git a/test/integration/component/test_simultaneous_volume_attach.py b/test/integration/smoke/test_attach_multiple_volumes.py
similarity index 61%
rename from test/integration/component/test_simultaneous_volume_attach.py
rename to test/integration/smoke/test_attach_multiple_volumes.py
index aee5cf6002..d9b4b0aaf5 100644
--- a/test/integration/component/test_simultaneous_volume_attach.py
+++ b/test/integration/smoke/test_attach_multiple_volumes.py
@@ -18,11 +18,11 @@
#Import Local Modules
from marvin.cloudstackAPI import *
from marvin.cloudstackTestCase import cloudstackTestCase
-import unittest
-from marvin.lib.utils import (cleanup_resources,
- validateList)
+from marvin.lib.utils import (validateList)
from marvin.lib.base import (ServiceOffering,
VirtualMachine,
+ Host,
+ StoragePool,
Account,
Volume,
DiskOffering,
@@ -67,6 +67,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
cls.apiclient,
cls.services["disk_offering"]
)
+ cls._cleanup.append(cls.disk_offering)
template = get_template(
cls.apiclient,
@@ -87,10 +88,14 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
cls.services["account"],
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
+
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
+
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services,
@@ -99,6 +104,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
serviceofferingid=cls.service_offering.id,
mode=cls.services["mode"]
)
+ cls._cleanup.append(cls.virtual_machine)
#Create volumes (data disks)
cls.volume1 = Volume.create(
@@ -107,6 +113,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
account=cls.account.name,
domainid=cls.account.domainid
)
+ cls._cleanup.append(cls.volume1)
cls.volume2 = Volume.create(
cls.apiclient,
@@ -114,6 +121,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
account=cls.account.name,
domainid=cls.account.domainid
)
+ cls._cleanup.append(cls.volume2)
cls.volume3 = Volume.create(
cls.apiclient,
@@ -121,6 +129,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
account=cls.account.name,
domainid=cls.account.domainid
)
+ cls._cleanup.append(cls.volume3)
cls.volume4 = Volume.create(
cls.apiclient,
@@ -128,18 +137,27 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
account=cls.account.name,
domainid=cls.account.domainid
)
- cls._cleanup = [
- cls.service_offering,
- cls.disk_offering,
- cls.account
- ]
+ cls._cleanup.append(cls.volume4)
+
+ cls.volume5 = Volume.create(
+ cls.apiclient,
+ cls.services,
+ account=cls.account.name,
+ domainid=cls.account.domainid
+ )
+ cls._cleanup.append(cls.volume5)
+
+ cls.volume6 = Volume.create(
+ cls.apiclient,
+ cls.services,
+ account=cls.account.name,
+ domainid=cls.account.domainid
+ )
+ cls._cleanup.append(cls.volume6)
@classmethod
def tearDownClass(cls):
- try:
- cleanup_resources(cls.apiclient, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ super(TestMultipleVolumeAttach, cls).tearDownClass()
def setUp(self):
self.apiClient = self.testClient.getApiClient()
@@ -151,20 +169,54 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
available")
def tearDown(self):
- cleanup_resources(self.apiClient, self.cleanup)
- return
+
+ self.detach_volume(self.volume1)
+ self.detach_volume(self.volume2)
+ self.detach_volume(self.volume3)
+ self.detach_volume(self.volume4)
+ self.detach_volume(self.volume5)
+ self.detach_volume(self.volume6)
+
+        super(TestMultipleVolumeAttach, self).tearDown()
     # Method to attach volume but will return immediately as an asynchronous task does.
- def attach_volume(self, apiclient,virtualmachineid, volume):
+ def attach_volume(self, virtualmachineid, volume):
"""Attach volume to instance"""
cmd = attachVolume.attachVolumeCmd()
cmd.isAsync = "false"
cmd.id = volume.id
cmd.virtualmachineid = virtualmachineid
- return apiclient.attachVolume(cmd)
+ return self.apiClient.attachVolume(cmd)
+
+ # Method to detach a volume.
+ def detach_volume(self, volume):
+ """Detach volume from instance"""
+ cmd = detachVolume.detachVolumeCmd()
+ cmd.id = volume.id
+ try:
+ self.apiClient.detachVolume(cmd)
+ except Exception as e:
+ self.debug("======exception e %s " % e)
+ if "The specified volume is not attached to a VM." not in str(e):
+ raise e
+
+    # list primary storage pools
+ def check_storage_pools(self, virtualmachineid):
+ """
+ list storage pools available to the VM
+ """
+ vm = VirtualMachine.list(self.apiClient, id=virtualmachineid)[0]
+        hostid = vm.hostid
+ host = Host.list(self.apiClient, id=hostid)[0]
+ clusterid = host.clusterid
+ storage_pools = StoragePool.list(self.apiClient, clusterid=clusterid)
+ if len(storage_pools) < 2:
+            self.skipTest("at least two accessible primary storage pools needed for the vm to perform this test")
+ return storage_pools
+
# Method to check the volume attach async jobs' status
- def query_async_job(self, apiclient, jobid):
+ def query_async_job(self, jobid):
"""Query the status for Async Job"""
try:
asyncTimeout = 3600
@@ -173,7 +225,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
timeout = asyncTimeout
async_response = FAILED
while timeout > 0:
- async_response = apiclient.queryAsyncJobResult(cmd)
+ async_response = self.apiClient.queryAsyncJobResult(cmd)
if async_response != FAILED:
job_status = async_response.jobstatus
if job_status in [JOB_CANCELLED,
@@ -193,7 +245,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
str(e))
return FAILED
- @attr(tags = ["advanced", "advancedns", "basic"], required_hardware="true")
+    @attr(tags = ["advanced", "advancedns", "basic", "blob"], required_hardware="true")
def test_attach_multiple_volumes(self):
"""Attach multiple Volumes simultaneously to a Running VM
"""
@@ -205,33 +257,33 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
self.volume1.id,
self.virtual_machine.id
))
-        vol1_jobId = self.attach_volume(self.apiClient, self.virtual_machine.id,self.volume1)
+ vol1_jobId = self.attach_volume(self.virtual_machine.id,self.volume1)
self.debug(
"Attaching volume (ID: %s) to VM (ID: %s)" % (
self.volume2.id,
self.virtual_machine.id
))
-        vol2_jobId = self.attach_volume(self.apiClient,self.virtual_machine.id, self.volume2)
+ vol2_jobId = self.attach_volume(self.virtual_machine.id, self.volume2)
self.debug(
"Attaching volume (ID: %s) to VM (ID: %s)" % (
self.volume3.id,
self.virtual_machine.id
))
-        vol3_jobId = self.attach_volume(self.apiClient,self.virtual_machine.id, self.volume3)
+ vol3_jobId = self.attach_volume(self.virtual_machine.id, self.volume3)
self.debug(
"Attaching volume (ID: %s) to VM (ID: %s)" % (
self.volume4.id,
self.virtual_machine.id
))
-        vol4_jobId = self.attach_volume(self.apiClient,self.virtual_machine.id, self.volume4)
+ vol4_jobId = self.attach_volume(self.virtual_machine.id, self.volume4)
- self.query_async_job(self.apiClient,vol1_jobId.jobid)
- self.query_async_job(self.apiClient,vol2_jobId.jobid)
- self.query_async_job(self.apiClient,vol3_jobId.jobid)
- self.query_async_job(self.apiClient,vol4_jobId.jobid)
+ self.query_async_job(vol1_jobId.jobid)
+ self.query_async_job(vol2_jobId.jobid)
+ self.query_async_job(vol3_jobId.jobid)
+ self.query_async_job(vol4_jobId.jobid)
         # List all the volumes attached to the instance. Includes even the Root disk.
list_volume_response = Volume.list(
@@ -241,6 +293,7 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
account=self.account.name,
domainid=self.account.domainid
)
+        self.debug("====== test parallelisation : %s ===" % str(list_volume_response).replace("}, {", "},\n {"))
self.assertEqual(
validateList(list_volume_response)[0],
PASS,
@@ -254,3 +307,57 @@ class TestMultipleVolumeAttach(cloudstackTestCase):
)
return
+
+    @attr(tags = ["advanced", "advancedns", "basic", "bla"], required_hardware="true")
+ def test_attach_and_distribute_multiple_volumes(self):
+ """
+ Test volume distribution over storages
+
+ Given multiple primary storage pools
+ Attach multiple Volumes to a VM
+ check to see if these do not all end up on the same storage pool
+ """
+ storage_pools = self.check_storage_pools(self.virtual_machine.id)
+ storage_usage={}
+ for storage_pool in storage_pools:
+ storage_usage[storage_pool.name] = 0
+        self.debug("====== test pools for usage : %s ===" % str(storage_usage).replace("}, {", "},\n {"))
+
+ vol1_jobId = self.attach_volume(self.virtual_machine.id,self.volume1)
+ vol2_jobId = self.attach_volume(self.virtual_machine.id,self.volume2)
+ vol3_jobId = self.attach_volume(self.virtual_machine.id,self.volume3)
+ vol4_jobId = self.attach_volume(self.virtual_machine.id,self.volume4)
+ vol5_jobId = self.attach_volume(self.virtual_machine.id,self.volume5)
+ vol6_jobId = self.attach_volume(self.virtual_machine.id,self.volume6)
+
+ self.query_async_job(vol1_jobId.jobid)
+ self.query_async_job(vol2_jobId.jobid)
+ self.query_async_job(vol3_jobId.jobid)
+ self.query_async_job(vol4_jobId.jobid)
+ self.query_async_job(vol5_jobId.jobid)
+ self.query_async_job(vol6_jobId.jobid)
+
+ volumes = Volume.list(self.apiClient,
+ virtualmachineid=self.virtual_machine.id,
+ type="DATADISK",
+ account=self.account.name,
+ domainid=self.account.domainid
+ )
+        self.debug("====== test distribution : %s ===" % str(volumes).replace("}, {", "},\n {"))
+ for volume in volumes:
+ self.debug("====== checking storage : %s ===" % str(volume))
+ storage_usage[volume.storage] += 1
+
+        self.debug("====== test distribution : %s ===" % str(storage_usage).replace("}, {", "},\n {"))
+ # TODO decide what an acceptable 'random' distribution is
+        # all of the loads on storages should be less than the number of volumes for a distribution to exist (statistically this may fail once every 6⁵ runs (7776))
+ self.assertEqual(
+ len(volumes),
+ 6,
+ "All 6 data disks are not attached to VM Successfully"
+ )
+ for storage_use in storage_usage:
+ self.assertTrue(
+                storage_usage[storage_use] < 6, "not all volumes should be on one storage: %s" % storage_use)
+ return
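
A hedged worked example behind the TODO above: assuming each of the six volumes independently and uniformly lands on one of k pools, the probability that they all land on the same pool is k * (1/k)^6 = k^-5. The quoted 1-in-7776 figure corresponds to k = 6 pools; with the test's minimum of two pools it is 1 in 32.

    public class DistributionOdds {
        public static void main(String[] args) {
            // P(all 6 volumes on one pool) = k * (1/k)^6 = k^-5 for k pools
            for (int k : new int[] {2, 6}) {
                double pAllSame = Math.pow(k, -5);
                System.out.printf("k=%d pools: fails ~1 in %.0f runs%n", k, 1 / pAllSame);
            }
        }
    }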