This is an automated email from the ASF dual-hosted git repository.
mtutkowski pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
The following commit(s) were added to refs/heads/master by this push:
     new d12c106  Restrict the number of managed clustered file systems per compute cluster (#2500)
d12c106 is described below
commit d12c106a47db1e2fc5634a9f8dc8c521eba65bfc
Author: Mike Tutkowski <[email protected]>
AuthorDate: Tue Sep 11 08:23:19 2018 -0600
    Restrict the number of managed clustered file systems per compute cluster (#2500)
* Restrict the number of managed clustered file systems per compute cluster
---
.../java/com/cloud/storage/StorageManager.java | 8 +
.../main/java/com/cloud/storage/StorageUtil.java | 148 +++++++
.../spring-engine-components-api-core-context.xml | 4 +-
.../allocator/AbstractStoragePoolAllocator.java | 85 ++--
.../ClusterScopeStoragePoolAllocator.java | 12 +-
.../allocator/LocalStoragePoolAllocator.java | 12 +-
.../allocator/ZoneWideStoragePoolAllocator.java | 48 +--
.../allocator/RandomStoragePoolAllocator.java | 2 +-
.../java/com/cloud/storage/StorageManagerImpl.java | 3 +-
.../com/cloud/storage/VolumeApiServiceImpl.java | 40 ++
.../solidfire/TestManagedClusteredFilesystems.py | 431 +++++++++++++++++++++
11 files changed, 708 insertions(+), 85 deletions(-)
diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
index 530a7de..2fd732b 100644
--- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
+++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
@@ -92,6 +92,14 @@ public interface StorageManager extends StorageService {
true,
ConfigKey.Scope.Global,
null);
+    ConfigKey<Integer> MaxNumberOfManagedClusteredFileSystems = new ConfigKey<>(Integer.class,
+            "max.number.managed.clustered.file.systems",
+            "Storage",
+            "200",
+            "XenServer and VMware only: Maximum number of managed SRs or datastores per compute cluster",
+            true,
+            ConfigKey.Scope.Cluster,
+            null);
/**
* Returns a comma separated list of tags for the specified storage pool
diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java b/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java
new file mode 100644
index 0000000..97354e2
--- /dev/null
+++ b/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java
@@ -0,0 +1,148 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.storage;
+
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.commons.collections.CollectionUtils;
+
+import java.util.List;
+import javax.inject.Inject;
+
+public class StorageUtil {
+ @Inject private ClusterDao clusterDao;
+ @Inject private HostDao hostDao;
+ @Inject private PrimaryDataStoreDao storagePoolDao;
+ @Inject private VMInstanceDao vmInstanceDao;
+ @Inject private VolumeDao volumeDao;
+
+ private Long getClusterId(Long hostId) {
+ if (hostId == null) {
+ return null;
+ }
+
+ HostVO hostVO = hostDao.findById(hostId);
+
+ if (hostVO == null) {
+ return null;
+ }
+
+ return hostVO.getClusterId();
+ }
+
+    /**
+     * This method relates to managed storage only. CloudStack currently supports managed storage with XenServer, vSphere, and KVM.
+     * With managed storage on XenServer and vSphere, CloudStack needs to use an iSCSI SR (XenServer) or datastore (vSphere) per CloudStack
+     * volume. Since XenServer and vSphere are limited to the hundreds with regard to how many SRs or datastores can be leveraged per
+     * compute cluster, this method is used to check a Global Setting (that specifies the maximum number of SRs or datastores per compute cluster)
+     * against what is being requested. KVM does not apply here because it does not suffer from the same scalability limits as XenServer and
+     * vSphere do. With XenServer and vSphere, each host is configured to see all the SRs/datastores of the cluster. With KVM, each host typically
+     * is only configured to see the managed volumes of the VMs that are currently running on that host.
+     *
+     * If the clusterId is passed in, we use it. Otherwise, we use the hostId. If neither leads to a cluster, we just return true.
+     */
+    public boolean managedStoragePoolCanScale(StoragePool storagePool, Long clusterId, Long hostId) {
+ if (clusterId == null) {
+ clusterId = getClusterId(hostId);
+
+ if (clusterId == null) {
+ return true;
+ }
+ }
+
+ ClusterVO clusterVO = clusterDao.findById(clusterId);
+
+ if (clusterVO == null) {
+ return true;
+ }
+
+        Hypervisor.HypervisorType hypervisorType = clusterVO.getHypervisorType();
+
+ if (hypervisorType == null) {
+ return true;
+ }
+
+        if (Hypervisor.HypervisorType.XenServer.equals(hypervisorType) || Hypervisor.HypervisorType.VMware.equals(hypervisorType)) {
+            int maxValue = StorageManager.MaxNumberOfManagedClusteredFileSystems.valueIn(clusterId);
+
+            return getNumberOfManagedClusteredFileSystemsInComputeCluster(storagePool.getDataCenterId(), clusterId) < maxValue;
+ }
+
+ return true;
+ }
+
+    private int getNumberOfManagedClusteredFileSystemsInComputeCluster(long zoneId, long clusterId) {
+ int numberOfManagedClusteredFileSystemsInComputeCluster = 0;
+
+ List<VolumeVO> volumes = volumeDao.findByDc(zoneId);
+
+ if (CollectionUtils.isEmpty(volumes)) {
+ return numberOfManagedClusteredFileSystemsInComputeCluster;
+ }
+
+ for (VolumeVO volume : volumes) {
+ Long instanceId = volume.getInstanceId();
+
+ if (instanceId == null) {
+ continue;
+ }
+
+ VMInstanceVO vmInstanceVO = vmInstanceDao.findById(instanceId);
+
+ if (vmInstanceVO == null) {
+ continue;
+ }
+
+ Long vmHostId = vmInstanceVO.getHostId();
+
+ if (vmHostId == null) {
+ vmHostId = vmInstanceVO.getLastHostId();
+ }
+
+ if (vmHostId == null) {
+ continue;
+ }
+
+ HostVO vmHostVO = hostDao.findById(vmHostId);
+
+ if (vmHostVO == null) {
+ continue;
+ }
+
+ Long vmHostClusterId = vmHostVO.getClusterId();
+
+ if (vmHostClusterId != null && vmHostClusterId == clusterId) {
+            StoragePoolVO storagePoolVO = storagePoolDao.findById(volume.getPoolId());
+
+ if (storagePoolVO != null && storagePoolVO.isManaged()) {
+ numberOfManagedClusteredFileSystemsInComputeCluster++;
+ }
+ }
+ }
+
+ return numberOfManagedClusteredFileSystemsInComputeCluster;
+ }
+}
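The Javadoc above explains when the limit applies. As a purely illustrative sketch (this class is invented for the example and is not part of the commit), a component that has the StorageUtil bean injected (it is registered in the Spring context change that follows) could gate placement of a new managed volume like this:

    // Illustrative sketch only, not part of this commit; it shows the intended call
    // pattern for the new helper. The class and method names are made up.
    import com.cloud.storage.StoragePool;
    import com.cloud.storage.StorageUtil;

    public class ManagedStoragePlacementCheck {
        @javax.inject.Inject private StorageUtil storageUtil;

        public boolean canPlace(StoragePool pool, Long clusterId, Long hostId) {
            if (!pool.isManaged()) {
                return true; // the cap applies only to managed SRs/datastores
            }
            // managedStoragePoolCanScale() reads MaxNumberOfManagedClusteredFileSystems.valueIn(clusterId)
            // (default 200, overridable per cluster) and compares it with the number of managed
            // volumes attached to VMs in that compute cluster (XenServer and VMware only).
            return storageUtil.managedStoragePoolCanScale(pool, clusterId, hostId);
        }
    }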
diff --git a/engine/components-api/src/main/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml b/engine/components-api/src/main/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml
index b644176..be02640 100644
--- a/engine/components-api/src/main/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml
+++ b/engine/components-api/src/main/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml
@@ -25,6 +25,6 @@
http://www.springframework.org/schema/aop
http://www.springframework.org/schema/aop/spring-aop.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context.xsd"
- >
-
+ >
+ <bean id="storageUtil" class="com.cloud.storage.StorageUtil" />
</beans>
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
index 194f7bd..ef5e21d 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
@@ -22,12 +22,10 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Random;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
-import com.cloud.storage.Storage;
import org.apache.log4j.Logger;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@@ -42,10 +40,11 @@ import com.cloud.dc.dao.ClusterDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
+import com.cloud.storage.StorageUtil;
import com.cloud.storage.Volume;
-import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.utils.NumbersUtil;
@@ -55,41 +54,30 @@ import com.cloud.vm.VirtualMachineProfile;
 public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
     private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
- @Inject
- StorageManager storageMgr;
- protected @Inject
- PrimaryDataStoreDao _storagePoolDao;
- @Inject
- VolumeDao _volumeDao;
- @Inject
- ConfigurationDao _configDao;
- @Inject
- ClusterDao _clusterDao;
- protected @Inject
- DataStoreManager dataStoreMgr;
- protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1);
- long _extraBytesPerVolume = 0;
- Random _rand;
- boolean _dontMatter;
- protected String _allocationAlgorithm = "random";
- @Inject
- DiskOfferingDao _diskOfferingDao;
- @Inject
- CapacityDao _capacityDao;
+
+ protected BigDecimal storageOverprovisioningFactor = new BigDecimal(1);
+ protected String allocationAlgorithm = "random";
+ protected long extraBytesPerVolume = 0;
+ @Inject protected DataStoreManager dataStoreMgr;
+ @Inject protected PrimaryDataStoreDao storagePoolDao;
+ @Inject protected VolumeDao volumeDao;
+ @Inject protected ConfigurationDao configDao;
+ @Inject private CapacityDao capacityDao;
+ @Inject private ClusterDao clusterDao;
+ @Inject private StorageManager storageMgr;
+ @Inject private StorageUtil storageUtil;
@Override
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
-        if(_configDao != null) {
-            Map<String, String> configs = _configDao.getConfiguration(null, params);
+        if(configDao != null) {
+            Map<String, String> configs = configDao.getConfiguration(null, params);
             String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor");
-            _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
-            _extraBytesPerVolume = 0;
-            _rand = new Random(System.currentTimeMillis());
-            _dontMatter = Boolean.parseBoolean(configs.get("storage.overwrite.provisioning"));
+            storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
+            extraBytesPerVolume = 0;
             String allocationAlgorithm = configs.get("vm.allocation.algorithm");
if (allocationAlgorithm != null) {
- _allocationAlgorithm = allocationAlgorithm;
+ this.allocationAlgorithm = allocationAlgorithm;
}
return true;
}
@@ -109,27 +97,26 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
Long clusterId = plan.getClusterId();
short capacityType;
if(pools != null && pools.size() != 0){
-            capacityType = pools.get(0).getPoolType().isShared() == true ?
-                    Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED : Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
+            capacityType = pools.get(0).getPoolType().isShared() ? Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED : Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
} else{
return null;
}
-        List<Long> poolIdsByCapacity = _capacityDao.orderHostsByFreeCapacity(clusterId, capacityType);
+        List<Long> poolIdsByCapacity = capacityDao.orderHostsByFreeCapacity(clusterId, capacityType);
if (s_logger.isDebugEnabled()) {
             s_logger.debug("List of pools in descending order of free capacity: "+ poolIdsByCapacity);
}
//now filter the given list of Pools by this ordered list
- Map<Long, StoragePool> poolMap = new HashMap<Long, StoragePool>();
+ Map<Long, StoragePool> poolMap = new HashMap<>();
for (StoragePool pool : pools) {
poolMap.put(pool.getId(), pool);
}
- List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
+ List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
poolIdsByCapacity.retainAll(matchingPoolIds);
- List<StoragePool> reorderedPools = new ArrayList<StoragePool>();
+ List<StoragePool> reorderedPools = new ArrayList<>();
for(Long id: poolIdsByCapacity){
reorderedPools.add(poolMap.get(id));
}
@@ -145,21 +132,21 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
Long podId = plan.getPodId();
Long clusterId = plan.getClusterId();
-        List<Long> poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
+        List<Long> poolIdsByVolCount = volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
if (s_logger.isDebugEnabled()) {
             s_logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount);
}
// now filter the given list of Pools by this ordered list
- Map<Long, StoragePool> poolMap = new HashMap<Long, StoragePool>();
+ Map<Long, StoragePool> poolMap = new HashMap<>();
for (StoragePool pool : pools) {
poolMap.put(pool.getId(), pool);
}
- List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
+ List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
poolIdsByVolCount.retainAll(matchingPoolIds);
- List<StoragePool> reorderedPools = new ArrayList<StoragePool>();
+ List<StoragePool> reorderedPools = new ArrayList<>();
for (Long id : poolIdsByVolCount) {
reorderedPools.add(poolMap.get(id));
}
@@ -176,12 +163,12 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
account = vmProfile.getOwner();
}
-        if (_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
+        if (allocationAlgorithm.equals("random") || allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
// Shuffle this so that we don't check the pools in the same order.
Collections.shuffle(pools);
- } else if (_allocationAlgorithm.equals("userdispersing")) {
+ } else if (allocationAlgorithm.equals("userdispersing")) {
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
- } else if(_allocationAlgorithm.equals("firstfitleastconsumed")){
+ } else if(allocationAlgorithm.equals("firstfitleastconsumed")){
pools = reorderPoolsByCapacity(plan, pools);
}
return pools;
@@ -201,7 +188,7 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
Long clusterId = pool.getClusterId();
if (clusterId != null) {
- ClusterVO cluster = _clusterDao.findById(clusterId);
+ ClusterVO cluster = clusterDao.findById(clusterId);
if (!(cluster.getHypervisorType() == dskCh.getHypervisorType())) {
if (s_logger.isDebugEnabled()) {
                     s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool");
@@ -219,9 +206,13 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
return false;
}
+        if (pool.isManaged() && !storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) {
+ return false;
+ }
+
// check capacity
- Volume volume = _volumeDao.findById(dskCh.getVolumeId());
- List<Volume> requestVolumes = new ArrayList<Volume>();
+ Volume volume = volumeDao.findById(dskCh.getVolumeId());
+ List<Volume> requestVolumes = new ArrayList<>();
requestVolumes.add(volume);
         return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) && storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool, plan.getClusterId());
}
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
index 6e9c682..12884d5 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
@@ -70,7 +70,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
if (s_logger.isTraceEnabled()) {
             // Log the pools details that are ignored because they are in disabled state
-            List<StoragePoolVO> disabledPools = _storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, ScopeType.CLUSTER);
+            List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, ScopeType.CLUSTER);
if (disabledPools != null && !disabledPools.isEmpty()) {
for (StoragePoolVO pool : disabledPools) {
                     s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
@@ -78,11 +78,11 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
}
}
-        List<StoragePoolVO> pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
+        List<StoragePoolVO> pools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
s_logger.debug("Found pools matching tags: " + pools);
         // add remaining pools in cluster, that did not match tags, to avoid set
-        List<StoragePoolVO> allPools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null);
+        List<StoragePoolVO> allPools = storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null);
allPools.removeAll(pools);
for (StoragePoolVO pool : allPools) {
             s_logger.debug("Adding pool " + pool + " to avoid set since it did not match tags");
@@ -119,11 +119,11 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
- if (_configDao != null) {
- Map<String, String> configs = _configDao.getConfiguration(params);
+ if (configDao != null) {
+ Map<String, String> configs = configDao.getConfiguration(params);
             String allocationAlgorithm = configs.get("vm.allocation.algorithm");
if (allocationAlgorithm != null) {
- _allocationAlgorithm = allocationAlgorithm;
+ this.allocationAlgorithm = allocationAlgorithm;
}
}
return true;
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
index 0949036..390272e 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
@@ -69,7 +69,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
if (s_logger.isTraceEnabled()) {
             // Log the pools details that are ignored because they are in disabled state
-            List<StoragePoolVO> disabledPools = _storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST);
+            List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), ScopeType.HOST);
if (disabledPools != null && !disabledPools.isEmpty()) {
for (StoragePoolVO pool : disabledPools) {
                     s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
@@ -81,7 +81,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
// data disk and host identified from deploying vm (attach volume case)
if (plan.getHostId() != null) {
-            List<StoragePoolVO> hostTagsPools = _storagePoolDao.findLocalStoragePoolsByHostAndTags(plan.getHostId(), dskCh.getTags());
+            List<StoragePoolVO> hostTagsPools = storagePoolDao.findLocalStoragePoolsByHostAndTags(plan.getHostId(), dskCh.getTags());
for (StoragePoolVO pool : hostTagsPools) {
if (pool != null && pool.isLocal()) {
                     StoragePool storagePool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
@@ -103,7 +103,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
return null;
}
List<StoragePoolVO> availablePools =
-                _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags());
+                storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags());
for (StoragePoolVO pool : availablePools) {
if (suitablePools.size() == returnUpTo) {
break;
@@ -118,7 +118,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
             // add remaining pools in cluster, that did not match tags, to avoid
             // set
-            List<StoragePoolVO> allPools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), null);
+            List<StoragePoolVO> allPools = storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), null);
allPools.removeAll(availablePools);
for (StoragePoolVO pool : allPools) {
avoid.addPool(pool.getId());
@@ -136,8 +136,8 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
-        _storageOverprovisioningFactor = new BigDecimal(1);
-        _extraBytesPerVolume = NumbersUtil.parseLong((String)params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L);
+        storageOverprovisioningFactor = new BigDecimal(1);
+        extraBytesPerVolume = NumbersUtil.parseLong((String)params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L);
return true;
}
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
index 7a10966..aa077f3 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
@@ -27,7 +27,6 @@ import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import com.cloud.deploy.DeploymentPlan;
@@ -41,51 +40,49 @@ import com.cloud.vm.VirtualMachineProfile;
@Component
 public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
-    private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
+    private static final Logger LOGGER = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
@Inject
- PrimaryDataStoreDao _storagePoolDao;
- @Inject
- DataStoreManager dataStoreMgr;
+ private DataStoreManager dataStoreMgr;
@Override
     protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
- s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
+ LOGGER.debug("ZoneWideStoragePoolAllocator to find storage pool");
if (dskCh.useLocalStorage()) {
return null;
}
- if (s_logger.isTraceEnabled()) {
+ if (LOGGER.isTraceEnabled()) {
             // Log the pools details that are ignored because they are in disabled state
-            List<StoragePoolVO> disabledPools = _storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, ScopeType.ZONE);
+            List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, ScopeType.ZONE);
if (disabledPools != null && !disabledPools.isEmpty()) {
for (StoragePoolVO pool : disabledPools) {
-                    s_logger.trace("Ignoring pool " + pool + " as it is in disabled state.");
+                    LOGGER.trace("Ignoring pool " + pool + " as it is in disabled state.");
}
}
}
- List<StoragePool> suitablePools = new ArrayList<StoragePool>();
+ List<StoragePool> suitablePools = new ArrayList<>();
-        List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
+        List<StoragePoolVO> storagePools = storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
if (storagePools == null) {
- storagePools = new ArrayList<StoragePoolVO>();
+ storagePools = new ArrayList<>();
}
-        List<StoragePoolVO> anyHypervisorStoragePools = new ArrayList<StoragePoolVO>();
+ List<StoragePoolVO> anyHypervisorStoragePools = new ArrayList<>();
for (StoragePoolVO storagePool : storagePools) {
if (HypervisorType.Any.equals(storagePool.getHypervisor())) {
anyHypervisorStoragePools.add(storagePool);
}
}
-        List<StoragePoolVO> storagePoolsByHypervisor = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), dskCh.getHypervisorType());
+        List<StoragePoolVO> storagePoolsByHypervisor = storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), dskCh.getHypervisorType());
storagePools.retainAll(storagePoolsByHypervisor);
storagePools.addAll(anyHypervisorStoragePools);
// add remaining pools in zone, that did not match tags, to avoid set
-        List<StoragePoolVO> allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
+        List<StoragePoolVO> allPools = storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
allPools.removeAll(storagePools);
for (StoragePoolVO pool : allPools) {
avoid.addPool(pool.getId());
@@ -100,12 +97,19 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
if (filter(avoid, storagePool, dskCh, plan)) {
suitablePools.add(storagePool);
} else {
- avoid.addPool(storagePool.getId());
+ if (canAddStoragePoolToAvoidSet(storage)) {
+ avoid.addPool(storagePool.getId());
+ }
}
}
return suitablePools;
}
+    // Don't add zone-wide, managed storage to the avoid list because it may be usable for another cluster.
+ private boolean canAddStoragePoolToAvoidSet(StoragePoolVO storagePoolVO) {
+        return !ScopeType.ZONE.equals(storagePoolVO.getScope()) || !storagePoolVO.isManaged();
+ }
+
@Override
     protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List<StoragePool> pools, Account account) {
if (account == null) {
@@ -113,21 +117,21 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
long dcId = plan.getDataCenterId();
-        List<Long> poolIdsByVolCount = _volumeDao.listZoneWidePoolIdsByVolumeCount(dcId, account.getAccountId());
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount);
+        List<Long> poolIdsByVolCount = volumeDao.listZoneWidePoolIdsByVolumeCount(dcId, account.getAccountId());
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("List of pools in ascending order of number of volumes for account id: " + account.getAccountId() + " is: " + poolIdsByVolCount);
}
// now filter the given list of Pools by this ordered list
- Map<Long, StoragePool> poolMap = new HashMap<Long, StoragePool>();
+ Map<Long, StoragePool> poolMap = new HashMap<>();
for (StoragePool pool : pools) {
poolMap.put(pool.getId(), pool);
}
- List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
+ List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
poolIdsByVolCount.retainAll(matchingPoolIds);
- List<StoragePool> reorderedPools = new ArrayList<StoragePool>();
+ List<StoragePool> reorderedPools = new ArrayList<>();
for (Long id : poolIdsByVolCount) {
reorderedPools.add(poolMap.get(id));
}
diff --git a/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java
index 4417490..6b912fb 100644
--- a/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java
+++ b/plugins/storage-allocators/random/src/main/java/org/apache/cloudstack/storage/allocator/RandomStoragePoolAllocator.java
@@ -48,7 +48,7 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
}
         s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
-        List<StoragePoolVO> pools = _storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
+        List<StoragePoolVO> pools = storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
if (pools.size() == 0) {
if (s_logger.isDebugEnabled()) {
                 s_logger.debug("No storage pools available for allocation, returning");
diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
index 62ec13c..fc40981 100644
--- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
@@ -2457,7 +2457,8 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Override
public ConfigKey<?>[] getConfigKeys() {
-        return new ConfigKey<?>[] {StorageCleanupInterval, StorageCleanupDelay, StorageCleanupEnabled, TemplateCleanupEnabled};
+        return new ConfigKey<?>[] { StorageCleanupInterval, StorageCleanupDelay, StorageCleanupEnabled, TemplateCleanupEnabled,
+                KvmStorageOfflineMigrationWait, KvmStorageOnlineMigrationWait, MaxNumberOfManagedClusteredFileSystems };
}
@Override
diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
index 3fcf761..3160dd3 100644
--- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
@@ -253,6 +253,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
private StorageManager storageMgr;
@Inject
private StoragePoolDetailsDao storagePoolDetailsDao;
+ @Inject
+ private StorageUtil storageUtil;
protected Gson _gson;
@@ -2741,6 +2743,42 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
}
+ private void verifyManagedStorage(Long storagePoolId, Long hostId) {
+ if (storagePoolId == null || hostId == null) {
+ return;
+ }
+
+ StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
+
+ if (storagePoolVO == null || !storagePoolVO.isManaged()) {
+ return;
+ }
+
+ HostVO hostVO = _hostDao.findById(hostId);
+
+ if (hostVO == null) {
+ return;
+ }
+
+        if (!storageUtil.managedStoragePoolCanScale(storagePoolVO, hostVO.getClusterId(), hostVO.getId())) {
+            throw new CloudRuntimeException("Insufficient number of available " + getNameOfClusteredFileSystem(hostVO));
+ }
+ }
+
+ private String getNameOfClusteredFileSystem(HostVO hostVO) {
+ HypervisorType hypervisorType = hostVO.getHypervisorType();
+
+ if (HypervisorType.XenServer.equals(hypervisorType)) {
+ return "SRs";
+ }
+
+ if (HypervisorType.VMware.equals(hypervisorType)) {
+ return "datastores";
+ }
+
+ return "clustered file systems";
+ }
+
     private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, Long deviceId) {
         String errorMsg = "Failed to attach volume " + volumeToAttach.getName() + " to VM " + vm.getHostName();
boolean sendCommand = vm.getState() == State.Running;
@@ -2768,6 +2806,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
}
}
+ verifyManagedStorage(volumeToAttachStoragePool.getId(), hostId);
+
         // volumeToAttachStoragePool should be null if the VM we are attaching the disk to has never been started before
         DataStore dataStore = volumeToAttachStoragePool != null ? dataStoreMgr.getDataStore(volumeToAttachStoragePool.getId(), DataStoreRole.Primary) : null;
diff --git a/test/integration/plugins/solidfire/TestManagedClusteredFilesystems.py b/test/integration/plugins/solidfire/TestManagedClusteredFilesystems.py
new file mode 100644
index 0000000..053dfb6
--- /dev/null
+++ b/test/integration/plugins/solidfire/TestManagedClusteredFilesystems.py
@@ -0,0 +1,431 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import logging
+import random
+import SignedAPICall
+import XenAPI
+
+from solidfire.factory import ElementFactory
+
+from util import sf_util
+
+# All tests inherit from cloudstackTestCase
+from marvin.cloudstackTestCase import cloudstackTestCase
+
+# Import Integration Libraries
+
+# base - contains all resources as entities and defines create, delete, list operations on them
+from marvin.lib.base import Account, Cluster, ServiceOffering, Snapshot, StoragePool, User, VirtualMachine, Volume
+
+# common - commonly used methods for all tests are listed here
+from marvin.lib.common import get_domain, get_template, get_zone, list_hosts, list_volumes
+
+# utils - utility classes for common cleanup, external library wrappers, etc.
+from marvin.lib.utils import cleanup_resources
+
+# Prerequisites:
+# Only one zone
+# Only one pod
+# Two clusters
+#
+# Running the tests:
+# If using XenServer, verify the "xen_server_hostname" variable is correct.
+# Set the Global Setting "max.number.managed.clustered.file.systems" equal to 2.
+#
+# Note:
+# Verify that TestData.clusterId and TestData.clusterId2 are set properly.
+
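+# Illustrative only, not executed by this test: assuming Marvin's standard
+# Configurations helper, the prerequisite setting could be applied before a run
+# (for example from setUpClass) with:
+#
+#     from marvin.lib.base import Configurations
+#     Configurations.update(cls.apiClient, "max.number.managed.clustered.file.systems", "2")
+#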
+
+class TestData():
+ # constants
+ account = "account"
+ allocationstate = "allocationstate"
+ capacityBytes = "capacitybytes"
+ capacityIops = "capacityiops"
+ clusterId = "clusterId"
+ clusterId2 = "clusterId2"
+ computeOffering = "computeoffering"
+ domainId = "domainId"
+ email = "email"
+ firstname = "firstname"
+ hypervisor = "hypervisor"
+ lastname = "lastname"
+ mvip = "mvip"
+ name = "name"
+ password = "password"
+ port = "port"
+ primaryStorage = "primarystorage"
+ provider = "provider"
+ scope = "scope"
+ solidFire = "solidfire"
+ storageTag = "SolidFire_SAN_1"
+ tags = "tags"
+ url = "url"
+ user = "user"
+ username = "username"
+ xenServer = "xenserver"
+ zoneId = "zoneId"
+
+ hypervisor_type = xenServer
+ xen_server_hostname = "XenServer-6.5-1"
+
+ def __init__(self):
+ self.testdata = {
+ TestData.solidFire: {
+ TestData.mvip: "10.117.78.225",
+ TestData.username: "admin",
+ TestData.password: "admin",
+ TestData.port: 443,
+ TestData.url: "https://10.117.78.225:443"
+ },
+ TestData.xenServer: {
+ TestData.username: "root",
+ TestData.password: "solidfire"
+ },
+ TestData.account: {
+ TestData.email: "[email protected]",
+ TestData.firstname: "John",
+ TestData.lastname: "Doe",
+ TestData.username: "test",
+ TestData.password: "test"
+ },
+ TestData.user: {
+ TestData.email: "[email protected]",
+ TestData.firstname: "Jane",
+ TestData.lastname: "Doe",
+ TestData.username: "testuser",
+ TestData.password: "password"
+ },
+ TestData.primaryStorage: {
+ TestData.name: "SolidFire-%d" % random.randint(0, 100),
+ TestData.scope: "ZONE",
+ TestData.url: "MVIP=10.117.78.225;SVIP=10.117.94.225;" +
+                              "clusterAdminUsername=admin;clusterAdminPassword=admin;" +
+                              "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
+ "clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
+ TestData.provider: "SolidFire",
+ TestData.tags: TestData.storageTag,
+ TestData.capacityIops: 4500000,
+ TestData.capacityBytes: 2251799813685248,
+ TestData.hypervisor: "Any"
+ },
+ TestData.computeOffering: {
+ TestData.name: "SF_CO_1",
+ "displaytext": "SF_CO_1 (Min IOPS = 300; Max IOPS = 600)",
+ "cpunumber": 1,
+ "cpuspeed": 100,
+ "memory": 128,
+ "storagetype": "shared",
+ "customizediops": False,
+ "miniops": "300",
+ "maxiops": "600",
+ "hypervisorsnapshotreserve": 200,
+ TestData.tags: TestData.storageTag
+ },
+ TestData.zoneId: 1,
+ TestData.clusterId: 1,
+ TestData.clusterId2: 6,
+ TestData.domainId: 1,
+ TestData.url: "10.117.40.114"
+ }
+
+
+class TestManagedClusteredFilesystems(cloudstackTestCase):
+    _should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list."
+    _volume_should_have_failed_to_attach_to_vm = "The volume should have failed to attach to the VM."
+
+ @classmethod
+ def setUpClass(cls):
+ # Set up API client
+        testclient = super(TestManagedClusteredFilesystems, cls).getClsTestClient()
+
+ cls.apiClient = testclient.getApiClient()
+ cls.configData = testclient.getParsedTestDataConfig()
+ cls.dbConnection = testclient.getDbConnection()
+
+ cls.testdata = TestData().testdata
+
+ sf_util.set_supports_resign(True, cls.dbConnection)
+
+ cls._connect_to_hypervisor()
+
+ # Set up SolidFire connection
+ solidfire = cls.testdata[TestData.solidFire]
+
+        cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])
+
+ # Get Resources from Cloud Infrastructure
+        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
+        cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
+ cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
+
+ # Create test account
+ cls.account = Account.create(
+ cls.apiClient,
+ cls.testdata["account"],
+ admin=1
+ )
+
+ # Set up connection to make customized API calls
+ cls.user = User.create(
+ cls.apiClient,
+ cls.testdata["user"],
+ account=cls.account.name,
+ domainid=cls.domain.id
+ )
+
+ url = cls.testdata[TestData.url]
+
+ api_url = "http://" + url + ":8080/client/api"
+ userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)
+
+        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)
+
+ primarystorage = cls.testdata[TestData.primaryStorage]
+
+ cls.primary_storage = StoragePool.create(
+ cls.apiClient,
+ primarystorage,
+ scope=primarystorage[TestData.scope],
+ zoneid=cls.zone.id,
+ provider=primarystorage[TestData.provider],
+ tags=primarystorage[TestData.tags],
+ capacityiops=primarystorage[TestData.capacityIops],
+ capacitybytes=primarystorage[TestData.capacityBytes],
+ hypervisor=primarystorage[TestData.hypervisor]
+ )
+
+ cls.compute_offering = ServiceOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.computeOffering]
+ )
+
+ # Resources that are to be destroyed
+ cls._cleanup = [
+ cls.compute_offering,
+ cls.user,
+ cls.account
+ ]
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ cleanup_resources(cls.apiClient, cls._cleanup)
+
+ cls.primary_storage.delete(cls.apiClient)
+
+ sf_util.purge_solidfire_volumes(cls.sfe)
+ except Exception as e:
+ logging.debug("Exception in tearDownClass(cls): %s" % e)
+
+ def setUp(self):
+ self.cleanup = []
+
+ def tearDown(self):
+ cleanup_resources(self.apiClient, self.cleanup)
+
+# Only two 'permanent' SRs per cluster
+#
+# Disable the second cluster
+#
+# Create VM
+# Create VM
+# Create VM (should fail)
+# Take snapshot of first root disk
+# Create a volume from this snapshot
+# Attach new volume to second VM (should fail)
+#
+# Enable the second cluster
+#
+# Attach new volume to second VM (should fail)
+# Create VM (should end up in new cluster)
+# Delete first VM (this should free up one SR in the first cluster)
+# Attach new volume to second VM
+# Detach new volume from second VM
+# Attach new volume to second VM
+# Create a volume from the snapshot
+# Attach this new volume to the second VM (should fail)
+# Attach this new volume to the first VM in the new cluster
+ def test_managed_clustered_filesystems_limit(self):
+        args = { "id": self.testdata[TestData.clusterId2], TestData.allocationstate: "Disabled" }
+
+ Cluster.update(self.apiClient, **args)
+
+ virtual_machine_names = {
+ "name": "TestVM1",
+ "displayname": "Test VM 1"
+ }
+
+ virtual_machine_1 = self._create_vm(virtual_machine_names)
+
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ virtualmachineid=virtual_machine_1.id,
+ listall=True
+ )
+
+        sf_util.check_list(list_volumes_response, 1, self, TestManagedClusteredFilesystems._should_only_be_one_volume_in_list_err_msg)
+
+ vm_1_root_volume = list_volumes_response[0]
+
+ virtual_machine_names = {
+ "name": "TestVM2",
+ "displayname": "Test VM 2"
+ }
+
+ virtual_machine_2 = self._create_vm(virtual_machine_names)
+
+ virtual_machine_names = {
+ "name": "TestVM3",
+ "displayname": "Test VM 3"
+ }
+
+ class VMStartedException(Exception):
+ def __init__(self, *args, **kwargs):
+ Exception.__init__(self, *args, **kwargs)
+
+ try:
+            # The VM should fail to be created as there should be an insufficient number of clustered filesystems
+ # remaining in the compute cluster.
+ self._create_vm(virtual_machine_names)
+
+ raise VMStartedException("The VM should have failed to start.")
+ except VMStartedException:
+ raise
+ except Exception:
+ pass
+
+ vol_snap = Snapshot.create(
+ self.apiClient,
+ volume_id=vm_1_root_volume.id
+ )
+
+        services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
+
+        volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap.id, services, account=self.account.name, domainid=self.domain.id)
+
+ class VolumeAttachedException(Exception):
+ def __init__(self, *args, **kwargs):
+ Exception.__init__(self, *args, **kwargs)
+
+ try:
+            # The volume should fail to be attached as there should be an insufficient number of clustered filesystems
+ # remaining in the compute cluster.
+ virtual_machine_2.attach_volume(
+ self.apiClient,
+ volume_created_from_snapshot_1
+ )
+
+            raise VolumeAttachedException(TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
+ except VolumeAttachedException:
+ raise
+ except Exception:
+ pass
+
+        args = { "id": self.testdata[TestData.clusterId2], TestData.allocationstate: "Enabled" }
+
+ Cluster.update(self.apiClient, **args)
+
+ try:
+            # The volume should fail to be attached as there should be an insufficient number of clustered filesystems
+ # remaining in the compute cluster.
+ virtual_machine_2.attach_volume(
+ self.apiClient,
+ volume_created_from_snapshot_1
+ )
+
+            raise VolumeAttachedException(TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
+ except VolumeAttachedException:
+ raise
+ except Exception:
+ pass
+
+ virtual_machine_names = {
+ "name": "TestVMA",
+ "displayname": "Test VM A"
+ }
+
+ virtual_machine_a = self._create_vm(virtual_machine_names)
+
+        host_for_vm_1 = list_hosts(self.apiClient, id=virtual_machine_1.hostid)[0]
+        host_for_vm_a = list_hosts(self.apiClient, id=virtual_machine_a.hostid)[0]
+
+ self.assertTrue(
+ host_for_vm_1.clusterid != host_for_vm_a.clusterid,
+ "VMs 1 and VM a should be in different clusters."
+ )
+
+ virtual_machine_1.delete(self.apiClient, True)
+
+ volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
+ self.apiClient,
+ volume_created_from_snapshot_1
+ )
+
+        virtual_machine_2.detach_volume(self.apiClient, volume_created_from_snapshot_1)
+
+ volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
+ self.apiClient,
+ volume_created_from_snapshot_1
+ )
+
+        services = {"diskname": "Vol-2", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
+
+        volume_created_from_snapshot_2 = Volume.create_from_snapshot(self.apiClient, vol_snap.id, services, account=self.account.name, domainid=self.domain.id)
+
+ try:
+            # The volume should fail to be attached as there should be an insufficient number of clustered filesystems
+ # remaining in the compute cluster.
+ virtual_machine_2.attach_volume(
+ self.apiClient,
+ volume_created_from_snapshot_2
+ )
+
+            raise VolumeAttachedException(TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
+ except VolumeAttachedException:
+ raise
+ except Exception:
+ pass
+
+ virtual_machine_a.attach_volume(
+ self.apiClient,
+ volume_created_from_snapshot_2
+ )
+
+ def _create_vm(self, virtual_machine_names):
+ return VirtualMachine.create(
+ self.apiClient,
+ virtual_machine_names,
+ accountid=self.account.name,
+ zoneid=self.zone.id,
+ serviceofferingid=self.compute_offering.id,
+ templateid=self.template.id,
+ domainid=self.domain.id,
+ startvm=True
+ )
+
+ @classmethod
+ def _connect_to_hypervisor(cls):
+ host_ip = "https://" + \
+            list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name=TestData.xen_server_hostname)[0].ipaddress
+
+ cls.xen_session = XenAPI.Session(host_ip)
+
+ xen_server = cls.testdata[TestData.xenServer]
+
+        cls.xen_session.xenapi.login_with_password(xen_server[TestData.username], xen_server[TestData.password])