mike-tutkowski closed pull request #2500: Restrict the number of managed 
clustered file systems per compute cluster
URL: https://github.com/apache/cloudstack/pull/2500
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git 
a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java 
b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
index 530a7dea3cc..2fd732bb267 100644
--- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
+++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java
@@ -92,6 +92,14 @@
             true,
             ConfigKey.Scope.Global,
             null);
+    ConfigKey<Integer> MaxNumberOfManagedClusteredFileSystems = new 
ConfigKey<>(Integer.class,
+            "max.number.managed.clustered.file.systems",
+            "Storage",
+            "200",
+            "XenServer and VMware only: Maximum number of managed SRs or 
datastores per compute cluster",
+            true,
+            ConfigKey.Scope.Cluster,
+            null);
 
     /**
      * Returns a comma separated list of tags for the specified storage pool
diff --git 
a/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java 
b/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java
new file mode 100644
index 00000000000..bb447c2341c
--- /dev/null
+++ b/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java
@@ -0,0 +1,112 @@
+package com.cloud.storage;
+
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+
+import java.util.List;
+import javax.inject.Inject;
+
+public class StorageUtil {
+    @Inject private ClusterDao clusterDao;
+    @Inject private HostDao hostDao;
+    @Inject private PrimaryDataStoreDao storagePoolDao;
+    @Inject private VMInstanceDao vmInstanceDao;
+    @Inject private VolumeDao volumeDao;
+
+    private Long getClusterId(Long hostId) {
+        if (hostId == null) {
+            return null;
+        }
+
+        HostVO hostVO = hostDao.findById(hostId);
+
+        if (hostVO == null) {
+            return null;
+        }
+
+        return hostVO.getClusterId();
+    }
+
+    public boolean managedStoragePoolCanScale(StoragePool storagePool, Long 
clusterId, Long hostId) {
+        if (clusterId == null) {
+            clusterId = getClusterId(hostId);
+
+            if (clusterId == null) {
+                return true;
+            }
+        }
+
+        ClusterVO clusterVO = clusterDao.findById(clusterId);
+
+        if (clusterVO == null) {
+            return true;
+        }
+
+        Hypervisor.HypervisorType hypervisorType = 
clusterVO.getHypervisorType();
+
+        if (hypervisorType == null) {
+            return true;
+        }
+
+        if (Hypervisor.HypervisorType.XenServer.equals(hypervisorType) || 
Hypervisor.HypervisorType.VMware.equals(hypervisorType)) {
+            int maxValue = 
StorageManager.MaxNumberOfManagedClusteredFileSystems.valueIn(clusterId);
+
+            return 
getNumberOfManagedClusteredFileSystemsInComputeCluster(storagePool.getDataCenterId(),
 clusterId) < maxValue;
+        }
+
+        return true;
+    }
+
+    private int getNumberOfManagedClusteredFileSystemsInComputeCluster(long 
zoneId, long clusterId) {
+        int numberOfManagedClusteredFileSystemsInComputeCluster = 0;
+
+        List<VolumeVO> volumes = volumeDao.findByDc(zoneId);
+
+        if (volumes == null || volumes.size() == 0) {
+            return numberOfManagedClusteredFileSystemsInComputeCluster;
+        }
+
+        for (VolumeVO volume : volumes) {
+            Long instanceId = volume.getInstanceId();
+
+            if (instanceId != null) {
+                VMInstanceVO vmInstanceVO = vmInstanceDao.findById(instanceId);
+
+                if (vmInstanceVO != null) {
+                    Long vmHostId = vmInstanceVO.getHostId();
+
+                    if (vmHostId == null) {
+                        vmHostId = vmInstanceVO.getLastHostId();
+                    }
+
+                    if (vmHostId != null) {
+                        HostVO vmHostVO = hostDao.findById(vmHostId);
+
+                        if (vmHostVO != null) {
+                            Long vmHostClusterId = vmHostVO.getClusterId();
+
+                            if (vmHostClusterId != null && vmHostClusterId == 
clusterId) {
+                                StoragePoolVO storagePoolVO = 
storagePoolDao.findById(volume.getPoolId());
+
+                                if (storagePoolVO != null && 
storagePoolVO.isManaged()) {
+                                    
numberOfManagedClusteredFileSystemsInComputeCluster++;
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        return numberOfManagedClusteredFileSystemsInComputeCluster;
+    }
+}
diff --git 
a/engine/components-api/src/main/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml
 
b/engine/components-api/src/main/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml
index b644176565f..be026407e30 100644
--- 
a/engine/components-api/src/main/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml
+++ 
b/engine/components-api/src/main/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml
@@ -25,6 +25,6 @@
                       http://www.springframework.org/schema/aop 
http://www.springframework.org/schema/aop/spring-aop.xsd
                       http://www.springframework.org/schema/context
                       
http://www.springframework.org/schema/context/spring-context.xsd";
-                      >                     
-
+                      >
+    <bean id="storageUtil" class="com.cloud.storage.StorageUtil" />
 </beans>
diff --git 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
index 194f7bd857c..2638fe573d0 100644
--- 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
+++ 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
@@ -22,12 +22,10 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
-import com.cloud.storage.Storage;
 import org.apache.log4j.Logger;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
@@ -42,10 +40,11 @@
 import com.cloud.deploy.DeploymentPlan;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.Storage;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePool;
+import com.cloud.storage.StorageUtil;
 import com.cloud.storage.Volume;
-import com.cloud.storage.dao.DiskOfferingDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.user.Account;
 import com.cloud.utils.NumbersUtil;
@@ -55,38 +54,27 @@
 
 public abstract class AbstractStoragePoolAllocator extends AdapterBase 
implements StoragePoolAllocator {
     private static final Logger s_logger = 
Logger.getLogger(AbstractStoragePoolAllocator.class);
-    @Inject
-    StorageManager storageMgr;
-    protected @Inject
-    PrimaryDataStoreDao _storagePoolDao;
-    @Inject
-    VolumeDao _volumeDao;
-    @Inject
-    ConfigurationDao _configDao;
-    @Inject
-    ClusterDao _clusterDao;
-    protected @Inject
-    DataStoreManager dataStoreMgr;
+
     protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1);
-    long _extraBytesPerVolume = 0;
-    Random _rand;
-    boolean _dontMatter;
+    protected DataStoreManager dataStoreMgr;
     protected String _allocationAlgorithm = "random";
-    @Inject
-    DiskOfferingDao _diskOfferingDao;
-    @Inject
-    CapacityDao _capacityDao;
+    @Inject PrimaryDataStoreDao _storagePoolDao;
+    @Inject VolumeDao volumeDao;
+    @Inject ConfigurationDao configDao;
+    long _extraBytesPerVolume = 0;
+    @Inject private CapacityDao capacityDao;
+    @Inject private ClusterDao clusterDao;
+    @Inject private StorageManager storageMgr;
+    @Inject private StorageUtil storageUtil;
 
     @Override
     public boolean configure(String name, Map<String, Object> params) throws 
ConfigurationException {
         super.configure(name, params);
-        if(_configDao != null) {
-            Map<String, String> configs = _configDao.getConfiguration(null, 
params);
+        if(configDao != null) {
+            Map<String, String> configs = configDao.getConfiguration(null, 
params);
             String globalStorageOverprovisioningFactor = 
configs.get("storage.overprovisioning.factor");
             _storageOverprovisioningFactor = new 
BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
             _extraBytesPerVolume = 0;
-            _rand = new Random(System.currentTimeMillis());
-            _dontMatter = 
Boolean.parseBoolean(configs.get("storage.overwrite.provisioning"));
             String allocationAlgorithm = 
configs.get("vm.allocation.algorithm");
             if (allocationAlgorithm != null) {
                 _allocationAlgorithm = allocationAlgorithm;
@@ -109,27 +97,26 @@ public boolean configure(String name, Map<String, Object> 
params) throws Configu
         Long clusterId = plan.getClusterId();
         short capacityType;
         if(pools != null && pools.size() != 0){
-            capacityType = pools.get(0).getPoolType().isShared() == true ?
-                    Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED : 
Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
+            capacityType = pools.get(0).getPoolType().isShared() ? 
Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED : Capacity.CAPACITY_TYPE_LOCAL_STORAGE;
         } else{
             return null;
         }
 
-        List<Long> poolIdsByCapacity = 
_capacityDao.orderHostsByFreeCapacity(clusterId, capacityType);
+        List<Long> poolIdsByCapacity = 
capacityDao.orderHostsByFreeCapacity(clusterId, capacityType);
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("List of pools in descending order of free 
capacity: "+ poolIdsByCapacity);
         }
 
       //now filter the given list of Pools by this ordered list
-      Map<Long, StoragePool> poolMap = new HashMap<Long, StoragePool>();
+      Map<Long, StoragePool> poolMap = new HashMap<>();
       for (StoragePool pool : pools) {
           poolMap.put(pool.getId(), pool);
       }
-      List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
+      List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
 
       poolIdsByCapacity.retainAll(matchingPoolIds);
 
-      List<StoragePool> reorderedPools = new ArrayList<StoragePool>();
+      List<StoragePool> reorderedPools = new ArrayList<>();
       for(Long id: poolIdsByCapacity){
           reorderedPools.add(poolMap.get(id));
       }
@@ -145,21 +132,21 @@ public boolean configure(String name, Map<String, Object> 
params) throws Configu
         Long podId = plan.getPodId();
         Long clusterId = plan.getClusterId();
 
-        List<Long> poolIdsByVolCount = 
_volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, 
account.getAccountId());
+        List<Long> poolIdsByVolCount = 
volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, 
account.getAccountId());
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("List of pools in ascending order of number of 
volumes for account id: " + account.getAccountId() + " is: " + 
poolIdsByVolCount);
         }
 
         // now filter the given list of Pools by this ordered list
-        Map<Long, StoragePool> poolMap = new HashMap<Long, StoragePool>();
+        Map<Long, StoragePool> poolMap = new HashMap<>();
         for (StoragePool pool : pools) {
             poolMap.put(pool.getId(), pool);
         }
-        List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
+        List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
 
         poolIdsByVolCount.retainAll(matchingPoolIds);
 
-        List<StoragePool> reorderedPools = new ArrayList<StoragePool>();
+        List<StoragePool> reorderedPools = new ArrayList<>();
         for (Long id : poolIdsByVolCount) {
             reorderedPools.add(poolMap.get(id));
         }
@@ -201,7 +188,7 @@ protected boolean filter(ExcludeList avoid, StoragePool 
pool, DiskProfile dskCh,
 
         Long clusterId = pool.getClusterId();
         if (clusterId != null) {
-            ClusterVO cluster = _clusterDao.findById(clusterId);
+            ClusterVO cluster = clusterDao.findById(clusterId);
             if (!(cluster.getHypervisorType() == dskCh.getHypervisorType())) {
                 if (s_logger.isDebugEnabled()) {
                     s_logger.debug("StoragePool's Cluster does not have 
required hypervisorType, skipping this pool");
@@ -219,13 +206,21 @@ protected boolean filter(ExcludeList avoid, StoragePool 
pool, DiskProfile dskCh,
             return false;
         }
 
+        if (pool.isManaged() && !managedStoragePoolCanScale(pool, plan)) {
+            return false;
+        }
+
         // check capacity
-        Volume volume = _volumeDao.findById(dskCh.getVolumeId());
-        List<Volume> requestVolumes = new ArrayList<Volume>();
+        Volume volume = volumeDao.findById(dskCh.getVolumeId());
+        List<Volume> requestVolumes = new ArrayList<>();
         requestVolumes.add(volume);
         return storageMgr.storagePoolHasEnoughIops(requestVolumes, pool) && 
storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool, plan.getClusterId());
     }
 
+    private boolean managedStoragePoolCanScale(StoragePool storagePool, 
DeploymentPlan plan) {
+        return storageUtil.managedStoragePoolCanScale(storagePool, 
plan.getClusterId(), plan.getHostId());
+    }
+
     /*
     Check StoragePool and Volume type compatibility for the hypervisor
      */
diff --git 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
index 6e9c682e970..1755be16c4a 100644
--- 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
+++ 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
@@ -119,8 +119,8 @@
     public boolean configure(String name, Map<String, Object> params) throws 
ConfigurationException {
         super.configure(name, params);
 
-        if (_configDao != null) {
-            Map<String, String> configs = _configDao.getConfiguration(params);
+        if (configDao != null) {
+            Map<String, String> configs = configDao.getConfiguration(params);
             String allocationAlgorithm = 
configs.get("vm.allocation.algorithm");
             if (allocationAlgorithm != null) {
                 _allocationAlgorithm = allocationAlgorithm;
diff --git 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
index 7a109669ab7..86590585a7d 100644
--- 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
+++ 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
@@ -41,51 +41,51 @@
 
 @Component
 public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator 
{
-    private static final Logger s_logger = 
Logger.getLogger(ZoneWideStoragePoolAllocator.class);
+    private static final Logger LOGGER = 
Logger.getLogger(ZoneWideStoragePoolAllocator.class);
     @Inject
-    PrimaryDataStoreDao _storagePoolDao;
+    private PrimaryDataStoreDao storagePoolDao;
     @Inject
-    DataStoreManager dataStoreMgr;
+    private DataStoreManager dataStoreMgr;
 
 
     @Override
     protected List<StoragePool> select(DiskProfile dskCh, 
VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int 
returnUpTo) {
-        s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
+        LOGGER.debug("ZoneWideStoragePoolAllocator to find storage pool");
 
         if (dskCh.useLocalStorage()) {
             return null;
         }
 
-        if (s_logger.isTraceEnabled()) {
+        if (LOGGER.isTraceEnabled()) {
             // Log the pools details that are ignored because they are in 
disabled state
-            List<StoragePoolVO> disabledPools = 
_storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, 
ScopeType.ZONE);
+            List<StoragePoolVO> disabledPools = 
storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, 
ScopeType.ZONE);
             if (disabledPools != null && !disabledPools.isEmpty()) {
                 for (StoragePoolVO pool : disabledPools) {
-                    s_logger.trace("Ignoring pool " + pool + " as it is in 
disabled state.");
+                    LOGGER.trace("Ignoring pool " + pool + " as it is in 
disabled state.");
                 }
             }
         }
 
-        List<StoragePool> suitablePools = new ArrayList<StoragePool>();
+        List<StoragePool> suitablePools = new ArrayList<>();
 
-        List<StoragePoolVO> storagePools = 
_storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), 
dskCh.getTags());
+        List<StoragePoolVO> storagePools = 
storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), 
dskCh.getTags());
         if (storagePools == null) {
-            storagePools = new ArrayList<StoragePoolVO>();
+            storagePools = new ArrayList<>();
         }
 
-        List<StoragePoolVO> anyHypervisorStoragePools = new 
ArrayList<StoragePoolVO>();
+        List<StoragePoolVO> anyHypervisorStoragePools = new ArrayList<>();
         for (StoragePoolVO storagePool : storagePools) {
             if (HypervisorType.Any.equals(storagePool.getHypervisor())) {
                 anyHypervisorStoragePools.add(storagePool);
             }
         }
 
-        List<StoragePoolVO> storagePoolsByHypervisor = 
_storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), 
dskCh.getHypervisorType());
+        List<StoragePoolVO> storagePoolsByHypervisor = 
storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), 
dskCh.getHypervisorType());
         storagePools.retainAll(storagePoolsByHypervisor);
         storagePools.addAll(anyHypervisorStoragePools);
 
         // add remaining pools in zone, that did not match tags, to avoid set
-        List<StoragePoolVO> allPools = 
_storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
+        List<StoragePoolVO> allPools = 
storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
         allPools.removeAll(storagePools);
         for (StoragePoolVO pool : allPools) {
             avoid.addPool(pool.getId());
@@ -100,12 +100,19 @@
             if (filter(avoid, storagePool, dskCh, plan)) {
                 suitablePools.add(storagePool);
             } else {
-                avoid.addPool(storagePool.getId());
+                if (isAddStoragePoolToAvoidSet(storage)) {
+                    avoid.addPool(storagePool.getId());
+                }
             }
         }
         return suitablePools;
     }
 
+    // Don't add zone-wide, managed storage to the avoid list because it may 
be usable for another cluster.
+    private boolean isAddStoragePoolToAvoidSet(StoragePoolVO storagePoolVO) {
+        return !ScopeType.ZONE.equals(storagePoolVO.getScope()) || 
!storagePoolVO.isManaged();
+    }
+
     @Override
     protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan 
plan, List<StoragePool> pools, Account account) {
         if (account == null) {
@@ -113,21 +120,21 @@
         }
         long dcId = plan.getDataCenterId();
 
-        List<Long> poolIdsByVolCount = 
_volumeDao.listZoneWidePoolIdsByVolumeCount(dcId, account.getAccountId());
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("List of pools in ascending order of number of 
volumes for account id: " + account.getAccountId() + " is: " + 
poolIdsByVolCount);
+        List<Long> poolIdsByVolCount = 
volumeDao.listZoneWidePoolIdsByVolumeCount(dcId, account.getAccountId());
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("List of pools in ascending order of number of 
volumes for account id: " + account.getAccountId() + " is: " + 
poolIdsByVolCount);
         }
 
         // now filter the given list of Pools by this ordered list
-        Map<Long, StoragePool> poolMap = new HashMap<Long, StoragePool>();
+        Map<Long, StoragePool> poolMap = new HashMap<>();
         for (StoragePool pool : pools) {
             poolMap.put(pool.getId(), pool);
         }
-        List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
+        List<Long> matchingPoolIds = new ArrayList<>(poolMap.keySet());
 
         poolIdsByVolCount.retainAll(matchingPoolIds);
 
-        List<StoragePool> reorderedPools = new ArrayList<StoragePool>();
+        List<StoragePool> reorderedPools = new ArrayList<>();
         for (Long id : poolIdsByVolCount) {
             reorderedPools.add(poolMap.get(id));
         }
diff --git 
a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java
 
b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java
index f9c27e9d840..701a511a14b 100644
--- 
a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java
+++ 
b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java
@@ -99,10 +99,7 @@ public boolean hostConnect(long hostId, long storagePoolId) {
             storagePoolHostDao.persist(storagePoolHost);
         }
 
-        if (host.getHypervisorType().equals(HypervisorType.XenServer)) {
-            handleXenServer(host.getClusterId(), host.getId(), storagePoolId);
-        }
-        else if (host.getHypervisorType().equals(HypervisorType.KVM)) {
+        if (host.getHypervisorType().equals(HypervisorType.KVM)) {
             handleKVM(hostId, storagePoolId);
         }
 
@@ -137,20 +134,6 @@ public boolean hostRemoved(long hostId, long clusterId) {
         return true;
     }
 
-    private void handleXenServer(long clusterId, long hostId, long 
storagePoolId) {
-        List<String> storagePaths = getStoragePaths(clusterId, storagePoolId);
-
-        StoragePool storagePool = 
(StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
-
-        for (String storagePath : storagePaths) {
-            ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, 
storagePool);
-
-            cmd.setStoragePath(storagePath);
-
-            sendModifyStoragePoolCommand(cmd, storagePool, hostId);
-        }
-    }
-
     private void handleVMware(HostVO host, boolean add, 
ModifyTargetsCommand.TargetTypeToRemove targetTypeToRemove) {
         if (HypervisorType.VMware.equals(host.getHypervisorType())) {
             List<StoragePoolVO> storagePools = 
_storagePoolDao.findPoolsByProvider(SolidFireUtil.PROVIDER_NAME);
@@ -183,35 +166,6 @@ private void handleKVM(long hostId, long storagePoolId) {
         sendModifyStoragePoolCommand(cmd, storagePool, hostId);
     }
 
-    private List<String> getStoragePaths(long clusterId, long storagePoolId) {
-        List<String> storagePaths = new ArrayList<>();
-
-        // If you do not pass in null for the second parameter, you only get 
back applicable ROOT disks.
-        List<VolumeVO> volumes = _volumeDao.findByPoolId(storagePoolId, null);
-
-        if (volumes != null) {
-            for (VolumeVO volume : volumes) {
-                Long instanceId = volume.getInstanceId();
-
-                if (instanceId != null) {
-                    VMInstanceVO vmInstance = _vmDao.findById(instanceId);
-
-                    Long hostIdForVm = vmInstance.getHostId() != null ? 
vmInstance.getHostId() : vmInstance.getLastHostId();
-
-                    if (hostIdForVm != null) {
-                        HostVO hostForVm = _hostDao.findById(hostIdForVm);
-
-                        if (hostForVm != null && 
hostForVm.getClusterId().equals(clusterId)) {
-                            storagePaths.add(volume.get_iScsiName());
-                        }
-                    }
-                }
-            }
-        }
-
-        return storagePaths;
-    }
-
     private List<Map<String, String>> getTargets(long clusterId, long 
storagePoolId) {
         List<Map<String, String>> targets = new ArrayList<>();
 
diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java 
b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
index 724ca8a4745..d5a4f43a707 100644
--- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
@@ -2458,7 +2458,8 @@ public String getConfigComponentName() {
 
     @Override
     public ConfigKey<?>[] getConfigKeys() {
-        return new ConfigKey<?>[] {StorageCleanupInterval, 
StorageCleanupDelay, StorageCleanupEnabled, TemplateCleanupEnabled};
+        return new ConfigKey<?>[] { StorageCleanupInterval, 
StorageCleanupDelay, StorageCleanupEnabled, TemplateCleanupEnabled,
+                KvmStorageOfflineMigrationWait, KvmStorageOnlineMigrationWait, 
MaxNumberOfManagedClusteredFileSystems };
     }
 
     @Override
diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java 
b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
index 2ac317c660f..2ec1d69990b 100644
--- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
@@ -247,6 +247,8 @@
     private ClusterDetailsDao _clusterDetailsDao;
     @Inject
     private StorageManager storageMgr;
+    @Inject
+    private StorageUtil storageUtil;
 
     protected Gson _gson;
 
@@ -2587,6 +2589,42 @@ private synchronized void checkAndSetAttaching(Long 
volumeId, Long hostId) {
         }
     }
 
+    private void verifyManagedStorage(Long storagePoolId, Long hostId) {
+        if (storagePoolId == null || hostId == null) {
+            return;
+        }
+
+        StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId);
+
+        if (storagePoolVO == null || !storagePoolVO.isManaged()) {
+            return;
+        }
+
+        HostVO hostVO = _hostDao.findById(hostId);
+
+        if (hostVO == null) {
+            return;
+        }
+
+        if (!storageUtil.managedStoragePoolCanScale(storagePoolVO, 
hostVO.getClusterId(), hostVO.getId())) {
+            throw new CloudRuntimeException("Insufficient number of available 
" + getNameOfClusteredFileSystem(hostVO));
+        }
+    }
+
+    private String getNameOfClusteredFileSystem(HostVO hostVO) {
+        HypervisorType hypervisorType = hostVO.getHypervisorType();
+
+        if (HypervisorType.XenServer.equals(hypervisorType)) {
+            return "SRs";
+        }
+
+        if (HypervisorType.VMware.equals(hypervisorType)) {
+            return "datastores";
+        }
+
+        return "clustered file systems";
+    }
+
     private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO 
volumeToAttach, Long deviceId) {
         String errorMsg = "Failed to attach volume " + 
volumeToAttach.getName() + " to VM " + vm.getHostName();
         boolean sendCommand = vm.getState() == State.Running;
@@ -2614,6 +2652,8 @@ private VolumeVO sendAttachVolumeCommand(UserVmVO vm, 
VolumeVO volumeToAttach, L
             }
         }
 
+        verifyManagedStorage(volumeToAttachStoragePool.getId(), hostId);
+
         // volumeToAttachStoragePool should be null if the VM we are attaching 
the disk to has never been started before
         DataStore dataStore = volumeToAttachStoragePool != null ? 
dataStoreMgr.getDataStore(volumeToAttachStoragePool.getId(), 
DataStoreRole.Primary) : null;
 


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to