This is an automated email from the ASF dual-hosted git repository.

dahn pushed a commit to branch 4.20
in repository https://gitbox.apache.org/repos/asf/cloudstack.git


The following commit(s) were added to refs/heads/4.20 by this push:
     new 79ebf6959e1 refactor storagepool automation (#11789)
79ebf6959e1 is described below

commit 79ebf6959e174ef8d366f4770d136be46fc46ba4
Author: dahn <[email protected]>
AuthorDate: Thu Dec 11 09:04:23 2025 +0100

    refactor storagepool automation (#11789)
    
    Co-authored-by: Daan Hoogland <[email protected]>
---
 .../cloud/storage/StoragePoolAutomationImpl.java   | 517 +++++++++------------
 1 file changed, 227 insertions(+), 290 deletions(-)
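
The heart of the refactor in the diff below: the four near-identical per-VM-type stop/start blocks in maintain() and cancelMaintain() collapse into a single switch over the VM type that dispatches to shared helpers. A condensed sketch of the new dispatch, distilled from the diff (illustrative only, exception handling elided; see processMigrationWorkloads() in the diff for the committed version):

    // One switch replaces four if-blocks, one per VM type.
    switch (vmInstance.getType()) {
        case ConsoleProxy:
        case SecondaryStorageVm:
        case DomainRouter:
            // system VMs: stop, then restart if another Up pool can host them
            handleVmMigration(restart, work, vmInstance);
            break;
        case User:
            // user VMs are only stopped during prepare-for-maintenance
            handleStopVmForMigration(work, vmInstance);
            break;
    }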

diff --git a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java
index 3ce23a0bd3b..612582640f4 100644
--- a/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java
+++ b/server/src/main/java/com/cloud/storage/StoragePoolAutomationImpl.java
@@ -18,19 +18,18 @@
  */
 package com.cloud.storage;
 
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
 import javax.inject.Inject;
 
-import org.apache.cloudstack.context.CallContext;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.exception.ResourceUnavailableException;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.commons.collections4.CollectionUtils;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.LogManager;
 import org.springframework.stereotype.Component;
@@ -39,32 +38,18 @@ import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.ModifyStoragePoolCommand;
 import com.cloud.agent.api.ModifyStoragePoolAnswer;
-import com.cloud.alert.AlertManager;
 import com.cloud.host.HostVO;
 import com.cloud.host.Status;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.resource.ResourceManager;
 import com.cloud.server.ManagementServer;
-import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.storage.dao.StoragePoolWorkDao;
 import com.cloud.storage.dao.VolumeDao;
-import com.cloud.user.Account;
-import com.cloud.user.User;
-import com.cloud.user.dao.UserDao;
 import com.cloud.utils.Pair;
 import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.vm.ConsoleProxyVO;
-import com.cloud.vm.DomainRouterVO;
-import com.cloud.vm.SecondaryStorageVmVO;
-import com.cloud.vm.UserVmVO;
 import com.cloud.vm.VMInstanceVO;
-import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachine.State;
 import com.cloud.vm.VirtualMachineManager;
-import com.cloud.vm.dao.ConsoleProxyDao;
-import com.cloud.vm.dao.DomainRouterDao;
-import com.cloud.vm.dao.SecondaryStorageVmDao;
-import com.cloud.vm.dao.UserVmDao;
 import com.cloud.vm.dao.VMInstanceDao;
 
 @Component
@@ -72,30 +57,11 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
     protected Logger logger = LogManager.getLogger(getClass());
     @Inject
     protected VirtualMachineManager vmMgr;
-    @Inject
-    protected SecondaryStorageVmDao _secStrgDao;
-    @Inject
-    UserVmDao userVmDao;
-    @Inject
-    protected UserDao _userDao;
-    @Inject
-    protected DomainRouterDao _domrDao;
-    @Inject
-    protected StoragePoolHostDao _storagePoolHostDao;
-    @Inject
-    protected AlertManager _alertMgr;
-    @Inject
-    protected ConsoleProxyDao _consoleProxyDao;
-
     @Inject
     protected StoragePoolWorkDao _storagePoolWorkDao;
     @Inject
     PrimaryDataStoreDao primaryDataStoreDao;
     @Inject
-    StoragePoolDetailsDao storagePoolDetailsDao;
-    @Inject
-    DataStoreManager dataStoreMgr;
-    @Inject
     protected ResourceManager _resourceMgr;
     @Inject
     AgentManager agentMgr;
@@ -106,235 +72,281 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
     @Inject
     ManagementServer server;
     @Inject
-    DataStoreProviderManager providerMgr;
-    @Inject
     StorageManager storageManager;
 
     @Override
     public boolean maintain(DataStore store) {
-        Long userId = CallContext.current().getCallingUserId();
-        User user = _userDao.findById(userId);
-        Account account = CallContext.current().getCallingAccount();
         StoragePoolVO pool = primaryDataStoreDao.findById(store.getId());
         try {
-            List<StoragePoolVO> spes = null;
-            // Handling Zone and Cluster wide storage scopes.
-            // if the storage is ZONE wide then we pass podid and cluster id as null as they will be empty for ZWPS
-            if (pool.getScope() == ScopeType.ZONE) {
-                spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), null, null, ScopeType.ZONE);
-            } else {
-                spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), pool.getPodId(), pool.getClusterId(), ScopeType.CLUSTER);
-            }
-            for (StoragePoolVO sp : spes) {
-                if (sp.getParent() != pool.getParent() && sp.getId() != pool.getParent()) { // If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode
-                    if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) {
-                        throw new CloudRuntimeException(String.format("Only one storage pool in a cluster can be in PrepareForMaintenance mode, %s is already in  PrepareForMaintenance mode ", sp));
-                    }
-                }
-            }
-            StoragePool storagePool = (StoragePool)store;
-
-            //Handeling the Zone wide and cluster wide primay storage
-            List<HostVO> hosts = new ArrayList<HostVO>();
-            // if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZWSP created to send Modifystoragepoolcommand
-            //TODO: if it's zone wide, this code will list a lot of hosts in the zone, which may cause performance/OOM issue.
-            if (pool.getScope().equals(ScopeType.ZONE)) {
-                if (HypervisorType.Any.equals(pool.getHypervisor())) {
-                    hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId());
-                }
-                else {
-                    hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor(), pool.getDataCenterId());
-                }
-            } else {
-                hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up);
+            getStoragePoolForSpecification(pool);
+
+            List<HostVO> hosts = getHostsForStoragePool(pool);
+
+            if (setNextStateForMaintenance(hosts, pool) == StoragePoolStatus.PrepareForMaintenance) {
+                removeHeartbeatForHostsFromPool(hosts, pool);
+                // check to see if other ps exist
+                // if they do, then we can migrate over the system vms to them
+                // if they don't, then just stop all vms on this one
+                List<StoragePoolVO> upPools = primaryDataStoreDao.listByStatusInZone(pool.getDataCenterId(), StoragePoolStatus.Up);
+                boolean restart = !CollectionUtils.isEmpty(upPools);
+
+                // 2. Get a list of all the ROOT volumes within this storage pool
+                List<VolumeVO> allVolumes = volumeDao.findByPoolId(pool.getId());
+                // 3. Enqueue to the work queue
+                enqueueMigrationsForVolumes(allVolumes, pool);
+                // 4. Process the queue
+                processMigrationWorkloads(pool, restart);
             }
+        } catch (Exception e) {
+            logger.error("Exception in enabling primary storage maintenance:", 
e);
+            pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
+            primaryDataStoreDao.update(pool.getId(), pool);
+            // TODO decide on what recovery is possible
+            throw new CloudRuntimeException(e.getMessage());
+        }
+        return true;
+    }
 
-            if (hosts == null || hosts.size() == 0) {
-                pool.setStatus(StoragePoolStatus.Maintenance);
-                primaryDataStoreDao.update(pool.getId(), pool);
-                return true;
-            } else {
-                // set the pool state to prepare for maintenance
-                pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
-                primaryDataStoreDao.update(pool.getId(), pool);
-            }
-            // remove heartbeat
-            for (HostVO host : hosts) {
-                ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, storagePool);
-                final Answer answer = agentMgr.easySend(host.getId(), cmd);
-                if (answer == null || !answer.getResult()) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("ModifyStoragePool false failed due to " 
+ ((answer == null) ? "answer null" : answer.getDetails()));
-                    }
-                } else {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("ModifyStoragePool false succeeded");
-                    }
-                    if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
-                        logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool);
-                        storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId());
-                    }
-                }
-            }
-            // check to see if other ps exist
-            // if they do, then we can migrate over the system vms to them
-            // if they don't, then just stop all vms on this one
-            List<StoragePoolVO> upPools = primaryDataStoreDao.listByStatusInZone(pool.getDataCenterId(), StoragePoolStatus.Up);
-            boolean restart = true;
-            if (upPools == null || upPools.size() == 0) {
-                restart = false;
-            }
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        // Change the storage state back to up
+        StoragePoolVO poolVO = primaryDataStoreDao.findById(store.getId());
+        StoragePool pool = (StoragePool)store;
 
-            // 2. Get a list of all the ROOT volumes within this storage pool
-            List<VolumeVO> allVolumes = volumeDao.findByPoolId(pool.getId());
+        List<HostVO> hosts = getHostsForStoragePool(poolVO);
 
-            // 3. Enqueue to the work queue
-            for (VolumeVO volume : allVolumes) {
-                VMInstanceVO vmInstance = vmDao.findById(volume.getInstanceId());
+        if (CollectionUtils.isEmpty(hosts)) {
+            return true;
+        }
 
-                if (vmInstance == null) {
-                    continue;
-                }
+        Pair<Map<String, String>, Boolean> nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null);
+        addHeartbeatToHostsInPool(hosts, pool, nfsMountOpts);
 
-                // enqueue sp work
-                if (vmInstance.getState().equals(State.Running) || vmInstance.getState().equals(State.Starting) || vmInstance.getState().equals(State.Stopping)) {
-
-                    try {
-                        StoragePoolWorkVO work = new StoragePoolWorkVO(vmInstance.getId(), pool.getId(), false, false, server.getId());
-                        _storagePoolWorkDao.persist(work);
-                    } catch (Exception e) {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("Work record already exists, re-using 
by re-setting values");
-                        }
-                        StoragePoolWorkVO work = 
_storagePoolWorkDao.findByPoolIdAndVmId(pool.getId(), vmInstance.getId());
-                        work.setStartedAfterMaintenance(false);
-                        work.setStoppedForMaintenance(false);
-                        work.setManagementServerId(server.getId());
-                        _storagePoolWorkDao.update(work.getId(), work);
-                    }
-                }
+        // 2. Get a list of pending work for this queue
+        List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
+
+        // 3. work through the queue
+        cancelMigrationWorkloads(pendingWork);
+        return false;
+    }
+
+    private StoragePoolStatus setNextStateForMaintenance(List<HostVO> hosts, StoragePoolVO pool) {
+        if (CollectionUtils.isEmpty(hosts)) {
+            pool.setStatus(StoragePoolStatus.Maintenance);
+            primaryDataStoreDao.update(pool.getId(), pool);
+            return StoragePoolStatus.Maintenance;
+        } else {
+            // set the pool state to prepare for maintenance
+            pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
+            primaryDataStoreDao.update(pool.getId(), pool);
+            return StoragePoolStatus.PrepareForMaintenance;
+        }
+    }
+
+    private void processMigrationWorkloads(StoragePoolVO pool, boolean restart) throws ResourceUnavailableException, OperationTimedoutException, InsufficientCapacityException {
+        List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForPrepareForMaintenanceByPoolId(pool.getId());
+
+        for (StoragePoolWorkVO work : pendingWork) {
+            // shut down the running vms
+            VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
+
+            if (vmInstance == null) {
+                continue;
             }
 
-            // 4. Process the queue
-            List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForPrepareForMaintenanceByPoolId(pool.getId());
+            switch (vmInstance.getType()) {
+                case ConsoleProxy:
+                case SecondaryStorageVm:
+                case DomainRouter:
+                    handleVmMigration(restart, work, vmInstance);
+                    break;
+                case User:
+                    handleStopVmForMigration(work, vmInstance);
+                    break;
+            }
+        }
+    }
 
-            for (StoragePoolWorkVO work : pendingWork) {
-                // shut down the running vms
+    private void cancelMigrationWorkloads(List<StoragePoolWorkVO> pendingWork) {
+        for (StoragePoolWorkVO work : pendingWork) {
+            try {
                 VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
 
                 if (vmInstance == null) {
                     continue;
                 }
 
-                // if the instance is of type consoleproxy, call the console
-                // proxy
-                if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) {
-                    // call the consoleproxymanager
-                    ConsoleProxyVO consoleProxy = _consoleProxyDao.findById(vmInstance.getId());
-                    vmMgr.advanceStop(consoleProxy.getUuid(), false);
-                    // update work status
-                    work.setStoppedForMaintenance(true);
-                    _storagePoolWorkDao.update(work.getId(), work);
+                switch (vmInstance.getType()) {
+                    case ConsoleProxy:
+                    case SecondaryStorageVm:
+                    case DomainRouter:
+                        handleVmStart(work, vmInstance);
+                        break;
+                    case User:
+                        handleUserVmStart(work, vmInstance);
+                        break;
+                }
+            } catch (Exception e) {
+                logger.debug("Failed start vm", e);
+                throw new CloudRuntimeException(e.toString());
+            }
+        }
+    }
 
-                    if (restart) {
+    private void handleStopVmForMigration(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws ResourceUnavailableException, OperationTimedoutException {
+        vmMgr.advanceStop(vmInstance.getUuid(), false);
+        // update work status
+        work.setStoppedForMaintenance(true);
+        _storagePoolWorkDao.update(work.getId(), work);
+    }
 
-                        vmMgr.advanceStart(consoleProxy.getUuid(), null, null);
-                        // update work status
-                        work.setStartedAfterMaintenance(true);
-                        _storagePoolWorkDao.update(work.getId(), work);
-                    }
-                }
+    private void handleVmMigration(boolean restart, StoragePoolWorkVO work, VMInstanceVO vmInstance) throws ResourceUnavailableException, OperationTimedoutException, InsufficientCapacityException {
+        handleStopVmForMigration(work, vmInstance);
 
-                // if the instance is of type uservm, call the user vm manager
-                if (vmInstance.getType() == VirtualMachine.Type.User) {
-                    UserVmVO userVm = userVmDao.findById(vmInstance.getId());
-                    vmMgr.advanceStop(userVm.getUuid(), false);
-                    // update work status
-                    work.setStoppedForMaintenance(true);
-                    _storagePoolWorkDao.update(work.getId(), work);
-                }
+        if (restart) {
+            handleVmStart(work, vmInstance);
+        }
+    }
+    private void handleVmStart(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws InsufficientCapacityException, ResourceUnavailableException, OperationTimedoutException {
+        vmMgr.advanceStart(vmInstance.getUuid(), null, null);
+        // update work queue
+        work.setStartedAfterMaintenance(true);
+        _storagePoolWorkDao.update(work.getId(), work);
+    }
 
-                // if the instance is of type secondary storage vm, call the
-                // secondary storage vm manager
-                if (vmInstance.getType().equals(VirtualMachine.Type.SecondaryStorageVm)) {
-                    SecondaryStorageVmVO secStrgVm = _secStrgDao.findById(vmInstance.getId());
-                    vmMgr.advanceStop(secStrgVm.getUuid(), false);
-                    // update work status
-                    work.setStoppedForMaintenance(true);
-                    _storagePoolWorkDao.update(work.getId(), work);
+    private void enqueueMigrationsForVolumes(List<VolumeVO> allVolumes, StoragePoolVO pool) {
+        for (VolumeVO volume : allVolumes) {
+            VMInstanceVO vmInstance = vmDao.findById(volume.getInstanceId());
 
-                    if (restart) {
-                        vmMgr.advanceStart(secStrgVm.getUuid(), null, null);
-                        // update work status
-                        work.setStartedAfterMaintenance(true);
-                        _storagePoolWorkDao.update(work.getId(), work);
-                    }
-                }
+            if (vmInstance == null) {
+                continue;
+            }
 
-                // if the instance is of type domain router vm, call the network
-                // manager
-                if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) {
-                    DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
-                    vmMgr.advanceStop(domR.getUuid(), false);
-                    // update work status
-                    work.setStoppedForMaintenance(true);
-                    _storagePoolWorkDao.update(work.getId(), work);
+            // enqueue sp work
+            if (vmInstance.getState().equals(State.Running) || vmInstance.getState().equals(State.Starting) || vmInstance.getState().equals(State.Stopping)) {
 
-                    if (restart) {
-                        vmMgr.advanceStart(domR.getUuid(), null, null);
-                        // update work status
-                        work.setStartedAfterMaintenance(true);
-                        _storagePoolWorkDao.update(work.getId(), work);
+                try {
+                    StoragePoolWorkVO work = new StoragePoolWorkVO(vmInstance.getId(), pool.getId(), false, false, server.getId());
+                    _storagePoolWorkDao.persist(work);
+                } catch (Exception e) {
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("Work record already exists, re-using by 
re-setting values");
                     }
+                    StoragePoolWorkVO work = 
_storagePoolWorkDao.findByPoolIdAndVmId(pool.getId(), vmInstance.getId());
+                    work.setStartedAfterMaintenance(false);
+                    work.setStoppedForMaintenance(false);
+                    work.setManagementServerId(server.getId());
+                    _storagePoolWorkDao.update(work.getId(), work);
                 }
             }
-        } catch (Exception e) {
-            logger.error("Exception in enabling primary storage maintenance:", 
e);
-            pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
-            primaryDataStoreDao.update(pool.getId(), pool);
-            throw new CloudRuntimeException(e.getMessage());
         }
-        return true;
     }
 
-    @Override
-    public boolean cancelMaintain(DataStore store) {
-        // Change the storage state back to up
-        Long userId = CallContext.current().getCallingUserId();
-        User user = _userDao.findById(userId);
-        Account account = CallContext.current().getCallingAccount();
-        StoragePoolVO poolVO = primaryDataStoreDao.findById(store.getId());
-        StoragePool pool = (StoragePool)store;
+    private void removeHeartbeatForHostsFromPool(List<HostVO> hosts, StoragePool storagePool) {
+        // remove heartbeat
+        for (HostVO host : hosts) {
+            ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(false, storagePool);
+            final Answer answer = agentMgr.easySend(host.getId(), cmd);
+            if (answer == null || !answer.getResult()) {
+                if (logger.isDebugEnabled()) {
+                    logger.debug("ModifyStoragePool false failed due to {}", ((answer == null) ? "answer null" : answer.getDetails()));
+                }
+            } else {
+                reportSucceededModifyStorePool(storagePool, (ModifyStoragePoolAnswer) answer, host, false);
+            }
+        }
+    }
 
-        //Handeling the Zone wide and cluster wide primay storage
-        List<HostVO> hosts = new ArrayList<HostVO>();
-        // if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZWSP created to send Modifystoragepoolcommand
-        if (poolVO.getScope().equals(ScopeType.ZONE)) {
+    private void reportSucceededModifyStorePool(StoragePool storagePool, ModifyStoragePoolAnswer answer, HostVO host, boolean add) {
+        if (logger.isDebugEnabled()) {
+            logger.debug("ModifyStoragePool succeeded for {}", add ? "adding" : "removing");
+        }
+        if (storagePool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
+            logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", storagePool);
+            storageManager.syncDatastoreClusterStoragePool(storagePool.getId(), answer.getDatastoreClusterChildren(), host.getId());
+        }
+    }
+
+    /**
+     * Handling the Zone wide and cluster wide primary storage
+     * if the storage scope is ZONE wide, then get all the hosts for which hypervisor ZoneWideStoragePools created to send ModifyStoragePoolCommand
+     * TODO: if it's zone wide, this code will list a lot of hosts in the zone, which may cause performance/OOM issue.
+     * @param pool pool to check for connected hosts
+     * @return a list of connected hosts
+     */
+    private List<HostVO> getHostsForStoragePool(StoragePoolVO pool) {
+        List<HostVO> hosts;
+        if (pool.getScope().equals(ScopeType.ZONE)) {
             if (HypervisorType.Any.equals(pool.getHypervisor())) {
                hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId());
             }
             else {
-                hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(poolVO.getHypervisor(), pool.getDataCenterId());
+                hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor(), pool.getDataCenterId());
             }
         } else {
            hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up);
         }
+        return hosts;
+    }
 
-        if (hosts == null || hosts.size() == 0) {
-            return true;
+    /**
+     * Handling Zone and Cluster wide storage scopes. Depending on the scope of the pool, check for other storage pools in the same scope
+     * If the storage is ZONE wide then we pass podId and cluster id as null as they will be empty for Zone wide storage
+     *
+     * @param pool pool to check for other pools in the same scope
+     */
+    private void getStoragePoolForSpecification(StoragePoolVO pool) {
+        List<StoragePoolVO> storagePools;
+        if (pool.getScope() == ScopeType.ZONE) {
+            storagePools = primaryDataStoreDao.listBy(pool.getDataCenterId(), null, null, ScopeType.ZONE);
+        } else {
+            storagePools = primaryDataStoreDao.listBy(pool.getDataCenterId(), pool.getPodId(), pool.getClusterId(), ScopeType.CLUSTER);
         }
+        checkHierarchyForPreparingForMaintenance(pool, storagePools);
+    }
 
-        Pair<Map<String, String>, Boolean> nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null);
-        // add heartbeat
+    /**
+     * If Datastore cluster is tried to prepare for maintenance then child storage pools are also kept in PrepareForMaintenance mode
+     * @param pool target to put in maintenance
+     * @param storagePools list of possible peers/parents/children
+     */
+    private static void checkHierarchyForPreparingForMaintenance(StoragePoolVO pool, List<StoragePoolVO> storagePools) {
+        for (StoragePoolVO storagePool : storagePools) {
+            if (!(storagePool.getParent().equals(pool.getParent()) || !pool.getParent().equals(storagePool.getId())) &&
+                (storagePool.getStatus() == StoragePoolStatus.PrepareForMaintenance)) {
+                    throw new CloudRuntimeException(String.format("Only one storage pool in a cluster can be in PrepareForMaintenance mode, %s is already in  PrepareForMaintenance mode ", storagePool));
+            }
+        }
+    }
+
+    /**
+     *         // check if the vm has a root volume. If not, remove the item from the queue, the vm should be
+     *         // started only when it has at least one root volume attached to it
+     *         // don't allow to start vm that doesn't have a root volume
+     * @param work work item to handle for this VM
+     * @param vmInstance VM to start
+     * @throws InsufficientCapacityException no migration target found
+     * @throws ResourceUnavailableException a resource required for migration is not in the expected state
+     * @throws OperationTimedoutException migration operation took too long
+     */
+    private void handleUserVmStart(StoragePoolWorkVO work, VMInstanceVO vmInstance) throws InsufficientCapacityException, ResourceUnavailableException, OperationTimedoutException {
+        if (volumeDao.findByInstanceAndType(vmInstance.getId(), Volume.Type.ROOT).isEmpty()) {
+            _storagePoolWorkDao.remove(work.getId());
+        } else {
+            handleVmStart(work, vmInstance);
+        }
+    }
+
+    private void addHeartbeatToHostsInPool(List<HostVO> hosts, StoragePool pool, Pair<Map<String, String>, Boolean> nfsMountOpts) {
         for (HostVO host : hosts) {
            ModifyStoragePoolCommand msPoolCmd = new ModifyStoragePoolCommand(true, pool, nfsMountOpts.first());
             final Answer answer = agentMgr.easySend(host.getId(), msPoolCmd);
             if (answer == null || !answer.getResult()) {
                 if (logger.isDebugEnabled()) {
-                    logger.debug("ModifyStoragePool add failed due to " + 
((answer == null) ? "answer null" : answer.getDetails()));
+                    logger.debug("ModifyStoragePool add failed due to {}", 
((answer == null) ? "answer null" : answer.getDetails()));
                 }
                 if (answer != null && nfsMountOpts.second()) {
-                    logger.error(String.format("Unable to attach storage pool 
to the host %s due to %s",  host,  answer.getDetails()));
+                    logger.error("Unable to attach storage pool to the host {} 
due to {}",  host,  answer.getDetails());
                     StringBuilder exceptionSB = new StringBuilder("Unable to 
attach storage pool to the host ").append(host.getName());
                     String reason = 
storageManager.getStoragePoolMountFailureReason(answer.getDetails());
                     if (reason!= null) {
@@ -343,84 +355,9 @@ public class StoragePoolAutomationImpl implements StoragePoolAutomation {
                     throw new CloudRuntimeException(exceptionSB.toString());
                 }
             } else {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("ModifyStoragePool add succeeded");
-                }
                 storageManager.updateStoragePoolHostVOAndBytes(pool, host.getId(), (ModifyStoragePoolAnswer) answer);
-                if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster) {
-                    logger.debug("Started synchronising datastore cluster storage pool {} with vCenter", pool);
-                    storageManager.syncDatastoreClusterStoragePool(pool.getId(), ((ModifyStoragePoolAnswer) answer).getDatastoreClusterChildren(), host.getId());
-                }
+                reportSucceededModifyStorePool(pool, (ModifyStoragePoolAnswer) answer, host, true);
             }
         }
-
-        // 2. Get a list of pending work for this queue
-        List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao.listPendingWorkForCancelMaintenanceByPoolId(poolVO.getId());
-
-        // 3. work through the queue
-        for (StoragePoolWorkVO work : pendingWork) {
-            try {
-                VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
-
-                if (vmInstance == null) {
-                    continue;
-                }
-
-                // if the instance is of type consoleproxy, call the console
-                // proxy
-                if (vmInstance.getType().equals(VirtualMachine.Type.ConsoleProxy)) {
-
-                    ConsoleProxyVO consoleProxy = _consoleProxyDao
-                            .findById(vmInstance.getId());
-                    vmMgr.advanceStart(consoleProxy.getUuid(), null, null);
-                    // update work queue
-                    work.setStartedAfterMaintenance(true);
-                    _storagePoolWorkDao.update(work.getId(), work);
-                }
-
-                // if the instance is of type ssvm, call the ssvm manager
-                if (vmInstance.getType().equals(
-                        VirtualMachine.Type.SecondaryStorageVm)) {
-                    SecondaryStorageVmVO ssVm = _secStrgDao.findById(vmInstance
-                            .getId());
-                    vmMgr.advanceStart(ssVm.getUuid(), null, null);
-
-                    // update work queue
-                    work.setStartedAfterMaintenance(true);
-                    _storagePoolWorkDao.update(work.getId(), work);
-                }
-
-                // if the instance is of type domain router vm, call the network
-                // manager
-                if (vmInstance.getType().equals(VirtualMachine.Type.DomainRouter)) {
-                    DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
-                    vmMgr.advanceStart(domR.getUuid(), null, null);
-                    // update work queue
-                    work.setStartedAfterMaintenance(true);
-                    _storagePoolWorkDao.update(work.getId(), work);
-                }
-
-                // if the instance is of type user vm, call the user vm manager
-                if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
-                    // check if the vm has a root volume. If not, remove the item from the queue, the vm should be
-                    // started only when it has at least one root volume attached to it
-                    // don't allow to start vm that doesn't have a root volume
-                    if (volumeDao.findByInstanceAndType(vmInstance.getId(), Volume.Type.ROOT).isEmpty()) {
-                        _storagePoolWorkDao.remove(work.getId());
-                    } else {
-                        UserVmVO userVm = userVmDao.findById(vmInstance.getId());
-
-                        vmMgr.advanceStart(userVm.getUuid(), null, null);
-                        work.setStartedAfterMaintenance(true);
-                        _storagePoolWorkDao.update(work.getId(), work);
-                    }
-                }
-            } catch (Exception e) {
-                logger.debug("Failed start vm", e);
-                throw new CloudRuntimeException(e.toString());
-            }
-        }
-        return false;
     }
-
 }
