rafaelweingartner commented on a change in pull request #2761: Add managed 
storage pool constraints to MigrateWithVolume API method
URL: https://github.com/apache/cloudstack/pull/2761#discussion_r205643152
 
 

 ##########
 File path: 
engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
 ##########
 @@ -2279,95 +2279,151 @@ protected void migrate(final VMInstanceVO vm, final 
long srcHostId, final Deploy
     }
 
     /**
-     * Create the mapping of volumes and storage pools. If the user did not 
enter a mapping on her/his own, we create one using {@link 
#getDefaultMappingOfVolumesAndStoragePoolForMigration(VirtualMachineProfile, 
Host)}.
-     * If the user provided a mapping, we use whatever the user has provided 
(check the method {@link 
#createMappingVolumeAndStoragePoolEnteredByUser(VirtualMachineProfile, Host, 
Map)}).
+     * We create the mapping of volumes and storage pool to migrate the VMs 
according to the information sent by the user.
+     * If the user did not enter a complete mapping, the volumes that were 
left behind will be auto mapped using {@link 
#createStoragePoolMappingsForVolumes(VirtualMachineProfile, Host, Map, List)}
      */
-    private Map<Volume, StoragePool> 
getPoolListForVolumesForMigration(VirtualMachineProfile profile, Host 
targetHost, Map<Long, Long> volumeToPool) {
-        if (MapUtils.isEmpty(volumeToPool)) {
-            return 
getDefaultMappingOfVolumesAndStoragePoolForMigration(profile, targetHost);
-        }
+    protected Map<Volume, StoragePool> 
createMappingVolumeAndStoragePool(VirtualMachineProfile profile, Host 
targetHost, Map<Long, Long> userDefinedMapOfVolumesAndStoragePools) {
+        Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
+        buildMapUsingUserInformation(profile, targetHost, 
userDefinedMapOfVolumesAndStoragePools, volumeToPoolObjectMap);
 
-        return createMappingVolumeAndStoragePoolEnteredByUser(profile, 
targetHost, volumeToPool);
+        List<Volume> volumesNotMapped = 
findVolumesThatWereNotMappedByTheUser(profile, volumeToPoolObjectMap);
+        createStoragePoolMappingsForVolumes(profile, targetHost, 
volumeToPoolObjectMap, volumesNotMapped);
+        return volumeToPoolObjectMap;
     }
 
     /**
-     * We create the mapping of volumes and storage pool to migrate the VMs 
according to the information sent by the user.
+     *  Given the map of volume to target storage pool entered by the user, we 
check for other volumes that the VM might have that were not configured.
+     *  This map can then be used by CloudStack to find new target storage 
pools according to the target host.
+     */
+    private List<Volume> 
findVolumesThatWereNotMappedByTheUser(VirtualMachineProfile profile, 
Map<Volume, StoragePool> volumeToStoragePoolObjectMap) {
+        List<VolumeVO> allVolumes = 
_volsDao.findUsableVolumesForInstance(profile.getId());
+        List<Volume> volumesNotMapped = new ArrayList<>();
+        for (Volume volume : volumeToStoragePoolObjectMap.keySet()) {
+            if (!allVolumes.contains(volume)) {
+                volumesNotMapped.add(volume);
+            }
+        }
+        return volumesNotMapped;
+    }
+
+    /**
+     *  Builds the map of storage pools and volumes with the information 
entered by the user. Before creating an entry, we validate whether the migration 
is feasible by checking that the migration is allowed and that the target host 
can access the defined target storage pool.
      */
-    private Map<Volume, StoragePool> 
createMappingVolumeAndStoragePoolEnteredByUser(VirtualMachineProfile profile, 
Host host, Map<Long, Long> volumeToPool) {
-        Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<Volume, 
StoragePool>();
-        for(Long volumeId: volumeToPool.keySet()) {
+    private void buildMapUsingUserInformation(VirtualMachineProfile profile, 
Host targetHost, Map<Long, Long> userDefinedVolumeToStoragePoolMap, Map<Volume, 
StoragePool> volumeToPoolObjectMap) {
+        if (MapUtils.isEmpty(userDefinedVolumeToStoragePoolMap)) {
+            return;
+        }
+        for(Long volumeId: userDefinedVolumeToStoragePoolMap.keySet()) {
             VolumeVO volume = _volsDao.findById(volumeId);
 
-            Long poolId = volumeToPool.get(volumeId);
+            Long poolId = userDefinedVolumeToStoragePoolMap.get(volumeId);
             StoragePoolVO targetPool = _storagePoolDao.findById(poolId);
             StoragePoolVO currentPool = 
_storagePoolDao.findById(volume.getPoolId());
 
-            if (_poolHostDao.findByPoolHost(targetPool.getId(), host.getId()) 
== null) {
-                throw new CloudRuntimeException(String.format("Cannot migrate 
the volume [%s] to the storage pool [%s] while migrating VM [%s] to target host 
[%s]. The host does not have access to the storage pool entered.", 
volume.getUuid(), targetPool.getUuid(), profile.getUuid(), host.getUuid()));
+            
executeManagedStorageChecksWhenTargetStoragePoolProvided(currentPool, volume, 
targetPool);
+            if (_poolHostDao.findByPoolHost(targetPool.getId(), 
targetHost.getId()) == null) {
+                throw new CloudRuntimeException(
+                        String.format("Cannot migrate the volume [%s] to the 
storage pool [%s] while migrating VM [%s] to target host [%s]. The host does 
not have access to the storage pool entered.",
+                                volume.getUuid(), targetPool.getUuid(), 
profile.getUuid(), targetHost.getUuid()));
             }
             if (currentPool.getId() == targetPool.getId()) {
                 s_logger.info(String.format("The volume [%s] is already 
allocated in storage pool [%s].", volume.getUuid(), targetPool.getUuid()));
             }
             volumeToPoolObjectMap.put(volume, targetPool);
         }
-        return volumeToPoolObjectMap;
     }
 
     /**
-     * We create the default mapping of volumes and storage pools for the 
migration of the VM to the target host.
-     * If the current storage pool of one of the volumes is using local 
storage in the host, it then needs to be migrated to a local storage in the 
target host.
-     * Otherwise, we do not need to migrate, and the volume can be kept in its 
current storage pool.
+     * For each one of the volumes we will map it to a storage pool that is 
available via the target host.
+     * An exception is thrown if we cannot find a storage pool that is 
accessible in the target host to migrate the volume to.
      */
-    private Map<Volume, StoragePool> 
getDefaultMappingOfVolumesAndStoragePoolForMigration(VirtualMachineProfile 
profile, Host targetHost) {
-        Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<Volume, 
StoragePool>();
-        List<VolumeVO> allVolumes = 
_volsDao.findUsableVolumesForInstance(profile.getId());
-        for (VolumeVO volume : allVolumes) {
+    private void createStoragePoolMappingsForVolumes(VirtualMachineProfile 
profile, Host targetHost, Map<Volume, StoragePool> volumeToPoolObjectMap, 
List<Volume> allVolumes) {
+        for (Volume volume : allVolumes) {
             StoragePoolVO currentPool = 
_storagePoolDao.findById(volume.getPoolId());
-            if (ScopeType.HOST.equals(currentPool.getScope())) {
-                createVolumeToStoragePoolMappingIfNeeded(profile, targetHost, 
volumeToPoolObjectMap, volume, currentPool);
+
+            
executeManagedStorageChecksWhenTargetStoragePoolNotProvided(targetHost, 
currentPool, volume);
 
 Review comment:
  This is the bit where I am confused. Let's say a VM has two volumes. Volume 
A is in managed storage at the cluster level, and volume B is in local 
storage. Then, when I want to migrate this VM between hosts of the same 
cluster, this method is triggered. 
   
  However, an exception will be raised here, since the VM needs to migrate a 
volume between the local storages of the hosts. Volume B is going to be mapped 
to a local storage at the target host, and volume A can be left where it is. The 
problem is that this validation here will raise an exception for volume A, 
because it is on cluster-wide managed storage and we are not executing those 
checks between the cluster IDs of the targetHost and the current storage pool.
   
   Do you understand what I am saying?

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to