This is an automated email from the ASF dual-hosted git repository.

shwstppr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
commit cce736709e0ef13f23584fc32e68b26833a75743
Merge: 22f6c19 a64ad9d
Author: Abhishek Kumar <[email protected]>
AuthorDate: Mon Apr 12 11:43:57 2021 +0530

    Merge remote-tracking branch 'apache/4.15'

 .../java/com/cloud/vm/snapshot/VMSnapshot.java     |   1 +
 .../command/admin/usage/ListUsageRecordsCmd.java   |   9 ++
 .../agent/api/storage/MigrateVolumeCommand.java    |  16 ++--
 .../com/cloud/vm/VirtualMachineManagerImpl.java    |  34 +++----
 .../vmsnapshot/DefaultVMSnapshotStrategy.java      |  15 +--
 .../java/com/cloud/hypervisor/guru/VMwareGuru.java |  73 ++++++++------
 .../hypervisor/vmware/resource/VmwareResource.java |  22 ++---
 .../motion/VmwareStorageMotionStrategy.java        | 106 ++++++++++-----------
 .../apache/cloudstack/ldap/LdapManagerImpl.java    |  38 +++++++-
 .../com/cloud/storage/VolumeApiServiceImpl.java    |   8 +-
 .../java/com/cloud/usage/UsageServiceImpl.java     | 105 ++++++++++++++++----
 .../main/java/com/cloud/vm/UserVmManagerImpl.java  |   6 ++
 systemvm/debian/etc/iptables/iptables-dhcpsrvr     |   1 -
 systemvm/debian/opt/cloud/bin/cs/CsAddress.py      |   5 +-
 systemvm/debian/opt/cloud/bin/cs/CsApp.py          |  10 --
 test/integration/smoke/test_vm_life_cycle.py       |   5 +
 .../hypervisor/vmware/mo/HypervisorHostHelper.java |  17 ++++
 .../hypervisor/vmware/mo/VirtualMachineMO.java     |  29 +++---
 .../cloud/hypervisor/vmware/util/VmwareHelper.java |  25 +++--
 19 files changed, 333 insertions(+), 192 deletions(-)

diff --cc engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index 1e92c3e,6faeeb5..1a91ea5
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@@ -131,6 -124,6 +126,10 @@@ import com.cloud.agent.api.to.VirtualMa
  import com.cloud.agent.manager.Commands;
  import com.cloud.agent.manager.allocator.HostAllocator;
  import com.cloud.alert.AlertManager;
++import com.cloud.api.query.dao.DomainRouterJoinDao;
++import com.cloud.api.query.dao.UserVmJoinDao;
++import com.cloud.api.query.vo.DomainRouterJoinVO;
++import com.cloud.api.query.vo.UserVmJoinVO;
  import com.cloud.capacity.CapacityManager;
  import com.cloud.configuration.Resource.ResourceType;
  import com.cloud.dc.ClusterDetailsDao;
@@@ -2324,34 -2235,32 +2324,33 @@@ public class VirtualMachineManagerImpl
          return null;
      }
  
 -    private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm, Long srcClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException {
 +    private void afterHypervisorMigrationCleanup(VMInstanceVO vm, Map<Volume, StoragePool> volumeToPool, Long sourceClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException {
          boolean isDebugEnabled = s_logger.isDebugEnabled();
          if(isDebugEnabled) {
 -            String msg = String.format("cleaning up after hypervisor pool migration volumes for VM %s(%s) to pool %s(%s)", vm.getInstanceName(), vm.getUuid(), destPool.getName(), destPool.getUuid());
 +            String msg = String.format("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid());
              s_logger.debug(msg);
          }
 -        setDestinationPoolAndReallocateNetwork(destPool, vm);
 -        // OfflineVmwareMigration: don't set this to null or have another way to address the command; twice migrating will lead to an NPE
 -        Long destPodId = destPool.getPodId();
 -
 -        if (destPodId == null || !destPodId.equals(vm.getPodIdToDeployIn())) {
 -            if(isDebugEnabled) {
 -                String msg = String.format("resetting lasHost for VM %s(%s) as pod (%s) is no good.", vm.getInstanceName(), vm.getUuid(), destPodId);
 -                s_logger.debug(msg);
 +        StoragePool rootVolumePool = null;
 +        if (MapUtils.isNotEmpty(volumeToPool)) {
 +            for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
 +                if (Type.ROOT.equals(entry.getKey().getVolumeType())) {
 +                    rootVolumePool = entry.getValue();
 +                    break;
 +                }
              }
 -            vm.setLastHostId(null);
 -            vm.setPodIdToDeployIn(destPodId);
 -            // OfflineVmwareMigration: a consecutive migration will fail probably (no host not pod)
 -        } else if (srcClusterId != null && destPool.getClusterId() != null && !srcClusterId.equals(destPool.getClusterId())) {
 +        }
 +        setDestinationPoolAndReallocateNetwork(rootVolumePool, vm);
 +        Long destClusterId = rootVolumePool != null ? rootVolumePool.getClusterId() : null;
 +        if (destClusterId != null && !destClusterId.equals(sourceClusterId)) {
              if(isDebugEnabled) {
 -                String msg = String.format("resetting lasHost for VM %s(%s) as cluster changed", vm.getInstanceName(), vm.getUuid());
 +                String msg = String.format("Resetting lastHost for VM %s(%s)", vm.getInstanceName(), vm.getUuid());
                  s_logger.debug(msg);
              }
 -            vm.setLastHostId(null);
 -        } // else keep last host set for this vm
 -        markVolumesInPool(vm, destPool, hypervisorMigrationResults);
 +            vm.setPodIdToDeployIn(rootVolumePool.getPodId());
 +            // OfflineVmwareMigration: a consecutive migration will fail probably (no host not pod)
 +        }// else keep last host set for this vm
 +        markVolumesInPool(vm, hypervisorMigrationResults);
          // OfflineVmwareMigration: deal with answers, if (hypervisorMigrationResults.length > 0)
          // OfflineVmwareMigration: iterate over the volumes for data updates
      }
@@@ -2527,18 -2401,9 +2528,17 @@@
          return result;
      }
  
--
 -    private void afterStorageMigrationCleanup(StoragePool destPool, VMInstanceVO vm, Long srcHostId, Long srcClusterId) throws InsufficientCapacityException {
 -        setDestinationPoolAndReallocateNetwork(destPool, vm);
 +    private void postStorageMigrationCleanup(VMInstanceVO vm, Map<Volume, StoragePool> volumeToPool, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException {
 +        StoragePool rootVolumePool = null;
 +        if (MapUtils.isNotEmpty(volumeToPool)) {
 +            for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
 +                if (Type.ROOT.equals(entry.getKey().getVolumeType())) {
 +                    rootVolumePool = entry.getValue();
 +                    break;
 +                }
 +            }
 +        }
 +        setDestinationPoolAndReallocateNetwork(rootVolumePool, vm);
          //when start the vm next time, don;'t look at last_host_id, only choose the host based on volume/storage pool
          vm.setLastHostId(null);
@@@ -2549,7 -2412,7 +2549,7 @@@
          // If VM was cold migrated between clusters belonging to two different VMware DCs,
          // unregister the VM from the source host and cleanup the associated VM files.
          if (vm.getHypervisorType().equals(HypervisorType.VMware)) {
 -            afterStorageMigrationVmwareVMcleanup(rootVolumePool, vm, srcHost, srcClusterId);
 -            afterStorageMigrationVmwareVMCleanup(destPool, vm, srcHostId, srcClusterId);
++            afterStorageMigrationVmwareVMCleanup(rootVolumePool, vm, srcHost, srcClusterId);
          }
      }
@@@ -2567,10 -2430,10 +2567,10 @@@
          }
      }
  
 -    private void afterStorageMigrationVmwareVMcleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
 -    private void afterStorageMigrationVmwareVMCleanup(StoragePool destPool, VMInstanceVO vm, Long srcHostId, Long srcClusterId) {
++    private void afterStorageMigrationVmwareVMCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) {
          // OfflineVmwareMigration: this should only happen on storage migration, else the guru would already have issued the command
          final Long destClusterId = destPool.getClusterId();
 -        if (srcHostId != null && srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) {
 +        if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId) && srcHost != null) {
              final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId);
              final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId);
              if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) {
diff --cc plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java
index a592126,edb3b88..afb8a29
--- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java
+++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java
@@@ -1078,42 -1085,45 +1086,53 @@@ public class VMwareGuru extends Hypervi
          return null;
      }
  
 -    @Override public List<Command> finalizeMigrate(VirtualMachine vm, Map<Volume, StoragePool> volumeToPool) {
 +    private boolean isInterClusterMigration(Long srcClusterId, Long destClusterId) {
 +        return srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId);
 +    }
 +
 +    private String getHostGuidInTargetCluster(boolean isInterClusterMigration, Long destClusterId) {
 +        String hostGuidInTargetCluster = null;
 +        if (isInterClusterMigration) {
 +            Host hostInTargetCluster = null;
 +            // Without host vMotion might fail between non-shared storages with error similar to,
 +            // https://kb.vmware.com/s/article/1003795
 +            // As this is offline migration VM won't be started on this host
 +            List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(destClusterId);
 +            if (CollectionUtils.isNotEmpty(hosts)) {
 +                hostInTargetCluster = hosts.get(0);
 +            }
 +            if (hostInTargetCluster == null) {
 +                throw new CloudRuntimeException("Migration failed, unable to find suitable target host for VM placement while migrating between storage pools of different clusters without shared storages");
 +            }
 +            hostGuidInTargetCluster = hostInTargetCluster.getGuid();
 +        }
 +        return hostGuidInTargetCluster;
 +    }
 +
 -    @Override public List<Command> finalizeMigrate(VirtualMachine vm, StoragePool destination) {
++    @Override
++    public List<Command> finalizeMigrate(VirtualMachine vm, Map<Volume, StoragePool> volumeToPool) {
          List<Command> commands = new ArrayList<Command>();
  
          // OfflineVmwareMigration: specialised migration command
 -        List<VolumeVO> volumes = _volumeDao.findByInstance(vm.getId());
          List<VolumeTO> vols = new ArrayList<>();
 -        for (Volume volume : volumes) {
 -            VolumeTO vol = new VolumeTO(volume, destination);
 -            vols.add(vol);
 +        List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerTo = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
 +        Long poolClusterId = null;
 -        Host hostInTargetCluster = null;
 +        for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
 +            Volume volume = entry.getKey();
 +            StoragePool pool = entry.getValue();
 +            VolumeTO volumeTo = new VolumeTO(volume, _storagePoolDao.findById(pool.getId()));
 +            StorageFilerTO filerTo = new StorageFilerTO(pool);
 +            if (pool.getClusterId() != null) {
 +                poolClusterId = pool.getClusterId();
 +            }
 +            volumeToFilerTo.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
 +            vols.add(volumeTo);
          }
 -
 -        final Long destClusterId = destination.getClusterId();
 +        final Long destClusterId = poolClusterId;
          final Long srcClusterId = getClusterId(vm.getId());
 -        final boolean isInterClusterMigration = srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId);
 -        if (isInterClusterMigration) {
 -            // Without host vMotion might fail between non-shared storages with error similar to,
 -            // https://kb.vmware.com/s/article/1003795
 -            // As this is offline migration VM won't be started on this host
 -            List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(destClusterId);
 -            if (CollectionUtils.isNotEmpty(hosts)) {
 -                hostInTargetCluster = hosts.get(0);
 -            }
 -            if (hostInTargetCluster == null) {
 -                throw new CloudRuntimeException("Migration failed, unable to find suitable target host for VM placement while migrating between storage pools of different clusters without shared storages");
 -            }
 -        }
 +        final boolean isInterClusterMigration = isInterClusterMigration(destClusterId, srcClusterId);
          MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(),
 -                volumeToFilerTo, hostInTargetCluster == null ? null : hostInTargetCluster.getGuid(), true);
 -                vols, destination.getUuid(), getHostGuidInTargetCluster(isInterClusterMigration, destClusterId), true);
++                volumeToFilerTo, getHostGuidInTargetCluster(isInterClusterMigration, destClusterId), true);
          commands.add(migrateVmToPoolCommand);
  
          // OfflineVmwareMigration: cleanup if needed
diff --cc plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 8264785,6dfdeb5..2114c7d
--- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@@ -154,6 -151,6 +152,8 @@@ import com.cloud.agent.api.ScaleVmComma
  import com.cloud.agent.api.SetupAnswer;
  import com.cloud.agent.api.SetupCommand;
  import com.cloud.agent.api.SetupGuestNetworkCommand;
++import com.cloud.agent.api.SetupPersistentNetworkAnswer;
++import com.cloud.agent.api.SetupPersistentNetworkCommand;
  import com.cloud.agent.api.StartAnswer;
  import com.cloud.agent.api.StartCommand;
  import com.cloud.agent.api.StartupCommand;
@@@ -4511,7 -4505,9 +4511,8 @@@ public class VmwareResource implements
          volumeDeviceKey.put(diskId, volumeId);
      }
  
 -    private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool, VmwareHypervisorHost hyperHost) {
 +    private ManagedObjectReference getTargetDatastoreMOReference(String destinationPool,
 -                                                                 VmwareHypervisorHost hyperHost,
 -                                                                 VmwareHypervisorHost targetHyperHost) {
++                                                                 VmwareHypervisorHost hyperHost) {
          ManagedObjectReference morDs;
          try {
              if (s_logger.isDebugEnabled()) {
@@@ -4631,20 -4873,11 +4632,20 @@@
              // OfflineVmwareMigration: this method is 100 lines and needs refactorring anyway
              // we need to spawn a worker VM to attach the volume to and move it
              morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid());
 -            dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
 -            morDestintionDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(targetDSHost, cmd.getTargetPool().getUuid());
 -            destinationDsMo = new DatastoreMO(targetDSHost.getContext(), morDestintionDS);
 +            sourceDsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS);
 -            VmwareHypervisorHost hostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(),
++            VmwareHypervisorHost hyperHostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(),
 +                    cmd.getHostGuidInTargetCluster());
 -            VmwareHypervisorHost dsHost = hostInTargetCluster == null ? hyperHost : hostInTargetCluster;
++            VmwareHypervisorHost dsHost = hyperHostInTargetCluster == null ? hyperHost : hyperHostInTargetCluster;
 +            String targetDsName = cmd.getTargetPool().getUuid();
 +            morDestinationDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(dsHost, targetDsName);
 +            if(morDestinationDS == null) {
 +                String msg = "Unable to find the target datastore: " + targetDsName + " on host: " + dsHost.getHyperHostName();
 +                s_logger.error(msg);
 +                throw new CloudRuntimeException(msg);
 +            }
 +            destinationDsMo = new DatastoreMO(hyperHost.getContext(), morDestinationDS);
 -            vmName = getWorkerName(getServiceContext(), cmd, 0, dsMo);
 +            vmName = getWorkerName(getServiceContext(), cmd, 0, sourceDsMo);
              if (destinationDsMo.getDatastoreType().equalsIgnoreCase("VVOL")) {
                  isvVolsInvolved = true;
                  vmName = getWorkerName(getServiceContext(), cmd, 0, destinationDsMo);
@@@ -4665,7 -4890,8 +4658,8 @@@
              s_logger.info("Create worker VM " + vmName);
              // OfflineVmwareMigration: 2. create the worker with access to the data(store)
 -            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName, hardwareVersion);
 -            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName,
++            vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName,
 +                    HypervisorHostHelper.getMinimumHostHardwareVersion(hyperHost, hyperHostInTargetCluster));
              if (vmMo == null) {
                  // OfflineVmwareMigration: don't throw a general Exception but think of a specific one
                  throw new CloudRuntimeException("Unable to create a worker VM for volume operation");
diff --cc plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
index 8d08c38,2854a7c..1a8ca9f
--- a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
+++ b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
@@@ -187,41 -205,17 +205,15 @@@ public class VmwareStorageMotionStrateg
              // OfflineVmwareMigration: we shouldn't be here as we would have refused in the canHandle call
              throw new UnsupportedOperationException();
          }
 +        Pair<Long, String> hostIdForVmAndHostGuidInTargetCluster = getHostIdForVmAndHostGuidInTargetCluster(srcData, destData);
 +        Long hostId = hostIdForVmAndHostGuidInTargetCluster.first();
 -        String hostGuidInTargetCluster = hostIdForVmAndHostGuidInTargetCluster.second();
          StoragePool sourcePool = (StoragePool) srcData.getDataStore();
 -        ScopeType sourceScopeType = srcData.getDataStore().getScope().getScopeType();
          StoragePool targetPool = (StoragePool) destData.getDataStore();
 -        ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType();
 -        Long hostId = null;
 -        String hostGuidInTargetCluster = null;
 -        if (ScopeType.CLUSTER.equals(sourceScopeType)) {
 -            // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM
 -            hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId());
 -            if (hostId == null) {
 -                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName());
 -            }
 -            if (ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) {
 -                // Without host vMotion might fail between non-shared storages with error similar to,
 -                // https://kb.vmware.com/s/article/1003795
 -                List<HostVO> hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId());
 -                if (CollectionUtils.isNotEmpty(hosts)) {
 -                    hostGuidInTargetCluster = hosts.get(0).getGuid();
 -                }
 -                if (hostGuidInTargetCluster == null) {
 -                    throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages");
 -                }
 -            }
 -        } else if (ScopeType.CLUSTER.equals(targetScopeType)) {
 -            hostId = findSuitableHostIdForWorkerVmPlacement(targetPool.getClusterId());
 -            if (hostId == null) {
 -                throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + targetPool.getName());
 -            }
 -        }
          MigrateVolumeCommand cmd = new MigrateVolumeCommand(srcData.getId()
                  , srcData.getTO().getPath()
                  , sourcePool
                  , targetPool
--                , hostGuidInTargetCluster);
--        // OfflineVmwareMigration: should be ((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding
++                , hostIdForVmAndHostGuidInTargetCluster.second());
          Answer answer;
          if (hostId != null) {
              answer = agentMgr.easySend(hostId, cmd);
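
Both cleanup paths in the merged VirtualMachineManagerImpl resolve the destination the same way: the per-volume map of target pools replaces the single destination pool, and the pool mapped to the ROOT volume drives pod assignment and network reallocation. A minimal standalone sketch of that selection follows; it is illustrative only and not part of the commit, and it assumes the com.cloud.storage.Volume / StoragePool interfaces and commons-collections MapUtils used elsewhere in the CloudStack code base. The class and method names here are made up for the example.

import java.util.Map;

import org.apache.commons.collections.MapUtils;

import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;

// Illustrative helper mirroring the ROOT-volume pool selection used by
// afterHypervisorMigrationCleanup and postStorageMigrationCleanup in the diff above.
public class RootVolumePoolSelector {

    // Returns the pool mapped to the ROOT volume, or null when the map is empty
    // or contains no ROOT volume entry.
    public static StoragePool findRootVolumePool(Map<Volume, StoragePool> volumeToPool) {
        StoragePool rootVolumePool = null;
        if (MapUtils.isNotEmpty(volumeToPool)) {
            for (Map.Entry<Volume, StoragePool> entry : volumeToPool.entrySet()) {
                if (Volume.Type.ROOT.equals(entry.getKey().getVolumeType())) {
                    rootVolumePool = entry.getValue();
                    break;
                }
            }
        }
        return rootVolumePool;
    }
}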
