This is an automated email from the ASF dual-hosted git repository.

vishesh pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
commit 3923f80c225c593a79e32cc7d9d5567fb63ecca2
Merge: f6ceeab3b3a 620ed164d8b
Author: Vishesh <[email protected]>
AuthorDate: Tue Jun 25 18:53:57 2024 +0530

    Merge branch '4.19'

 .../config/UpdateHypervisorCapabilitiesCmd.java | 14 +
 .../admin/vm/ImportUnmanagedInstanceCmd.java | 8 +-
 .../apache/cloudstack/vm/UnmanagedVMsManager.java | 9 +
 .../com/cloud/agent/api/CheckVolumeAnswer.java | 1 -
 .../com/cloud/agent/api/CheckVolumeCommand.java | 1 -
 .../cloud/agent/api/CopyRemoteVolumeAnswer.java | 1 -
 .../cloud/agent/api/CopyRemoteVolumeCommand.java | 5 +-
 .../com/cloud/agent/api/GetRemoteVmsAnswer.java | 2 +-
 .../com/cloud/agent/api/GetRemoteVmsCommand.java | 2 +-
 .../agent/api/GetUnmanagedInstancesAnswer.java | 2 +-
 .../agent/api/GetUnmanagedInstancesCommand.java | 2 +-
 debian/control | 2 +-
 .../engine/orchestration/NetworkOrchestrator.java | 12 +-
 .../engine/orchestration/VolumeOrchestrator.java | 9 +-
 .../orchestration/NetworkOrchestratorTest.java | 272 ++++++++-------
 .../cloud/hypervisor/HypervisorCapabilitiesVO.java | 12 +
 .../src/main/java/com/cloud/vm/dao/VmStatsDao.java | 6 +-
 .../main/java/com/cloud/vm/dao/VmStatsDaoImpl.java | 22 +-
 .../storage/datastore/db/ImageStoreDaoImpl.java | 2 +-
 .../resources/META-INF/db/schema-41900to41910.sql | 5 +
 .../motion/StorageSystemDataMotionStrategy.java | 370 ++++++++++++---------
 .../KvmNonManagedStorageSystemDataMotionTest.java | 8 +-
 .../StorageSystemDataMotionStrategyTest.java | 71 ----
 .../manager/ImageStoreProviderManagerImpl.java | 2 +-
 .../main/java/com/cloud/utils/db/GenericDao.java | 8 +
 .../java/com/cloud/utils/db/GenericDaoBase.java | 6 +
 .../model/impl/DefaultModuleDefinitionSet.java | 4 +-
 .../acl/DynamicRoleBasedAPIAccessChecker.java | 4 +-
 .../acl/ProjectRoleBasedApiAccessChecker.java | 8 +-
 .../kvm/resource/LibvirtComputingResource.java | 41 ++-
 .../LibvirtCopyRemoteVolumeCommandWrapper.java | 17 +-
 .../wrapper/LibvirtGetRemoteVmsCommandWrapper.java | 21 +-
 .../LibvirtMigrateVolumeCommandWrapper.java | 18 +-
 .../kvm/storage/FiberChannelAdapter.java | 28 ++
 .../kvm/storage/KVMStorageProcessor.java | 72 ++--
 .../kvm/storage/LibvirtStorageAdaptor.java | 20 +-
 .../kvm/storage/MultipathSCSIAdapterBase.java | 262 +++++----------
 .../kvm/storage/KVMStorageProcessorTest.java | 66 ++--
 .../com/cloud/agent/manager/MockVmManagerImpl.java | 79 ++---
 .../vmware/manager/VmwareManagerImpl.java | 13 +-
 .../hypervisor/vmware/resource/VmwareResource.java | 19 +-
 .../cluster/KubernetesServiceHelperImpl.java | 3 +
 .../cluster/KubernetesServiceHelperImplTest.java | 20 +-
 .../cloudstack/shutdown/ShutdownManagerImpl.java | 5 +-
 plugins/storage/volume/adaptive/README.md | 41 +++
 .../storage/datastore/adapter/ProviderAdapter.java | 22 +-
 .../datastore/adapter/ProviderAdapterFactory.java | 4 +
 .../datastore/adapter/ProviderVolumeNamer.java | 1 -
 .../driver/AdaptiveDataStoreDriverImpl.java | 222 +++++++++----
 .../lifecycle/AdaptiveDataStoreLifeCycleImpl.java | 11 +-
 .../AdaptivePrimaryDatastoreAdapterFactoryMap.java | 4 +
 .../provider/AdaptivePrimaryHostListener.java | 2 +
 .../adapter/flasharray/FlashArrayAdapter.java | 274 ++++++++-------
 .../flasharray/FlashArrayAdapterFactory.java | 5 +
 ...lashArrayVolumePod.java => FlashArrayHost.java} | 29 +-
 .../adapter/flasharray/FlashArrayVolume.java | 4 +-
 .../adapter/flasharray/FlashArrayVolumePod.java | 17 +-
 .../datastore/adapter/primera/PrimeraAdapter.java | 201 +++++------
 .../adapter/primera/PrimeraAdapterFactory.java | 5 +
 .../datastore/adapter/primera/PrimeraHost.java} | 33 +-
.../adapter/primera/PrimeraHostDescriptor.java} | 31 +- .../datastore/adapter/primera/PrimeraHostset.java | 44 ++- .../datastore/adapter/primera/PrimeraPort.java} | 31 +- .../datastore/adapter/primera/PrimeraPortPos.java} | 38 ++- .../PrimeraVolumeCopyRequestParameters.java | 2 +- .../primera/PrimeraVolumePromoteRequest.java | 5 +- .../StorPoolBackupSnapshotCommandWrapper.java | 63 +++- .../cloudstack/oauth2/OAuth2UserAuthenticator.java | 5 + scripts/storage/multipath/copyVolume.sh | 2 +- .../com/cloud/api/query/dao/VolumeJoinDaoImpl.java | 7 +- .../configuration/ConfigurationManagerImpl.java | 2 + .../AgentBasedConsoleProxyManager.java | 2 +- .../java/com/cloud/consoleproxy/AgentHookBase.java | 2 +- .../cloud/consoleproxy/ConsoleProxyManager.java | 3 + .../consoleproxy/ConsoleProxyManagerImpl.java | 4 +- .../consoleproxy/StaticConsoleProxyManager.java | 2 +- .../kvm/discoverer/LibvirtServerDiscoverer.java | 18 +- .../com/cloud/network/guru/PublicNetworkGuru.java | 2 +- .../com/cloud/server/ManagementServerImpl.java | 52 ++- .../main/java/com/cloud/server/StatsCollector.java | 12 +- .../com/cloud/storage/VolumeApiServiceImpl.java | 43 +-- .../com/cloud/template/TemplateManagerImpl.java | 7 +- .../main/java/com/cloud/vm/UserVmManagerImpl.java | 11 +- .../apache/cloudstack/snapshot/SnapshotHelper.java | 4 +- .../cloudstack/vm/UnmanagedVMsManagerImpl.java | 27 +- .../discoverer/LibvirtServerDiscovererTest.java | 54 +++ .../java/com/cloud/server/StatsCollectorTest.java | 8 +- .../java/com/cloud/user/MockUsageEventDao.java | 5 + test/integration/smoke/test_service_offerings.py | 2 +- ui/src/views/compute/DeployVM.vue | 2 + .../main/java/com/cloud/utils/ssh/SshHelper.java | 8 +- 91 files changed, 1718 insertions(+), 1209 deletions(-) diff --cc api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java index dd897218a4d,3d8b23318dd..ae6ceff26c7 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java @@@ -201,8 -203,8 +201,8 @@@ public class ImportUnmanagedInstanceCm for (Map<String, String> entry : (Collection<Map<String, String>>)nicNetworkList.values()) { String nic = entry.get(VmDetailConstants.NIC); String networkUuid = entry.get(VmDetailConstants.NETWORK); - if (logger.isTraceEnabled()) { - logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); ++ if (logger.isDebugEnabled()) { ++ logger.debug(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); } if (StringUtils.isAnyEmpty(nic, networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); @@@ -219,8 -221,8 +219,8 @@@ for (Map<String, String> entry : (Collection<Map<String, String>>)nicIpAddressList.values()) { String nic = entry.get(VmDetailConstants.NIC); String ipAddress = StringUtils.defaultIfEmpty(entry.get(VmDetailConstants.IP4_ADDRESS), null); - if (logger.isTraceEnabled()) { - logger.trace(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress)); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress)); ++ if (logger.isDebugEnabled()) { ++ logger.debug(String.format("nic, '%s', gets ip, '%s'", nic, ipAddress)); } if 
(StringUtils.isEmpty(nic)) { throw new InvalidParameterValueException(String.format("NIC ID: '%s' is invalid for IP address mapping", nic)); diff --cc debian/control index 3508c7b5f75,c968f337bd5..dab7b254b88 --- a/debian/control +++ b/debian/control @@@ -24,7 -24,7 +24,7 @@@ Description: CloudStack server librar Package: cloudstack-agent Architecture: all - Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor -Depends: ${python:Depends}, ${python3:Depends}, openjdk-11-jre-headless | java11-runtime-headless | java11-runtime | openjdk-11-jre-headless | zulu-11, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor, cpu-checker ++Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor, cpu-checker Recommends: init-system-helpers Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts Description: CloudStack agent diff --cc engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java index d1532cdbef1,45ed646240f..a4700f6cdc0 --- a/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java +++ b/engine/orchestration/src/test/java/org/apache/cloudstack/engine/orchestration/NetworkOrchestratorTest.java @@@ -92,8 -95,9 +94,8 @@@ import junit.framework.TestCase */ @RunWith(JUnit4.class) public class NetworkOrchestratorTest extends TestCase { - static final Logger s_logger = Logger.getLogger(NetworkOrchestratorTest.class); - NetworkOrchestrator testOrchastrator = Mockito.spy(new NetworkOrchestrator()); + NetworkOrchestrator testOrchestrator = Mockito.spy(new NetworkOrchestrator()); private String guruName = "GuestNetworkGuru"; private String dhcpProvider = "VirtualRouter"; @@@ -135,8 -140,8 +138,8 @@@ when(provider.getCapabilities()).thenReturn(services); capabilities.put(Network.Capability.DhcpAccrossMultipleSubnets, "true"); - when(testOrchastrator._ntwkSrvcDao.getProviderForServiceInNetwork(ArgumentMatchers.anyLong(), ArgumentMatchers.eq(Service.Dhcp))).thenReturn(dhcpProvider); - when(testOrchastrator._networkModel.getElementImplementingProvider(dhcpProvider)).thenReturn(provider); - when(testOrchestrator._ntwkSrvcDao.getProviderForServiceInNetwork(Matchers.anyLong(), Matchers.eq(Service.Dhcp))).thenReturn(dhcpProvider); ++ when(testOrchestrator._ntwkSrvcDao.getProviderForServiceInNetwork(ArgumentMatchers.anyLong(), ArgumentMatchers.eq(Service.Dhcp))).thenReturn(dhcpProvider); + when(testOrchestrator._networkModel.getElementImplementingProvider(dhcpProvider)).thenReturn(provider); when(guru.getName()).thenReturn(guruName); 
List<NetworkGuru> networkGurus = new ArrayList<NetworkGuru>(); diff --cc engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDaoImpl.java index 1bef8f0626c,a98302e2136..aa58e489364 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VmStatsDaoImpl.java @@@ -21,6 -21,7 +21,8 @@@ import java.util.List import javax.annotation.PostConstruct; -import org.apache.log4j.Logger; ++import org.apache.logging.log4j.LogManager; ++import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.utils.db.Filter; @@@ -33,6 -34,8 +35,8 @@@ import com.cloud.vm.VmStatsVO @Component public class VmStatsDaoImpl extends GenericDaoBase<VmStatsVO, Long> implements VmStatsDao { - protected Logger logger = Logger.getLogger(getClass()); ++ protected Logger logger = LogManager.getLogger(getClass()); + protected SearchBuilder<VmStatsVO> vmIdSearch; protected SearchBuilder<VmStatsVO> vmIdTimestampGreaterThanEqualSearch; protected SearchBuilder<VmStatsVO> vmIdTimestampLessThanEqualSearch; diff --cc engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 03aa5b50988,f3e6049c792..70e79e3252b --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@@ -69,10 -71,8 +71,9 @@@ import org.apache.cloudstack.storage.da import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; - import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@@ -141,8 -140,11 +141,11 @@@ import java.util.HashSet import java.util.stream.Collectors; import org.apache.commons.collections.CollectionUtils; + import static org.apache.cloudstack.vm.UnmanagedVMsManagerImpl.KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME; + import static org.apache.cloudstack.vm.UnmanagedVMsManagerImpl.VM_IMPORT_DEFAULT_TEMPLATE_NAME; + public class StorageSystemDataMotionStrategy implements DataMotionStrategy { - private static final Logger LOGGER = Logger.getLogger(StorageSystemDataMotionStrategy.class); + protected Logger logger = LogManager.getLogger(getClass()); private static final Random RANDOM = new Random(System.nanoTime()); private static final int LOCK_TIME_IN_SECONDS = 300; private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported."; @@@ -871,6 -869,18 +870,18 @@@ throw new CloudRuntimeException(errMsg, ex); } } finally { + // revoke access (for managed volumes) + if (hostVO != null) { + try { + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + } catch (Exception e) { - LOGGER.warn(String.format("Failed to revoke access for volume 'name=%s,uuid=%s' after a migration attempt", destVolumeInfo.getVolume(), destVolumeInfo.getUuid()), e); ++ logger.warn(String.format("Failed to revoke access for volume 'name=%s,uuid=%s' after a migration attempt", destVolumeInfo.getVolume(), destVolumeInfo.getUuid()), e); + } + } + + // re-retrieve volume to get any updated information from grant + destVolumeInfo = 
_volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + CopyCmdAnswer copyCmdAnswer; if (errMsg != null) { copyCmdAnswer = new CopyCmdAnswer(errMsg); @@@ -921,6 -931,125 +932,125 @@@ return hostVO; } + private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snapshotInfo) { + VolumeInfo tempVolumeInfo = null; + VolumeVO tempVolumeVO = null; + try { + tempVolumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", + snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); + tempVolumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + _volumeDao.persist(tempVolumeVO); + tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId()); + + if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { + snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); + // refresh volume info as data could have changed + tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId()); + } else { + throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); + } + return tempVolumeInfo; + } catch (Throwable e) { + try { + if (tempVolumeInfo != null) { + tempVolumeInfo.getDataStore().getDriver().deleteAsync(tempVolumeInfo.getDataStore(), tempVolumeInfo, null); + } + + // cleanup temporary volume + if (tempVolumeVO != null) { + _volumeDao.remove(tempVolumeVO.getId()); + } + } catch (Throwable e2) { - LOGGER.warn("Failed to delete temporary volume created for copy", e2); ++ logger.warn("Failed to delete temporary volume created for copy", e2); + } + + throw e; + } + } + + /** + * Simplier logic for copy from snapshot for adaptive driver only. 
+ * @param snapshotInfo + * @param destData + * @param callback + */ + private void handleCopyAsyncToSecondaryStorageAdaptive(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback<CopyCommandResult> callback) { + CopyCmdAnswer copyCmdAnswer = null; + DataObject srcFinal = null; + HostVO hostVO = null; + DataStore srcDataStore = null; + boolean tempRequired = false; + + try { + snapshotInfo.processEvent(Event.CopyingRequested); + hostVO = getHost(snapshotInfo); + DataObject destOnStore = destData; + srcDataStore = snapshotInfo.getDataStore(); + int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); + CopyCommand copyCommand = null; + if (!Boolean.parseBoolean(srcDataStore.getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT"))) { + srcFinal = createTemporaryVolumeCopyOfSnapshotAdaptive(snapshotInfo); + tempRequired = true; + } else { + srcFinal = snapshotInfo; + } + + _volumeService.grantAccess(srcFinal, hostVO, srcDataStore); + + DataTO srcTo = srcFinal.getTO(); + + // have to set PATH as extraOptions due to logic in KVM hypervisor processor + HashMap<String,String> extraDetails = new HashMap<>(); + extraDetails.put(DiskTO.PATH, srcTo.getPath()); + + copyCommand = new CopyCommand(srcFinal.getTO(), destOnStore.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); + copyCommand.setOptions(extraDetails); + copyCmdAnswer = (CopyCmdAnswer)agentManager.send(hostVO.getId(), copyCommand); + } catch (Exception ex) { + String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : "; - LOGGER.warn(msg, ex); ++ logger.warn(msg, ex); + throw new CloudRuntimeException(msg + ex.getMessage(), ex); + } + finally { + // remove access tot he volume that was used + if (srcFinal != null && hostVO != null && srcDataStore != null) { + _volumeService.revokeAccess(srcFinal, hostVO, srcDataStore); + } + + // delete the temporary volume if it was needed + if (srcFinal != null && tempRequired) { + try { + srcFinal.getDataStore().getDriver().deleteAsync(srcFinal.getDataStore(), srcFinal, null); + } catch (Throwable e) { - LOGGER.warn("Failed to delete temporary volume created for copy", e); ++ logger.warn("Failed to delete temporary volume created for copy", e); + } + } + + // check we have a reasonable result + String errMsg = null; + if (copyCmdAnswer == null || (!copyCmdAnswer.getResult() && copyCmdAnswer.getDetails() == null)) { + errMsg = "Unable to create template from snapshot"; + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } else if (!copyCmdAnswer.getResult() && StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + errMsg = "Unable to create template from snapshot"; + } else if (!copyCmdAnswer.getResult()) { + errMsg = copyCmdAnswer.getDetails(); + } + + //submit processEvent + if (StringUtils.isEmpty(errMsg)) { + snapshotInfo.processEvent(Event.OperationSuccessed); + } else { + snapshotInfo.processEvent(Event.OperationFailed); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(copyCmdAnswer.getDetails()); + callback.complete(result); + } + } + /** * This function is responsible for copying a snapshot from managed storage to secondary storage. This is used in the following two cases: * 1) When creating a template from a snapshot @@@ -1797,31 -1891,20 +1892,20 @@@ * invocation of createVolumeFromSnapshot(SnapshotInfo). 
*/ private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - VolumeVO volumeVO = null; - // cleanup any temporary volume previously created for copy from a snapshot - if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { - SnapshotDetailsVO tempUuid = null; - tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); - if (tempUuid == null || tempUuid.getValue() == null) { - return; - } + try { - LOGGER.debug("Cleaning up temporary volume created for copy from a snapshot"); ++ logger.debug("Cleaning up temporary volume created for copy from a snapshot"); - volumeVO = _volumeDao.findByUuid(tempUuid.getValue()); - if (volumeVO != null) { - _volumeDao.remove(volumeVO.getId()); - } - _snapshotDetailsDao.remove(tempUuid.getId()); - _snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); - return; - } + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); + try { + snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); + } + finally { + _snapshotDetailsDao.remove(snapshotDetails.getId()); + } - try { - snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); - } - finally { - _snapshotDetailsDao.remove(snapshotDetails.getId()); + } catch (Throwable e) { - LOGGER.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e); ++ logger.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e); } } @@@ -1933,11 -2016,14 +2017,14 @@@ continue; } - if (srcVolumeInfo.getTemplateId() != null) { + VMTemplateVO vmTemplate = _vmTemplateDao.findById(vmInstance.getTemplateId()); + if (srcVolumeInfo.getTemplateId() != null && + Objects.nonNull(vmTemplate) && + !Arrays.asList(KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME, VM_IMPORT_DEFAULT_TEMPLATE_NAME).contains(vmTemplate.getName())) { - LOGGER.debug(String.format("Copying template [%s] of volume [%s] from source storage pool [%s] to target storage pool [%s].", srcVolumeInfo.getTemplateId(), srcVolumeInfo.getId(), sourceStoragePool.getId(), destStoragePool.getId())); + logger.debug(String.format("Copying template [%s] of volume [%s] from source storage pool [%s] to target storage pool [%s].", srcVolumeInfo.getTemplateId(), srcVolumeInfo.getId(), sourceStoragePool.getId(), destStoragePool.getId())); copyTemplateToTargetFilesystemStorageIfNeeded(srcVolumeInfo, sourceStoragePool, destDataStore, destStoragePool, destHost); } else { - LOGGER.debug(String.format("Skipping copy template from source storage pool [%s] to target storage pool [%s] before migration due to volume [%s] does not have a template.", sourceStoragePool.getId(), destStoragePool.getId(), srcVolumeInfo.getId())); + logger.debug(String.format("Skipping copy template from source storage pool [%s] to target storage pool [%s] before migration due to volume [%s] does not have a template.", sourceStoragePool.getId(), destStoragePool.getId(), srcVolumeInfo.getId())); } VolumeVO destVolume = duplicateVolumeOnAnotherStorage(srcVolume, 
destStoragePool); diff --cc engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java index cea9de3f1b4,e619b40fae0..45357fa64b2 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategyTest.java @@@ -47,8 -46,7 +46,7 @@@ import org.mockito.InjectMocks import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.Spy; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; - import org.mockito.verification.VerificationMode; import com.cloud.agent.api.MigrateCommand; import com.cloud.host.HostVO; diff --cc engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index 11a13e7ccb4,27fb77660ac..d2f08260aa3 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@@ -201,8 -200,8 +201,8 @@@ public class ImageStoreProviderManagerI } // No store with space found - s_logger.error(String.format("Can't find an image storage in zone with less than %d usage", + logger.error(String.format("Can't find an image storage in zone with less than %d usage", - Math.round(_statsCollector.getImageStoreCapacityThreshold()*100))); + Math.round(_statsCollector.getImageStoreCapacityThreshold() * 100))); return null; } diff --cc framework/db/src/main/java/com/cloud/utils/db/GenericDao.java index de8838b0999,b9199468bd1..84750c2068c --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDao.java @@@ -229,24 -229,14 +229,32 @@@ public interface GenericDao<T, ID exten */ int expunge(final SearchCriteria<T> sc); + /** + * remove the entity bean specified by the search criteria and filter + * @param sc + * @param filter + * @return number of rows deleted + */ + int expunge(final SearchCriteria<T> sc, final Filter filter); + + /** + * remove the entity bean specified by the search criteria and batchSize + * @param sc + * @param batchSize + * @return number of rows deleted + */ + int batchExpunge(final SearchCriteria<T> sc, final Long batchSize); + + int expungeList(List<ID> ids); + + /** + * Delete the entity beans specified by the search criteria with a given limit + * @param sc Search criteria + * @param limit Maximum number of rows that will be affected + * @return Number of rows deleted + */ + int expunge(SearchCriteria<T> sc, long limit); + /** * expunge the removed rows. */ diff --cc framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java index 4202f6996c1,3b950e1983d..b7e4f44cf8c --- a/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/main/java/com/cloud/utils/db/GenericDaoBase.java @@@ -1235,9 -1226,14 +1235,15 @@@ public abstract class GenericDaoBase<T } } + // FIXME: Does not work for joins. + @Override - public int expunge(final SearchCriteria<T> sc) { - return expunge(sc, -1); ++ public int expunge(final SearchCriteria<T> sc, long limit) { ++ Filter filter = new Filter(limit); ++ return expunge(sc, filter); + } + - // FIXME: Does not work for joins. 
@Override - public int expunge(final SearchCriteria<T> sc, long limit) { + public int expunge(final SearchCriteria<T> sc, final Filter filter) { if (sc == null) { throw new CloudRuntimeException("Call to throw new expunge with null search Criteria"); } diff --cc framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java index d61e26fc3a8,cda07c9dee6..2a6d0b63e5c --- a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java @@@ -99,20 -98,22 +99,22 @@@ public class DefaultModuleDefinitionSe public void with(ModuleDefinition def, Stack<ModuleDefinition> parents) { try { String moduleDefinitionName = def.getName(); - log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); + logger.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); ApplicationContext context = getApplicationContext(moduleDefinitionName); try { - if (context.containsBean("moduleStartup")) { + if (context == null) { - log.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName)); ++ logger.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName)); + } else if (context.containsBean("moduleStartup")) { Runnable runnable = context.getBean("moduleStartup", Runnable.class); - log.info(String.format("Starting module [%s].", moduleDefinitionName)); + logger.info(String.format("Starting module [%s].", moduleDefinitionName)); runnable.run(); } else { - log.debug(String.format("Could not get module [%s] context bean.", moduleDefinitionName)); + logger.debug(String.format("Could not get module [%s] context bean.", moduleDefinitionName)); } } catch (BeansException e) { - log.warn(String.format("Failed to start module [%s] due to: [%s].", moduleDefinitionName, e.getMessage())); - if (log.isDebugEnabled()) { - log.debug(String.format("module start failure of module [%s] was due to: ", moduleDefinitionName), e); + logger.warn(String.format("Failed to start module [%s] due to: [%s].", moduleDefinitionName, e.getMessage())); + if (logger.isDebugEnabled()) { + logger.debug(String.format("module start failure of module [%s] was due to: ", moduleDefinitionName), e); } } } catch (EmptyStackException e) { diff --cc plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index 94b763d013f,1dfe20a10be..db40b6e68dd --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@@ -120,7 -122,9 +120,9 @@@ public class DynamicRoleBasedAPIAccessC } if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { - logger.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); ++ if (logger.isTraceEnabled()) { ++ logger.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); + } return true; } diff --cc 
plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 1e766468ba8,cffda4681c6..2e7ae23d6f1 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@@ -72,7 -76,9 +72,9 @@@ public class ProjectRoleBasedApiAccessC Project project = CallContext.current().getProject(); if (project == null) { - logger.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); ++ if (logger.isTraceEnabled()) { ++ logger.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); + } return apiNames; } @@@ -110,8 -116,10 +112,10 @@@ Project project = CallContext.current().getProject(); if (project == null) { - logger.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, - if (LOGGER.isTraceEnabled()) { - LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, ++ if (logger.isTraceEnabled()) { ++ logger.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, user)); + } return true; } diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index b5ec716e805,86ece3c8c66..5cffa77c297 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@@ -3801,27 -3775,27 +3801,27 @@@ public class LibvirtComputingResource e try { final String names[] = conn.listDefinedDomains(); for (int i = 0; i < names.length; i++) { - la.add(names[i]); + domainNames.add(names[i]); } } catch (final LibvirtException e) { - LOGGER.warn("Failed to list Defined domains", e); - s_logger.warn("Failed to list defined domains", e); ++ logger.warn("Failed to list defined domains", e); } int[] ids = null; try { ids = conn.listDomains(); } catch (final LibvirtException e) { - LOGGER.warn("Failed to list domains", e); - return la; - s_logger.warn("Failed to list domains", e); ++ logger.warn("Failed to list domains", e); + return domainNames; } Domain dm = null; for (int i = 0; i < ids.length; i++) { try { dm = conn.domainLookupByID(ids[i]); - la.add(dm.getName()); + domainNames.add(dm.getName()); } catch (final LibvirtException e) { - s_logger.warn("Unable to get vms", e); + LOGGER.warn("Unable to get vms", e); } finally { try { if (dm != null) { @@@ -5379,20 -5336,31 +5379,31 @@@ /* Scp volume from remote host to local directory */ - public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath) { + public String copyVolume(String srcIp, String username, String password, String localDir, String remoteFile, String tmpPath, int 
timeoutInSecs) { + String outputFile = UUID.randomUUID().toString(); try { - String outputFile = UUID.randomUUID().toString(); StringBuilder command = new StringBuilder("qemu-img convert -O qcow2 "); command.append(remoteFile); - command.append(" "+tmpPath); + command.append(" " + tmpPath); command.append(outputFile); - logger.debug("Converting remoteFile: "+remoteFile); - SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString()); - logger.debug("Copying remoteFile to: "+localDir); - SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath+outputFile); - logger.debug("Successfully copyied remoteFile to: "+localDir+"/"+outputFile); - s_logger.debug(String.format("Converting remote disk file: %s, output file: %s%s (timeout: %d secs)", remoteFile, tmpPath, outputFile, timeoutInSecs)); ++ logger.debug(String.format("Converting remote disk file: %s, output file: %s%s (timeout: %d secs)", remoteFile, tmpPath, outputFile, timeoutInSecs)); + SshHelper.sshExecute(srcIp, 22, username, null, password, command.toString(), timeoutInSecs * 1000); - s_logger.debug("Copying converted remote disk file " + outputFile + " to: " + localDir); ++ logger.debug("Copying converted remote disk file " + outputFile + " to: " + localDir); + SshHelper.scpFrom(srcIp, 22, username, null, password, localDir, tmpPath + outputFile); - s_logger.debug("Successfully copied converted remote disk file to: " + localDir + "/" + outputFile); ++ logger.debug("Successfully copied converted remote disk file to: " + localDir + "/" + outputFile); return outputFile; } catch (Exception e) { + try { + String deleteRemoteConvertedFileCmd = String.format("rm -f %s%s", tmpPath, outputFile); + SshHelper.sshExecute(srcIp, 22, username, null, password, deleteRemoteConvertedFileCmd); + } catch (Exception ignored) { + } + + try { + FileUtils.deleteQuietly(new File(localDir + "/" + outputFile)); + } catch (Exception ignored) { + } + throw new RuntimeException(e); } } diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java index 025a5ed192c,a5e1716da2e..e6ec05fec23 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCopyRemoteVolumeCommandWrapper.java @@@ -41,9 -42,10 +41,8 @@@ import java.util.Map @ResourceWrapper(handles = CopyRemoteVolumeCommand.class) public final class LibvirtCopyRemoteVolumeCommandWrapper extends CommandWrapper<CopyRemoteVolumeCommand, Answer, LibvirtComputingResource> { - private static final Logger s_logger = Logger.getLogger(LibvirtCopyRemoteVolumeCommandWrapper.class); - @Override public Answer execute(final CopyRemoteVolumeCommand command, final LibvirtComputingResource libvirtComputingResource) { - String result = null; String srcIp = command.getRemoteIp(); String username = command.getUsername(); String password = command.getPassword(); @@@ -57,19 -60,20 +57,20 @@@ try { if (storageFilerTO.getType() == Storage.StoragePoolType.Filesystem || storageFilerTO.getType() == Storage.StoragePoolType.NetworkFilesystem) { - String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath); - logger.debug("Volume Copy Successful"); + String filename = libvirtComputingResource.copyVolume(srcIp, username, password, dstPath, srcFile, tmpPath, timeoutInSecs); - s_logger.debug("Volume " + srcFile + " copy successful, 
copied to file: " + filename); ++ logger.debug("Volume " + srcFile + " copy successful, copied to file: " + filename); final KVMPhysicalDisk vol = pool.getPhysicalDisk(filename); final String path = vol.getPath(); long size = getVirtualSizeFromFile(path); - return new CopyRemoteVolumeAnswer(command, "", filename, size); + return new CopyRemoteVolumeAnswer(command, "", filename, size); } else { - return new Answer(command, false, "Unsupported Storage Pool"); + String msg = "Unsupported storage pool type: " + storageFilerTO.getType().toString() + ", only local and NFS pools are supported"; + return new Answer(command, false, msg); } - } catch (final Exception e) { - logger.error("Error while copying file from remote host: "+ e.getMessage()); - return new Answer(command, false, result); - s_logger.error("Error while copying volume file from remote host: " + e.getMessage(), e); ++ logger.error("Error while copying volume file from remote host: " + e.getMessage(), e); + String msg = "Failed to copy volume due to: " + e.getMessage(); + return new Answer(command, false, msg); } } diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java index a9da4a50452,942a68b8074..114b27d3a5b --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java @@@ -45,39 -46,41 +45,40 @@@ import java.util.List @ResourceWrapper(handles = GetRemoteVmsCommand.class) public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper<GetRemoteVmsCommand, Answer, LibvirtComputingResource> { - private static final Logger s_logger = Logger.getLogger(LibvirtGetRemoteVmsCommandWrapper.class); - @Override public Answer execute(final GetRemoteVmsCommand command, final LibvirtComputingResource libvirtComputingResource) { - String hypervisorURI = "qemu+tcp://" + command.getRemoteIp() + "/system"; + String remoteIp = command.getRemoteIp(); + String hypervisorURI = "qemu+tcp://" + remoteIp + "/system"; HashMap<String, UnmanagedInstanceTO> unmanagedInstances = new HashMap<>(); try { Connect conn = LibvirtConnection.getConnection(hypervisorURI); final List<String> allVmNames = libvirtComputingResource.getAllVmNames(conn); - s_logger.info(String.format("Found %d VMs on the remote host %s", allVmNames.size(), remoteIp)); ++ logger.info(String.format("Found %d VMs on the remote host %s", allVmNames.size(), remoteIp)); for (String name : allVmNames) { final Domain domain = libvirtComputingResource.getDomain(conn, name); - final DomainInfo.DomainState ps = domain.getInfo().state; final VirtualMachine.PowerState state = libvirtComputingResource.convertToPowerState(ps); - s_logger.debug(String.format("Remote VM %s - powerstate: %s, state: %s", domain.getName(), ps.toString(), state.toString())); + - logger.debug("VM " + domain.getName() + " - powerstate: " + ps + ", state: " + state.toString()); ++ logger.debug(String.format("Remote VM %s - powerstate: %s, state: %s", domain.getName(), ps.toString(), state.toString())); if (state == VirtualMachine.PowerState.PowerOff) { try { UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn); unmanagedInstances.put(instance.getName(), instance); } catch (Exception e) { - logger.error("Couldn't fetch VM " + domain.getName() + " details, due to: " + e.getMessage(), e); - s_logger.error("Couldn't fetch 
remote VM " + domain.getName() + " details, due to: " + e.getMessage(), e); ++ logger.error("Couldn't fetch remote VM " + domain.getName() + " details, due to: " + e.getMessage(), e); } } domain.free(); } - logger.debug("Found " + unmanagedInstances.size() + " stopped VMs on host " + command.getRemoteIp()); - s_logger.debug("Found " + unmanagedInstances.size() + " stopped VMs on remote host " + remoteIp); ++ logger.debug("Found " + unmanagedInstances.size() + " stopped VMs on remote host " + remoteIp); return new GetRemoteVmsAnswer(command, "", unmanagedInstances); } catch (final LibvirtException e) { - logger.error("Failed to list stopped VMs on remote host " + command.getRemoteIp() + ", due to: " + e.getMessage(), e); - s_logger.error("Failed to list stopped VMs on remote host " + remoteIp + ", due to: " + e.getMessage(), e); ++ logger.error("Failed to list stopped VMs on remote host " + remoteIp + ", due to: " + e.getMessage(), e); if (e.getMessage().toLowerCase().contains("connection refused")) { - return new Answer(command, false, "Unable to connect to remote host " + command.getRemoteIp() + ", please check the libvirtd tcp connectivity and retry"); + return new Answer(command, false, "Unable to connect to remote host " + remoteIp + ", please check the libvirtd tcp connectivity and retry"); } - return new Answer(command, false, "Unable to list stopped VMs on remote host " + command.getRemoteIp() + ", due to: " + e.getMessage()); + return new Answer(command, false, "Unable to list stopped VMs on remote host " + remoteIp + ", due to: " + e.getMessage()); } } @@@ -103,8 -106,8 +104,8 @@@ return instance; } catch (Exception e) { - logger.debug("Unable to retrieve unmanaged instance info, due to: " + e.getMessage(), e); - throw new CloudRuntimeException("Unable to retrieve unmanaged instance info, due to: " + e.getMessage()); - s_logger.debug("Unable to retrieve remote unmanaged instance info, due to: " + e.getMessage(), e); ++ logger.debug("Unable to retrieve remote unmanaged instance info, due to: " + e.getMessage(), e); + throw new CloudRuntimeException("Unable to retrieve remote unmanaged instance info, due to: " + e.getMessage()); } } diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java index 83636b9a9c3,d5192bfdb71..917435479f2 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java @@@ -19,16 -24,30 +24,34 @@@ import org.apache.log4j.Logger import com.cloud.storage.Storage; import com.cloud.utils.exception.CloudRuntimeException; -@StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel) public class FiberChannelAdapter extends MultipathSCSIAdapterBase { + + private Logger LOGGER = Logger.getLogger(getClass()); + + private String hostname = null; + private String hostnameFq = null; + public FiberChannelAdapter() { LOGGER.info("Loaded FiberChannelAdapter for StorageLayer"); + // get the hostname - we need this to compare to connid values + try { + InetAddress inetAddress = InetAddress.getLocalHost(); + hostname = inetAddress.getHostName(); // basic hostname + if (hostname.indexOf(".") > 0) { + hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain + } + hostnameFq = inetAddress.getCanonicalHostName(); // fully qualified hostname + LOGGER.info("Loaded FiberChannelAdapter for StorageLayer on host [" + hostname + "]"); + } catch (UnknownHostException e) 
{ + LOGGER.error("Error getting hostname", e); + } } + @Override + public Storage.StoragePoolType getStoragePoolType() { + return Storage.StoragePoolType.FiberChannel; + } + @Override public KVMStoragePool getStoragePool(String uuid) { KVMStoragePool pool = MapStorageUuidToStoragePool.get(uuid); diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index b9671c872d1,a3a79de6bf5..f6242444d91 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@@ -134,9 -66,78 +134,13 @@@ import com.cloud.utils.exception.CloudR import com.cloud.utils.script.Script; import com.cloud.utils.storage.S3.S3Utils; import com.cloud.vm.VmDetailConstants; -import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer; -import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; -import org.apache.cloudstack.direct.download.DirectDownloadHelper; -import org.apache.cloudstack.direct.download.DirectTemplateDownloader; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.storage.command.AttachAnswer; -import org.apache.cloudstack.storage.command.AttachCommand; -import org.apache.cloudstack.storage.command.CheckDataStoreStoragePolicyComplainceCommand; -import org.apache.cloudstack.storage.command.CopyCmdAnswer; -import org.apache.cloudstack.storage.command.CopyCommand; -import org.apache.cloudstack.storage.command.CreateObjectAnswer; -import org.apache.cloudstack.storage.command.CreateObjectCommand; -import org.apache.cloudstack.storage.command.DeleteCommand; -import org.apache.cloudstack.storage.command.DettachAnswer; -import org.apache.cloudstack.storage.command.DettachCommand; -import org.apache.cloudstack.storage.command.ForgetObjectCmd; -import org.apache.cloudstack.storage.command.IntroduceObjectCmd; -import org.apache.cloudstack.storage.command.ResignatureAnswer; -import org.apache.cloudstack.storage.command.ResignatureCommand; -import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; -import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; -import org.apache.cloudstack.storage.command.SyncVolumePathCommand; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.cloudstack.storage.to.TemplateObjectTO; -import org.apache.cloudstack.storage.to.VolumeObjectTO; + import org.apache.cloudstack.utils.cryptsetup.KeyFile; + import org.apache.cloudstack.utils.qemu.QemuImageOptions; -import org.apache.cloudstack.utils.qemu.QemuImg; -import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.cloudstack.utils.qemu.QemuImgException; -import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.cloudstack.utils.qemu.QemuObject; + import org.apache.cloudstack.utils.qemu.QemuObject.EncryptFormat; -import org.apache.commons.collections.MapUtils; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.BooleanUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.log4j.Logger; -import org.libvirt.Connect; -import org.libvirt.Domain; -import org.libvirt.DomainInfo; -import org.libvirt.DomainSnapshot; -import org.libvirt.LibvirtException; - -import 
javax.naming.ConfigurationException; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.text.DateFormat; -import java.text.SimpleDateFormat; + import java.util.ArrayList; -import java.util.Arrays; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; - -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; -import static com.cloud.utils.storage.S3.S3Utils.putFile; public class KVMStorageProcessor implements StorageProcessor { - private static final Logger s_logger = Logger.getLogger(KVMStorageProcessor.class); + protected Logger logger = LogManager.getLogger(getClass()); private final KVMStoragePoolManager storagePoolMgr; private final LibvirtComputingResource resource; private StorageLayer storageLayer; @@@ -267,10 -268,10 +271,10 @@@ Map<String, String> details = primaryStore.getDetails(); - String path = details != null ? details.get("managedStoreTarget") : null; + String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); @@@ -404,12 -415,12 +418,12 @@@ if (primaryPool.getType() == StoragePoolType.CLVM) { templatePath = ((NfsTO)imageStore).getUrl() + File.separator + templatePath; vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); - } if (primaryPool.getType() == StoragePoolType.PowerFlex) { + } if (storagePoolMgr.supportsPhysicalDiskCopy(primaryPool.getType())) { Map<String, String> details = primaryStore.getDetails(); - String path = details != null ? 
details.get("managedStoreTarget") : null; + String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { - s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); + logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); } BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath); @@@ -1046,8 -1057,8 +1060,8 @@@ } finally { srcVolume.clearPassphrase(); if (isCreatedFromVmSnapshot) { - s_logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot"); + logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot"); - } else if (primaryPool.getType() != StoragePoolType.RBD) { + } else if (primaryPool != null && primaryPool.getType() != StoragePoolType.RBD) { deleteSnapshotOnPrimary(cmd, snapshot, primaryPool); } @@@ -1940,26 -1948,43 +1954,43 @@@ * @param snapshotPath Path to convert the base file; * @return null if the conversion occurs successfully or an error message that must be handled. */ - protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool, String baseFile, String snapshotPath, VolumeObjectTO volume, int wait) { - try { - logger.debug(String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath)); + protected String convertBaseFileToSnapshotFileInPrimaryStorageDir(KVMStoragePool primaryPool, + KVMPhysicalDisk baseFile, String snapshotPath, VolumeObjectTO volume, int wait) { + try (KeyFile srcKey = new KeyFile(volume.getPassphrase())) { - s_logger.debug( ++ logger.debug( + String.format("Trying to convert volume [%s] (%s) to snapshot [%s].", volume, baseFile, snapshotPath)); primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR); + convertTheBaseFileToSnapshot(baseFile, snapshotPath, wait, srcKey); + } catch (QemuImgException | LibvirtException | IOException ex) { + return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile, + snapshotPath, ex.getMessage()); + } - QemuImgFile srcFile = new QemuImgFile(baseFile); - srcFile.setFormat(PhysicalDiskFormat.QCOW2); - s_logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, ++ logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, + snapshotPath)); + return null; + } - QemuImgFile destFile = new QemuImgFile(snapshotPath); - destFile.setFormat(PhysicalDiskFormat.QCOW2); + private void convertTheBaseFileToSnapshot(KVMPhysicalDisk baseFile, String snapshotPath, int wait, KeyFile srcKey) + throws LibvirtException, QemuImgException { + List<QemuObject> qemuObjects = new ArrayList<>(); + Map<String, String> options = new HashMap<>(); + QemuImageOptions qemuImageOpts = new QemuImageOptions(baseFile.getPath()); + if (srcKey.isSet()) { + String srcKeyName = "sec0"; + qemuObjects.add(QemuObject.prepareSecretForQemuImg(baseFile.getFormat(), EncryptFormat.LUKS, + srcKey.toString(), srcKeyName, options)); + qemuImageOpts = new QemuImageOptions(baseFile.getFormat(), baseFile.getPath(), srcKeyName); + } + QemuImgFile srcFile = new QemuImgFile(baseFile.getPath()); + srcFile.setFormat(PhysicalDiskFormat.QCOW2); - QemuImg q 
= new QemuImg(wait); - q.convert(srcFile, destFile); + QemuImgFile destFile = new QemuImgFile(snapshotPath); + destFile.setFormat(PhysicalDiskFormat.QCOW2); - logger.debug(String.format("Converted volume [%s] (from path \"%s\") to snapshot [%s].", volume, baseFile, snapshotPath)); - return null; - } catch (QemuImgException | LibvirtException ex) { - return String.format("Failed to convert %s snapshot of volume [%s] to [%s] due to [%s].", volume, baseFile, snapshotPath, ex.getMessage()); - } + QemuImg q = new QemuImg(wait); + q.convert(srcFile, destFile, options, qemuObjects, qemuImageOpts, null, true); } /** @@@ -2465,10 -2490,9 +2496,9 @@@ String destVolumeName = null; if (destPrimaryStore.isManaged()) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { - s_logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); + logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); } - String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; - destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath; + destVolumeName = derivePath(destPrimaryStore, destData, destPrimaryStore.getDetails()); } else { final String volumeName = UUID.randomUUID().toString(); destVolumeName = volumeName + "." + destFormat.getFileExtension(); diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index b80accd6018,31281615bce..97a4c4dc044 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@@ -273,6 -272,16 +273,16 @@@ public class LibvirtStorageAdaptor impl } } + private void checkNetfsStoragePoolMounted(String uuid) { + String targetPath = _mountPoint + File.separator + uuid; + int mountpointResult = Script.runSimpleBashScriptForExitValue("mountpoint -q " + targetPath); + if (mountpointResult != 0) { + String errMsg = String.format("libvirt failed to mount storage pool %s at %s", uuid, targetPath); - s_logger.error(errMsg); ++ logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + private StoragePool createNetfsStoragePool(PoolType fsType, Connect conn, String uuid, String host, String path) throws LibvirtException { String targetPath = _mountPoint + File.separator + uuid; LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(fsType, uuid, uuid, host, path, targetPath); @@@ -762,14 -769,14 +776,14 @@@ // handle ebusy error when pool is quickly destroyed if (e.toString().contains("exit status 16")) { String targetPath = _mountPoint + File.separator + uuid; - logger.error("deleteStoragePool removed pool from libvirt, but libvirt had trouble unmounting the pool. Trying umount location " + targetPath + - "again in a few seconds"); - s_logger.error("deleteStoragePool removed pool from libvirt, but libvirt had trouble unmounting the pool. Trying umount location " + targetPath + ++ logger.error("deleteStoragePool removed pool from libvirt, but libvirt had trouble unmounting the pool. 
Trying umount location " + targetPath + + " again in a few seconds"); String result = Script.runSimpleBashScript("sleep 5 && umount " + targetPath); if (result == null) { - logger.error("Succeeded in unmounting " + targetPath); - s_logger.info("Succeeded in unmounting " + targetPath); ++ logger.info("Succeeded in unmounting " + targetPath); return true; } - s_logger.error("Failed to unmount " + targetPath); + logger.error("Failed to unmount " + targetPath); } throw new CloudRuntimeException(e.toString(), e); } diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 1625ecc171a,5bcb6e48d97..03acfcc89ad --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@@ -32,8 -30,8 +30,7 @@@ import java.util.concurrent.TimeUnit import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; - import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.joda.time.Duration; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; @@@ -42,13 -40,10 +39,12 @@@ import com.cloud.utils.exception.CloudR import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; - import org.libvirt.LibvirtException; +import org.joda.time.Duration; public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { - static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class); + protected static Logger LOGGER = LogManager.getLogger(MultipathSCSIAdapterBase.class); static final Map<String, KVMStoragePool> MapStorageUuidToStoragePool = new HashMap<>(); /** diff --cc plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 826c613798a,61a949f42d3..561034155c2 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@@ -570,14 -571,13 +569,13 @@@ public class VmwareManagerImpl extends } if (secUrl == null) { - // we are using non-NFS image store, then use cache storage instead - logger.info("Secondary storage is not NFS, we need to use staging storage"); - s_logger.info("Secondary storage is either not having free capacity or not NFS, then use cache/staging storage instead"); ++ logger.info("Secondary storage is either not having free capacity or not NFS, then use cache/staging storage instead"); DataStore cacheStore = _dataStoreMgr.getImageCacheStore(dcId); if (cacheStore != null) { secUrl = cacheStore.getUri(); secId = cacheStore.getId(); } else { - logger.warn("No staging storage is found when non-NFS secondary storage is used"); - s_logger.warn("No cache/staging storage found when NFS secondary storage with free capacity not available or non-NFS secondary storage is used"); ++ logger.warn("No cache/staging storage found when NFS secondary storage with free capacity not available or non-NFS secondary storage is used"); } } @@@ -597,13 -597,12 +595,12 @@@ } if (urlIdList.isEmpty()) { - // we are using non-NFS image store, then use cache storage instead - 
logger.info("Secondary storage is not NFS, we need to use staging storage"); - s_logger.info("Secondary storage is either not having free capacity or not NFS, then use cache/staging storage instead"); ++ logger.info("Secondary storage is either not having free capacity or not NFS, then use cache/staging storage instead"); DataStore cacheStore = _dataStoreMgr.getImageCacheStore(dcId); if (cacheStore != null) { urlIdList.add(new Pair<>(cacheStore.getUri(), cacheStore.getId())); } else { - logger.warn("No staging storage is found when non-NFS secondary storage is used"); - s_logger.warn("No cache/staging storage found when NFS secondary storage with free capacity not available or non-NFS secondary storage is used"); ++ logger.warn("No cache/staging storage found when NFS secondary storage with free capacity not available or non-NFS secondary storage is used"); } } diff --cc plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index 87dd67f72af,549604b3577..329de5a398a --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@@ -303,18 -350,10 +351,10 @@@ public class AdaptiveDataStoreDriverImp api.resize(context, destIn, destdata.getSize()); } - String connectionId = api.attach(context, destIn); - - String finalPath; - // format: type=fiberwwn; address=<address>; connid=<connid> - if (connectionId != null) { - finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId); - } else { - finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase()); - } - - persistVolumeData(storagePool, details, destdata, outVolume, connectionId); + // initial volume info does not have connection map yet. That is added when grantAccess is called later. 
+ String finalPath = generatePathInfo(outVolume, null); + persistVolumeData(storagePool, details, destdata, outVolume, null); - s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); + logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); VolumeObjectTO voto = new VolumeObjectTO(); voto.setPath(finalPath); @@@ -443,6 -482,66 +483,66 @@@ } + public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { - s_logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid()); ++ logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid()); + + try { + StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); + Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterContext context = newManagedVolumeContext(dataObject); + ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool); + api.attach(context, sourceIn, host.getName()); + + // rewrite the volume data, especially the connection string for informational purposes - unless it was turned off above + ProviderVolume vol = api.getVolume(context, sourceIn); + ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool); + Map<String,String> connIdMap = api.getConnectionIdMap(dataIn); + persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); + + - s_logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid()); ++ logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid()); + return true; + } catch (Throwable e) { + String msg = "Error granting host " + host.getName() + " access to volume " + dataObject.getUuid() + ":" + e.getMessage(); - s_logger.error(msg); ++ logger.error(msg); + throw new CloudRuntimeException(msg, e); + } + } + + public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { + // nothing to do if the host is null + if (dataObject == null || host == null || dataStore == null) { + return; + } + - s_logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid()); ++ logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid()); + + try { + StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); + Map<String, String> details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterContext context = newManagedVolumeContext(dataObject); + ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool); + + api.detach(context, sourceIn, host.getName()); + + // rewrite the volume data, especially the connection string for informational purposes + ProviderVolume vol = api.getVolume(context, sourceIn); + ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool); + Map<String,String> connIdMap = api.getConnectionIdMap(dataIn); + persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); + - s_logger.info("Revoked access for host " + host.getName() + " to volume " + dataObject.getUuid()); ++ logger.info("Revoked access for host " + host.getName() + " to volume " + dataObject.getUuid()); + } catch (Throwable e) { + String msg = "Error revoking access for host " + 
host.getName() + " to volume " + dataObject.getUuid() + ":" + e.getMessage(); - s_logger.error(msg); ++ logger.error(msg); + throw new CloudRuntimeException(msg, e); + } + } + @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { diff --cc plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java index ade9e8370a8,58264161bd6..b3f49b015d1 --- a/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java +++ b/plugins/storage/volume/storpool/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/StorPoolBackupSnapshotCommandWrapper.java @@@ -25,10 -30,16 +30,15 @@@ import java.util.Map import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.to.SnapshotObjectTO; + import org.apache.cloudstack.utils.cryptsetup.KeyFile; + import org.apache.cloudstack.utils.qemu.QemuImageOptions; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; + import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; + import org.apache.cloudstack.utils.qemu.QemuObject; + import org.apache.cloudstack.utils.qemu.QemuObject.EncryptFormat; import org.apache.commons.io.FileUtils; -import org.apache.log4j.Logger; import com.cloud.agent.api.storage.StorPoolBackupSnapshotCommand; import com.cloud.agent.api.to.DataStoreTO; diff --cc server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 4f70149ad11,eb72a626036..d9277377cbf --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@@ -139,11 -135,12 +139,12 @@@ import org.apache.cloudstack.storage.da import org.apache.cloudstack.userdata.UserDataManager; import org.apache.cloudstack.utils.jsinterpreter.TagAsRuleHelper; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import org.apache.cloudstack.vm.UnmanagedVMsManager; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; +import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; diff --cc server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index 390ea155b3c,927637ab918..ffd482b711d --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@@ -69,7 -71,12 +70,11 @@@ import java.util.UUID import static com.cloud.configuration.ConfigurationManagerImpl.ADD_HOST_ON_SERVICE_RESTART_KVM; public abstract class LibvirtServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { - private static final Logger s_logger = Logger.getLogger(LibvirtServerDiscoverer.class); private final int _waitTime = 5; /* wait for 5 minutes */ + + private final static HashSet<String> COMPATIBLE_HOST_OSES = new HashSet<>(Arrays.asList("Rocky", "Rocky Linux", + "Red", "Red Hat Enterprise Linux", "Oracle", "Oracle Linux Server", "AlmaLinux")); + private String _kvmPrivateNic; private String 
_kvmPublicNic; private String _kvmGuestNic; @@@ -468,10 -475,10 +473,10 @@@ _hostDao.loadDetails(oneHost); String hostOsInCluster = oneHost.getDetail("Host.OS"); String hostOs = ssCmd.getHostDetails().get("Host.OS"); - if (!hostOsInCluster.equalsIgnoreCase(hostOs)) { + if (!isHostOsCompatibleWithOtherHost(hostOsInCluster, hostOs)) { String msg = String.format("host: %s with hostOS, \"%s\"into a cluster, in which there are \"%s\" hosts added", firstCmd.getPrivateIpAddress(), hostOs, hostOsInCluster); if (hostOs != null && hostOs.startsWith(hostOsInCluster)) { - s_logger.warn(String.format("Adding %s. This may or may not be ok!", msg)); + logger.warn(String.format("Adding %s. This may or may not be ok!", msg)); } else { throw new IllegalArgumentException(String.format("Can't add %s.", msg)); } @@@ -483,6 -490,17 +488,17 @@@ return _resourceMgr.fillRoutingHostVO(host, ssCmd, getHypervisorType(), host.getDetails(), null); } + protected boolean isHostOsCompatibleWithOtherHost(String hostOsInCluster, String hostOs) { + if (hostOsInCluster.equalsIgnoreCase(hostOs)) { + return true; + } + if (COMPATIBLE_HOST_OSES.contains(hostOsInCluster) && COMPATIBLE_HOST_OSES.contains(hostOs)) { - s_logger.info(String.format("The host OS (%s) is compatible with the existing host OS (%s) in the cluster.", hostOs, hostOsInCluster)); ++ logger.info(String.format("The host OS (%s) is compatible with the existing host OS (%s) in the cluster.", hostOs, hostOsInCluster)); + return true; + } + return false; + } + @Override public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map<String, String> details, List<String> hostTags) { // TODO Auto-generated method stub diff --cc server/src/main/java/com/cloud/server/ManagementServerImpl.java index 81ab2524bd6,14afcc71245..0adcd8310da --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@@ -5148,12 -5062,47 +5148,47 @@@ public class ManagementServerImpl exten } final Pair<List<HypervisorCapabilitiesVO>, Integer> result = _hypervisorCapabilitiesDao.searchAndCount(sc, searchFilter); - return new Pair<List<? 
extends HypervisorCapabilities>, Integer>(result.first(), result.second()); + return new Pair<>(result.first(), result.second()); } + protected HypervisorCapabilitiesVO getHypervisorCapabilitiesForUpdate(final Long id, final String hypervisorStr, final String hypervisorVersion) { + if (id == null && StringUtils.isAllEmpty(hypervisorStr, hypervisorVersion)) { + throw new InvalidParameterValueException("Either ID or hypervisor and hypervisor version must be specified"); + } + if (id != null) { + if (!StringUtils.isAllBlank(hypervisorStr, hypervisorVersion)) { + throw new InvalidParameterValueException("ID can not be specified together with hypervisor and hypervisor version"); + } + HypervisorCapabilitiesVO hpvCapabilities = _hypervisorCapabilitiesDao.findById(id, true); + if (hpvCapabilities == null) { + final InvalidParameterValueException ex = new InvalidParameterValueException("unable to find the hypervisor capabilities for specified id"); + ex.addProxyObject(id.toString(), "Id"); + throw ex; + } + return hpvCapabilities; + } + if (StringUtils.isAnyBlank(hypervisorStr, hypervisorVersion)) { + throw new InvalidParameterValueException("Hypervisor and hypervisor version must be specified together"); + } + HypervisorType hypervisorType = HypervisorType.getType(hypervisorStr); + if (hypervisorType == HypervisorType.None) { + throw new InvalidParameterValueException("Invalid hypervisor specified"); + } + HypervisorCapabilitiesVO hpvCapabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(hypervisorType, hypervisorVersion); + if (hpvCapabilities == null) { + final InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find the hypervisor capabilities for specified hypervisor and hypervisor version"); + ex.addProxyObject(hypervisorStr, "hypervisor"); + ex.addProxyObject(hypervisorVersion, "hypervisorVersion"); + throw ex; + } + return hpvCapabilities; + } + @Override public HypervisorCapabilities updateHypervisorCapabilities(UpdateHypervisorCapabilitiesCmd cmd) { - final Long id = cmd.getId(); + Long id = cmd.getId(); + final String hypervisorStr = cmd.getHypervisor(); + final String hypervisorVersion = cmd.getHypervisorVersion(); final Boolean securityGroupEnabled = cmd.getSecurityGroupEnabled(); final Long maxGuestsLimit = cmd.getMaxGuestsLimit(); final Integer maxDataVolumesLimit = cmd.getMaxDataVolumesLimit(); @@@ -5174,7 -5117,14 +5203,14 @@@ if (!updateNeeded) { return hpvCapabilities; } + if (StringUtils.isNotBlank(hypervisorVersion) && !hpvCapabilities.getHypervisorVersion().equals(hypervisorVersion)) { - s_logger.debug(String.format("Hypervisor capabilities for hypervisor: %s and version: %s does not exist, creating a copy from the parent version: %s for update.", hypervisorStr, hypervisorVersion, hpvCapabilities.getHypervisorVersion())); ++ logger.debug(String.format("Hypervisor capabilities for hypervisor: %s and version: %s does not exist, creating a copy from the parent version: %s for update.", hypervisorStr, hypervisorVersion, hpvCapabilities.getHypervisorVersion())); + HypervisorCapabilitiesVO copy = new HypervisorCapabilitiesVO(hpvCapabilities); + copy.setHypervisorVersion(hypervisorVersion); + hpvCapabilities = _hypervisorCapabilitiesDao.persist(copy); + } + id = hpvCapabilities.getId(); hpvCapabilities = _hypervisorCapabilitiesDao.createForUpdate(id); if (securityGroupEnabled != null) { diff --cc server/src/main/java/com/cloud/server/StatsCollector.java index 86b8b0c682c,070508f2502..9d0d95c8b39 --- 
a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@@ -1960,10 -1964,10 +1962,10 @@@ public class StatsCollector extends Man vmStatsMaxRetentionTime.scope(), vmStatsMaxRetentionTime.toString())); return; } - LOGGER.trace("Removing older VM stats records."); + logger.trace("Removing older VM stats records."); Date now = new Date(); Date limit = DateUtils.addMinutes(now, -maxRetentionTime); - vmStatsDao.removeAllByTimestampLessThan(limit); + vmStatsDao.removeAllByTimestampLessThan(limit, vmStatsRemoveBatchSize.value()); } /** diff --cc server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index c950322452b,553f72b719b..73a7f7ab546 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@@ -1055,12 -1057,12 +1055,12 @@@ public class VolumeApiServiceImpl exten created = false; VolumeInfo vol = volFactory.getVolume(cmd.getEntityId()); vol.stateTransit(Volume.Event.DestroyRequested); - throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e); + throw new CloudRuntimeException("Failed to create volume: " + volume.getUuid(), e); } finally { if (!created) { - s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); - _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, cmd.getDisplayVolume()); - _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, cmd.getDisplayVolume(), new Long(volume.getSize())); + logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); + _resourceLimitMgr.decrementVolumeResourceCount(volume.getAccountId(), cmd.getDisplayVolume(), + volume.getSize(), _diskOfferingDao.findByIdIncludingRemoved(volume.getDiskOfferingId())); } } } diff --cc server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index b1c653d3f92,2ff3bf2ca21..c593b23d7cc --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@@ -796,9 -794,16 +796,16 @@@ public class UnmanagedVMsManagerImpl im } } copyRemoteVolumeCommand.setTempPath(tmpPath); + int copyTimeout = UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.value(); + if (copyTimeout <= 0) { + copyTimeout = Integer.valueOf(UnmanagedVMsManager.RemoteKvmInstanceDisksCopyTimeout.defaultValue()); + } + int copyTimeoutInSecs = copyTimeout * 60; + copyRemoteVolumeCommand.setWait(copyTimeoutInSecs); - LOGGER.error(String.format("Initiating copy remote volume %s from %s, timeout %d secs", path, remoteUrl, copyTimeoutInSecs)); ++ logger.error(String.format("Initiating copy remote volume %s from %s, timeout %d secs", path, remoteUrl, copyTimeoutInSecs)); Answer answer = agentManager.easySend(dest.getHost().getId(), copyRemoteVolumeCommand); if (!(answer instanceof CopyRemoteVolumeAnswer)) { - throw new CloudRuntimeException("Error while copying volume"); + throw new CloudRuntimeException("Error while copying volume of remote instance: " + answer.getDetails()); } CopyRemoteVolumeAnswer copyRemoteVolumeAnswer = (CopyRemoteVolumeAnswer) answer; if(!copyRemoteVolumeAnswer.getResult()) { diff --cc ui/src/views/compute/DeployVM.vue index 865ee79d76c,0bd68e5e204..6792314e74b --- a/ui/src/views/compute/DeployVM.vue +++ 
b/ui/src/views/compute/DeployVM.vue @@@ -2343,10 -2275,8 +2343,11 @@@ export default args.pageSize = args.pageSize || 10 } args.zoneid = _.get(this.zone, 'id') + args.account = store.getters.project?.id ? null : this.owner.account + args.domainid = store.getters.project?.id ? null : this.owner.domainid + args.projectid = store.getters.project?.id || this.owner.projectid args.templatefilter = templateFilter + args.projectid = -1 args.details = 'all' args.showicon = 'true' args.id = this.templateId diff --cc utils/src/main/java/com/cloud/utils/ssh/SshHelper.java index fc229bd7464,1856c0b3838..87221ab5ac8 --- a/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java +++ b/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java @@@ -39,8 -38,9 +39,9 @@@ import com.cloud.utils.Pair public class SshHelper { private static final int DEFAULT_CONNECT_TIMEOUT = 180000; private static final int DEFAULT_KEX_TIMEOUT = 60000; + private static final int DEFAULT_WAIT_RESULT_TIMEOUT = 120000; - private static final Logger s_logger = Logger.getLogger(SshHelper.class); + protected static Logger LOGGER = LogManager.getLogger(SshHelper.class); public static Pair<Boolean, String> sshExecute(String host, int port, String user, File pemKeyFile, String password, String command) throws Exception {
