This is an automated email from the ASF dual-hosted git repository.

vishesh pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
commit 6fe835e1183da5b25e22f1d673602655b1591c14
Merge: 525c2c6fe93 b2ef53b8a2d
Author: Vishesh <vishes...@gmail.com>
AuthorDate: Thu Jun 13 11:57:55 2024 +0530

    Merge branch '4.19'

 .../java/com/cloud/network/vpc/VpcService.java | 2 +
 .../org/apache/cloudstack/api/ApiConstants.java | 1 +
 .../api/command/user/volume/ListVolumesCmd.java | 18 ++
 .../api/command/user/vpc/CreateVPCCmd.java | 6 +-
 .../cloudstack/api/response/VolumeResponse.java | 12 +-
 .../api/command/user/vpc/CreateVPCCmdTest.java | 5 +-
 .../com/cloud/agent/api/GetVolumeStatAnswer.java | 85 ++++++++++
 .../com/cloud/agent/api/GetVolumeStatCommand.java | 72 ++++++++
 .../cloudstack/storage/to/VolumeObjectTO.java | 9 +
 .../main/java/com/cloud/storage/dao/VolumeDao.java | 3 +-
 .../java/com/cloud/storage/dao/VolumeDaoImpl.java | 1 -
 .../storage/datastore/db/ImageStoreDaoImpl.java | 2 +-
 .../META-INF/db/views/cloud.volume_view.sql | 1 +
 .../wrapper/LibvirtGetRemoteVmsCommandWrapper.java | 26 ++-
 .../LibvirtGetVolumeStatCommandWrapper.java | 66 ++++++++
 .../wrapper/LibvirtMigrateCommandWrapper.java | 86 ++++++++++
 .../LibvirtMigrateVolumeCommandWrapper.java | 23 ++-
 .../hypervisor/kvm/storage/KVMStoragePool.java | 3 +
 .../kvm/storage/KVMStorageProcessor.java | 2 +-
 .../kvm/storage/ScaleIOStorageAdaptor.java | 15 +-
 .../hypervisor/kvm/storage/ScaleIOStoragePool.java | 5 +
 .../hypervisor/kvm/storage/StorageAdaptor.java | 18 +-
 .../wrapper/LibvirtMigrateCommandWrapperTest.java | 12 ++
 .../version/KubernetesVersionManagerImpl.java | 24 ++-
 .../version/KubernetesVersionManagerImplTest.java | 73 +++++++++
 .../driver/ScaleIOPrimaryDataStoreDriver.java | 23 ++-
 .../driver/ScaleIOPrimaryDataStoreDriverTest.java | 31 +++-
 .../java/com/cloud/api/query/QueryManagerImpl.java | 33 +++-
 .../com/cloud/api/query/dao/VolumeJoinDaoImpl.java | 2 +
 .../java/com/cloud/api/query/vo/VolumeJoinVO.java | 8 +-
 .../configuration/ConfigurationManagerImpl.java | 26 +--
 .../java/com/cloud/network/vpc/VpcManagerImpl.java | 11 ++
 .../java/com/cloud/storage/StorageManagerImpl.java | 15 +-
 .../com/cloud/storage/VolumeApiServiceImpl.java | 14 +-
 .../main/java/com/cloud/vm/UserVmManagerImpl.java | 2 +-
 .../cloudstack/vm/UnmanagedVMsManagerImpl.java | 14 +-
 .../storage/resource/HttpUploadServerHandler.java | 3 +-
 ui/src/components/header/CreateMenu.vue | 181 +++++++-------------
 ui/src/components/view/SearchView.vue | 63 ++++++-
 ui/src/config/section/storage.js | 2 +-
 ui/src/permission.js | 6 +-
 ui/src/views/compute/CreateKubernetesCluster.vue | 3 +-
 ui/src/views/dashboard/CapacityDashboard.vue | 6 +-
 ui/src/views/project/AddAccountOrUserToProject.vue | 70 ++++++--
 44 files changed, 843 insertions(+), 240 deletions(-)

diff --cc api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
index 81217b8406a,7512120cc00..54c0227de9f
--- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
@@@ -266,9 -263,9 +266,10 @@@ public class ApiConstants
      public static final String IS_CLEANUP_REQUIRED = "iscleanuprequired";
      public static final String IS_DYNAMIC = "isdynamic";
      public static final String IS_EDGE = "isedge";
 +    public static final String IS_ENCRYPTED = "isencrypted";
      public static final String IS_EXTRACTABLE = "isextractable";
      public static final String IS_FEATURED = "isfeatured";
 +    public static final String IS_IMPLICIT = "isimplicit";
      public static final String IS_PORTABLE = "isportable";
      public static final String IS_PUBLIC = "ispublic";
      public static final String IS_PERSISTENT = "ispersistent";
diff --cc api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java
index 94f05f707a0,39f2b32751d..89a65f8c27c
--- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java
@@@ -208,21 -210,17 +208,17 @@@ public class CreateVPCCmd extends BaseA
      public void execute() {
          Vpc vpc = null;
          try {
 -            if (isStart()) {
 -                _vpcService.startVpc(getEntityId(), true);
 -            } else {
 -                logger.debug("Not starting VPC as " + ApiConstants.START + "=false was passed to the API");
 -            }
 +            _vpcService.startVpc(this);
              vpc = _entityMgr.findById(Vpc.class, getEntityId());
          } catch (ResourceUnavailableException ex) {
 -            s_logger.warn("Exception: ", ex);
 +            logger.warn("Exception: ", ex);
              throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
          } catch (ConcurrentOperationException ex) {
 -            s_logger.warn("Exception: ", ex);
 +            logger.warn("Exception: ", ex);
              throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
          } catch (InsufficientCapacityException ex) {
 -            s_logger.info(ex);
 -            s_logger.trace(ex);
 +            logger.info(ex);
 +            logger.trace(ex);
              throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage());
          }
diff --cc engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java
index 31d64daf147,be1a7e01b4e..2e68dcae3c5
--- a/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/storage/dao/VolumeDaoImpl.java
@@@ -77,11 -73,8 +77,10 @@@ public class VolumeDaoImpl extends Gene
      protected GenericSearchBuilder<VolumeVO, SumCount> secondaryStorageSearch;
      private final SearchBuilder<VolumeVO> poolAndPathSearch;
      @Inject
 +    ReservationDao reservationDao;
 +    @Inject
      ResourceTagDao _tagsDao;
 -    protected static final String SELECT_VM_SQL = "SELECT DISTINCT instance_id from volumes v where v.host_id = ? and v.mirror_state = ?";
      // need to account for zone-wide primary storage where storage_pool has
      // null-value pod and cluster, where hypervisor information is stored in
      // storage_pool
diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java
index ead294ad05f,69802bb845f..a9da4a50452
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetRemoteVmsCommandWrapper.java
@@@ -45,11 -46,11 +45,9 @@@ import java.util.List
  @ResourceWrapper(handles = GetRemoteVmsCommand.class)
  public final class LibvirtGetRemoteVmsCommandWrapper extends CommandWrapper<GetRemoteVmsCommand, Answer, LibvirtComputingResource> {
 -    private static final Logger s_logger = Logger.getLogger(LibvirtGetRemoteVmsCommandWrapper.class);
 -
      @Override
      public Answer execute(final GetRemoteVmsCommand command, final LibvirtComputingResource libvirtComputingResource) {
 -        String result = null;
 -        String hypervisorURI = "qemu+tcp://" + command.getRemoteIp() +
 -                "/system";
 +        String hypervisorURI = "qemu+tcp://" + command.getRemoteIp() + "/system";
          HashMap<String, UnmanagedInstanceTO> unmanagedInstances = new HashMap<>();
          try {
              Connect conn = LibvirtConnection.getConnection(hypervisorURI);
@@@ -58,26 -59,27 +56,28 @@@
                  final Domain domain = libvirtComputingResource.getDomain(conn, name);
                  final DomainInfo.DomainState ps = domain.getInfo().state;
 -                final VirtualMachine.PowerState state = libvirtComputingResource.convertToPowerState(ps);
 -                s_logger.debug("VM " + domain.getName() + " - powerstate: " + ps + ", state: " + state.toString());
 +
 -                logger.debug("VM " + domain.getName() + ": powerstate = " + ps + "; vm state=" + state.toString());
++                logger.debug("VM " + domain.getName() + " - powerstate: " + ps + ", state: " + state.toString());
                  if (state == VirtualMachine.PowerState.PowerOff) {
                      try {
                          UnmanagedInstanceTO instance = getUnmanagedInstance(libvirtComputingResource, domain, conn);
                          unmanagedInstances.put(instance.getName(), instance);
                      } catch (Exception e) {
 -                        logger.error("Error while fetching instance details", e);
 -                        s_logger.error("Couldn't fetch VM " + domain.getName() + " details, due to: " + e.getMessage(), e);
++                        logger.error("Couldn't fetch VM " + domain.getName() + " details, due to: " + e.getMessage(), e);
                      }
                  }
                  domain.free();
              }
 -            logger.debug("Found Vms: "+ unmanagedInstances.size());
 -            return new GetRemoteVmsAnswer(command, "", unmanagedInstances);
 -            s_logger.debug("Found " + unmanagedInstances.size() + " stopped VMs on host " + command.getRemoteIp());
++            logger.debug("Found " + unmanagedInstances.size() + " stopped VMs on host " + command.getRemoteIp());
 +            return new GetRemoteVmsAnswer(command, "", unmanagedInstances);
          } catch (final LibvirtException e) {
 -            logger.error("Error while listing stopped Vms on remote host: "+ e.getMessage());
 -            return new Answer(command, false, result);
 -            s_logger.error("Failed to list stopped VMs on remote host " + command.getRemoteIp() + ", due to: " + e.getMessage(), e);
++            logger.error("Failed to list stopped VMs on remote host " + command.getRemoteIp() + ", due to: " + e.getMessage(), e);
 +            if (e.getMessage().toLowerCase().contains("connection refused")) {
 +                return new Answer(command, false, "Unable to connect to remote host " + command.getRemoteIp() + ", please check the libvirtd tcp connectivity and retry");
 +            }
 +            return new Answer(command, false, "Unable to list stopped VMs on remote host " + command.getRemoteIp() + ", due to: " + e.getMessage());
          }
      }
@@@ -103,8 -105,8 +103,8 @@@
              return instance;
          } catch (Exception e) {
 -            logger.debug("Unable to retrieve unmanaged instance info. ", e);
 -            throw new CloudRuntimeException("Unable to retrieve unmanaged instance info. " + e.getMessage());
 -            s_logger.debug("Unable to retrieve unmanaged instance info, due to: " + e.getMessage(), e);
++            logger.debug("Unable to retrieve unmanaged instance info, due to: " + e.getMessage(), e);
 +            throw new CloudRuntimeException("Unable to retrieve unmanaged instance info, due to: " + e.getMessage());
          }
      }
diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
index b97cb666de0,5ba174acd39..e15a3287692
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
@@@ -90,6 -92,8 +91,7 @@@ public final class LibvirtMigrateComman
      private static final String GRAPHICS_ELEM_END = "/graphics>";
      private static final String GRAPHICS_ELEM_START = "<graphics";
      private static final String CONTENTS_WILDCARD = "(?s).*";
 +    private static final String CDROM_LABEL = "hdc";
 -    private static final Logger s_logger = Logger.getLogger(LibvirtMigrateCommandWrapper.class);
      protected String createMigrationURI(final String destinationIp, final LibvirtComputingResource libvirtComputingResource) {
          if (StringUtils.isEmpty(destinationIp)) {
@@@ -167,12 -172,20 +170,20 @@@
                  String oldIsoVolumePath = getOldVolumePath(disks, vmName);
                  String newIsoVolumePath = getNewVolumePathIfDatastoreHasChanged(libvirtComputingResource, conn, to);
                  if (newIsoVolumePath != null && !newIsoVolumePath.equals(oldIsoVolumePath)) {
 -                    s_logger.debug(String.format("Editing mount path of iso from %s to %s", oldIsoVolumePath, newIsoVolumePath));
 +                    logger.debug(String.format("Editing mount path of iso from %s to %s", oldIsoVolumePath, newIsoVolumePath));
                      xmlDesc = replaceDiskSourceFile(xmlDesc, newIsoVolumePath, vmName);
 -                    if (s_logger.isDebugEnabled()) {
 -                        s_logger.debug(String.format("Replaced disk mount point [%s] with [%s] in VM [%s] XML configuration. New XML configuration is [%s].", oldIsoVolumePath, newIsoVolumePath, vmName, xmlDesc));
 +                    if (logger.isDebugEnabled()) {
 +                        logger.debug(String.format("Replaced disk mount point [%s] with [%s] in VM [%s] XML configuration. New XML configuration is [%s].", oldIsoVolumePath, newIsoVolumePath, vmName, xmlDesc));
                      }
                  }
 +
 +                // Replace CDROM ISO path
 +                String oldCdromIsoPath = getOldVolumePathForCdrom(disks, vmName);
 +                String newCdromIsoPath = getNewVolumePathForCdrom(libvirtComputingResource, conn, to);
 +                if (newCdromIsoPath != null && !newCdromIsoPath.equals(oldCdromIsoPath)) {
 +                    xmlDesc = replaceCdromIsoPath(xmlDesc, vmName, oldCdromIsoPath, newCdromIsoPath);
 +                }
 +
                  // delete the metadata of vm snapshots before migration
                  vmsnapshots = libvirtComputingResource.cleanVMSnapshotMetadata(dm);
@@@ -701,6 -713,81 +712,81 @@@
          return newIsoVolumePath;
      }
 +    private String getOldVolumePathForCdrom(List<DiskDef> disks, String vmName) {
 +        String oldIsoVolumePath = null;
 +        for (DiskDef disk : disks) {
 +            if (DiskDef.DeviceType.CDROM.equals(disk.getDeviceType())
 +                    && CDROM_LABEL.equals(disk.getDiskLabel())
 +                    && disk.getDiskPath() != null) {
 +                oldIsoVolumePath = disk.getDiskPath();
 +                break;
 +            }
 +        }
 +        return oldIsoVolumePath;
 +    }
 +
 +    private String getNewVolumePathForCdrom(LibvirtComputingResource libvirtComputingResource, Connect conn, VirtualMachineTO to) throws LibvirtException, URISyntaxException {
 +        DiskTO newDisk = null;
 +        for (DiskTO disk : to.getDisks()) {
 +            DataTO data = disk.getData();
 +            if (disk.getDiskSeq() == 3 && data != null && data.getPath() != null) {
 +                newDisk = disk;
 +                break;
 +            }
 +        }
 +
 +        String newIsoVolumePath = null;
 +        if (newDisk != null) {
 +            newIsoVolumePath = libvirtComputingResource.getVolumePath(conn, newDisk);
 +        }
 +        return newIsoVolumePath;
 +    }
 +
 +    protected String replaceCdromIsoPath(String xmlDesc, String vmName, String oldIsoVolumePath, String newIsoVolumePath) throws IOException, ParserConfigurationException, TransformerException, SAXException {
 +        InputStream in = IOUtils.toInputStream(xmlDesc);
 +
 +        DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory();
 +        DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
 +        Document doc = docBuilder.parse(in);
 +
 +        // Get the root element
 +        Node domainNode = doc.getFirstChild();
 +
 +        NodeList domainChildNodes = domainNode.getChildNodes();
 +
 +        for (int i = 0; i < domainChildNodes.getLength(); i++) {
 +            Node domainChildNode = domainChildNodes.item(i);
 +            if ("devices".equals(domainChildNode.getNodeName())) {
 +                NodeList devicesChildNodes = domainChildNode.getChildNodes();
 +                for (int x = 0; x < devicesChildNodes.getLength(); x++) {
 +                    Node deviceChildNode = devicesChildNodes.item(x);
 +                    if ("disk".equals(deviceChildNode.getNodeName())) {
 +                        Node diskNode = deviceChildNode;
 +                        NodeList diskChildNodes = diskNode.getChildNodes();
 +                        for (int z = 0; z < diskChildNodes.getLength(); z++) {
 +                            Node diskChildNode = diskChildNodes.item(z);
 +                            if ("source".equals(diskChildNode.getNodeName())) {
 +                                NamedNodeMap sourceNodeAttributes = diskChildNode.getAttributes();
 +                                Node sourceNodeAttribute = sourceNodeAttributes.getNamedItem("file");
 +                                if (oldIsoVolumePath != null && sourceNodeAttribute != null
 +                                        && oldIsoVolumePath.equals(sourceNodeAttribute.getNodeValue())) {
 +                                    diskNode.removeChild(diskChildNode);
 +                                    Element newChildSourceNode = doc.createElement("source");
 +                                    newChildSourceNode.setAttribute("file", newIsoVolumePath);
 +                                    diskNode.appendChild(newChildSourceNode);
 -                                    s_logger.debug(String.format("Replaced ISO path [%s] with [%s] in VM [%s] XML configuration.", oldIsoVolumePath, newIsoVolumePath, vmName));
++                                    logger.debug(String.format("Replaced ISO path [%s] with [%s] in VM [%s] XML configuration.", oldIsoVolumePath, newIsoVolumePath, vmName));
 +                                    return getXml(doc);
 +                                }
 +                            }
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        return getXml(doc);
 +    }
 +
      private String getPathFromSourceText(Set<String> paths, String sourceText) {
          if (paths != null && StringUtils.isNotBlank(sourceText)) {
              for (String path : paths) {
diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java
index 00f627d0528,0b9b951122a..cd5ce031e64
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java
@@@ -137,13 -139,12 +137,12 @@@ public class LibvirtMigrateVolumeComman
              parameters[0] = parameter;
              dm.blockCopy(destDiskLabel, diskdef, parameters, Domain.BlockCopyFlags.REUSE_EXT);
 -            LOGGER.info(String.format("Block copy has started for the volume %s : %s ", destDiskLabel, srcPath));
 +            logger.info(String.format("Block copy has started for the volume %s : %s ", destDiskLabel, srcPath));
              return checkBlockJobStatus(command, dm, destDiskLabel, srcPath, destPath, libvirtComputingResource, conn, srcSecretUUID);
 -
          } catch (Exception e) {
              String msg = "Migrate volume failed due to " + e.toString();
 -            LOGGER.warn(msg, e);
 +            logger.warn(msg, e);
              if (destDiskLabel != null) {
                  try {
                      dm.blockJobAbort(destDiskLabel, Domain.BlockJobAbortFlags.ASYNC);
@@@ -169,17 -171,26 +169,26 @@@
          while (waitTimeInSec > 0) {
              DomainBlockJobInfo blockJobInfo = dm.getBlockJobInfo(diskLabel, 0);
              if (blockJobInfo != null) {
 -                logger.debug(String.format("Volume %s : %s block copy progress: %s%% current value:%s end value:%s", diskLabel, srcPath, (blockJobInfo.end == 0)? 0 : 100*(blockJobInfo.cur / (double) blockJobInfo.end), blockJobInfo.cur, blockJobInfo.end));
 +                blockCopyProgress = (blockJobInfo.end == 0)? blockCopyProgress : 100 * (blockJobInfo.cur / (double) blockJobInfo.end);
 -                LOGGER.debug(String.format("Volume %s : %s, block copy progress: %s%%, current value: %s end value: %s, job info - type: %s, bandwidth: %s",
++                logger.debug(String.format("Volume %s : %s, block copy progress: %s%%, current value: %s end value: %s, job info - type: %s, bandwidth: %s",
 +                        diskLabel, srcPath, blockCopyProgress, blockJobInfo.cur, blockJobInfo.end, blockJobInfo.type, blockJobInfo.bandwidth));
                  if (blockJobInfo.cur == blockJobInfo.end) {
 -                    logger.info(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath));
 -                    dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.PIVOT);
 -                    if (StringUtils.isNotEmpty(srcSecretUUID)) {
 -                        libvirtComputingResource.removeLibvirtVolumeSecret(conn, srcSecretUUID);
 +                    if (blockJobInfo.end > 0) {
 -                        LOGGER.info(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath));
++                        logger.info(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath));
 +                        dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.PIVOT);
 +                        if (StringUtils.isNotEmpty(srcSecretUUID)) {
 +                            libvirtComputingResource.removeLibvirtVolumeSecret(conn, srcSecretUUID);
 +                        }
 +                        break;
 +                    } else {
 +                        // cur = 0, end = 0 - at this point, disk does not have an active block job (so, no need to abort job)
 +                        String msg = String.format("No active block copy job for the volume %s : %s - job stopped at %s progress", diskLabel, srcPath, blockCopyProgress);
 -                        LOGGER.warn(msg);
++                        logger.warn(msg);
 +                        return new MigrateVolumeAnswer(command, false, msg, null);
                      }
 -                    break;
                  }
              } else {
 -                LOGGER.info("Failed to get the block copy status, trying to abort the job");
 +                logger.info("Failed to get the block copy status, trying to abort the job");
                  dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.ASYNC);
              }
              waitTimeInSec--;
diff --cc plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
index 817b263e9b4,a359113ab08..8397334d0dd
--- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
@@@ -62,10 -62,11 +62,12 @@@ import org.apache.cloudstack.storage.to
  import org.apache.cloudstack.storage.volume.VolumeObject;
  import org.apache.commons.collections.CollectionUtils;
  import org.apache.commons.lang3.StringUtils;
 -import org.apache.log4j.Logger;
 +import org.apache.logging.log4j.Logger;
 +import org.apache.logging.log4j.LogManager;
  import com.cloud.agent.api.Answer;
+ import com.cloud.agent.api.GetVolumeStatAnswer;
+ import com.cloud.agent.api.GetVolumeStatCommand;
  import com.cloud.agent.api.storage.MigrateVolumeCommand;
  import com.cloud.agent.api.storage.ResizeVolumeCommand;
  import com.cloud.agent.api.to.DataObjectType;
@@@ -491,11 -492,11 +493,11 @@@ public class ScaleIOPrimaryDataStoreDri
      }
      public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId) {
 -        return createVolume(volumeInfo, storagePoolId, false);
 +        return createVolume(volumeInfo, storagePoolId, false, null);
      }
 -    public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId, boolean migrationInvolved) {
 +    public CreateObjectAnswer createVolume(VolumeInfo volumeInfo, long storagePoolId, boolean migrationInvolved, Long usageSize) {
 -        LOGGER.debug("Creating PowerFlex volume");
 +        logger.debug("Creating PowerFlex volume");
          StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
@@@ -858,8 -863,17 +864,17 @@@
          EndPoint ep = RemoteHostEndPoint.getHypervisorHostEndPoint(host);
          Answer answer = null;
 +        Long srcVolumeUsableSize = null;
          try {
 -            CreateObjectAnswer createAnswer = createVolume((VolumeInfo) destData, destStore.getId(), true);
 +            GetVolumeStatCommand statCmd = new GetVolumeStatCommand(srcVolumeInfo.getPath(), srcVolumeInfo.getStoragePoolType(), srcStore.getUuid());
 +            GetVolumeStatAnswer statAnswer = (GetVolumeStatAnswer) ep.sendMessage(statCmd);
 +            if (!statAnswer.getResult() ) {
 -                LOGGER.warn(String.format("Unable to get volume %s stats", srcVolumeInfo.getId()));
++                logger.warn(String.format("Unable to get volume %s stats", srcVolumeInfo.getId()));
 +            } else if (statAnswer.getVirtualSize() > 0) {
 +                srcVolumeUsableSize = statAnswer.getVirtualSize();
 +            }
 +
 +            CreateObjectAnswer createAnswer = createVolume((VolumeInfo) destData, destStore.getId(), true, srcVolumeUsableSize);
              destVolumePath = createAnswer.getData().getPath();
              destVolTO.setPath(destVolumePath);
diff --cc server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java
index 1872f85a26f,c9391a32167..7114bbbd90d
--- a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java
+++ b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java
@@@ -1780,6 -1677,17 +1780,17 @@@ public class VpcManagerImpl extends Man
          return result;
      }
 +
 +    @Override
 +    @ActionEvent(eventType = EventTypes.EVENT_VPC_CREATE, eventDescription = "creating vpc", async = true)
 +    public void startVpc(final CreateVPCCmd cmd) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
 +        if (!cmd.isStart()) {
 -            s_logger.debug("Not starting VPC as " + ApiConstants.START + "=false was passed to the API");
++            logger.debug("Not starting VPC as " + ApiConstants.START + "=false was passed to the API");
 +            return;
 +        }
 +        startVpc(cmd.getEntityId(), true);
 +    }
 +
      protected boolean startVpc(final Vpc vpc, final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException {
          // deploy provider
diff --cc server/src/main/java/com/cloud/storage/StorageManagerImpl.java
index 9a6f8563223,adece636501..f3f0c5dc7e4
--- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java
@@@ -2893,21 -2828,30 +2897,12 @@@ public class StorageManagerImpl extend
          List<Pair<Volume, Answer>> answers = new ArrayList<Pair<Volume, Answer>>();
          for (Pair<Volume, DiskProfile> volumeDiskProfilePair : volumes) {
 -            String storagePolicyId = null;
              Volume volume = volumeDiskProfilePair.first();
              DiskProfile diskProfile = volumeDiskProfilePair.second();
 -            if (volume.getVolumeType() == Type.ROOT) {
 -                Long vmId = volume.getInstanceId();
 -                if (vmId != null) {
 -                    VMInstanceVO vm = _vmInstanceDao.findByIdIncludingRemoved(vmId);
 -                    storagePolicyId = _serviceOfferingDetailsDao.getDetail(vm.getServiceOfferingId(), ApiConstants.STORAGE_POLICY);
 -                }
 -            } else {
 -                storagePolicyId = _diskOfferingDetailsDao.getDetail(diskProfile.getDiskOfferingId(), ApiConstants.STORAGE_POLICY);
 -            }
 +            String storagePolicyId = _diskOfferingDetailsDao.getDetail(diskProfile.getDiskOfferingId(), ApiConstants.STORAGE_POLICY);
 -            if (StringUtils.isNotEmpty(storagePolicyId)) {
 -                VsphereStoragePolicyVO storagePolicyVO = _vsphereStoragePolicyDao.findById(Long.parseLong(storagePolicyId));
 -                List<Long> hostIds = getUpHostsInPool(pool.getId());
 -                Collections.shuffle(hostIds);
 -
 -                if (hostIds == null || hostIds.isEmpty()) {
 -                    throw new StorageUnavailableException("Unable to send command to the pool " + pool.getName() + " due to there is no enabled hosts up in this cluster", pool.getId());
 -                }
 -                try {
 -                    StorageFilerTO storageFilerTO = new StorageFilerTO(pool);
 -                    CheckDataStoreStoragePolicyComplainceCommand cmd = new CheckDataStoreStoragePolicyComplainceCommand(storagePolicyVO.getPolicyId(), storageFilerTO);
 -                    long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostIds.get(0), cmd);
 -                    Answer answer = _agentMgr.send(targetHostId, cmd);
 -                    answers.add(new Pair<>(volume, answer));
 -                } catch (AgentUnavailableException e) {
 -                    s_logger.debug("Unable to send storage pool command to " + pool + " via " + hostIds.get(0), e);
 -                    throw new StorageUnavailableException("Unable to send command to the pool ", pool.getId());
 -                } catch (OperationTimedoutException e) {
 -                    s_logger.debug("Failed to process storage pool command to " + pool + " via " + hostIds.get(0), e);
 -                    throw new StorageUnavailableException("Failed to process storage command to the pool ", pool.getId());
 -                }
 +            Answer answer = getCheckDatastorePolicyComplianceAnswer(storagePolicyId, pool);
 +            if (answer != null) {
 +                answers.add(new Pair<>(volume, answer));
              }
          }
          // check cummilative result for all volumes
diff --cc ui/src/components/view/SearchView.vue
index 88a1d638f05,c284b8a635a..b81f1b3c800
--- a/ui/src/components/view/SearchView.vue
+++ b/ui/src/components/view/SearchView.vue
@@@ -286,7 -285,7 +286,7 @@@ export default
        }
        if (['zoneid', 'domainid', 'imagestoreid', 'storageid', 'state', 'account', 'hypervisor', 'level', 'clusterid', 'podid', 'groupid', 'entitytype', 'accounttype', 'systemvmtype', 'scope', 'provider',
 -        'type', 'scope', 'managementserverid'].includes(item)
 -        'type', 'serviceofferingid', 'diskofferingid'].includes(item)
++        'type', 'scope', 'managementserverid', 'serviceofferingid', 'diskofferingid'].includes(item)
        ) {
          type = 'list'
        } else if (item === 'tags') {
@@@ -405,7 -397,8 +405,9 @@@
        let podIndex = -1
        let clusterIndex = -1
        let groupIndex = -1
 +      let managementServerIdIndex = -1
 +      let serviceOfferingIndex = -1
 +      let diskOfferingIndex = -1
        if (arrayField.includes('type')) {
          if (this.$route.path === '/alert') {
@@@ -473,12 -466,18 +475,24 @@@
          promises.push(await this.fetchInstanceGroups(searchKeyword))
        }
 +      if (arrayField.includes('managementserverid')) {
 +        managementServerIdIndex = this.fields.findIndex(item => item.name === 'managementserverid')
 +        this.fields[managementServerIdIndex].loading = true
 +        promises.push(await this.fetchManagementServers(searchKeyword))
 +      }
 +
 +      if (arrayField.includes('serviceofferingid')) {
 +        serviceOfferingIndex = this.fields.findIndex(item => item.name === 'serviceofferingid')
 +        this.fields[serviceOfferingIndex].loading = true
 +        promises.push(await this.fetchServiceOfferings(searchKeyword))
 +      }
 +
 +      if (arrayField.includes('diskofferingid')) {
 +        diskOfferingIndex = this.fields.findIndex(item => item.name === 'diskofferingid')
 +        this.fields[diskOfferingIndex].loading = true
 +        promises.push(await this.fetchDiskOfferings(searchKeyword))
 +      }
 +
        Promise.all(promises).then(response => {
          if (typeIndex > -1) {
            const types = response.filter(item => item.type === 'type')
@@@ -540,12 -539,20 +554,27 @@@
              this.fields[groupIndex].opts = this.sortArray(groups[0].data)
            }
          }
 +
 +        if (managementServerIdIndex > -1) {
 +          const managementServers = response.filter(item => item.type === 'managementserverid')
 +          if (managementServers && managementServers.length > 0) {
 +            this.fields[managementServerIdIndex].opts = this.sortArray(managementServers[0].data)
 +          }
 +        }
++
 +        if (serviceOfferingIndex > -1) {
 +          const serviceOfferings = response.filter(item => item.type === 'serviceofferingid')
 +          if (serviceOfferings && serviceOfferings.length > 0) {
 +            this.fields[serviceOfferingIndex].opts = this.sortArray(serviceOfferings[0].data)
 +          }
 +        }
 +
 +        if (diskOfferingIndex > -1) {
 +          const diskOfferings = response.filter(item => item.type === 'diskofferingid')
 +          if (diskOfferings && diskOfferings.length > 0) {
 +            this.fields[diskOfferingIndex].opts = this.sortArray(diskOfferings[0].data)
 +          }
 +        }
        }).finally(() => {
          if (typeIndex > -1) {
            this.fields[typeIndex].loading = false
@@@ -571,9 -578,12 +600,15 @@@
          if (groupIndex > -1) {
            this.fields[groupIndex].loading = false
          }
 +        if (managementServerIdIndex > -1) {
 +          this.fields[managementServerIdIndex].loading = false
 +        }
 +        if (serviceOfferingIndex > -1) {
 +          this.fields[serviceOfferingIndex].loading = false
 +        }
 +        if (diskOfferingIndex > -1) {
 +          this.fields[diskOfferingIndex].loading = false
 +        }
          this.fillFormFieldValues()
        })
      },