This is an automated email from the ASF dual-hosted git repository. sureshanaparti pushed a commit to branch 4.22 in repository https://gitbox.apache.org/repos/asf/cloudstack.git
commit 30d306622a90ac43f2a6c35ee999110ad1bc5194 Merge: ef60aa56015 6bed3d4e641 Author: Suresh Kumar Anaparti <[email protected]> AuthorDate: Fri Jan 30 21:15:21 2026 +0530 Merge branch '4.20' into 4.22 .../api/command/user/vm/DeployVMCmd.java | 2 +- .../api/command/user/vm/UpdateVMCmd.java | 5 +- .../cloudstack/storage/to/SnapshotObjectTO.java | 2 - .../storage/snapshot/SnapshotObject.java | 5 +- .../kvm/resource/LibvirtComputingResource.java | 15 ++-- .../kvm/storage/KVMStorageProcessor.java | 81 +++++++++++++++++----- .../main/java/com/cloud/api/ApiResponseHelper.java | 7 +- ui/src/components/view/SearchFilter.vue | 26 +++---- ui/src/views/AutogenView.vue | 66 ++++++++++-------- .../com/cloud/usage/parser/BackupUsageParser.java | 3 +- .../usage/parser/VMSnapshotOnPrimaryParser.java | 2 +- 11 files changed, 138 insertions(+), 76 deletions(-) diff --cc api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 393a2bb4727,f08f906997c..81ee00c98a2 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@@ -16,13 -16,27 +16,14 @@@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; +import java.util.Objects; +import java.util.stream.Stream; -import javax.annotation.Nonnull; - -import org.apache.cloudstack.acl.RoleType; -import org.apache.cloudstack.affinity.AffinityGroupResponse; ++import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.api.ACL; import org.apache.cloudstack.api.APICommand; -import org.apache.cloudstack.api.ApiArgValidator; -import org.apache.cloudstack.api.ApiCommandResourceType; import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy; import org.apache.cloudstack.api.ApiErrorCode; -import org.apache.cloudstack.api.BaseAsyncCreateCustomIdCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.ServerApiException; @@@ -36,15 -60,28 +37,14 @@@ import org.apache.cloudstack.context.Ca import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; -import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.network.Network; -import com.cloud.network.Network.IpAddresses; -import com.cloud.offering.DiskOffering; -import com.cloud.template.VirtualMachineTemplate; import com.cloud.uservm.UserVm; - import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.net.Dhcp; -import com.cloud.utils.net.NetUtils; -import com.cloud.utils.StringUtils; import com.cloud.vm.VirtualMachine; -import 
com.cloud.vm.VmDetailConstants; @APICommand(name = "deployVirtualMachine", description = "Creates and automatically starts an Instance based on a service offering, disk offering, and Template.", responseObject = UserVmResponse.class, responseView = ResponseView.Restricted, entityType = {VirtualMachine.class}, - requestHasSensitiveInfo = false, responseHasSensitiveInfo = true) -public class DeployVMCmd extends BaseAsyncCreateCustomIdCmd implements SecurityGroupAction, UserCmd { - - private static final String s_name = "deployvirtualmachineresponse"; + requestHasSensitiveInfo = false) +public class DeployVMCmd extends BaseDeployVMCmd { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --cc api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java index ddc2c06eb09,67a929bc6c2..f870a9bbba9 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/UpdateVMCmd.java @@@ -16,14 -16,17 +16,15 @@@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.uservm.UserVm; + import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; - -import org.apache.cloudstack.api.ApiArgValidator; -import org.apache.cloudstack.api.response.UserDataResponse; - +import com.cloud.utils.net.Dhcp; +import com.cloud.vm.VirtualMachine; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@@ -39,17 -41,15 +40,16 @@@ import org.apache.cloudstack.api.Server import org.apache.cloudstack.api.command.user.UserCmd; import org.apache.cloudstack.api.response.GuestOSResponse; import org.apache.cloudstack.api.response.SecurityGroupResponse; +import org.apache.cloudstack.api.response.UserDataResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.vm.lease.VMLeaseManager; +import org.apache.commons.lang3.EnumUtils; - import org.apache.commons.lang3.StringUtils; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.user.Account; -import com.cloud.uservm.UserVm; -import com.cloud.utils.net.Dhcp; -import com.cloud.vm.VirtualMachine; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; @APICommand(name = "updateVirtualMachine", description = "Updates properties of an Instance. The Instance has to be stopped and restarted for the " + "new properties to take effect. UpdateVirtualMachine does not first check whether the Instance is stopped. 
" + diff --cc core/src/main/java/org/apache/cloudstack/storage/to/SnapshotObjectTO.java index 0c3bb99e75c,9d64ab84121..7643f80bbaa --- a/core/src/main/java/org/apache/cloudstack/storage/to/SnapshotObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/SnapshotObjectTO.java @@@ -48,16 -44,9 +48,14 @@@ public class SnapshotObjectTO extends D private Long physicalSize = (long) 0; private long accountId; - public SnapshotObjectTO() { - } + @Override + public DataObjectType getObjectType() { + return DataObjectType.SNAPSHOT; + } + public SnapshotObjectTO(SnapshotInfo snapshot) { this.path = snapshot.getPath(); this.setId(snapshot.getId()); diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index ceb371f2cae,52740cead27..030d9747d6c --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@@ -44,17 -42,10 +44,20 @@@ import java.util.UUID import java.util.stream.Collectors; import javax.naming.ConfigurationException; - +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.transform.TransformerException; +import javax.xml.xpath.XPath; +import javax.xml.xpath.XPathConstants; +import javax.xml.xpath.XPathExpressionException; +import javax.xml.xpath.XPathFactory; + +import com.cloud.agent.api.Command; +import com.cloud.hypervisor.kvm.resource.LibvirtXMLParser; + import com.fasterxml.jackson.core.JsonProcessingException; + import com.fasterxml.jackson.databind.JsonNode; + import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer; import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.direct.download.DirectDownloadHelper; @@@ -364,7 -307,16 
+367,17 @@@ public class KVMStorageProcessor implem final TemplateObjectTO newTemplate = new TemplateObjectTO(); newTemplate.setPath(primaryVol.getName()); newTemplate.setSize(primaryVol.getSize()); + newTemplate.setFormat(getFormat(primaryPool.getType())); + + if (List.of( + StoragePoolType.RBD, + StoragePoolType.PowerFlex, + StoragePoolType.Linstor, + StoragePoolType.FiberChannel).contains(primaryPool.getType())) { + newTemplate.setFormat(ImageFormat.RAW); + } else { + newTemplate.setFormat(ImageFormat.QCOW2); + } data = newTemplate; } else if (destData.getObjectType() == DataObjectType.VOLUME) { final VolumeObjectTO volumeObjectTO = new VolumeObjectTO(); @@@ -1844,27 -1792,91 +1853,30 @@@ String diskPath = disk.getPath(); String snapshotPath = diskPath + File.separator + snapshotName; - Long snapshotSize = null; - if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && !primaryPool.isExternalSnapshot()) { - - validateAvailableSizeOnPoolToTakeVolumeSnapshot(primaryPool, disk); - - try { - snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName); - - String diskLabel = takeVolumeSnapshot(resource.getDisks(conn, vmName), snapshotName, diskPath, vm); - String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, disk, snapshotPath, volume, cmd.getWait()); - - mergeSnapshotIntoBaseFile(vm, diskLabel, diskPath, snapshotName, volume, conn); - - validateConvertResult(convertResult, snapshotPath); - } catch (LibvirtException e) { - if (!e.getMessage().contains(LIBVIRT_OPERATION_NOT_SUPPORTED_MESSAGE)) { - throw e; - } - - logger.info(String.format("It was not possible to take live disk snapshot for volume [%s], in VM [%s], due to [%s]. We will take full snapshot of the VM" - + " and extract the disk instead. 
Consider upgrading your QEMU binary.", volume, vmName, e.getMessage())); - - takeFullVmSnapshotForBinariesThatDoesNotSupportLiveDiskSnapshot(vm, snapshotName, vmName); - primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR); - extractDiskFromFullVmSnapshot(disk, volume, snapshotPath, snapshotName, vmName, vm); - } - - /* - * libvirt on RHEL6 doesn't handle resume event emitted from - * qemu - */ - vm = resource.getDomain(conn, vmName); - state = vm.getInfo().state; - if (state == DomainInfo.DomainState.VIR_DOMAIN_PAUSED) { - vm.resume(); + SnapshotObjectTO newSnapshot = new SnapshotObjectTO(); + if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state) && !primaryPool.isExternalSnapshot()) { + if (snapshotTO.isKvmIncrementalSnapshot()) { + newSnapshot = takeIncrementalVolumeSnapshotOfRunningVm(snapshotTO, primaryPool, secondaryPool, imageStoreTo != null ? imageStoreTo.getUrl() : null, snapshotName, volume, vm, conn, cmd.getWait()); + } else { + newSnapshot = takeFullVolumeSnapshotOfRunningVm(cmd, primaryPool, secondaryPool, disk, snapshotName, conn, vmName, diskPath, vm, volume, snapshotPath); } } else { - /** - * For RBD we can't use libvirt to do our snapshotting or any Bash scripts. - * libvirt also wants to store the memory contents of the Virtual Machine, - * but that's not possible with RBD since there is no way to store the memory - * contents in RBD. - * - * So we rely on the Java bindings for RBD to create our snapshot - * - * This snapshot might not be 100% consistent due to writes still being in the - * memory of the Virtual Machine, but if the VM runs a kernel which supports - * barriers properly (>2.6.32) this won't be any different then pulling the power - * cord out of a running machine. 
- */ if (primaryPool.getType() == StoragePoolType.RBD) { - takeRbdVolumeSnapshotOfStoppedVm(primaryPool, disk, snapshotName); - try { - Rados r = radosConnect(primaryPool); - - final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir()); - final Rbd rbd = new Rbd(io); - final RbdImage image = rbd.open(disk.getName()); - - logger.debug("Attempting to create RBD snapshot " + disk.getName() + "@" + snapshotName); - image.snapCreate(snapshotName); - - long rbdSnapshotSize = getRbdSnapshotSize(primaryPool.getSourceDir(), disk.getName(), snapshotName, primaryPool.getSourceHost(), primaryPool.getAuthUserName(), primaryPool.getAuthSecret()); - if (rbdSnapshotSize > 0) { - snapshotSize = rbdSnapshotSize; - } - - rbd.close(image); - r.ioCtxDestroy(io); - } catch (final Exception e) { - logger.error("A RBD snapshot operation on " + disk.getName() + " failed. The error was: " + e.getMessage()); ++ Long snapshotSize = takeRbdVolumeSnapshotOfStoppedVm(primaryPool, disk, snapshotName); + newSnapshot.setPath(snapshotPath); ++ if (snapshotSize != null) { ++ newSnapshot.setPhysicalSize(snapshotSize); + } } else if (primaryPool.getType() == StoragePoolType.CLVM) { - /* VM is not running, create a snapshot by ourself */ - final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, logger); - command.add(MANAGE_SNAPSTHOT_CREATE_OPTION, disk.getPath()); - command.add(NAME_OPTION, snapshotName); - final String result = command.execute(); - if (result != null) { - logger.debug("Failed to manage snapshot: " + result); - return new CreateObjectAnswer("Failed to manage snapshot: " + result); - } + CreateObjectAnswer result = takeClvmVolumeSnapshotOfStoppedVm(disk, snapshotName); + if (result != null) return result; + newSnapshot.setPath(snapshotPath); } else { - snapshotPath = getSnapshotPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName); - String convertResult = convertBaseFileToSnapshotFileInPrimaryStorageDir(primaryPool, disk, snapshotPath, volume, cmd.getWait()); - 
validateConvertResult(convertResult, snapshotPath); + if (snapshotTO.isKvmIncrementalSnapshot()) { + newSnapshot = takeIncrementalVolumeSnapshotOfStoppedVm(snapshotTO, primaryPool, secondaryPool, imageStoreTo != null ? imageStoreTo.getUrl() : null, snapshotName, volume, conn, cmd.getWait()); + } else { + newSnapshot = takeFullVolumeSnapshotOfStoppedVm(cmd, primaryPool, secondaryPool, snapshotName, disk, volume); + } } } @@@ -1882,504 -1896,31 +1894,537 @@@ } } + private SnapshotObjectTO createSnapshotToAndUpdatePathAndSize(String path, String fullPath) { + final File snapFile = new File(fullPath); + long size = 0; + + if (snapFile.exists()) { + size = snapFile.length(); + } + + SnapshotObjectTO snapshotObjectTo = new SnapshotObjectTO(); + + snapshotObjectTo.setPath(path); + snapshotObjectTo.setPhysicalSize(size); + + return snapshotObjectTo; + } + + private SnapshotObjectTO takeIncrementalVolumeSnapshotOfStoppedVm(SnapshotObjectTO snapshotObjectTO, KVMStoragePool primaryPool, KVMStoragePool secondaryPool, + String secondaryPoolUrl, String snapshotName, VolumeObjectTO volumeObjectTo, Connect conn, int wait) throws LibvirtException { + resource.validateLibvirtAndQemuVersionForIncrementalSnapshots(); + Domain vm = null; + logger.debug("Taking incremental volume snapshot of volume [{}]. 
Snapshot will be copied to [{}].", volumeObjectTo, + ObjectUtils.defaultIfNull(secondaryPool, primaryPool)); + try { + String vmName = String.format("DUMMY-VM-%s", snapshotName); + + String vmXml = getVmXml(primaryPool, volumeObjectTo, vmName); + + logger.debug("Creating dummy VM with volume [{}] to take an incremental snapshot of it.", volumeObjectTo); + resource.startVM(conn, vmName, vmXml, Domain.CreateFlags.PAUSED); + + vm = resource.getDomain(conn, vmName); + + resource.recreateCheckpointsOnVm(List.of(volumeObjectTo), vmName, conn); + + return takeIncrementalVolumeSnapshotOfRunningVm(snapshotObjectTO, primaryPool, secondaryPool, secondaryPoolUrl, snapshotName, volumeObjectTo, vm, conn, wait); + } catch (InternalErrorException | LibvirtException | CloudRuntimeException e) { + logger.error("Failed to take incremental volume snapshot of volume [{}] due to {}.", volumeObjectTo, e.getMessage(), e); + throw new CloudRuntimeException(e); + } finally { + if (vm != null) { + vm.destroy(); + } + } + } + + private String getVmXml(KVMStoragePool primaryPool, VolumeObjectTO volumeObjectTo, String vmName) { + String machine = resource.isGuestAarch64() ? LibvirtComputingResource.VIRT : LibvirtComputingResource.PC; + String cpuArch = resource.getGuestCpuArch() != null ? resource.getGuestCpuArch() : "x86_64"; + + return String.format(DUMMY_VM_XML, vmName, cpuArch, machine, resource.getHypervisorPath(), primaryPool.getLocalPathFor(volumeObjectTo.getPath())); + } + + private SnapshotObjectTO takeIncrementalVolumeSnapshotOfRunningVm(SnapshotObjectTO snapshotObjectTO, KVMStoragePool primaryPool, KVMStoragePool secondaryPool, + String secondaryPoolUrl, String snapshotName, VolumeObjectTO volumeObjectTo, Domain vm, Connect conn, int wait) { + logger.debug("Taking incremental volume snapshot of volume [{}] attached to running VM [{}]. 
Snapshot will be copied to [{}].", volumeObjectTo, volumeObjectTo.getVmName(), + ObjectUtils.defaultIfNull(secondaryPool, primaryPool)); + resource.validateLibvirtAndQemuVersionForIncrementalSnapshots(); + + Pair<String, String> fullSnapshotPathAndDirPath = getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(primaryPool, secondaryPool, snapshotName, volumeObjectTo, false); + + String diskLabel; + String vmName; + try { + List<DiskDef> disks = resource.getDisks(conn, vm.getName()); + diskLabel = getDiskLabelToSnapshot(disks, volumeObjectTo.getPath(), vm); + vmName = vm.getName(); + } catch (LibvirtException e) { + logger.error("Failed to get VM's disks or VM name due to: [{}].", e.getMessage(), e); + throw new CloudRuntimeException(e); + } + + String[] parents = snapshotObjectTO.getParents(); + String fullSnapshotPath = fullSnapshotPathAndDirPath.first(); + + String backupXml = generateBackupXml(volumeObjectTo, parents, diskLabel, fullSnapshotPath); + String checkpointXml = String.format(CHECKPOINT_XML, snapshotName, diskLabel); + + Path backupXmlPath = createFileAndWrite(backupXml, BACKUP_XML_TEMP_DIR, snapshotName); + Path checkpointXmlPath = createFileAndWrite(checkpointXml, CHECKPOINT_XML_TEMP_DIR, snapshotName); + + String backupCommand = String.format(BACKUP_BEGIN_COMMAND, vmName, backupXmlPath.toString(), checkpointXmlPath.toString()); + + createFolderOnCorrectStorage(primaryPool, secondaryPool, fullSnapshotPathAndDirPath); + + if (Script.runSimpleBashScript(backupCommand) == null) { + throw new CloudRuntimeException(String.format("Error backing up using backupXML [%s], checkpointXML [%s] for volume [%s].", backupXml, checkpointXml, + volumeObjectTo)); + } + + try { + waitForBackup(vmName); + } catch (CloudRuntimeException ex) { + cancelBackupJob(snapshotObjectTO); + throw ex; + } + + rebaseSnapshot(snapshotObjectTO, secondaryPool, secondaryPoolUrl, fullSnapshotPath, snapshotName, parents, wait); + + try { + 
Files.setPosixFilePermissions(Path.of(fullSnapshotPath), PosixFilePermissions.fromString("rw-r--r--")); + } catch (IOException ex) { + logger.warn("Failed to change permissions of snapshot [{}], snapshot download will not be possible.", snapshotName); + } + + String checkpointPath = dumpCheckpoint(primaryPool, secondaryPool, snapshotName, volumeObjectTo, vmName, parents); + + SnapshotObjectTO result = createSnapshotToAndUpdatePathAndSize(secondaryPool == null ? fullSnapshotPath : fullSnapshotPathAndDirPath.second() + File.separator + snapshotName, + fullSnapshotPath); + + result.setCheckpointPath(checkpointPath); + + return result; + } + + protected void createFolderOnCorrectStorage(KVMStoragePool primaryPool, KVMStoragePool secondaryPool, Pair<String, String> fullSnapshotPathAndDirPath) { + if (secondaryPool == null) { + primaryPool.createFolder(fullSnapshotPathAndDirPath.second()); + } else { + secondaryPool.createFolder(fullSnapshotPathAndDirPath.second()); + } + } + + protected String generateBackupXml(VolumeObjectTO volumeObjectTo, String[] parents, String diskLabel, String fullSnapshotPath) { + if (parents == null) { + logger.debug("Snapshot of volume [{}] does not have a parent, taking a full snapshot.", volumeObjectTo); + return String.format(BACKUP_XML, diskLabel, fullSnapshotPath); + } else { + logger.debug("Snapshot of volume [{}] has parents [{}], taking an incremental snapshot.", volumeObjectTo, Arrays.toString(parents)); + String parentCheckpointName = getParentCheckpointName(parents); + return String.format(INCREMENTAL_BACKUP_XML, parentCheckpointName, diskLabel, fullSnapshotPath); + } + } + + private void waitForBackup(String vmName) throws CloudRuntimeException { + int timeout = incrementalSnapshotTimeout; + logger.debug("Waiting for backup of VM [{}] to finish, timeout is [{}].", vmName, timeout); + + String result; + + while (timeout > 0) { + result = checkBackupJob(vmName); + + if (result.contains("Completed") && result.contains("Backup")) { + 
return; + } + + timeout -= 10000; + try { + Thread.sleep(10000); + } catch (InterruptedException e) { + throw new CloudRuntimeException(e); + } + } + + throw new CloudRuntimeException(String.format("Timeout while waiting for incremental snapshot for VM [%s] to finish.", vmName)); + } + + private void cancelBackupJob(SnapshotObjectTO snapshotObjectTO) { + Script.runSimpleBashScript(String.format(DOMJOBABORT_COMMAND, snapshotObjectTO.getVmName())); + + String result = checkBackupJob(snapshotObjectTO.getVmName()); + + if (result.contains("Backup") && result.contains("Cancelled")) { + logger.debug("Successfully canceled incremental snapshot job."); + } else { + logger.warn("Couldn't cancel the incremental snapshot job correctly. Job status is [{}].", result); + } + } + + private String checkBackupJob(String vmName) { + return Script.runSimpleBashScriptWithFullResult(String.format(DOMJOBINFO_COMPLETED_COMMAND, vmName), 10); + } + + protected void rebaseSnapshot(SnapshotObjectTO snapshotObjectTO, KVMStoragePool secondaryPool, String secondaryUrl, String snapshotPath, String snapshotName, String[] parents, int wait) { + if (parents == null) { + logger.debug("No need to rebase snapshot [{}], this snapshot has no parents, therefore it is the first on its backing chain.", snapshotName); + return; + } + String parentSnapshotPath; + + if (secondaryPool == null) { + parentSnapshotPath = parents[parents.length - 1]; + } else if (!secondaryUrl.equals(snapshotObjectTO.getParentStore().getUrl())) { + KVMStoragePool parentPool = storagePoolMgr.getStoragePoolByURI(snapshotObjectTO.getParentStore().getUrl()); + parentSnapshotPath = parentPool.getLocalPath() + File.separator + parents[parents.length - 1]; + storagePoolMgr.deleteStoragePool(parentPool.getType(), parentPool.getUuid()); + } else { + parentSnapshotPath = secondaryPool.getLocalPath() + File.separator + parents[parents.length - 1]; + } + + QemuImgFile snapshotFile = new QemuImgFile(snapshotPath); + QemuImgFile 
parentSnapshotFile = new QemuImgFile(parentSnapshotPath); + + logger.debug("Rebasing snapshot [{}] with parent [{}].", snapshotName, parentSnapshotPath); + + try { + QemuImg qemuImg = new QemuImg(wait); + qemuImg.rebase(snapshotFile, parentSnapshotFile, PhysicalDiskFormat.QCOW2.toString(), false); + } catch (LibvirtException | QemuImgException e) { + logger.error("Exception while rebasing incremental snapshot [{}] due to: [{}].", snapshotName, e.getMessage(), e); + throw new CloudRuntimeException(e); + } + } + + protected String getParentCheckpointName(String[] parents) { + String immediateParentPath = parents[parents.length - 1]; + return immediateParentPath.substring(immediateParentPath.lastIndexOf(File.separator) + 1); + } + + private Path createFileAndWrite(String content, String dir, String fileName) { + File dirFile = new File(dir); + if (!dirFile.exists()) { + dirFile.mkdirs(); + } + + Path filePath = Path.of(dirFile.getPath(), fileName); + try { + return Files.write(filePath, content.getBytes()); + } catch (IOException ex) { + String message = String.format("Error while writing file [%s].", filePath); + logger.error(message, ex); + throw new CloudRuntimeException(message, ex); + } + } + + private String dumpCheckpoint(KVMStoragePool primaryPool, KVMStoragePool secondaryPool, String snapshotName, VolumeObjectTO volumeObjectTo, String vmName, String[] snapshotParents) { + String result = Script.runSimpleBashScriptWithFullResult(String.format(CHECKPOINT_DUMP_XML_COMMAND, vmName, snapshotName), 10); + + String snapshotParent = null; + if (snapshotParents != null) { + String snapshotParentPath = snapshotParents[snapshotParents.length - 1]; + snapshotParent = snapshotParentPath.substring(snapshotParentPath.lastIndexOf(File.separator) + 1); + } + + return cleanupCheckpointXmlDumpCheckpointAndRedefine(result, primaryPool, secondaryPool, snapshotName, volumeObjectTo, snapshotParent, vmName); + } + + private String cleanupCheckpointXmlDumpCheckpointAndRedefine(String 
checkpointXml, KVMStoragePool primaryPool, KVMStoragePool secondaryPool, String snapshotName, VolumeObjectTO volumeObjectTo, String snapshotParent, String vmName) { + String updatedCheckpointXml; + try { + updatedCheckpointXml = updateCheckpointXml(checkpointXml, snapshotParent); + } catch (TransformerException | ParserConfigurationException | IOException | SAXException | + XPathExpressionException e) { + logger.error("Exception while parsing checkpoint XML [{}].", checkpointXml, e); + throw new CloudRuntimeException(e); + } + + Pair<String, String> checkpointFullPathAndDirPath = getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(primaryPool, secondaryPool, snapshotName, volumeObjectTo, true); + + String fullPath = checkpointFullPathAndDirPath.first(); + String dirPath = checkpointFullPathAndDirPath.second(); + + KVMStoragePool workPool = ObjectUtils.defaultIfNull(secondaryPool, primaryPool); + workPool.createFolder(dirPath); + + logger.debug("Saving checkpoint of volume [{}], attached to VM [{}], referring to snapshot [{}] to path [{}].", volumeObjectTo, vmName, snapshotName, fullPath); + createFileAndWrite(updatedCheckpointXml, workPool.getLocalPath() + File.separator + dirPath, snapshotName); + + logger.debug("Redefining checkpoint on VM [{}].", vmName); + Script.runSimpleBashScript(String.format(LibvirtComputingResource.CHECKPOINT_CREATE_COMMAND, vmName, fullPath)); + + return fullPath; + } + + /** + * Updates the checkpoint XML, setting the parent to {@code snapshotParent} and removing any disks that were not backed up. 
+ * @param checkpointXml checkpoint XML to be parsed + * @param snapshotParent snapshot parent + * */ + private String updateCheckpointXml(String checkpointXml, String snapshotParent) throws ParserConfigurationException, XPathExpressionException, IOException, SAXException, TransformerException { + logger.debug("Parsing checkpoint XML [{}].", checkpointXml); + + InputStream in = IOUtils.toInputStream(checkpointXml); + DocumentBuilderFactory docFactory = ParserUtils.getSaferDocumentBuilderFactory(); + DocumentBuilder docBuilder = docFactory.newDocumentBuilder(); + Document doc = docBuilder.parse(in); + XPath xPath = XPathFactory.newInstance().newXPath(); + + updateParent(snapshotParent, doc, xPath); + + removeUnnecessaryDisks(doc, xPath); + + String finalXml = LibvirtXMLParser.getXml(doc); + + logger.debug("Checkpoint XML after parsing is [{}].", finalXml); + + return finalXml; + } + + /** + * Removes all the disk definitions on the checkpoint XML from disks that were not affected. + * @param checkpointXml the checkpoint XML to be updated. + * */ + private void removeUnnecessaryDisks(Document checkpointXml, XPath xPath) throws XPathExpressionException { + Node disksNode = (Node) xPath.compile("/domaincheckpoint/disks").evaluate(checkpointXml, XPathConstants.NODE); + NodeList disksNodeChildren = disksNode.getChildNodes(); + for (int j = 0; j < disksNodeChildren.getLength(); j++) { + Node diskNode = disksNodeChildren.item(j); + if (diskNode == null) { + continue; + } + if ("disk".equals(diskNode.getNodeName()) && "no".equals(diskNode.getAttributes().getNamedItem("checkpoint").getNodeValue())) { + disksNode.removeChild(diskNode); + logger.trace("Removing node [{}].", diskNode); + } + } + } + + /** + * Updates the parent on the {@code checkpointXml} to {@code snapshotParent}. If {@code snapshotParent} is null, removes the parent. + * @param checkpointXml the checkpoint XML to be updated + * @param snapshotParent the snapshot parent. Inform null if no parent. 
+ * */ + private void updateParent(String snapshotParent, Document checkpointXml, XPath xPath) throws XPathExpressionException { + if (snapshotParent == null) { + Object parentNodeObject = xPath.compile("/domaincheckpoint/parent").evaluate(checkpointXml, XPathConstants.NODE); + if (parentNodeObject == null) { + return; + } + Node parentNode = (Node) parentNodeObject; + parentNode.getParentNode().removeChild(parentNode); + return; + } + + Node parentNameNode = (Node) xPath.compile("/domaincheckpoint/parent/name").evaluate(checkpointXml, XPathConstants.NODE); + parentNameNode.setTextContent(snapshotParent); + } + + /** + * If imageStore is not null, copy the snapshot directly to secondary storage, else, copy it to the primary storage. + * + * @return SnapshotObjectTO of the new snapshot. + * */ + private SnapshotObjectTO takeFullVolumeSnapshotOfRunningVm(CreateObjectCommand cmd, KVMStoragePool primaryPool, KVMStoragePool secondaryPool, KVMPhysicalDisk disk, String snapshotName, + Connect conn, String vmName, String diskPath, Domain vm, VolumeObjectTO volume, String snapshotPath) throws IOException, LibvirtException { + logger.debug("Taking full volume snapshot of volume [{}] attached to running VM [{}]. 
Snapshot will be copied to [{}].", volume, vmName, + ObjectUtils.defaultIfNull(secondaryPool, primaryPool)); + + validateAvailableSizeOnPoolToTakeVolumeSnapshot(primaryPool, disk); + String relativePath = null; + try { + String diskLabel = takeVolumeSnapshot(resource.getDisks(conn, vmName), snapshotName, diskPath, vm); + + Pair<String, String> fullSnapPathAndDirPath = getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(primaryPool, secondaryPool, snapshotName, volume, false); + + snapshotPath = fullSnapPathAndDirPath.first(); + String directoryPath = fullSnapPathAndDirPath.second(); + relativePath = directoryPath + File.separator + snapshotName; + + String convertResult = convertBaseFileToSnapshotFileInStorageDir(ObjectUtils.defaultIfNull(secondaryPool, primaryPool), disk, snapshotPath, directoryPath, volume, cmd.getWait()); + + resource.mergeSnapshotIntoBaseFile(vm, diskLabel, diskPath, null, true, snapshotName, volume, conn); + + validateConvertResult(convertResult, snapshotPath); + } catch (LibvirtException e) { + if (!e.getMessage().contains(LIBVIRT_OPERATION_NOT_SUPPORTED_MESSAGE)) { + throw e; + } + + logger.info("It was not possible to take live disk snapshot for volume [{}], in VM [{}], due to [{}]. We will take full snapshot of the VM" + + " and extract the disk instead. Consider upgrading your QEMU binary.", volume, vmName, e.getMessage()); + + takeFullVmSnapshotForBinariesThatDoesNotSupportLiveDiskSnapshot(vm, snapshotName, vmName); + primaryPool.createFolder(TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR); + extractDiskFromFullVmSnapshot(disk, volume, snapshotPath, snapshotName, vmName, vm); + } + + /* + * libvirt on RHEL6 doesn't handle resume event emitted from + * qemu + */ + vm = resource.getDomain(conn, vmName); + DomainInfo.DomainState state = vm.getInfo().state; + if (state == DomainInfo.DomainState.VIR_DOMAIN_PAUSED) { + vm.resume(); + } + + return createSnapshotToAndUpdatePathAndSize(secondaryPool == null ? 
snapshotPath : relativePath, snapshotPath); + } + + + private SnapshotObjectTO takeFullVolumeSnapshotOfStoppedVm(CreateObjectCommand cmd, KVMStoragePool primaryPool, KVMStoragePool secondaryPool, String snapshotName, KVMPhysicalDisk disk, VolumeObjectTO volume) throws IOException { + logger.debug("Taking full volume snapshot of volume [{}]. Snapshot will be copied to [{}].", volume, + ObjectUtils.defaultIfNull(secondaryPool, primaryPool)); + Pair<String, String> fullSnapPathAndDirPath = getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(primaryPool, secondaryPool, snapshotName, volume, false); + + String snapshotPath = fullSnapPathAndDirPath.first(); + String directoryPath = fullSnapPathAndDirPath.second(); + String relativePath = directoryPath + File.separator + snapshotName; + + String convertResult = convertBaseFileToSnapshotFileInStorageDir(ObjectUtils.defaultIfNull(secondaryPool, primaryPool), disk, snapshotPath, directoryPath, volume, cmd.getWait()); + + validateConvertResult(convertResult, snapshotPath); + + return createSnapshotToAndUpdatePathAndSize(secondaryPool == null ? snapshotPath : relativePath, snapshotPath); + } + + private CreateObjectAnswer takeClvmVolumeSnapshotOfStoppedVm(KVMPhysicalDisk disk, String snapshotName) { + /* VM is not running, create a snapshot by ourself */ + final Script command = new Script(_manageSnapshotPath, _cmdsTimeout, logger); + command.add(MANAGE_SNAPSTHOT_CREATE_OPTION, disk.getPath()); + command.add(NAME_OPTION, snapshotName); + final String result = command.execute(); + if (result != null) { + String message = String.format("Failed to manage snapshot [%s] due to: [%s].", snapshotName, result); + logger.debug(message); + return new CreateObjectAnswer(message); + } + return null; + } + + /** + * For RBD we can't use libvirt to do our snapshotting or any Bash scripts. 
+ * libvirt also wants to store the memory contents of the Virtual Machine, + * but that's not possible with RBD since there is no way to store the memory + * contents in RBD. + * <p> + * So we rely on the Java bindings for RBD to create our snapshot + * <p> + * This snapshot might not be 100% consistent due to writes still being in the + * memory of the Virtual Machine, but if the VM runs a kernel which supports + * barriers properly (>2.6.32) this won't be any different than pulling the power + * cord out of a running machine. + */ - private void takeRbdVolumeSnapshotOfStoppedVm(KVMStoragePool primaryPool, KVMPhysicalDisk disk, String snapshotName) { ++ private Long takeRbdVolumeSnapshotOfStoppedVm(KVMStoragePool primaryPool, KVMPhysicalDisk disk, String snapshotName) { ++ Long snapshotSize = null; + try { + Rados r = radosConnect(primaryPool); + + final IoCTX io = r.ioCtxCreate(primaryPool.getSourceDir()); + final Rbd rbd = new Rbd(io); + final RbdImage image = rbd.open(disk.getName()); + + logger.debug("Attempting to create RBD snapshot {}@{}", disk.getName(), snapshotName); + image.snapCreate(snapshotName); + ++ long rbdSnapshotSize = getRbdSnapshotSize(primaryPool.getSourceDir(), disk.getName(), snapshotName, primaryPool.getSourceHost(), primaryPool.getAuthUserName(), primaryPool.getAuthSecret()); ++ if (rbdSnapshotSize > 0) { ++ snapshotSize = rbdSnapshotSize; ++ } ++ + rbd.close(image); + r.ioCtxDestroy(io); + } catch (final Exception e) { + logger.error("A RBD snapshot operation on [{}] failed. 
The error was: {}", disk.getName(), e.getMessage(), e); + } ++ return snapshotSize; ++ } ++ + private long getRbdSnapshotSize(String poolPath, String diskName, String snapshotName, String rbdMonitor, String authUser, String authSecret) { + logger.debug("Get RBD snapshot size for {}/{}@{}", poolPath, diskName, snapshotName); + //cmd: rbd du <pool>/<disk-name>@<snapshot-name> --format json --mon-host <monitor-host> --id <user> --key <key> 2>/dev/null + String snapshotDetailsInJson = Script.runSimpleBashScript(String.format("rbd du %s/%s@%s --format json --mon-host %s --id %s --key %s 2>/dev/null", poolPath, diskName, snapshotName, rbdMonitor, authUser, authSecret)); + if (StringUtils.isNotBlank(snapshotDetailsInJson)) { + ObjectMapper mapper = new ObjectMapper(); + try { + JsonNode root = mapper.readTree(snapshotDetailsInJson); + for (JsonNode image : root.path("images")) { + if (snapshotName.equals(image.path("snapshot").asText())) { + long usedSizeInBytes = image.path("used_size").asLong(); + logger.debug("RBD snapshot {}/{}@{} used size in bytes: {}", poolPath, diskName, snapshotName, usedSizeInBytes); + return usedSizeInBytes; + } + } + } catch (JsonProcessingException e) { + logger.error("Unable to get the RBD snapshot size, RBD snapshot cmd output: {}", snapshotDetailsInJson, e); + } + } else { + logger.warn("Failed to get RBD snapshot size for {}/{}@{} - no output for RBD snapshot cmd", poolPath, diskName, snapshotName); + } + + return 0; } + /** + * Retrieves the disk label to take snapshot; + * @param disks List of VM's disks; + * @param diskPath Path of the disk to take snapshot; + * @param vm VM in which disks are; + * @return the label to take snapshot. If the disk path is not found in VM's XML, it will throw a CloudRuntimeException. 
+ * @throws org.libvirt.LibvirtException if the disk is not found + */ + protected String getDiskLabelToSnapshot(List<DiskDef> disks, String diskPath, Domain vm) throws LibvirtException { + logger.debug("Searching disk label of disk with path [{}] on VM [{}].", diskPath, vm.getName()); + for (DiskDef disk : disks) { + String diskDefPath = disk.getDiskPath(); + + if (StringUtils.isEmpty(diskDefPath)) { + continue; + } + + if (!diskDefPath.contains(diskPath)) { + continue; + } + logger.debug("Found disk label [{}] for volume with path [{}] on VM [{}].", disk.getDiskLabel(), diskPath, vm.getName()); + + return disk.getDiskLabel(); + } + + throw new CloudRuntimeException(String.format("VM [%s] has no disk with path [%s]. VM's XML [%s].", vm.getName(), diskPath, vm.getXMLDesc(0))); + } + + /** + * Gets the fully qualified path of the snapshot or checkpoint and the directory path. If a secondary pool is informed, the path will be on the secondary pool, + * otherwise, the path will be on the primary pool. + * @param primaryPool Primary pool definition, the path returned will be here if no secondary pool is informed; + * @param secondaryPool Secondary pool definition. If informed, the primary pool will be ignored and the path returned will be on the secondary pool; + * @param snapshotName Name of the snapshot; + * @param volume Volume that is being snapshot; + * @param checkpoint Whether to return a path for a snapshot or a snapshot's checkpoint; + * @return Fully qualified path and the directory path of the snapshot/checkpoint. + * */ + private Pair<String, String> getFullSnapshotOrCheckpointPathAndDirPathOnCorrectStorage(KVMStoragePool primaryPool, KVMStoragePool secondaryPool, String snapshotName, + VolumeObjectTO volume, boolean checkpoint) { + String fullSnapshotPath; + String dirPath; + + if (secondaryPool == null) { + fullSnapshotPath = getSnapshotOrCheckpointPathInPrimaryStorage(primaryPool.getLocalPath(), snapshotName, checkpoint); + dirPath = checkpoint ? 
TemplateConstants.DEFAULT_CHECKPOINT_ROOT_DIR : TemplateConstants.DEFAULT_SNAPSHOT_ROOT_DIR; + } else { + Pair<String, String> fullPathAndDirectoryPath = getSnapshotOrCheckpointPathAndDirectoryPathInSecondaryStorage(secondaryPool.getLocalPath(), snapshotName, + volume.getAccountId(), volume.getVolumeId(), checkpoint); + + fullSnapshotPath = fullPathAndDirectoryPath.first(); + dirPath = fullPathAndDirectoryPath.second(); + } + return new Pair<>(fullSnapshotPath, dirPath); + } + protected void deleteFullVmSnapshotAfterConvertingItToExternalDiskSnapshot(Domain vm, String snapshotName, VolumeObjectTO volume, String vmName) throws LibvirtException { logger.debug(String.format("Deleting full Instance Snapshot [%s] of Instance [%s] as we already converted it to an external disk Snapshot of the volume [%s].", snapshotName, vmName, volume)); diff --cc ui/src/components/view/SearchFilter.vue index ed950c094a3,34ca438b5c5..1b38ae6820d --- a/ui/src/components/view/SearchFilter.vue +++ b/ui/src/components/view/SearchFilter.vue @@@ -56,7 -56,7 +56,7 @@@ <script> --import { api } from '@/api/index' ++import { getAPI } from '@/api' export default { name: 'SearchFilter', @@@ -296,7 -298,7 +298,7 @@@ }, getHypervisor (value) { return new Promise((resolve) => { -- api('listHypervisors').then(json => { ++ getAPI('listHypervisors').then(json => { if (json?.listhypervisorsresponse?.hypervisor) { for (const key in json.listhypervisorsresponse.hypervisor) { const hypervisor = json.listhypervisorsresponse.hypervisor[key] @@@ -316,7 -318,7 +318,7 @@@ if (!this.$isValidUuid(id)) { return resolve('') } -- api(apiName, { listAll: true, id: id }).then(json => { ++ getAPI(apiName, { listAll: true, id: id }).then(json => { const items = json && json[responseKey1] && json[responseKey1][responseKey2] if (Array.isArray(items) && items.length > 0 && items[0] && items[0][field] !== undefined) { resolve(items[0][field]) @@@ -337,7 -339,7 +339,7 @@@ }, getAlertType (type) { return new Promise((resolve) 
=> { -- api('listAlertTypes').then(json => { ++ getAPI('listAlertTypes').then(json => { const alertTypes = {} for (const key in json.listalerttypesresponse.alerttype) { const alerttype = json.listalerttypesresponse.alerttype[key] @@@ -351,7 -353,7 +353,7 @@@ }, getAffinityGroupType (type) { return new Promise((resolve) => { -- api('listAffinityGroupTypes').then(json => { ++ getAPI('listAffinityGroupTypes').then(json => { const alertTypes = {} for (const key in json.listaffinitygrouptypesresponse.affinityGroupType) { const affinityGroupType = json.listaffinitygrouptypesresponse.affinityGroupType[key] diff --cc ui/src/views/AutogenView.vue index 4b7b9857927,325df769dee..a5c0be3d488 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@@ -17,16 -17,13 +17,19 @@@ <template> <div> - <a-affix :offsetTop="this.$store.getters.maintenanceInitiated || this.$store.getters.shutdownTriggered ? 103 : 78"> + <a-affix + :key="'affix-' + showSearchFilters" - :offsetTop="this.$store.getters.shutdownTriggered ? 103 : 78" ++ :offsetTop="this.$store.getters.maintenanceInitiated || this.$store.getters.shutdownTriggered ? 103 : 78" + > - <a-card class="breadcrumb-card" style="z-index: 10"> + <a-card + class="breadcrumb-card" + style="z-index: 10" + > <a-row> - <a-col :span="device === 'mobile' ? 24 : 12" style="padding-left: 12px; margin-top: 10px"> + <a-col + :span="device === 'mobile' ? 
24 : 12" + style="padding-left: 12px; margin-top: 10px" + > <breadcrumb :resource="resource"> <template #end> <a-button diff --cc usage/src/main/java/com/cloud/usage/parser/BackupUsageParser.java index b639e92c71a,257bc468302..012172959fd --- a/usage/src/main/java/com/cloud/usage/parser/BackupUsageParser.java +++ b/usage/src/main/java/com/cloud/usage/parser/BackupUsageParser.java @@@ -75,9 -88,9 +74,9 @@@ public class BackupUsageParser extends final UsageVO usageRecord = new UsageVO(zoneId, account.getAccountId(), account.getDomainId(), description, usageDisplay + " Hrs", - UsageTypes.BACKUP, new Double(usage), vmId, null, offeringId, null, vmId, + UsageTypes.BACKUP, (double) usage, vmId, null, offeringId, null, vmId, usageBackup.getSize(), usageBackup.getProtectedSize(), startDate, endDate); - s_usageDao.persist(usageRecord); + usageDao.persist(usageRecord); } return true; diff --cc usage/src/main/java/com/cloud/usage/parser/VMSnapshotOnPrimaryParser.java index b3b9ef69e56,252f661569d..de86a23b8fc --- a/usage/src/main/java/com/cloud/usage/parser/VMSnapshotOnPrimaryParser.java +++ b/usage/src/main/java/com/cloud/usage/parser/VMSnapshotOnPrimaryParser.java @@@ -109,9 -125,9 +109,9 @@@ public class VMSnapshotOnPrimaryParser String usageDesc = "VMSnapshot Id: " + vmSnapshotId + " On Primary Usage: VM Id: " + vmId; usageDesc += " Size: " + toHumanReadableSize(virtualSize); - UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", usageType, new Double(usage), vmId, name, null, null, + UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", usageType, (double) usage, vmId, name, null, null, vmSnapshotId, physicalSize, virtualSize, startDate, endDate); - s_usageDao.persist(usageRecord); + usageDao.persist(usageRecord); } }
