Use QEMU's built-in 'blockdev-snapshot-internal-sync' commands instead of our own. The code is almost the same, but the upstream implementation uses QEMU's transactional system and performs a drain() on the block device first. This seems to help avoid some issues we run into with qcow2 files when creating snapshots.
Signed-off-by: Wolfgang Bumiller <w.bumil...@proxmox.com>
---
 PVE/QMPClient.pm  | 2 ++
 PVE/QemuServer.pm | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/PVE/QMPClient.pm b/PVE/QMPClient.pm
index 2277fef..6be4a41 100755
--- a/PVE/QMPClient.pm
+++ b/PVE/QMPClient.pm
@@ -126,6 +126,8 @@ sub cmd {
 	    $cmd->{execute} eq 'query-savevm' ||
 	    $cmd->{execute} eq 'delete-drive-snapshot' ||
 	    $cmd->{execute} eq 'guest-shutdown' ||
+	    $cmd->{execute} eq 'blockdev-snapshot-internal-sync' ||
+	    $cmd->{execute} eq 'blockdev-snapshot-delete-internal-sync' ||
 	    $cmd->{execute} eq 'snapshot-drive' ) {
 	    $timeout = 10*60; # 10 mins ?
 	} else {
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index b60be59..cf84255 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -4288,7 +4288,7 @@ sub qemu_volume_snapshot {
     my $running = check_running($vmid);

     if ($running && do_snapshots_with_qemu($storecfg, $volid)){
-	vm_mon_cmd($vmid, "snapshot-drive", device => $deviceid, name => $snap);
+	vm_mon_cmd($vmid, 'blockdev-snapshot-internal-sync', device => $deviceid, name => $snap);
     } else {
 	PVE::Storage::volume_snapshot($storecfg, $volid, $snap);
     }
@@ -4310,7 +4310,7 @@ sub qemu_volume_snapshot_delete {
     }

     if ($running && do_snapshots_with_qemu($storecfg, $volid)){
-	vm_mon_cmd($vmid, "delete-drive-snapshot", device => $deviceid, name => $snap);
+	vm_mon_cmd($vmid, 'blockdev-snapshot-delete-internal-sync', device => $deviceid, name => $snap);
     } else {
 	PVE::Storage::volume_snapshot_delete($storecfg, $volid, $snap, $running);
     }
--
2.11.0

_______________________________________________
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel