When scanning all configured storages for disk images belonging to the VM, the migration could easily fail if a storage is enabled but not actually available. That storage might not even be used by the VM at all.
By not scanning all storages and only looking at the disk images referenced in the VM config, we can avoid unnecessary failures. Some information that used to be provided by the storage scanning needs to be fetched explicitly (size, format). Behaviorally the biggest change is that unreferenced disk images will not be migrated anymore. Only images in the config or in a snapshot will be migrated. The tests have been adapted accordingly. Signed-off-by: Aaron Lauterer <[email protected]> --- changes since v3: now it only removes the storage scanning PVE/QemuMigrate.pm | 49 ++++----------------------- test/MigrationTest/QemuMigrateMock.pm | 10 ++++++ test/run_qemu_migrate_tests.pl | 11 +++--- 3 files changed, 22 insertions(+), 48 deletions(-) diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm index 09cc1d8..5f4f402 100644 --- a/PVE/QemuMigrate.pm +++ b/PVE/QemuMigrate.pm @@ -312,49 +312,6 @@ sub scan_local_volumes { $abort = 1; }; - my @sids = PVE::Storage::storage_ids($storecfg); - foreach my $storeid (@sids) { - my $scfg = PVE::Storage::storage_config($storecfg, $storeid); - next if $scfg->{shared} && !$self->{opts}->{remote}; - next if !PVE::Storage::storage_check_enabled($storecfg, $storeid, undef, 1); - - # get list from PVE::Storage (for unused volumes) - my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, $vmid, undef, 'images'); - - next if @{$dl->{$storeid}} == 0; - - my $targetsid = PVE::JSONSchema::map_id($self->{opts}->{storagemap}, $storeid); - if (!$self->{opts}->{remote}) { - # check if storage is available on target node - my $target_scfg = PVE::Storage::storage_check_enabled( - $storecfg, - $targetsid, - $self->{node}, - ); - - die "content type 'images' is not available on storage '$targetsid'\n" - if !$target_scfg->{content}->{images}; - - } - - my $bwlimit = $self->get_bwlimit($storeid, $targetsid); - - PVE::Storage::foreach_volid($dl, sub { - my ($volid, $sid, $volinfo) = @_; - - $local_volumes->{$volid}->{ref} = 'storage'; - 
$local_volumes->{$volid}->{size} = $volinfo->{size}; - $local_volumes->{$volid}->{targetsid} = $targetsid; - $local_volumes->{$volid}->{bwlimit} = $bwlimit; - - # If with_snapshots is not set for storage migrate, it tries to use - # a raw+size stream, but on-the-fly conversion from qcow2 to raw+size - # back to qcow2 is currently not possible. - $local_volumes->{$volid}->{snapshots} = ($volinfo->{format} =~ /^(?:qcow2|vmdk)$/); - $local_volumes->{$volid}->{format} = $volinfo->{format}; - }); - } - my $replicatable_volumes = !$self->{replication_jobcfg} ? {} : PVE::QemuConfig->get_replicatable_volumes($storecfg, $vmid, $conf, 0, 1); foreach my $volid (keys %{$replicatable_volumes}) { @@ -407,6 +364,12 @@ sub scan_local_volumes { $local_volumes->{$volid}->{ref} = 'storage' if $attr->{is_unused}; $local_volumes->{$volid}->{ref} = 'generated' if $attr->{is_tpmstate}; + $local_volumes->{$volid}->{bwlimit} = $self->get_bwlimit($sid, $targetsid); + $local_volumes->{$volid}->{targetsid} = $targetsid; + + ($local_volumes->{$volid}->{size}, $local_volumes->{$volid}->{format}) + = PVE::Storage::volume_size_info($storecfg, $volid); + $local_volumes->{$volid}->{is_vmstate} = $attr->{is_vmstate} ? 1 : 0; $local_volumes->{$volid}->{drivename} = $attr->{drivename} diff --git a/test/MigrationTest/QemuMigrateMock.pm b/test/MigrationTest/QemuMigrateMock.pm index 94fe686..9034d10 100644 --- a/test/MigrationTest/QemuMigrateMock.pm +++ b/test/MigrationTest/QemuMigrateMock.pm @@ -240,6 +240,16 @@ $MigrationTest::Shared::storage_module->mock( delete $source_volids->{$volid}; }, + volume_size_info => sub { + my ($scfg, $volid) = @_; + my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid); + + for my $v ($source_vdisks->{$storeid}->@*) { + return wantarray ? 
($v->{size}, $v->{format}, $v->{used}, $v->{parent}) : $v + if $v->{volid} eq $volid; + } + die "could not find '$volid' in mock 'source_vdisks'\n"; + }, ); $MigrationTest::Shared::tools_module->mock( diff --git a/test/run_qemu_migrate_tests.pl b/test/run_qemu_migrate_tests.pl index 090449f..7a9d7ea 100755 --- a/test/run_qemu_migrate_tests.pl +++ b/test/run_qemu_migrate_tests.pl @@ -708,7 +708,6 @@ my $tests = [ }, }, { - # FIXME: Maybe add orphaned drives as unused? name => '149_running_orphaned_disk_targetstorage_zfs', target => 'pve1', vmid => 149, @@ -729,10 +728,11 @@ my $tests = [ }, expected_calls => $default_expected_calls_online, expected => { - source_volids => {}, + source_volids => { + 'local-dir:149/vm-149-disk-0.qcow2' => 1, + }, target_volids => { 'local-zfs:vm-149-disk-10' => 1, - 'local-zfs:vm-149-disk-0' => 1, }, vm_config => get_patched_config(149, { scsi0 => 'local-zfs:vm-149-disk-10,format=raw,size=4G', @@ -765,10 +765,11 @@ my $tests = [ }, expected_calls => $default_expected_calls_online, expected => { - source_volids => {}, + source_volids => { + 'local-dir:149/vm-149-disk-0.qcow2' => 1, + }, target_volids => { 'local-lvm:vm-149-disk-10' => 1, - 'local-dir:149/vm-149-disk-0.qcow2' => 1, }, vm_config => get_patched_config(149, { scsi0 => 'local-lvm:vm-149-disk-10,format=raw,size=4G', -- 2.39.2 _______________________________________________ pve-devel mailing list [email protected] https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
