If last_sync is 0, the VM config was stolen or the VM was migrated by
the HA manager after a node failure. In this case the replication
snapshot must not be deleted; it is needed to resume replication.
---
 PVE/Replication.pm | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
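
For illustration only (not part of the patch): a minimal, self-contained
sketch of the snapshot classification that prepare() ends up with after
this change. The prefix, last_sync value, and snapshot names are made-up
examples, not the real PVE::Replication data structures.

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Hypothetical values; the real prefix and last_sync come from the job.
    my $prefix    = '__replicate_job0_';  # example replication snapshot prefix
    my $last_sync = 0;                    # 0 => VM was stolen / recovered by HA

    for my $snap ('__replicate_job0_1498000000__', 'manual-backup') {
        if ($snap =~ m/^\Q$prefix\E/ && $last_sync != 0) {
            # normal case: stale replication snapshot, safe to delete
            print "delete stale replication snapshot '$snap'\n";
        } elsif ($snap =~ m/^\Q$prefix\E/) {
            # last_sync == 0: keep it, replication resumes from this snapshot
            print "keep replication snapshot '$snap' for resync\n";
        } else {
            # unrelated snapshot (manual backup etc.), never touched
            print "ignore unrelated snapshot '$snap'\n";
        }
    }
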
diff --git a/PVE/Replication.pm b/PVE/Replication.pm
index ce2109e..dac0994 100644
--- a/PVE/Replication.pm
+++ b/PVE/Replication.pm
@@ -134,10 +134,13 @@ sub prepare {
 	    if ((defined($snapname) && ($snap eq $snapname)) ||
 		(defined($parent_snapname) && ($snap eq $parent_snapname))) {
 		$last_snapshots->{$volid}->{$snap} = 1;
-	    } elsif ($snap =~ m/^\Q$prefix\E/) {
+	    } elsif ($snap =~ m/^\Q$prefix\E/ && $last_sync != 0) {
 		$logfunc->("delete stale replication snapshot '$snap' on $volid");
 		PVE::Storage::volume_snapshot_delete($storecfg, $volid, $snap);
 		$cleaned_replicated_volumes->{$volid} = 1;
+	    # last_sync == 0 with a remaining replication snapshot can only occur if the VM was stolen
+	    } elsif ($snap =~ m/^\Q$prefix\E/) {
+		$last_snapshots->{$volid}->{$snap} = 1;
 	    }
 	}
     }
@@ -207,11 +210,11 @@ sub replicate {
 	}

 	    my $ssh_info = PVE::Cluster::get_ssh_info($jobcfg->{target});
-	    remote_prepare_local_job($ssh_info, $jobid, $vmid, [], $store_list, 0, undef, 1, $logfunc);
+	    remote_prepare_local_job($ssh_info, $jobid, $vmid, [], $store_list, 1, undef, 1, $logfunc);

 	}
-	# remove all local replication snapshots (lastsync => 0)
-	prepare($storecfg, $sorted_volids, $jobid, 0, undef, $logfunc);
+	# remove all local replication snapshots (lastsync => 1)
+	prepare($storecfg, $sorted_volids, $jobid, 1, undef, $logfunc);

 	PVE::ReplicationConfig::delete_job($jobid); # update config
 	$logfunc->("job removed");
-- 
2.11.0