[pve-devel] applied: [PATCH xtermjs] check ticket via api instead of verify_vnc_ticket

2017-12-07 Thread Dominik Csapak
applied with the correct dependency (libwww-perl instead of liblwp-protocol-https-perl)


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH cluster] ensure problematic ha service is stopped during update

2017-12-07 Thread Wolfgang Bumiller
applied with cleanups

On Thu, Dec 07, 2017 at 03:10:02PM +0100, Thomas Lamprecht wrote:
> Add a postinst file which stops, if running, the HA services before it
> configures pve-cluster and starts them again, if enabled.
> Do this only if the version installed before the upgrade is <= 2.0-3.
> 
> dpkg-query reports both Version and Config-Version.
> 
> Version is at this point already the new, unpacked version, so we need
> to check both to catch all cases.
> 
> Signed-off-by: Thomas Lamprecht 
> ---
> 
>  debian/pve-cluster.postinst | 46 +
>  1 file changed, 46 insertions(+)
>  create mode 100644 debian/pve-cluster.postinst
> 
> diff --git a/debian/pve-cluster.postinst b/debian/pve-cluster.postinst
> new file mode 100644
> index 000..b894e87
> --- /dev/null
> +++ b/debian/pve-cluster.postinst
> @@ -0,0 +1,46 @@
> +#!/bin/bash
> +
> +# abort if any command returns an error value
> +set -e
> +
> +cp /var/lib/dpkg/status /var/lib/dpkg/status.bak

^ removed the above (old cruft); it is unnecessary and lintian complains about it

> +
> +# handle problem with ha-manager <= 2.0-3 which cannot handle a pmxcfs
> +# restart correctly
> +# TODO: remove in PVE 6.0
> +ha_version=$(dpkg-query --showformat='${Version}' --show pve-ha-manager)
> +hacfg_version=$(dpkg-query --showformat='${Config-Version}' --show pve-ha-manager)
> +
> +function handlehaservice {
> +  if test -z "$1"; then
> +# no old version, nothing to do
> +true
> +  else
> +if dpkg --compare-versions "$ha_version" '<=' '2.0-3' || dpkg --compare-versions "$hacfg_version" '<=' '2.0-3'; then
> +  if systemctl --quiet "$2" "pve-ha-crm.service"; then
> +systemctl "$3" "pve-ha-crm.service"
> +  fi
> +  if systemctl --quiet "$2" "pve-ha-lrm.service"; then
> +systemctl "$3" "pve-ha-lrm.service"
> +  fi
> +fi
> +  fi
> +}
> +
> +
> +case "$1" in
> +  configure)
> +handlehaservice "$2" 'is-active' 'stop'
> +;;
> +
> +esac
> +
> +#DEBHELPER#
> +
> +case "$1" in
> +  configure)
> +handlehaservice "$2" 'is-enabled' 'start'
> +;;
> +
> +esac
> +
> -- 
> 2.11.0



[pve-devel] [PATCH xtermjs] check ticket via api instead of verify_vnc_ticket

2017-12-07 Thread Dominik Csapak
since we do not want to depend on libpve-access-control,
we check the ticket via the API on http://localhost:85

this means we have to pass the path and permission via the command line
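For illustration, the check boils down to re-submitting the ticket as a password
against that endpoint; a minimal standalone sketch using plain LWP::UserAgent
(the sub name is made up, but the request matches the verify_ticket() added below):

use strict;
use warnings;
use LWP::UserAgent;

# re-submit the ticket as the password for access/ticket; a non-2xx
# response means the ticket is not valid for the given path/privs
sub check_ticket {
    my ($user, $ticket, $path, $perm) = @_;

    my $ua = LWP::UserAgent->new();
    my $res = $ua->post('http://localhost:85/api2/json/access/ticket', Content => {
        username => $user,
        password => $ticket,
        path => $path,
        privs => $perm,
    });

    die "authentication failed: " . $res->status_line . "\n" if !$res->is_success;
    return 1;
}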

Signed-off-by: Dominik Csapak 
---
 debian/control   |  2 +-
 src/PVE/CLI/termproxy.pm | 44 
 src/www/main.js  |  9 +
 3 files changed, 38 insertions(+), 17 deletions(-)

diff --git a/debian/control b/debian/control
index 79b3ec9..419f7e2 100644
--- a/debian/control
+++ b/debian/control
@@ -7,7 +7,7 @@ Standards-Version: 3.8.3
 
 Package: pve-xtermjs
 Architecture: any
-Depends: libpve-access-control (>= 5.0-7),
+Depends: liblwp-protocol-https-perl,
  libpve-common-perl (>= 5.0-23),
  ${misc:Depends}
 Description: HTML/JS Shell client
diff --git a/src/PVE/CLI/termproxy.pm b/src/PVE/CLI/termproxy.pm
index c45eb50..3932f55 100644
--- a/src/PVE/CLI/termproxy.pm
+++ b/src/PVE/CLI/termproxy.pm
@@ -6,21 +6,39 @@ use warnings;
 use PVE::RPCEnvironment;
 use PVE::CLIHandler;
 use PVE::JSONSchema qw(get_standard_option);
-use PVE::AccessControl;
 use PVE::PTY;
+use LWP::UserAgent;
 use IO::Select;
 use IO::Socket::IP;
 
 use base qw(PVE::CLIHandler);
 
 use constant MAX_QUEUE_LEN => 16*1024;
+use constant DEFAULT_PATH => '/';
+use constant DEFAULT_PERM => 'Sys.Console';
 
 sub setup_environment {
 PVE::RPCEnvironment->setup_default_cli_env();
 }
 
+sub verify_ticket {
+my ($ticket, $user, $path, $perm) = @_;
+
+my $ua = LWP::UserAgent->new();
+
+my $res = $ua->post ('http://localhost:85/api2/json/access/ticket', Content => {
+username => $user,
+password => $ticket,
+path => $path,
+privs => $perm, });
+
+if (!$res->is_success) {
+   die "Authentication failed: '$res->status_line'\n";
+}
+}
+
 sub listen_and_authenticate {
-my ($port, $timeout) = @_;
+my ($port, $timeout, $path, $perm) = @_;
 
 my $params = {
Listen => 1,
@@ -42,13 +60,11 @@ sub listen_and_authenticate {
 
 my $queue;
 my $n = sysread($client, $queue, 4096);
-if ($n && $queue =~ s/^([^:]+):([^:]+):(.+)\n//) {
+if ($n && $queue =~ s/^([^:]+):(.+)\n//) {
my $user = $1;
-   my $path = $2;
-   my $ticket = $3;
+   my $ticket = $2;
 
-   die "authentication failed\n"
-   if !PVE::AccessControl::verify_vnc_ticket($ticket, $user, $path);
+   verify_ticket($ticket, $user, $path, $perm);
 
die "aknowledge failed\n"
if !syswrite($client, "OK");
@@ -194,6 +210,16 @@ __PACKAGE__->register_method ({
type => 'integer',
description => "The port to listen on."
},
+   path => {
+   type => 'string',
+   description => "The Authentication path. (default: 
'".DEFAULT_PATH."')",
+   default => DEFAULT_PATH,
+   },
+   perm => {
+   type => 'string',
+   description => "The Authentication Permission. (default: 
'".DEFAULT_PERM."')",
+   default => DEFAULT_PERM,
+   },
'extra-args' => get_standard_option('extra-args'),
},
 },
@@ -208,7 +234,9 @@ __PACKAGE__->register_method ({
die "No command given\n";
}
 
-   my ($queue, $handle) = listen_and_authenticate($param->{port}, 10);
+   my $path = $param->{path} // DEFAULT_PATH;
+   my $perm = $param->{perm} // DEFAULT_PERM;
+   my ($queue, $handle) = listen_and_authenticate($param->{port}, 10, $path, $perm);
 
run_pty($cmd, $handle, $queue);
 
diff --git a/src/www/main.js b/src/www/main.js
index a489937..62ec1c1 100644
--- a/src/www/main.js
+++ b/src/www/main.js
@@ -13,7 +13,6 @@ var term,
 socketURL,
 socket,
 ticket,
-path,
 resize,
 ping,
 state = states.start;
@@ -89,18 +88,12 @@ function createTerminal() {
 switch (type) {
case 'kvm':
url += '/qemu/' + vmid;
-   path = '/vms/' + vmid;
break;
case 'lxc':
url += '/lxc/' + vmid;
-   path = '/vms/' + vmid;
-   break;
-   case 'shell': 
-   path = '/nodes/' + nodename;
break;
case 'upgrade':
params.upgrade = 1;
-   path = '/nodes/' + nodename;
break;
 }
 API2Request({
@@ -161,7 +154,7 @@ function runTerminal() {
}, 250);
 });
 
-socket.send(PVE.UserName + ':' + path + ':' + ticket + "\n");
+socket.send(PVE.UserName + ':' + ticket + "\n");
 
 setTimeout(function() {term.fit();}, 250);
 }
-- 
2.11.0




[pve-devel] PVE Replica Email notification on failure.

2017-12-07 Thread Wolfgang Link
Now you get an email if a replication job fails.
The mail is only sent the first time, when a job switches from the 'ok' state
to the 'error' state.
No further notification is sent when a job in the error state retries to sync.
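The rule is just a check on the job's fail count; a tiny standalone sketch
(should_notify() is a hypothetical helper, not part of the pvesr code):

use strict;
use warnings;

# notify only on the transition into the error state, i.e. when the
# fail count has just become 1; later failing retries stay silent
sub should_notify {
    my ($jobstate) = @_;
    return ($jobstate->{fail_count} // 0) == 1;
}

for my $fails (0, 1, 2, 3) {
    printf "fail_count=%d -> %s\n", $fails,
        should_notify({ fail_count => $fails }) ? 'send mail' : 'no mail';
}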

[PATCH manager V2]
Indentation cleanup.
Small cleanup as W. Bumiller suggested.

[PATCH manager V3]
Remove eval as D. Maurer suggested.

[PATCH manager V4]
Rebase to current code base.
Remove eval as W. Bumiller suggested.




[pve-devel] applied: [PATCH widget-toolkit 1/2] taskviewer: port over extraTitle config parameter

2017-12-07 Thread Dominik Csapak

applied both patches



[pve-devel] [PATCH V4 manager 1/2] Indentation cleanup.

2017-12-07 Thread Wolfgang Link
---
 PVE/API2/Replication.pm | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/PVE/API2/Replication.pm b/PVE/API2/Replication.pm
index f396615d..38449892 100644
--- a/PVE/API2/Replication.pm
+++ b/PVE/API2/Replication.pm
@@ -77,15 +77,15 @@ sub run_jobs {
 my $iteration = $now // time();
 
 my $code = sub {
-   my $start_time = $now // time();
+   my $start_time = $now // time();
 
-   PVE::ReplicationState::purge_old_states();
+   PVE::ReplicationState::purge_old_states();
 
-   while (my $jobcfg = PVE::ReplicationState::get_next_job($iteration, $start_time)) {
-   my $guest_class = $lookup_guest_class->($jobcfg->{vmtype});
-   PVE::Replication::run_replication($guest_class, $jobcfg, $iteration, $start_time, $logfunc, 1, $verbose);
-   $start_time = $now // time();
-   }
+   while (my $jobcfg = PVE::ReplicationState::get_next_job($iteration, $start_time)) {
+   my $guest_class = $lookup_guest_class->($jobcfg->{vmtype});
+   PVE::Replication::run_replication($guest_class, $jobcfg, $iteration, $start_time, $logfunc, 1, $verbose);
+   $start_time = $now // time();
+   }
 };
 
 my $res = PVE::Tools::lock_file($pvesr_lock_path, 60, $code);
-- 
2.11.0




[pve-devel] [PATCH V4 guest-common] Remove noerr from replication.

2017-12-07 Thread Wolfgang Link
We will handle these errors in the API and decide what to do.
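Roughly, the calling convention then looks like the following sketch
(run_replication_stub() is a stand-in, not the real PVE::Replication::run_replication
signature): the helper simply dies, and the caller wraps it in eval and decides
whether to log, mail or re-raise.

use strict;
use warnings;

sub run_replication_stub {
    my ($jobid) = @_;
    die "replication of job $jobid failed\n" if $jobid eq 'bad-job';
    return "job $jobid replicated";
}

for my $jobid ('100-0', 'bad-job') {
    my $res = eval { run_replication_stub($jobid) };
    if (my $err = $@) {
        warn "$jobid: got replication job error - $err";  # caller decides what to do
    } else {
        print "$res\n";
    }
}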
---
 PVE/Replication.pm | 95 +++---
 1 file changed, 41 insertions(+), 54 deletions(-)

diff --git a/PVE/Replication.pm b/PVE/Replication.pm
index c25ed44..9bc4e61 100644
--- a/PVE/Replication.pm
+++ b/PVE/Replication.pm
@@ -304,7 +304,7 @@ sub replicate {
 }
 
 my $run_replication_nolock = sub {
-my ($guest_class, $jobcfg, $iteration, $start_time, $logfunc, $noerr, $verbose) = @_;
+my ($guest_class, $jobcfg, $iteration, $start_time, $logfunc, $verbose) = @_;
 
 my $jobid = $jobcfg->{id};
 
@@ -313,79 +313,66 @@ my $run_replication_nolock = sub {
 # we normaly write errors into the state file,
 # but we also catch unexpected errors and log them to syslog
 # (for examply when there are problems writing the state file)
-eval {
-   my $state = PVE::ReplicationState::read_job_state($jobcfg);
 
-   PVE::ReplicationState::record_job_start($jobcfg, $state, $start_time, $iteration);
+my $state = PVE::ReplicationState::read_job_state($jobcfg);
+
+PVE::ReplicationState::record_job_start($jobcfg, $state, $start_time, $iteration);
 
-   my $t0 = [gettimeofday];
+my $t0 = [gettimeofday];
 
-   mkdir $PVE::ReplicationState::replicate_logdir;
-   my $logfile = PVE::ReplicationState::job_logfile_name($jobid);
-   open(my $logfd, '>', $logfile) ||
-   die "unable to open replication log '$logfile' - $!\n";
+mkdir $PVE::ReplicationState::replicate_logdir;
+my $logfile = PVE::ReplicationState::job_logfile_name($jobid);
+open(my $logfd, '>', $logfile) ||
+   die "unable to open replication log '$logfile' - $!\n";
 
-   my $logfunc_wrapper = sub {
-   my ($msg) = @_;
+my $logfunc_wrapper = sub {
+   my ($msg) = @_;
 
-   my $ctime = get_log_time();
-   print $logfd "$ctime $jobid: $msg\n";
-   if ($logfunc) {
-   if ($verbose) {
-   $logfunc->("$ctime $jobid: $msg");
-   } else {
-   $logfunc->($msg);
-   }
+   my $ctime = get_log_time();
+   print $logfd "$ctime $jobid: $msg\n";
+   if ($logfunc) {
+   if ($verbose) {
+   $logfunc->("$ctime $jobid: $msg");
+   } else {
+   $logfunc->($msg);
}
-   };
+   }
+};
 
-   $logfunc_wrapper->("start replication job");
+$logfunc_wrapper->("start replication job");
 
-   eval {
-   $volumes = replicate($guest_class, $jobcfg, $state, $start_time, $logfunc_wrapper);
-   };
-   my $err = $@;
+eval {
+   $volumes = replicate($guest_class, $jobcfg, $state, $start_time, $logfunc_wrapper);
+};
+my $err = $@;
 
-   if ($err) {
-   my $msg = "end replication job with error: $err";
-   chomp $msg;
-   $logfunc_wrapper->($msg);
-   } else {
-   $logfunc_wrapper->("end replication job");
-   }
+if ($err) {
+   my $msg = "end replication job with error: $err";
+   chomp $msg;
+   $logfunc_wrapper->($msg);
+} else {
+   $logfunc_wrapper->("end replication job");
+}
 
-   PVE::ReplicationState::record_job_end($jobcfg, $state, $start_time, tv_interval($t0), $err);
+PVE::ReplicationState::record_job_end($jobcfg, $state, $start_time, tv_interval($t0), $err);
 
-   close($logfd);
+close($logfd);
 
-   die $err if $err && !$noerr;
-};
-if (my $err = $@) {
-   if ($noerr) {
-   warn "$jobid: got unexpected replication job error - $err";
-   } else {
-   die $err;
-   }
-}
+die $err if $err;
 
 return $volumes;
 };
 
 sub run_replication {
-my ($guest_class, $jobcfg, $iteration, $start_time, $logfunc, $noerr, $verbose) = @_;
+my ($guest_class, $jobcfg, $iteration, $start_time, $logfunc, $verbose) = @_;
 
 my $volumes;
 
-eval {
-   my $timeout = 2; # do not wait too long - we repeat periodically anyways
-   $volumes = PVE::GuestHelpers::guest_migration_lock(
-   $jobcfg->{guest}, $timeout, $run_replication_nolock,
-   $guest_class, $jobcfg, $iteration, $start_time, $logfunc, $noerr, $verbose);
-};
-if (my $err = $@) {
-   return undef if $noerr;
-   die $err;
-}
+my $timeout = 2; # do not wait too long - we repeat periodically anyways
+$volumes = PVE::GuestHelpers::guest_migration_lock(
+   $jobcfg->{guest}, $timeout, $run_replication_nolock,
+   $guest_class, $jobcfg, $iteration, $start_time, $logfunc, $verbose);
+
 return $volumes;
 }
 
-- 
2.11.0




[pve-devel] [PATCH V4 manager 2/2] Send an email when a replication job fails.

2017-12-07 Thread Wolfgang Link
An email notification will be sent for each job when the job fails.
This message is only sent when an error occurs and the fail count is 1.
---
 PVE/API2/Replication.pm  | 18 --
 PVE/CLI/pvesr.pm | 11 ++-
 bin/init.d/pvesr.service |  2 +-
 3 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/PVE/API2/Replication.pm b/PVE/API2/Replication.pm
index 38449892..8c049363 100644
--- a/PVE/API2/Replication.pm
+++ b/PVE/API2/Replication.pm
@@ -72,7 +72,7 @@ sub run_single_job {
 
 # passing $now and $verbose is useful for regression testing
 sub run_jobs {
-my ($now, $logfunc, $verbose) = @_;
+my ($now, $logfunc, $verbose, $mail) = @_;
 
 my $iteration = $now // time();
 
@@ -83,7 +83,21 @@ sub run_jobs {
 
while (my $jobcfg = PVE::ReplicationState::get_next_job($iteration, $start_time)) {
my $guest_class = $lookup_guest_class->($jobcfg->{vmtype});
-   PVE::Replication::run_replication($guest_class, $jobcfg, $iteration, $start_time, $logfunc, 1, $verbose);
+
+   eval {
+   PVE::Replication::run_replication($guest_class, $jobcfg, $iteration, $start_time, $logfunc, $verbose);
+   };
+   if (my $err = $@) {
+   warn "$jobcfg->{id}: got unexpected replication job error - 
$err";
+   my $state = PVE::ReplicationState::read_state();
+   my $jobstate = PVE::ReplicationState::extract_job_state($state, $jobcfg);
+   eval {
+   PVE::Tools::sendmail('root', "Replication Job: $jobcfg->{id} failed", $err)
+   if $jobstate->{fail_count} == 1 && $mail;
+   };
+   warn ": $@" if $@;
+   };
+
$start_time = $now // time();
}
 };
diff --git a/PVE/CLI/pvesr.pm b/PVE/CLI/pvesr.pm
index 7da94404..cb79e2bf 100644
--- a/PVE/CLI/pvesr.pm
+++ b/PVE/CLI/pvesr.pm
@@ -221,12 +221,21 @@ __PACKAGE__->register_method ({
default => 0,
optional => 1,
},
+   mail => {
+   description => "Send an email notification in case of a 
failure.",
+   type => 'boolean',
+   default => 0,
+   optional => 1,
+   },
},
 },
 returns => { type => 'null' },
 code => sub {
my ($param) = @_;
 
+   die "Mail and id are mutually exclusive!\n"
+   if $param->{id} && $param->{mail};
+
my $logfunc;
 
if ($param->{verbose}) {
@@ -242,7 +251,7 @@ __PACKAGE__->register_method ({
 
} else {
 
-   PVE::API2::Replication::run_jobs(undef, $logfunc);
+   PVE::API2::Replication::run_jobs(undef, $logfunc, 0, $param->{mail});
}
 
return undef;
diff --git a/bin/init.d/pvesr.service b/bin/init.d/pvesr.service
index 5706d426..e0c082af 100644
--- a/bin/init.d/pvesr.service
+++ b/bin/init.d/pvesr.service
@@ -4,4 +4,4 @@ ConditionPathExists=/usr/bin/pvesr
 
 [Service]
 Type=oneshot
-ExecStart=/usr/bin/pvesr run
+ExecStart=/usr/bin/pvesr run --mail 1
-- 
2.11.0




[pve-devel] applied: [PATCH proxmox-widget-toolkit] ObjectGrid: add_text_row: allow setting vtype for editor

2017-12-07 Thread Dominik Csapak




[pve-devel] [PATCH manager] Fix #1589: auth_handler: raise exception on init_request error

2017-12-07 Thread Thomas Lamprecht
cfs_* methods can now die (rightfully so) when the IPCC endpoint is
not connected, or another grave IPCC error arises.

As we did not catch those problems in the RPCEnvironment
init_request method, which loads the user config, this got
propagated to AnyEvent's auth_handler call in its
unshift_read_header method, where all errors were then processed in
the same way => with an unauthorized response logging a logged-in
user out.

So catch this error and raise an internal server error exception
instead. AnyEvent needs some minor modification in a separate patch
to handle PVE::Exceptions correctly, so this is a partial fix for
bug #1589.
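The pattern itself is small; a plain-Perl sketch without any PVE modules
(the stub and its failure message are made up, and a plain hash stands in
for the PVE::Exception object):

use strict;
use warnings;

# stand-in for $rpcenv->init_request() failing while pmxcfs is unavailable
sub init_request_stub { die "user config unavailable\n" }

eval { init_request_stub() };
if (my $err = $@) {
    # the real code calls PVE::Exception::raise(); here a plain hash
    # models the exception object carrying an HTTP status code
    my $exc = { code => 500, msg => "RPCEnvironment init request failed: $err" };
    printf "%d %s", $exc->{code}, $exc->{msg};   # -> 500 RPCEnvironment init request failed: ...
}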

Signed-off-by: Thomas Lamprecht 
---
 PVE/HTTPServer.pm | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/PVE/HTTPServer.pm b/PVE/HTTPServer.pm
index bbea3198..9a02e799 100755
--- a/PVE/HTTPServer.pm
+++ b/PVE/HTTPServer.pm
@@ -7,7 +7,7 @@ use PVE::SafeSyslog;
 use PVE::INotify;
 use PVE::Tools;
 use PVE::APIServer::AnyEvent;
-use PVE::Exception qw(raise_param_exc);
+use PVE::Exception qw(raise_param_exc raise);
 
 use PVE::RPCEnvironment;
 use PVE::AccessControl;
@@ -61,7 +61,8 @@ sub auth_handler {
 $rpcenv->set_language('C');
 $rpcenv->set_client_ip($peer_host);
 
-$rpcenv->init_request();
+eval { $rpcenv->init_request() };
+raise("RPCEnvironment init request failed: $@\n") if $@;
 
 my $require_auth = 1;
 
-- 
2.11.0




[pve-devel] [PATCH] auth_handler: respond with passed error if we get a PVE::Exception

2017-12-07 Thread Thomas Lamprecht
Allows fixing a problem where a logged-in, connected client was logged
out because we could not verify it for this call, as the cluster
filesystem was unavailable.

If we get such an exception, then use it for responding.
This is safe, as no logged-out client can ever do anything where
login privileges are required, and a logged-in client cannot do
anything during the problematic period, but does not get logged out.
Partial fix for #1589.
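The new branch roughly behaves like this sketch (only HTTP::Response is used;
the exception is modeled as a plain hash and the 401 fallback is a
simplification of the login-formatter path):

use strict;
use warnings;
use HTTP::Response;

sub error_response {
    my ($err) = @_;
    # structured exception: answer with its own code and message
    if (ref($err) && $err->{msg}) {
        my $code = $err->{code} || 500;   # HTTP_INTERNAL_SERVER_ERROR
        return HTTP::Response->new($code, $err->{msg});
    }
    # everything else keeps behaving like a failed login
    return HTTP::Response->new(401, 'permission denied');
}

print error_response({ code => 500, msg => 'cfs unavailable' })->status_line, "\n";
print error_response('wrong ticket')->status_line, "\n";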

Signed-off-by: Thomas Lamprecht 
---
 PVE/APIServer/AnyEvent.pm | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/PVE/APIServer/AnyEvent.pm b/PVE/APIServer/AnyEvent.pm
index d7271a2..382eab4 100755
--- a/PVE/APIServer/AnyEvent.pm
+++ b/PVE/APIServer/AnyEvent.pm
@@ -1217,7 +1217,14 @@ sub unshift_read_header {
Net::SSLeay::ERR_clear_error();
# always delay unauthorized calls by 3 seconds
my $delay = 3;
-   if (my $formatter = PVE::APIServer::Formatter::get_login_formatter($format)) {
+
+   if (ref($err) eq "PVE::Exception") {
+
+   $err->{code} ||= HTTP_INTERNAL_SERVER_ERROR,
+   my $resp = HTTP::Response->new($err->{code}, $err->{msg});
+   $self->response($reqstate, $resp, undef, 0, $delay);
+
+   } elsif (my $formatter = PVE::APIServer::Formatter::get_login_formatter($format)) {
my ($raw, $ct, $nocomp) =
$formatter->($path, $auth, $self->{formatter_config});
my $resp;
-- 
2.11.0




[pve-devel] [PATCH cluster] ensure problematic ha service is stopped during update

2017-12-07 Thread Thomas Lamprecht
Add a postinst file which stops, if running, the HA services before it
configures pve-cluster and starts them again, if enabled.
Do this only if the version installed before the upgrade is <= 2.0-3.

dpkg-query reports both Version and Config-Version.

Version is at this point already the new, unpacked version, so we need
to check both to catch all cases.
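For illustration, the same check as a standalone Perl sketch (not the postinst
itself); dpkg --compare-versions exits 0 when the relation holds, and 'le'
behaves like the '<=' operator used in the script:

use strict;
use warnings;

sub deb_le {
    my ($ver, $ref) = @_;
    return 0 if !defined($ver) || $ver eq '';   # no old version, nothing to do
    return system('dpkg', '--compare-versions', $ver, 'le', $ref) == 0;
}

chomp(my $ver = `dpkg-query --showformat='\${Version}' --show pve-ha-manager 2>/dev/null`);
chomp(my $cfgver = `dpkg-query --showformat='\${Config-Version}' --show pve-ha-manager 2>/dev/null`);

print "ha-manager workaround needed\n" if deb_le($ver, '2.0-3') || deb_le($cfgver, '2.0-3');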

Signed-off-by: Thomas Lamprecht 
---

 debian/pve-cluster.postinst | 46 +
 1 file changed, 46 insertions(+)
 create mode 100644 debian/pve-cluster.postinst

diff --git a/debian/pve-cluster.postinst b/debian/pve-cluster.postinst
new file mode 100644
index 000..b894e87
--- /dev/null
+++ b/debian/pve-cluster.postinst
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# abort if any command returns an error value
+set -e
+
+cp /var/lib/dpkg/status /var/lib/dpkg/status.bak
+
+# handle problem with ha-manager <= 2.0-3 which cannot handle a pmxcfs
+# restart correctly
+# TODO: remove in PVE 6.0
+ha_version=$(dpkg-query --showformat='${Version}' --show pve-ha-manager)
+hacfg_version=$(dpkg-query --showformat='${Config-Version}' --show pve-ha-manager)
+
+function handlehaservice {
+  if test -z "$1"; then
+# no old version, nothing to do
+true
+  else
+if dpkg --compare-versions "$ha_version" '<=' '2.0-3' || dpkg --compare-versions "$hacfg_version" '<=' '2.0-3'; then
+  if systemctl --quiet "$2" "pve-ha-crm.service"; then
+systemctl "$3" "pve-ha-crm.service"
+  fi
+  if systemctl --quiet "$2" "pve-ha-lrm.service"; then
+systemctl "$3" "pve-ha-lrm.service"
+  fi
+fi
+  fi
+}
+
+
+case "$1" in
+  configure)
+handlehaservice "$2" 'is-active' 'stop'
+;;
+
+esac
+
+#DEBHELPER#
+
+case "$1" in
+  configure)
+handlehaservice "$2" 'is-enabled' 'start'
+;;
+
+esac
+
-- 
2.11.0

