[pve-devel] [PATCH 2/2] Fix Socket

2014-11-16 Thread Wolfgang Link

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/QemuServer.pm |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 9aea9ee..3dfe1b4 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -2589,7 +2589,7 @@ sub config_to_command {
 my $maxcpus = $conf->{maxcpus} if $conf->{maxcpus};
 
 my $total_cores = $sockets * $cores;
-my $allowed_cores = $cpuinfo->{cpus} * $cpuinfo->{sockets};
+my $allowed_cores = $cpuinfo->{cpus};
 
 die "MAX $allowed_cores Cores allowed per VM on this Node"
	if ($allowed_cores < $total_cores);
-- 
1.7.10.4

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 1/2] Add check when starting a VM that the host has enough cores for the VM.

2014-11-16 Thread Wolfgang Link
From: root r...@ella.proxmox.com


Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/QemuServer.pm |6 ++
 1 file changed, 6 insertions(+)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 02bf404..9aea9ee 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -2588,6 +2588,12 @@ sub config_to_command {
 my $cores = $conf->{cores} || 1;
 my $maxcpus = $conf->{maxcpus} if $conf->{maxcpus};
 
+my $total_cores = $sockets * $cores;
+my $allowed_cores = $cpuinfo->{cpus} * $cpuinfo->{sockets};
+
+die "MAX $allowed_cores Cores allowed per VM on this Node"
+	if ($allowed_cores < $total_cores);
+
 if ($maxcpus) {
push @$cmd, '-smp', cpus=$cores,maxcpus=$maxcpus;
 } else {
-- 
1.7.10.4

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Add check: if the host has enough real CPUs when starting a VM, to prevent Qemu CPU emulation!

2014-11-17 Thread Wolfgang Link
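
In plain terms, the intended check is roughly the following (a condensed sketch of the hunk below; it assumes $cpuinfo->{cpus} reports the node's number of logical CPUs):

    my $total_cores   = $sockets * $cores;    # cores requested by the VM config
    my $allowed_cores = $cpuinfo->{cpus};     # logical CPUs available on this node

    die "MAX $allowed_cores Cores allowed per VM on this Node"
        if ($allowed_cores < $total_cores);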

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/QemuServer.pm |7 +++
 1 file changed, 7 insertions(+)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 02bf404..26c6c76 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -2588,6 +2588,13 @@ sub config_to_command {
 my $cores = $conf->{cores} || 1;
 my $maxcpus = $conf->{maxcpus} if $conf->{maxcpus};
 
+my $total_cores = $sockets * $cores;
+my $allowed_cores = $cpuinfo->{cpus};
+
+die "MAX $allowed_cores Cores allowed per VM on this Node"
+	if ($allowed_cores < $total_cores);
+
+
 if ($maxcpus) {
push @$cmd, '-smp', cpus=$cores,maxcpus=$maxcpus;
 } else {
-- 
1.7.10.4

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Bug#579:

2014-11-17 Thread Wolfgang Link
Add a check whether the START parameter is set in the file /etc/default/pve-manager.
If START=no, no VM will be started when 'pve-manager start' is called.
If START is not 'no' or not present, VMs will use the boot_at_start flag as before.
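
A minimal example of the override file (illustrative content only):

    # /etc/default/pve-manager
    START=no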

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 bin/init.d/pve-manager |4 
 1 file changed, 4 insertions(+)

diff --git a/bin/init.d/pve-manager b/bin/init.d/pve-manager
index e635f03..441e9d8 100755
--- a/bin/init.d/pve-manager
+++ b/bin/init.d/pve-manager
@@ -20,6 +20,10 @@ test -f $PVESH || exit 0
 case $1 in
start)
echo Starting VMs and Containers
+	[ -r /etc/default/pve-manager ] && . /etc/default/pve-manager
+	if [ "$START" = "no" ]; then
+	    exit 0
+	fi
pvesh --nooutput create /nodes/localhost/startall 
;;
stop)
-- 
1.7.10.4

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 2/2] now if the QMP command starts with 'guest-', it will bind dynamically to the VMID.qga socket.

2014-11-25 Thread Wolfgang Link
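
In short, the client now picks the socket per command; a condensed sketch of the dispatch introduced below:

    # guest agent commands go to the VMID.qga socket, everything else to VMID.qmp
    my $qga = ($cmd->{execute} =~ /^guest-/) ? 1 : 0;
    my $sname = PVE::QemuServer::qmp_socket($vmid, $qga);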
Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/QMPClient.pm  |   54 +
 PVE/QemuServer.pm |   16 +---
 2 files changed, 39 insertions(+), 31 deletions(-)

diff --git a/PVE/QMPClient.pm b/PVE/QMPClient.pm
index 9674d00..b5684b6 100755
--- a/PVE/QMPClient.pm
+++ b/PVE/QMPClient.pm
@@ -20,7 +20,7 @@ use Data::Dumper;
 # Note: kvm can onyl handle 1 connection, so we close connections asap
 
 sub new {
-my ($class, $eventcb, $qga) = @_;
+my ($class, $eventcb) = @_;
 
 my $mux = new IO::Multiplex;
 
@@ -34,7 +34,6 @@ sub new {
 }, $class;
 
 $self->{eventcb} = $eventcb if $eventcb;
-$self->{qga} = $qga if $qga;
 
 $mux-set_callback_object($self);
 
@@ -106,9 +105,18 @@ sub cmd {
 };
 
 my $cmdid_seq = 0;
+my $cmdid_seq_qga = 0;
+
 my $next_cmdid = sub {
-$cmdid_seq++;
-return $$.0.$cmdid_seq;
+my ($qga) = @_;
+
+if($qga){
+   $cmdid_seq++;
+   return $$:$cmdid_seq;
+} else {
+   $cmdid_seq_qga++;
+   return $$.0.$cmdid_seq_qga;
+}
 };
 
 my $close_connection = sub {
@@ -124,9 +132,9 @@ my $close_connection = sub {
 };
 
 my $open_connection = sub {
-my ($self, $vmid, $timeout) = @_;
+my ($self, $vmid, $timeout, $qga) = @_;
 
-my $sname = PVE::QemuServer::qmp_socket($vmid, $self-{qga});
+my $sname = PVE::QemuServer::qmp_socket($vmid, $qga);
 
 $timeout = 1 if !$timeout;
 
@@ -181,7 +189,8 @@ my $check_queue = sub {
eval {
 
	my $cmd = $self->{current}->{$vmid} = shift @{$self->{queue}->{$vmid}};
-	$cmd->{id} = $next_cmdid();
+
+	$cmd->{id} = $next_cmdid($cmd->{qga});
 
my $fd = -1;
if ($cmd-{execute} eq 'add-fd' || $cmd-{execute} eq 'getfd') {
@@ -191,7 +200,7 @@ my $check_queue = sub {
 
my $qmpcmd = undef;
 
-	if($self->{qga}){
+	if($self->{current}->{$vmid}->{qga}){
 
	    my $qmpcmdid = to_json({
		execute => 'guest-sync',
@@ -242,11 +251,15 @@ sub queue_execute {
 # open all necessary connections
 foreach my $vmid (keys %{$self-{queue}}) {
next if !scalar(@{$self-{queue}-{$vmid}}); # no commands for the VM
+   
+	if ($self->{queue}->{$vmid}[0]->{execute} =~ /^guest\-+/){
+	    $self->{queue}->{$vmid}[0]->{qga} = 1;
+	}
 
	eval {
-	    my $fh = $open_connection($self, $vmid, $timeout);
+	    my $fh = $open_connection($self, $vmid, $timeout, $self->{queue}->{$vmid}[0]->{qga});
 
-	    if(!$self->{qga}){
+	    if(!$self->{queue}->{$vmid}[0]->{qga}){
		my $cmd = { execute => 'qmp_capabilities', arguments => {} };
		unshift @{$self->{queue}->{$vmid}}, $cmd;
 
@@ -290,16 +303,17 @@ sub mux_close {
 # the descriptors.
 sub mux_input {
 my ($self, $mux, $fh, $input) = @_;
-   
-    if($self->{qga}){
-	return if $$input !~ m/}\n(.+)}\n$/;
-    }else{
-	return if $$input !~ m/}\r\n$/;
-    }
 
-    my $raw = $$input;
+    my $vmid = $self->{fhs_lookup}->{$fh};
+    my $raw;
+    if ($self->{current}->{$vmid}->{qga}) {
+	return if $$input !~ s/^([^\n]+}\n[^\n]+})\n(.*)$/$2/so;
+	$raw = $1;
+    } else {
+	return if $$input !~ s/^([^\n]+})\r?\n(.*)$/$2/so;
+	$raw = $1;
+    }
 
-    my $vmid = $self->{fhs_lookup}->{$fh};
     if (!$vmid) {
	warn "internal error - unable to lookup vmid";
return;
@@ -308,7 +322,7 @@ sub mux_input {
 eval {
	my @jsons = split("\n", $raw);
 
-	if($self->{qga}){
+	if($self->{current}->{$vmid}->{qga}){
 
	    die "response is not complete" if @jsons != 2;
 
@@ -328,7 +342,7 @@ sub mux_input {
$obj = from_json($jsons[1]);
 
if (my $callback = $curcmd-{callback}) {
-   $callback($vmid, $obj);
+   $callback($vmid, $obj);   
}
 
return;
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index d740564..ab02b93 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -2881,13 +2881,6 @@ sub qga_socket {
 return ${var_run_tmpdir}/$vmid.qga;
 }
 
-sub vm_qga_cmd {
-my ($vmid, $execute, %params) = @_;
-
-my $cmd = { execute = $execute, arguments = \%params };
-vm_qmp_command($vmid, $cmd, undef, 1);
-}
-
 sub pidfile_name {
 my ($vmid) = @_;
 return ${var_run_tmpdir}/$vmid.pid;
@@ -3481,7 +3474,7 @@ sub vm_mon_cmd_nocheck {
 }
 
 sub vm_qmp_command {
-my ($vmid, $cmd, $nocheck, $qga) = @_;
+my ($vmid, $cmd, $nocheck) = @_;
 
 my $res;
 
@@ -3493,12 +3486,13 @@ sub vm_qmp_command {
 
 eval {
	die "VM $vmid not running\n" if !check_running($vmid, $nocheck);
-	my $sname = qmp_socket($vmid, $qga);
+	my $qga = ($cmd->{execute} =~ /^guest\-+/) ? 1 : 0;
+	my $sname = qmp_socket($vmid, $qga);
	if (-e $sname) {
-	    my $qmpclient = PVE::QMPClient->new(undef, $qga);
+	    my $qmpclient = PVE::QMPClient->new

[pve-devel] [PATCH 1/2] These are the condensed changes from http://pve.proxmox.com/pipermail/pve-devel/2013-March/006913.html, added to show the original work!

2014-11-25 Thread Wolfgang Link
From: Alexandre Derumier aderum...@odiso.com

This patch series adds code to send commands to the qemu guest agent.

The protocol is QMP, so I have reused as much as possible of the current QMP client.

The only big difference is that we can't pass an id with a request, so we must
send a guest-sync command with an id before the real command.

command

{ "execute": "guest-sync", "arguments": { "id": 123456 } }{"execute":"guest-ping"}

result

{ "return": 123456}\n{"return": {}}
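
A minimal sketch of how the client builds that combined payload (names follow the patch below; the exact quoting is illustrative):

    use JSON;

    # prepend a guest-sync request carrying the command id, then append the real command
    my $sync = to_json({ execute => 'guest-sync', arguments => { id => int($cmd->{id}) } });
    my $real = to_json({ execute => $cmd->{execute}, arguments => $cmd->{arguments} });
    my $qmpcmd = $sync . $real;   # written to the guest agent socket in one go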


Signed-off-by: Alexandre Derumier aderum...@odiso.com
---
 PVE/QMPClient.pm  |   77 -
 PVE/QemuServer.pm |   20 +-
 2 files changed, 78 insertions(+), 19 deletions(-)

diff --git a/PVE/QMPClient.pm b/PVE/QMPClient.pm
index 4e24419..9674d00 100755
--- a/PVE/QMPClient.pm
+++ b/PVE/QMPClient.pm
@@ -20,7 +20,7 @@ use Data::Dumper;
 # Note: kvm can onyl handle 1 connection, so we close connections asap
 
 sub new {
-my ($class, $eventcb) = @_;
+my ($class, $eventcb, $qga) = @_;
 
 my $mux = new IO::Multiplex;
 
@@ -34,6 +34,7 @@ sub new {
 }, $class;
 
 $self-{eventcb} = $eventcb if $eventcb;
+$self-{qga} = $qga if $qga;
 
 $mux-set_callback_object($self);
 
@@ -107,7 +108,7 @@ sub cmd {
 my $cmdid_seq = 0;
 my $next_cmdid = sub {
 $cmdid_seq++;
-return $$:$cmdid_seq;
+return $$.0.$cmdid_seq;
 };
 
 my $close_connection = sub {
@@ -125,7 +126,7 @@ my $close_connection = sub {
 my $open_connection = sub {
 my ($self, $vmid, $timeout) = @_;
 
-my $sname = PVE::QemuServer::qmp_socket($vmid);
+my $sname = PVE::QemuServer::qmp_socket($vmid, $self-{qga});
 
 $timeout = 1 if !$timeout;
 
@@ -188,10 +189,27 @@ my $check_queue = sub {
delete $cmd-{arguments}-{fd};
}
 
-	my $qmpcmd = to_json({
-	    execute => $cmd->{execute},
-	    arguments => $cmd->{arguments},
-	    id => $cmd->{id}});
+	my $qmpcmd = undef;
+
+	if($self->{qga}){
+
+	    my $qmpcmdid = to_json({
+		execute => 'guest-sync',
+		arguments => { id => int($cmd->{id})}});
+
+	    $qmpcmd = to_json({
+		execute => $cmd->{execute},
+		arguments => $cmd->{arguments}});
+
+	    $qmpcmd = $qmpcmdid.$qmpcmd;
+
+	}else{
+
+	    $qmpcmd = to_json({
+		execute => $cmd->{execute},
+		arguments => $cmd->{arguments},
+		id => $cmd->{id}});
+	}
 
	if ($fd >= 0) {
my $ret = PVE::IPCC::sendfd(fileno($fh), $fd, $qmpcmd);
@@ -227,16 +245,19 @@ sub queue_execute {
 
eval {
my $fh = $open_connection($self, $vmid, $timeout);
-	    my $cmd = { execute => 'qmp_capabilities', arguments => {} };
-	    unshift @{$self->{queue}->{$vmid}}, $cmd;
+
+        if(!$self->{qga}){
+        my $cmd = { execute => 'qmp_capabilities', arguments => {} };
+        unshift @{$self->{queue}->{$vmid}}, $cmd;
+
	    $self->{mux}->set_timeout($fh, $timeout);
};
if (my $err = $@) {
warn $err;
$self-{errors}-{$vmid} = $err;
}
+   }
 }
-
 my $running;
 
 for (;;) {
@@ -269,10 +290,14 @@ sub mux_close {
 # the descriptors.
 sub mux_input {
 my ($self, $mux, $fh, $input) = @_;
+   
+if($self-{qga}){
+   return if $$input !~ m/}\n(.+)}\n$/;
+}else{
+   return if $$input !~ m/}\r\n$/;
+}
 
-return if $$input !~ s/^(.*})\r\n(.*)$/$2/so;
-
-my $raw = $1;
+my $raw = $$input;
 
 my $vmid = $self-{fhs_lookup}-{$fh};
 if (!$vmid) {
@@ -283,6 +308,32 @@ sub mux_input {
 eval {
	my @jsons = split("\n", $raw);
 
+	if($self->{qga}){
+
+	    die "response is not complete" if @jsons != 2;
+
+	    my $obj = from_json($jsons[0]);
+	    my $cmdid = $obj->{return};
+	    die "received response without command id\n" if !$cmdid;
+
+	    my $curcmd = $self->{current}->{$vmid};
+	    die "unable to lookup current command for VM $vmid\n" if !$curcmd;
+
+	    delete $self->{current}->{$vmid};
+
+	    if ($curcmd->{id} ne $cmdid) {
+		die "got wrong command id '$cmdid' (expected $curcmd->{id})\n";
+	    }
+
+	    $obj = from_json($jsons[1]);
+
+	    if (my $callback = $curcmd->{callback}) {
+		$callback($vmid, $obj);
+	    }
+
+	    return;
+	}
+
foreach my $json (@jsons) {
my $obj = from_json($json);
next if defined($obj-{QMP}); # skip monitor greeting
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 2970598..d740564 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -2871,8 +2871,9 @@ sub spice_port {
 }
 
 sub qmp_socket {
-my ($vmid) = @_;
-return ${var_run_tmpdir}/$vmid.qmp;
+my ($vmid, $qga) = @_;
+my $sockettype = $qga ? 'qga' : 'qmp';
+  

[pve-devel] [PATCH] Fix in PVE::QemuServer::snapshot_create

2014-12-03 Thread Wolfgang Link
Remove the freezefs flag.
If the Qemu Guest Agent flag is set in the config, the VM filesystem will always be frozen during the snapshot.

Also remove the freezefs param in the PVE::API2 snapshot call,
because there is no use for it.

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/API2/Qemu.pm  |7 +--
 PVE/QemuServer.pm |8 
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 0787074..6cbfa7a 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -2853,11 +2853,6 @@ __PACKAGE__-register_method({
type = 'boolean',
description = Save the vmstate,
},
-   freezefs = {
-   optional = 1,
-   type = 'boolean',
-   description = Freeze the filesystem,
-   },
description = {
optional = 1,
type = 'string',
@@ -2888,7 +2883,7 @@ __PACKAGE__-register_method({
	my $realcmd = sub {
	    PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
	    PVE::QemuServer::snapshot_create($vmid, $snapname, $param->{vmstate},
-					     $param->{freezefs}, $param->{description});
+					     , $param->{description});
};
 
return $rpcenv-fork_worker('qmsnapshot', $vmid, $authuser, $realcmd);
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index eb15f9c..b6245c3 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -4876,11 +4876,11 @@ my $savevm_wait = sub {
 };
 
 sub snapshot_create {
-my ($vmid, $snapname, $save_vmstate, $freezefs, $comment) = @_;
+my ($vmid, $snapname, $save_vmstate, $comment) = @_;
 
 my $snap = $snapshot_prepare($vmid, $snapname, $save_vmstate, $comment);
 
-    $freezefs = $save_vmstate = 0 if !$snap->{vmstate}; # vm is not running
+    $save_vmstate = 0 if !$snap->{vmstate}; # vm is not running
 
 my $drivehash = {};
 
@@ -4888,7 +4888,7 @@ sub snapshot_create {
 
 my $config = load_config($vmid); 

-    if ($running && $freezefs && $config->{agent}) {
+    if ($running && $config->{agent}) {
	eval { vm_mon_cmd($vmid, "guest-fsfreeze-freeze"); };
	warn "guest-fsfreeze-freeze problems - $@" if $@;
 }
@@ -4926,7 +4926,7 @@ sub snapshot_create {
	eval { vm_mon_cmd($vmid, "savevm-end") };
	warn $@ if $@;
 
-	if ($freezefs && $config->{agent}) {
+	if ($config->{agent}) {
	    eval { vm_mon_cmd($vmid, "guest-fsfreeze-thaw"); };
	    warn "guest-fsfreeze-thaw problems - $@" if $@;
}
-- 
1.7.10.4

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Add qga freeze in vzdump in snapshot mode

2014-12-08 Thread Wolfgang Link
It will freeze the filesystem to provide consistency.

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/VZDump/QemuServer.pm |   19 +--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/PVE/VZDump/QemuServer.pm b/PVE/VZDump/QemuServer.pm
index fd36c69..2c62de3 100644
--- a/PVE/VZDump/QemuServer.pm
+++ b/PVE/VZDump/QemuServer.pm
@@ -286,8 +286,8 @@ sub archive {
 my $resume_on_backup;
 
 my $skiplock = 1;
-
-if (!PVE::QemuServer::check_running($vmid)) {
+my $vm_is_running = PVE::QemuServer::check_running($vmid);
+if (!$vm_is_running) {
eval {
$self-loginfo(starting kvm to execute backup task);
PVE::QemuServer::vm_start($self-{storecfg}, $vmid, undef, 
@@ -380,8 +380,23 @@ sub archive {
 
	$qmpclient->queue_cmd($vmid, $add_fd_cb, 'getfd',
			      fd => $outfileno, fdname => "backup");
+
+	my $freeze_fs = 0;
+
+	if ($self->{vmlist}->{$vmid}->{agent} == 1 && $vm_is_running){
+	    $freeze_fs = PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-freeze");
+	    $self->loginfo("Can't freeze fs!") if (defined($freeze_fs) && $freeze_fs == 0);
+	}
+
	$qmpclient->queue_execute();
 
+	my $thaw_fs = 0;
+	$thaw_fs = PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-thaw")
+	    if ($freeze_fs > 0);
+
+	$self->loginfo("Error by trying thaw fs, you must unlock manually")
+	    if ($freeze_fs > 0 && 0 != $freeze_fs - $thaw_fs);
+
die $qmpclient-{errors}-{$vmid} if $qmpclient-{errors}-{$vmid};
 
if ($cpid) {
-- 
1.7.10.4

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Add qga freeze in vzdump in snapshot mode

2014-12-09 Thread Wolfgang Link
It will freeze the filesystem to provide consistency.

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/VZDump/QemuServer.pm |   18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/PVE/VZDump/QemuServer.pm b/PVE/VZDump/QemuServer.pm
index fd36c69..b9edea1 100644
--- a/PVE/VZDump/QemuServer.pm
+++ b/PVE/VZDump/QemuServer.pm
@@ -286,8 +286,8 @@ sub archive {
 my $resume_on_backup;
 
 my $skiplock = 1;
-
-if (!PVE::QemuServer::check_running($vmid)) {
+my $vm_is_running = PVE::QemuServer::check_running($vmid);
+if (!$vm_is_running) {
eval {
$self-loginfo(starting kvm to execute backup task);
PVE::QemuServer::vm_start($self-{storecfg}, $vmid, undef, 
@@ -380,8 +380,22 @@ sub archive {
 
	$qmpclient->queue_cmd($vmid, $add_fd_cb, 'getfd',
			      fd => $outfileno, fdname => "backup");
+
+	if ($self->{vmlist}->{$vmid}->{agent} && $vm_is_running){
+	    eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-freeze"); };
+	    if (my $err = $@) {
+		$self->logerr($err);
+	    }
+	}
+
	$qmpclient->queue_execute();
 
+	if ($self->{vmlist}->{$vmid}->{agent} && $vm_is_running){
+	    eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fsfreeze-thaw"); };
+	    if (my $err = $@) {
+		$self->logerr($err);
+	    }
+	}
die $qmpclient-{errors}-{$vmid} if $qmpclient-{errors}-{$vmid};
 
if ($cpid) {
-- 
1.7.10.4

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] implement the command get_link_status in the qemu-kvm package, so you can call it via QMP. This function is not implemented in the monitor. This command returns the state of the given nic to che

2014-12-11 Thread Wolfgang Link

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 debian/patches/add-qmp-get-link-status.patch |   98 ++
 debian/patches/series|1 +
 2 files changed, 99 insertions(+)
 create mode 100644 debian/patches/add-qmp-get-link-status.patch

diff --git a/debian/patches/add-qmp-get-link-status.patch 
b/debian/patches/add-qmp-get-link-status.patch
new file mode 100644
index 000..105d415
--- /dev/null
+++ b/debian/patches/add-qmp-get-link-status.patch
@@ -0,0 +1,98 @@
+Index: new/qapi-schema.json
+===
+--- new.orig/qapi-schema.json  2014-12-10 09:15:50.890262765 +0100
 new/qapi-schema.json   2014-12-11 09:20:31.072561486 +0100
+@@ -1366,6 +1366,22 @@
+ ##
+ { 'command': 'set_link', 'data': {'name': 'str', 'up': 'bool'} }
+ 
++
++##
++# @get_link_status
++#
++# Get the current link state of the nics or nic.
++#
++# @name: name of the nic you get the state of
++#
++# Return: If link is up 1
++# If link is down 0
++# If an error occurs an empty string.
++#
++# Notes: this is a Proxmox VE extension and not an official part of Qemu.
++##
++{ 'command': 'get_link_status', 'data': {'name': 'str'}, 'returns': 'int'}
++
+ ##
+ # @balloon:
+ #
+Index: new/net/net.c
+===
+--- new.orig/net/net.c 2014-12-10 10:24:39.790496356 +0100
 new/net/net.c  2014-12-11 09:37:55.971321170 +0100
+@@ -1141,6 +1141,32 @@
+ }
+ }
+ 
++int64_t qmp_get_link_status(const char *name, Error **errp)
++{
++NetClientState *ncs[MAX_QUEUE_NUM];
++NetClientState *nc;
++int queues;
++bool ret;
++
++queues = qemu_find_net_clients_except(name, ncs,
++  NET_CLIENT_OPTIONS_KIND_MAX,
++  MAX_QUEUE_NUM);
++
++if (queues == 0) {
++error_set(errp, QERR_DEVICE_NOT_FOUND, name);
++  return (int64_t) -1;
++}
++
++nc = ncs[0];
++ret = ncs[0]->link_down;
++
++if (nc->peer->info->type == NET_CLIENT_OPTIONS_KIND_NIC) {
++	ret = ncs[0]->peer->link_down;
++}
++
++return (int64_t) ret ? 0 : 1;
++}
++
+ void qmp_set_link(const char *name, bool up, Error **errp)
+ {
+ NetClientState *ncs[MAX_QUEUE_NUM];
+Index: new/qmp-commands.hx
+===
+--- new.orig/qmp-commands.hx   2014-12-10 09:15:50.891262737 +0100
 new/qmp-commands.hx2014-12-11 08:36:26.583532314 +0100
+@@ -1473,6 +1473,29 @@
+ EQMP
+ 
+ {
++.name   = "get_link_status",
++.args_type  = "name:s",
++.mhandler.cmd_new = qmp_marshal_input_get_link_status,
++},
++
++SQMP
++get_link_status
++
++
++Get the link status of a network adapter.
++
++Arguments:
++
++- name: network device name (json-string)
++
++Example:
++
++-> { "execute": "get_link_status", "arguments": { "name": "e1000.0" } }
++<- { "return": 1 }
++
++EQMP
++
++{
+ .name   = getfd,
+ .args_type  = fdname:s,
+ .params = getfd name,
diff --git a/debian/patches/series b/debian/patches/series
index 56741e6..e5bc4f5 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -28,3 +28,4 @@ disable-efi-enable-pxe-roms.patch
 backup-vma-allow-empty-backups.patch
 glusterfs-daemonize.patch
 gluster-backupserver.patch
+add-qmp-get-link-status.patch
-- 
1.7.10.4

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Add the nic flag link_down=[0|1] to the config, and set the nic link state accordingly if the flag is set in the config. To verify the result we use the qemu extension get_link_status from our patches.

2014-12-22 Thread Wolfgang Link
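
For illustration, a guest config entry using the new flag could look like this (MAC address and bridge are placeholders):

    net0: virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0,link_down=1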

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/API2/Qemu.pm  |   57 +++--
 PVE/QemuServer.pm |   22 -
 2 files changed, 76 insertions(+), 3 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 066726d..f3fe3e6 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -849,9 +849,36 @@ my $vmconfig_update_net = sub {
 
	if(($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag}) || ($newnet->{firewall} ne $oldnet->{firewall})){
	    PVE::Network::tap_unplug($iface);
-	    PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag}, $newnet->{firewall});
+	    PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag});
	}
 
+	my $verify_link_status = sub {
+	    my ($expected, $vmid) = @_;
+	    my $nic_status;
+	    eval {
+		my %param = (name => $opt);
+		$nic_status = PVE::QemuServer::vm_mon_cmd($vmid, "get_link_status", %param);
+	    };
+	    die $@ if $@;
+	    die "changing nic status doesn't work!" if $expected != $nic_status;
+	};
+
+	if($newnet->{link_down}){
+	    eval {
+		my %param = (name => $opt, up => \0);
+		PVE::QemuServer::vm_mon_cmd($vmid, "set_link", %param);
+	    };
+	    die $@ if $@;
+	    $verify_link_status->(0, $vmid);
+	}
+	if($oldnet->{link_down} && !$newnet->{link_down}){
+	    eval {
+		my %param = (name => $opt, up => \1);
+		PVE::QemuServer::vm_mon_cmd($vmid, "set_link", %param);
+	    };
+	    die $@ if $@;
+	    $verify_link_status->(1, $vmid);
+	}
}else{
#if bridge/nat mode change, we try to hot-unplug
die error hot-unplug $opt for update if 
!PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
@@ -982,7 +1009,7 @@ my $update_vm_api  = sub {
my $running = PVE::QemuServer::check_running($vmid);
 
foreach my $opt (keys %$param) { # add/change
-
+   
$conf = PVE::QemuServer::load_config($vmid); # update/reload
 
	    next if $conf->{$opt} && ($param->{$opt} eq $conf->{$opt}); # skip if nothing changed
@@ -1646,11 +1673,37 @@ __PACKAGE__-register_method({
my $realcmd = sub {
my $upid = shift;
 
+   my $conf = PVE::QemuServer::load_config($vmid);
+
syslog('info', start VM $vmid: $upid\n);
 
PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, 
$skiplock, $migratedfrom, undef,
  $machine, $spice_ticket);
 
+	    foreach my $nic (keys %$conf){
+
+		if($nic =~ m/^net\d+$/){
+
+		    my $nicconf = PVE::QemuServer::parse_net($conf->{$nic});
+
+		    if($nicconf->{link_down}){
+
+			eval {
+			    my %param = (name => $nic, up => \0);
+			    PVE::QemuServer::vm_mon_cmd($vmid, "set_link", %param);
+			};
+			die $@ if $@;
+
+			my $nic_status;
+			eval {
+			    my %param = (name => $nic);
+			    $nic_status = PVE::QemuServer::vm_mon_cmd($vmid, "get_link_status", %param);
+			};
+			warn $@ if $@;
+			warn "Error Link from $nic is not Down" if $nic_status != 0;
+		    }
+		}
+	    }
return;
};
 
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4840c73..7d8d863 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -510,7 +510,7 @@ my $nic_model_list_txt = join(' ', sort @$nic_model_list);
 my $netdesc = {
 optional = 1,
 type = 'string', format = 'pve-qm-net',
-    typetext => "MODEL=XX:XX:XX:XX:XX:XX [,bridge=dev][,queues=nbqueues][,rate=mbps][,tag=vlanid][,firewall=0|1]",
+    typetext => "MODEL=XX:XX:XX:XX:XX:XX [,bridge=dev][,queues=nbqueues][,rate=mbps][,tag=vlanid][,firewall=0|1][,link_down=0|1]",
 description = EODESCR,
 Specify network devices.
 
@@ -1378,6 +1378,8 @@ sub parse_net {
	    $res->{tag} = $1;
	} elsif ($kvp =~ m/^firewall=(\d+)$/) {
	    $res->{firewall} = $1;
+	} elsif ($kvp =~ m/^link_down=(\d+)$/) {
+	    $res->{link_down} = $1;
} else {
return undef;
}
@@ -1398,6 +1400,7 @@ sub print_net {
 $res .= ,rate=$net-{rate} if $net-{rate};
 $res .= ,tag=$net-{tag} if $net

[pve-devel] [PATCH] implement the disconnect nic checkbox on the network edit panel.

2014-12-22 Thread Wolfgang Link

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 www/manager/Parser.js   |5 +
 www/manager/qemu/NetworkEdit.js |8 +++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/www/manager/Parser.js b/www/manager/Parser.js
index 11fbe49..3494637 100644
--- a/www/manager/Parser.js
+++ b/www/manager/Parser.js
@@ -33,6 +33,8 @@ Ext.define('PVE.Parser', { statics: {
 res.tag = match_res[1];
} else if ((match_res = p.match(/^firewall=(\d+)$/)) !== null) {
 res.firewall = match_res[1];
+   } else if ((match_res = p.match(/^link_down=(\d+)$/)) !== null) {
+res.disconnect = match_res[1];
} else {
errors = true;
return false; // break
@@ -64,6 +66,9 @@ Ext.define('PVE.Parser', { statics: {
if (net.rate) {
netstr += ,rate= + net.rate;
}
+   if (net.disconnect) {
+   netstr += ,link_down= + net.disconnect;
+   }
return netstr;
 },
 
diff --git a/www/manager/qemu/NetworkEdit.js b/www/manager/qemu/NetworkEdit.js
index 1e5b97b..ae3147e 100644
--- a/www/manager/qemu/NetworkEdit.js
+++ b/www/manager/qemu/NetworkEdit.js
@@ -18,6 +18,7 @@ Ext.define('PVE.qemu.NetworkInputPanel', {
me.network.bridge = undefined;
}
me.network.macaddr = values.macaddr;
+   me.network.disconnect = values.disconnect;
 
if (values.rate) {
me.network.rate = values.rate;
@@ -147,7 +148,12 @@ Ext.define('PVE.qemu.NetworkInputPanel', {
value: '',
emptyText: 'unlimited',
allowBlank: true
-   }
+   },
+   {
+   xtype: 'pvecheckbox',
+   fieldLabel: gettext('Disconnect'),
+   name: 'disconnect'
+   },
];
 
me.callParent();
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] backup lock

2014-12-22 Thread Wolfgang Link
Can you please post your version of PVE:

pveversion -v

because I can't reproduce it on my machine, neither in the GUI nor on the CLI.

How do you cancel the job manually?

Regards

Wolfgang

 On December 23, 2014 at 6:36 AM lyt_yudi lyt_y...@icloud.com wrote:
 
 
 
 On December 23, 2014, at 1:20 PM, lyt_yudi lyt_y...@icloud.com wrote:
  
  This indicates that something is wrong, maybe a crashed backup job?
 
 INFO: starting new backup job: vzdump 121 --remove 0 --mode snapshot
 --compress lzo --storage local --node t3
 INFO: Starting Backup of VM 121 (qemu)
 INFO: status = running
 INFO: update VM 121: -lock backup
 INFO: exclude disk 'virtio1' (backup=no)
 INFO: backup mode: snapshot
 INFO: ionice priority: 7
 INFO: snapshots found (not included into backup)
 INFO: creating archive
 '/var/lib/vz/dump/vzdump-qemu-121-2014_12_23-13_22_52.vma.lzo'
 INFO: started backup task 'd7330854-dbb6-4461-aba5-f3b604bcfa34'
 INFO: status: 0% (88211456/34359738368), sparse 0% (83898368), duration 3,
 29/1 MB/s
 ERROR: interrupted by signal
 INFO: aborting backup job
 
 this ERROR is caused by manual cancellation.
 
 maybe this process can be integrated to unlock operation.
 
 thanks.___
 pve-devel mailing list
 pve-devel@pve.proxmox.com
 http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Change check for better understanding!

2014-12-29 Thread Wolfgang Link

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/QemuServer.pm |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index cb84f42..9ef186e 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -3724,7 +3724,7 @@ sub vm_stop {
 
eval {
if ($shutdown) {
-	    if (!$nocheck && $conf->{agent}) {
+	    if (defined($conf) && $conf->{agent}) {
		vm_qmp_command($vmid, { execute => "guest-shutdown" }, $nocheck);
	    } else {
		vm_qmp_command($vmid, { execute => "system_powerdown" }, $nocheck);
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Fix backup failure at shutdown.

2014-12-30 Thread Wolfgang Link
This fix includes a new function for vzdump.
Now you can call 'vzdump -stop' and all running backups will be aborted.
Also, when the pve-manager init script stops, 'vzdump -stop' will be called.
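
Typical use, as wired up in the init script below (a sketch):

    # abort all currently running backup jobs on this node
    vzdump -stop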

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/API2/VZDump.pm |4 
 PVE/VZDump.pm  |   55 ++--
 bin/init.d/pve-manager |2 ++
 3 files changed, 59 insertions(+), 2 deletions(-)

diff --git a/PVE/API2/VZDump.pm b/PVE/API2/VZDump.pm
index 98b0b6d..3ae98de 100644
--- a/PVE/API2/VZDump.pm
+++ b/PVE/API2/VZDump.pm
@@ -63,6 +63,10 @@ __PACKAGE__-register_method ({
 
# silent exit if we run on wrong node
exit(0) if $param-{node}  $param-{node} ne $nodename;
+   
+   if($param-{stop}){
+   PVE::VZDump::stop_all_backups;
+   }
 
my $cmdline = PVE::VZDump::command_line($param);
 
diff --git a/PVE/VZDump.pm b/PVE/VZDump.pm
index 147326f..4144790 100644
--- a/PVE/VZDump.pm
+++ b/PVE/VZDump.pm
@@ -22,6 +22,8 @@ my @posix_filesystems = qw(ext3 ext4 nfs nfs4 reiserfs xfs);
 
 my $lockfile = '/var/run/vzdump.lock';
 
+my $pidfile = '/var/run/vzdump.pid';
+
 my $logdir = '/var/log/vzdump';
 
 my @plugins = qw (PVE::VZDump::OpenVZ);
@@ -210,6 +212,8 @@ sub read_vzdump_defaults {
$res-{lockwait} = int($1);
} elsif ($line =~ m/stopwait:\s*(\d+)\s*$/) {
$res-{stopwait} = int($1);
+   } elsif ($line =~ m/stop:\s*(\d+)\s*$/) {
+   $res-{stop} = int($1);
} elsif ($line =~ m/size:\s*(\d+)\s*$/) {
$res-{size} = int($1);
} elsif ($line =~ m/maxfiles:\s*(\d+)\s*$/) {
@@ -596,6 +600,15 @@ sub get_lvm_device {
 sub getlock {
 my ($self) = @_;
 
+my $fh;
+   
+    open($fh, "> $pidfile")
+	or die "cannot open $pidfile: $!";
+
+    print $fh $$, "\n", PVE::ProcFSTools::read_proc_starttime($$), "\n";
+
+    close $fh || warn "close $pidfile failed: $!";
+
 my $maxwait = $self-{opts}-{lockwait} || $self-{lockwait};
  
 if (!open (SERVER_FLCK, $lockfile)) {
@@ -1013,6 +1026,11 @@ sub exec_backup {
 
 my $opts = $self-{opts};
 
+if ($opts-{stop}) {
+   stop_all_backups($self);
+   return;
+}
+
 debugmsg ('info', starting new backup job: $self-{cmdline}, undef, 1);
 debugmsg ('info', skip external VMs:  . join(', ', @{$self-{skiplist}}))
if scalar(@{$self-{skiplist}});
@@ -1078,6 +1096,8 @@ sub exec_backup {
 die $err if $err;
 
 die job errors\n if $errcount; 
+
+unlink $pidfile;
 }
 
 my $confdesc = {
@@ -1163,6 +1183,12 @@ my $confdesc = {
description = Store resulting file to this storage.,
optional = 1,
 }),
+    stop => {
+	type => 'boolean',
+	description => "Stop all currently running backup jobs on this host.",
+	optional => 1,
+	default => 0,
+    },
 size = {
type = 'integer',
description = LVM snapshot size in MB.,
@@ -1228,7 +1254,7 @@ sub verify_vzdump_parameters {
 my ($param, $check_missing) = @_;
 
     raise_param_exc({ all => "option conflicts with option 'vmid'"})
-	if $param->{all} && $param->{vmid};
+	if ($param->{all} || $param->{stop}) && $param->{vmid};
 
     raise_param_exc({ exclude => "option conflicts with option 'vmid'"})
	if $param->{exclude} && $param->{vmid};
@@ -1238,8 +1264,33 @@ sub verify_vzdump_parameters {
 return if !$check_missing;
 
     raise_param_exc({ vmid => "property is missing"})
-	if !$param->{all} && !$param->{vmid};
+	if !($param->{all} || $param->{stop}) && !$param->{vmid};
+
+}
+
+sub stop_all_backups {
+my($self) = @_;
+
+return if ! -e $pidfile;
+
+my @param = split(/\n/,PVE::Tools::file_get_contents($pidfile));
+my $pid;
+my $stime;
+
+if ($param[0] =~ /^([-\@\w.]+)$/){
+   $pid = $1;
+}
+if ($param[1] =~/^([-\@\w.]+)$/){
+   $stime = $1;
+}
+
+    if(PVE::ProcFSTools::check_process_running($pid, $stime) &&
+	PVE::ProcFSTools::read_proc_starttime($pid) == $stime){
+	print "toll";
+   kill(15,$pid);
+}
 
+unlink $pidfile;
 }
 
 sub command_line {
diff --git a/bin/init.d/pve-manager b/bin/init.d/pve-manager
index b887ed3..6540425 100755
--- a/bin/init.d/pve-manager
+++ b/bin/init.d/pve-manager
@@ -31,6 +31,8 @@ case $1 in
pvesh --nooutput create /nodes/localhost/startall 
;;
stop)
+   echo Stopping all running Backups
+   vzdump -stop
echo Stopping VMs and Containers
pvesh --nooutput create /nodes/localhost/stopall 
;;
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] zfs: auto import after reboot

2015-02-03 Thread Wolfgang Link
this is necessary, because after a reboot all pools except rpool are gone
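
The activation path boils down to the two commands used in the patch (the plugin wraps them via zfs_request/run_command):

    zpool list -o name -H                  # check whether the configured pool is already known
    zpool import -d /dev/disk/by-id/ -a    # otherwise import all pools by their stable device ids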

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPlugin.pm |5 +
 PVE/Storage/ZFSPoolPlugin.pm |   10 +-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/PVE/Storage/ZFSPlugin.pm b/PVE/Storage/ZFSPlugin.pm
index 581ef6c..e885949 100644
--- a/PVE/Storage/ZFSPlugin.pm
+++ b/PVE/Storage/ZFSPlugin.pm
@@ -343,4 +343,9 @@ sub volume_has_feature {
 return undef;
 }
 
+sub activate_storage {
+my ($class, $storeid, $scfg, $cache) = @_;
+return 1;
+}
+
 1;
diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 10da7f7..553a92b 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -154,7 +154,7 @@ sub zfs_request {
 my $cmd = [];
 
 if ($method eq 'zpool_list') {
-   push @$cmd = 'zpool', 'list';
+   push @$cmd, 'zpool', 'list';
 } else {
push @$cmd, 'zfs', $method;
 }
@@ -419,6 +419,14 @@ sub volume_snapshot_rollback {
 
 sub activate_storage {
 my ($class, $storeid, $scfg, $cache) = @_;
+
+my @param = ('-o', 'name', '-H');
+
+my $text = zfs_request($class, $scfg, undef, 'zpool_list', @param);
+ 
+    if ($text !~ $scfg->{pool}) {
+	run_command("zpool import -d /dev/disk/by-id/ -a");
+}
 return 1;
 }
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] pveproxy: enable TLSv1

2015-02-03 Thread Wolfgang Link
this is necessary for IE communication
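
To verify the change, one can probe the proxy with a TLSv1-only client, for example (8006 is the default pveproxy port; the host is a placeholder):

    openssl s_client -connect localhost:8006 -tls1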

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 bin/pveproxy |4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/bin/pveproxy b/bin/pveproxy
index a9b6f1c..a254f08 100755
--- a/bin/pveproxy
+++ b/bin/pveproxy
@@ -107,8 +107,10 @@ sub init {
ssl = {
# Note: older versions are considered insecure, for example
# search for Poodle-Attac
+	    method => 'TLSv1',
	    sslv2 => 0,
-	    sslv3 => 0, 
+	    sslv3 => 0,
+	    verify => 1,
	    cipher_list => $proxyconf->{CIPHERS} || 'HIGH:MEDIUM:!aNULL:!MD5',
key_file = '/etc/pve/local/pve-ssl.key',
cert_file = '/etc/pve/local/pve-ssl.pem',
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH] pveproxy: enable TLSv1

2015-02-03 Thread Wolfgang Link

No it doesn't; the documentation only recommends setting this flag.
Am 03.02.15 um 18:23 schrieb Dietmar Maurer:

+   sslv3 = 0,
+   verify = 1,

Really? Why does a server side verification influence the client?



___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] Question to ZFSPlugin.pm

2015-01-21 Thread Wolfgang Link

I implemented the ZFSPlugin for local ZFS use.

While testing my adaptation of this plugin,
I noticed that it is possible to erase a template which has a linked clone.

I mean the ZFS volume will not be destroyed, but the config of the template will be.

My problem is that I can't validate it on the original plugin, because I have no iSCSI Nexenta/OmniOS.

Does the ZFSPlugin have the same behavior on iSCSI?

regards.






___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH] memory form : hotplug improvements

2015-02-10 Thread Wolfgang Link

Yes, I am the new Proxmox team member.

Am 10.02.15 um 17:14 schrieb Alexandre DERUMIER:

Seem to be perfect here :)

Thanks !

I'm going to work on cpu hotplug tommorow.

BTW, is Wolfgang a new proxmox team member ?



- Mail original -
De: dietmar diet...@proxmox.com
À: aderumier aderum...@odiso.com
Cc: pve-devel pve-devel@pve.proxmox.com
Envoyé: Mardi 10 Février 2015 16:42:39
Objet: Re: [pve-devel] [PATCH] memory form : hotplug improvements


does onSpinUp, onSpinDown also work if we change value manually without using
up|down button ?

I finally fixed that by listening to blur signal:

https://git.proxmox.com/?p=pve-manager.git;a=commitdiff;h=5e0a288e38c8cba1faf99a5efb62a06352582d03

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] bug-fix: ZFSPoolPlugin

2015-02-12 Thread Wolfgang Link
Improve error handling.
Inform the user only if there is really no device,
i.e. if both checks fail.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   20 
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 7dc7d3e..231d109 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -180,14 +180,26 @@ sub alloc_image {
 $name = $class-zfs_find_free_diskname($storeid, $scfg, $vmid) if !$name;
 
 $class-zfs_create_zvol($scfg, $name, $size);
-run_command (udevadm trigger --subsystem-match block);
-run_command (udevadm settle --timeout 5);
-
+
+    eval {
+	run_command("udevadm trigger --subsystem-match block");
+	run_command("udevadm settle --timeout 5");
+    };
+
+    my $warn = $@;
+
+    my $create_ok;
+
     for (1..10) {
-	last if -e "/dev/zvol/$scfg->{pool}/$name";
+	if (-e "/dev/zvol/$scfg->{pool}/$name") {
+	    $create_ok = 1;
+	    last;
+	}
	Time::HiRes::usleep(100);
     }
 
+    die "can't alloc image\n" unless $create_ok;
+
 return $name;
 }
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] bug-fix for size output

2015-02-12 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 7dc7d3e..8584bac 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -97,15 +97,17 @@ sub zfs_parse_zvol_list {
my @parts = split /\//, $1;
my $name = pop @parts;
my $pool = join('/', @parts);
+   my $size = $2;
+   my $origin = $3;
 
next unless $name =~ m!^(\w+)-(\d+)-(\w+)-(\d+)$!;
$name = $pool . '/' . $name;
 
	    $zvol->{pool} = $pool;
	    $zvol->{name} = $name;
-	    $zvol->{size} = zfs_parse_size($2);
+	    $zvol->{size} = zfs_parse_size($size);
	    if ($3 !~ /^-$/) {
-		$zvol->{origin} = $3;
+		$zvol->{origin} = $origin;
}
push @$list, $zvol;
}
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V2] Storage: add method

2015-02-11 Thread Wolfgang Link
Add the method volume_rollback_is_possible and refactor.
Improve error handling:
if a snapshot is not reversible, catch it before the VM gets locked and shut down.
This is the case if ZFS has a younger snapshot.
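
A sketch of the intended call order in the rollback path (function names as in the patches below):

    # dies early, e.g. if a more recent ZFS snapshot exists - before the VM is locked or stopped
    PVE::Storage::volume_rollback_is_possible($storecfg, $volid, $snapname);
    # ... only afterwards lock the config, stop the VM and perform the actual rollback
    PVE::Storage::volume_snapshot_rollback($storecfg, $volid, $snapname);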

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage.pm   |   15 +++
 PVE/Storage/Plugin.pm|6 ++
 PVE/Storage/ZFSPlugin.pm |7 ++-
 PVE/Storage/ZFSPoolPlugin.pm |   12 ++--
 4 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index 8711710..7845ad1 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -144,6 +144,21 @@ sub volume_resize {
 }
 }
 
+sub volume_rollback_is_possible {
+    my ($cfg, $volid, $snap) = @_;
+
+    my ($storeid, $volname) = parse_volume_id($volid, 1);
+    if ($storeid) {
+        my $scfg = storage_config($cfg, $storeid);
+        my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
+        return $plugin->volume_rollback_is_possible($scfg, $storeid, $volname, $snap);
+    } elsif ($volid =~ m|^(/.+)$| && -e $volid) {
+        die "snapshot rollback device $volid is not possible";
+    } else {
+        die "can't parse volume id";
+    }
+}
+
 sub volume_snapshot {
 my ($cfg, $volid, $snap, $running) = @_;
 
diff --git a/PVE/Storage/Plugin.pm b/PVE/Storage/Plugin.pm
index 15c23d4..3eb99f2 100644
--- a/PVE/Storage/Plugin.pm
+++ b/PVE/Storage/Plugin.pm
@@ -656,6 +656,12 @@ sub volume_snapshot {
 return undef;
 }
 
+sub volume_rollback_is_possible {
+my ($class, $scfg, $storeid, $volname, $snap) = @_; 
+
+return 1; 
+}
+
 sub volume_snapshot_rollback {
 my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
diff --git a/PVE/Storage/ZFSPlugin.pm b/PVE/Storage/ZFSPlugin.pm
index 77394b9..6aac58d 100644
--- a/PVE/Storage/ZFSPlugin.pm
+++ b/PVE/Storage/ZFSPlugin.pm
@@ -311,11 +311,8 @@ sub volume_snapshot_rollback {
 my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
 # abort rollback if snapshot is not the latest
-    my $recentsnap = $class->zfs_get_latest_snapshot($scfg, $volname);
-    if ($snap ne $recentsnap) {
-        die "cannot rollback, more recent snapshots exist\n";
-    }
-
+    $class->volume_rollback_is_possible($scfg, $storeid, $volname, $snap);
+
     $class->zfs_delete_lu($scfg, $volname);
 
     $class->zfs_request($class, $scfg, undef, 'rollback', "$scfg->{pool}/$volname\@$snap");
diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 754f29f..2a1cfef 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -412,12 +412,20 @@ sub volume_snapshot_rollback {
 my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
     # abort rollback if snapshot is not the latest
+    $class->volume_rollback_is_possible($scfg, $storeid, $volname, $snap);
+
+    zfs_request($class, $scfg, undef, 'rollback', "$scfg->{pool}/$volname\@$snap");
+}
+
+sub volume_rollback_is_possible {
+    my ($class, $scfg, $storeid, $volname, $snap) = @_;
+
     my $recentsnap = $class->zfs_get_latest_snapshot($scfg, $volname);
     if ($snap ne $recentsnap) {
-        die "cannot rollback, more recent snapshots exist\n";
+        die "can't rollback, more recent snapshots exist\n";
     }
 
-    zfs_request($class, $scfg, undef, 'rollback', "$scfg->{pool}/$volname\@$snap");
+    return 1;
 }
 
 sub activate_storage {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V2 1/2] QemuServer: check snapshot befor rollback

2015-02-11 Thread Wolfgang Link
This will check if it is possible to roll back a snapshot before the VM is shut down and locked.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm |   18 ++
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 5d7f9d7..3fc742c 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -5550,13 +5550,23 @@ sub snapshot_rollback {
 
 my $storecfg = PVE::Storage::config();
 
-    my $updatefn = sub {
+    my $conf = load_config($vmid);
 
-	my $conf = load_config($vmid);
+    die "you can't rollback if vm is a template\n" if is_template($conf);
 
-	die "you can't rollback if vm is a template\n" if is_template($conf);
+    $snap = $conf->{snapshots}->{$snapname};
 
-	$snap = $conf->{snapshots}->{$snapname};
+    foreach_drive($snap, sub {
+	my ($ds, $drive) = @_;
+
+	return if drive_is_cdrom($drive);
+
+	my $volid = $drive->{file};
+
+	PVE::Storage::volume_rollback_is_possible($storecfg, $volid, $snapname);
+    });
+
+    my $updatefn = sub {
 
	die "snapshot '$snapname' does not exist\n" if !defined($snap);
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] Check rollback is possible

2015-02-11 Thread Wolfgang Link
If a storage does not support rollback to every snapshot,
this is now tested before the VM gets locked and shut down.


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V2 2/2] QemuServer: remove useless var

2015-02-11 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm |1 -
 1 file changed, 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 3fc742c..b457095 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -5621,7 +5621,6 @@ sub snapshot_rollback {
return if drive_is_cdrom($drive);
 
my $volid = $drive-{file};
-   my $device = drive-$ds;
 
PVE::Storage::volume_snapshot_rollback($storecfg, $volid, $snapname);
 });
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] ZFS tool for asynchronous replica

2015-02-13 Thread Wolfgang Link

Does anybody know an open source tool for asynchronous replication with ZFS?
I found this one, but it seems to be no longer active:
http://www.bolthole.com/solaris/zrep/


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Fix: disable root

2015-01-28 Thread Wolfgang Link
From: Wolfgang Link wolfg...@linksystems.org

root can now be disabled in GUI.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/AccessControl.pm |   10 --
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/PVE/AccessControl.pm b/PVE/AccessControl.pm
index db85d08..a1b4971 100644
--- a/PVE/AccessControl.pm
+++ b/PVE/AccessControl.pm
@@ -356,8 +356,6 @@ sub check_user_enabled {
 
 return 1 if $data-{enable};
 
-return 1 if $username eq 'root@pam'; # root is always enabled
-
 die user '$username' is disabled\n if !$noerr;
  
 return undef;
@@ -695,10 +693,10 @@ sub userconfig_force_defaults {
	$cfg->{roles}->{$r} = $special_roles->{$r};
     }
 
-    # fixme: remove 'root' group (not required)?
-
-    # add root user 
-    $cfg->{users}->{'root@pam'}->{enable} = 1;
+    # add root user if not exists
+    if (!$cfg->{users}->{'root@pam'}) {
+	$cfg->{users}->{'root@pam'}->{enable} = 1;
+}
 }
 
 sub parse_user_config {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] zfs: fix wait by alloc_image

2015-01-28 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |7 +++
 1 file changed, 7 insertions(+)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 10da7f7..ed10484 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -185,6 +185,13 @@ sub alloc_image {
 
 $class-zfs_create_zvol($scfg, $name, $size);
 
+    run_command("udevadm trigger --subsystem-match block");
+    run_command("udevadm settle --timeout 5");
+
+for (1..4) {
+   Time::HiRes::usleep(100);
+}
+
 return $name;
 }
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] zfs: fixV2 wait by alloc_image

2015-01-28 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPlugin.pm |9 -
 PVE/Storage/ZFSPoolPlugin.pm |7 +++
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/PVE/Storage/ZFSPlugin.pm b/PVE/Storage/ZFSPlugin.pm
index 581ef6c..d8acce9 100644
--- a/PVE/Storage/ZFSPlugin.pm
+++ b/PVE/Storage/ZFSPlugin.pm
@@ -265,7 +265,14 @@ sub clone_image {
 sub alloc_image {
 my ($class, $storeid, $scfg, $vmid, $fmt, $name, $size) = @_;
 
-    my $volname = $class->SUPER::alloc_image($storeid, $scfg, $vmid, $fmt, $name, $size);
+    die "unsupported format '$fmt'" if $fmt ne 'raw';
+
+    die "illegal name '$name' - should be 'vm-$vmid-*'\n"
+	if $name && $name !~ m/^vm-$vmid-/;
+
+    my $volname = $class->zfs_find_free_diskname($storeid, $scfg, $vmid) if !$name;
+
+    $class->zfs_create_zvol($scfg, $name, $size);
  
 my $guid = $class-zfs_create_lu($scfg, $volname);
 $class-zfs_add_lun_mapping_entry($scfg, $volname, $guid);
diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 10da7f7..490b61c 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -184,6 +184,13 @@ sub alloc_image {
 $name = $class-zfs_find_free_diskname($storeid, $scfg, $vmid) if !$name;
 
 $class-zfs_create_zvol($scfg, $name, $size);
+    run_command("udevadm trigger --subsystem-match block");
+    run_command("udevadm settle --timeout 5");
+
+    for (1..10) {
+	last if -e "/dev/zvol/$scfg->{pool}/$name";
+   Time::HiRes::usleep(100);
+}
 
 return $name;
 }
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 4/6] Gui storage: rename storage type

2015-01-26 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/Utils.js   |4 ++--
 www/manager/storage/ZFSPoolEdit.js |2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/www/manager/Utils.js b/www/manager/Utils.js
index 0b25b2b..8f46009 100644
--- a/www/manager/Utils.js
+++ b/www/manager/Utils.js
@@ -713,9 +713,9 @@ Ext.define('PVE.Utils', { statics: {
} else if (value === 'sheepdog') {
return 'Sheepdog';
} else if (value === 'zfs') {
-   return 'ZFS';
+   return 'ZFS over iSCSI';
} else if (value === 'zfspool') {
-   return 'ZFSPool';
+   return 'ZFS';
} else if (value === 'iscsidirect') {
return 'iSCSIDirect';
} else {
diff --git a/www/manager/storage/ZFSPoolEdit.js 
b/www/manager/storage/ZFSPoolEdit.js
index 0b963a4..60d52dc 100644
--- a/www/manager/storage/ZFSPoolEdit.js
+++ b/www/manager/storage/ZFSPoolEdit.js
@@ -95,7 +95,7 @@ Ext.define('PVE.storage.ZFSPoolEdit', {
});
 
Ext.apply(me, {
-subject: 'ZFSPool Storage',
+subject: 'ZFS Storage',
isAdd: true,
items: [ ipanel ]
});
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 1/6] Gui storage: copy code as template

2015-01-26 Thread Wolfgang Link
copy from ZFSEdit.js to ZFSPoolEdit.js

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/storage/ZFSPoolEdit.js |  173 
 1 file changed, 173 insertions(+)
 create mode 100644 www/manager/storage/ZFSPoolEdit.js

diff --git a/www/manager/storage/ZFSPoolEdit.js 
b/www/manager/storage/ZFSPoolEdit.js
new file mode 100644
index 000..f40bf7a
--- /dev/null
+++ b/www/manager/storage/ZFSPoolEdit.js
@@ -0,0 +1,173 @@
+Ext.define('PVE.storage.ZFSInputPanel', {
+extend: 'PVE.panel.InputPanel',
+
+onGetValues: function(values) {
+   var me = this;
+
+   if (me.create) {
+   values.type = 'zfs';
+   values.content = 'images';
+   } else {
+   delete values.storage;
+   }
+
+   values.disable = values.enable ? 0 : 1;
+   delete values.enable;
+
+   return values;
+},
+
+initComponent : function() {
+   var me = this;
+
+   me.column1 = [
+   {
+   xtype: me.create ? 'textfield' : 'displayfield',
+   name: 'storage',
+   height: 22, // hack: set same height as text fields
+   value: me.storageId || '',
+   fieldLabel: 'ID',
+   vtype: 'StorageId',
+   allowBlank: false
+   },
+   {
+   xtype: me.create ? 'textfield' : 'displayfield',
+   name: 'portal',
+   height: 22, // hack: set same height as text fields
+   value: '',
+   fieldLabel: gettext('Portal'),
+   allowBlank: false
+   },
+   {
+   xtype: me.create ? 'textfield' : 'displayfield',
+   name: 'pool',
+   height: 22, // hack: set same height as text fields
+   value: '',
+   fieldLabel: gettext('Pool'),
+   allowBlank: false
+   },
+   {
+   xtype: me.create ? 'textfield' : 'displayfield',
+   name: 'blocksize',
+   height: 22, // hack: set same height as text fields
+   value: '4k',
+   fieldLabel: gettext('Block Size'),
+   allowBlank: false
+   },
+   {
+   xtype: me.create ? 'textfield' : 'displayfield',
+   name: 'target',
+   height: 22, // hack: set same height as text fields
+   value: '',
+   fieldLabel: gettext('Target'),
+   allowBlank: false
+   },
+   {
+   xtype: me.create ? 'textfield' : 'displayfield',
+   name: 'comstar_tg',
+   height: 22, // hack: set same height as text fields
+   value: '',
+   fieldLabel: gettext('Target group'),
+   allowBlank: true
+   }
+   ];
+
+   me.column2 = [
+   {
+   xtype: 'pvecheckbox',
+   name: 'enable',
+   checked: true,
+   uncheckedValue: 0,
+   fieldLabel: gettext('Enable')
+   },
+   {
+   xtype: me.create ? 'pveiScsiProviderSelector' : 'displayfield',
+   name: 'iscsiprovider',
+   height: 22, // hack: set same height as text fields
+   value: 'comstar',
+   fieldLabel: gettext('iSCSI Provider'),
+   allowBlank: false
+   },
+   {
+   xtype: 'pvecheckbox',
+   name: 'sparse',
+   checked: false,
+   uncheckedValue: 0,
+   fieldLabel: gettext('Thin provision')
+   },
+   {
+   xtype: 'pvecheckbox',
+   name: 'nowritecache',
+   checked: true,
+   uncheckedValue: 0,
+   fieldLabel: gettext('Write cache')
+   },
+   {
+   xtype: me.create ? 'textfield' : 'displayfield',
+   name: 'comstar_hg',
+   height: 22, // hack: set same height as text fields
+   value: '',
+   fieldLabel: gettext('Host group'),
+   allowBlank: true
+   }
+   ];
+
+   if (me.create || me.storageId !== 'local') {
+   me.column2.unshift({
+   xtype: 'PVE.form.NodeSelector',
+   name: 'nodes',
+   fieldLabel: gettext('Nodes'),
+   emptyText: gettext('All') + ' (' +
+   gettext('No restrictions') +')',
+   multiSelect: true,
+   autoSelect: false
+   });
+   }
+
+   me.callParent();
+}
+});
+
+Ext.define('PVE.storage.ZFSEdit', {
+extend: 'PVE.window.Edit',
+
+initComponent : function() {
+   var me = this;
+
+   me.create = !me.storageId;
+
+   if (me.create) {
+me.url = '/api2/extjs/storage';
+me.method = 'POST';
+} else {
+me.url = '/api2/extjs/storage/' + me.storageId

[pve-devel] [PATCH 2/6] Gui storage: modify ZFSPoolEdit.js

2015-01-26 Thread Wolfgang Link
Change the input fields to the ones needed for this storage type.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/storage/ZFSPoolEdit.js |   67 
 1 file changed, 6 insertions(+), 61 deletions(-)

diff --git a/www/manager/storage/ZFSPoolEdit.js 
b/www/manager/storage/ZFSPoolEdit.js
index f40bf7a..0b963a4 100644
--- a/www/manager/storage/ZFSPoolEdit.js
+++ b/www/manager/storage/ZFSPoolEdit.js
@@ -1,11 +1,11 @@
-Ext.define('PVE.storage.ZFSInputPanel', {
+Ext.define('PVE.storage.ZFSPoolInputPanel', {
 extend: 'PVE.panel.InputPanel',
 
 onGetValues: function(values) {
var me = this;
 
if (me.create) {
-   values.type = 'zfs';
+   values.type = 'zfspool';
values.content = 'images';
} else {
delete values.storage;
@@ -32,43 +32,11 @@ Ext.define('PVE.storage.ZFSInputPanel', {
},
{
xtype: me.create ? 'textfield' : 'displayfield',
-   name: 'portal',
-   height: 22, // hack: set same height as text fields
-   value: '',
-   fieldLabel: gettext('Portal'),
-   allowBlank: false
-   },
-   {
-   xtype: me.create ? 'textfield' : 'displayfield',
name: 'pool',
height: 22, // hack: set same height as text fields
value: '',
fieldLabel: gettext('Pool'),
allowBlank: false
-   },
-   {
-   xtype: me.create ? 'textfield' : 'displayfield',
-   name: 'blocksize',
-   height: 22, // hack: set same height as text fields
-   value: '4k',
-   fieldLabel: gettext('Block Size'),
-   allowBlank: false
-   },
-   {
-   xtype: me.create ? 'textfield' : 'displayfield',
-   name: 'target',
-   height: 22, // hack: set same height as text fields
-   value: '',
-   fieldLabel: gettext('Target'),
-   allowBlank: false
-   },
-   {
-   xtype: me.create ? 'textfield' : 'displayfield',
-   name: 'comstar_tg',
-   height: 22, // hack: set same height as text fields
-   value: '',
-   fieldLabel: gettext('Target group'),
-   allowBlank: true
}
];
 
@@ -81,35 +49,12 @@ Ext.define('PVE.storage.ZFSInputPanel', {
fieldLabel: gettext('Enable')
},
{
-   xtype: me.create ? 'pveiScsiProviderSelector' : 'displayfield',
-   name: 'iscsiprovider',
-   height: 22, // hack: set same height as text fields
-   value: 'comstar',
-   fieldLabel: gettext('iSCSI Provider'),
-   allowBlank: false
-   },
-   {
xtype: 'pvecheckbox',
name: 'sparse',
checked: false,
uncheckedValue: 0,
fieldLabel: gettext('Thin provision')
-   },
-   {
-   xtype: 'pvecheckbox',
-   name: 'nowritecache',
-   checked: true,
-   uncheckedValue: 0,
-   fieldLabel: gettext('Write cache')
-   },
-   {
-   xtype: me.create ? 'textfield' : 'displayfield',
-   name: 'comstar_hg',
-   height: 22, // hack: set same height as text fields
-   value: '',
-   fieldLabel: gettext('Host group'),
-   allowBlank: true
-   }
+   }   
];
 
if (me.create || me.storageId !== 'local') {
@@ -128,7 +73,7 @@ Ext.define('PVE.storage.ZFSInputPanel', {
 }
 });
 
-Ext.define('PVE.storage.ZFSEdit', {
+Ext.define('PVE.storage.ZFSPoolEdit', {
 extend: 'PVE.window.Edit',
 
 initComponent : function() {
@@ -144,13 +89,13 @@ Ext.define('PVE.storage.ZFSEdit', {
 me.method = 'PUT';
 }
 
-   var ipanel = Ext.create('PVE.storage.ZFSInputPanel', {
+   var ipanel = Ext.create('PVE.storage.ZFSPoolInputPanel', {
create: me.create,
storageId: me.storageId
});
 
Ext.apply(me, {
-subject: 'ZFS Storage',
+subject: 'ZFSPool Storage',
isAdd: true,
items: [ ipanel ]
});
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 6/6] Gui storage: change input method

2015-01-26 Thread Wolfgang Link
Change the input method for ZFS Pool in Add: ZFS Storage from a plain text field to a combobox.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/storage/ZFSPoolEdit.js |   37 
 1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/www/manager/storage/ZFSPoolEdit.js 
b/www/manager/storage/ZFSPoolEdit.js
index 3ee1a7a..4c3c1ca 100644
--- a/www/manager/storage/ZFSPoolEdit.js
+++ b/www/manager/storage/ZFSPoolEdit.js
@@ -11,17 +11,17 @@ Ext.define('PVE.storage.ZFSPoolSelector', {
 
var store = Ext.create('Ext.data.Store', {
autoLoad: {}, // true,
-   fields: [ 'vg', 'size', 'free' ],
+   fields: [ 'pool', 'size', 'free' ],
proxy: {
type: 'pve',
-   url: '/api2/json/nodes/' + me.nodename + '/scan/lvm'
+   url: '/api2/json/nodes/' + me.nodename + '/scan/zfs'
}
});
 
Ext.apply(me, {
store: store,
-   valueField: 'vg',
-   displayField: 'vg',
+   valueField: 'pool',
+   displayField: 'pool',
queryMode: 'local',
editable: false,
listConfig: {
@@ -72,17 +72,30 @@ Ext.define('PVE.storage.ZFSPoolInputPanel', {
fieldLabel: 'ID',
vtype: 'StorageId',
allowBlank: false
-   },
-   {
-   xtype: me.create ? 'textfield' : 'displayfield',
-   name: 'pool',
-   height: 22, // hack: set same height as text fields
-   value: '',
-   fieldLabel: gettext('Pool'),
-   allowBlank: false
}
];
 
+   var zfspoolnameField = Ext.createWidget(me.create ? 'textfield' : 
'displayfield', {
+   height: 22, // hack: set same height as text fields
+   name: 'poolname',
+   hidden: !!me.create,
+   disabled: !!me.create,
+   value: '',
+   fieldLabel: gettext('ZFS Pool'),
+   allowBlank: false
+   });
+   
+   if (me.create) {
+   var zfspoolField = Ext.create('PVE.storage.ZFSPoolSelector', {
+   name: 'zfspoolname',
+   fieldLabel: gettext('ZFS Pool'),
+   allowBlank: false
+   });
+   me.column1.push(zfspoolField);
+   }
+
+   me.column1.push(zfspoolnameField);
+
me.column2 = [
{
xtype: 'pvecheckbox',
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 3/6] Gui storage: bind ZFSPoolEdit.js and set defaults

2015-01-26 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/Makefile  |1 +
 www/manager/Utils.js  |2 ++
 www/manager/dc/StorageView.js |   14 +-
 www/manager/qemu/Clone.js |3 ++-
 www/manager/qemu/HDEdit.js|3 ++-
 www/manager/qemu/HDMove.js|4 ++--
 6 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/www/manager/Makefile b/www/manager/Makefile
index 76d2ab3..ba6ca73 100644
--- a/www/manager/Makefile
+++ b/www/manager/Makefile
@@ -161,6 +161,7 @@ JSSRC=  
\
storage/RBDEdit.js  \
storage/SheepdogEdit.js \
storage/ZFSEdit.js  \
+   storage/ZFSPoolEdit.js  \
dc/Summary.js   \
dc/OptionView.js\
dc/StorageView.js   \
diff --git a/www/manager/Utils.js b/www/manager/Utils.js
index 93bd90b..0b25b2b 100644
--- a/www/manager/Utils.js
+++ b/www/manager/Utils.js
@@ -714,6 +714,8 @@ Ext.define('PVE.Utils', { statics: {
return 'Sheepdog';
} else if (value === 'zfs') {
return 'ZFS';
+   } else if (value === 'zfspool') {
+   return 'ZFSPool';
} else if (value === 'iscsidirect') {
return 'iSCSIDirect';
} else {
diff --git a/www/manager/dc/StorageView.js b/www/manager/dc/StorageView.js
index b351ef1..4bcf3b7 100644
--- a/www/manager/dc/StorageView.js
+++ b/www/manager/dc/StorageView.js
@@ -49,6 +49,8 @@ Ext.define('PVE.dc.StorageView', {
editor = 'PVE.storage.SheepdogEdit';
} else if (type === 'zfs') {
editor = 'PVE.storage.ZFSEdit';
+   } else if (type === 'zfspool') {
+   editor = 'PVE.storage.ZFSPoolEdit';
} else {
return;
}
@@ -165,7 +167,17 @@ Ext.define('PVE.dc.StorageView', {
win.on('destroy', reload);
win.show();
}
-   }
+   },
+   {
+text: PVE.Utils.format_storage_type('zfspool'),
+iconCls: 'pve-itype-icon-storage',
+handler: function() {
+var win = 
Ext.create('PVE.storage.ZFSPoolEdit', {});
+win.on('destroy', reload);
+win.show();
+}
+},
+
 /* the following type are conidered unstable
  * so we do not enable that on the GUI for now
{
diff --git a/www/manager/qemu/Clone.js b/www/manager/qemu/Clone.js
index 7f41487..de0c95a 100644
--- a/www/manager/qemu/Clone.js
+++ b/www/manager/qemu/Clone.js
@@ -69,7 +69,8 @@ Ext.define('PVE.window.Clone', {
 rec.data.type === 'rbd' ||
 rec.data.type === 'iscsi' ||
 rec.data.type === 'sheepdog' ||
-   rec.data.type === 'zfs'
+   rec.data.type === 'zfs' ||
+   rec.data.type === 'zfspool'
) {
 me.formatsel.setValue('raw');
 me.formatsel.setDisabled(true);
diff --git a/www/manager/qemu/HDEdit.js b/www/manager/qemu/HDEdit.js
index 0faa3c5..b2785b1 100644
--- a/www/manager/qemu/HDEdit.js
+++ b/www/manager/qemu/HDEdit.js
@@ -174,7 +174,8 @@ Ext.define('PVE.qemu.HDInputPanel', {
} else if (rec.data.type === 'lvm' || 
   rec.data.type === 'rbd' ||
   rec.data.type === 'sheepdog' ||
-  rec.data.type === 'zfs') {
+  rec.data.type === 'zfs' ||
+  rec.data.type === 'zfspool') {
me.hdfilesel.setDisabled(true);
me.hdfilesel.setVisible(false);
me.formatsel.setValue('raw');
diff --git a/www/manager/qemu/HDMove.js b/www/manager/qemu/HDMove.js
index 68d7e92..6c16a36 100644
--- a/www/manager/qemu/HDMove.js
+++ b/www/manager/qemu/HDMove.js
@@ -77,8 +77,8 @@ Ext.define('PVE.window.HDMove', {
 } else if (rec.data.type === 'lvm' ||
rec.data.type === 'rbd' ||
rec.data.type === 'sheepdog' ||
-   rec.data.type === 'zfs'
-
+   rec.data.type === 'zfs' ||
+  rec.data.type === 'zfspool'
 ) {
 me.formatsel.setValue('raw');
 me.formatsel.setDisabled(true);
-- 
1.7.10.4

[pve-devel] [PATCH 5/6] Gui storage: copy selector method

2015-01-26 Thread Wolfgang Link
from LVMEdit.js to ZFSPoolEdit.js

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/storage/ZFSPoolEdit.js |   43 
 1 file changed, 43 insertions(+)

diff --git a/www/manager/storage/ZFSPoolEdit.js 
b/www/manager/storage/ZFSPoolEdit.js
index 60d52dc..3ee1a7a 100644
--- a/www/manager/storage/ZFSPoolEdit.js
+++ b/www/manager/storage/ZFSPoolEdit.js
@@ -1,3 +1,46 @@
+Ext.define('PVE.storage.ZFSPoolSelector', {
+extend: 'Ext.form.field.ComboBox',
+alias: 'widget.pveZFSPoolSelector',
+
+initComponent : function() {
+   var me = this;
+
+   if (!me.nodename) {
+   me.nodename = 'localhost';
+   }
+
+   var store = Ext.create('Ext.data.Store', {
+   autoLoad: {}, // true,
+   fields: [ 'vg', 'size', 'free' ],
+   proxy: {
+   type: 'pve',
+   url: '/api2/json/nodes/' + me.nodename + '/scan/lvm'
+   }
+   });
+
+   Ext.apply(me, {
+   store: store,
+   valueField: 'vg',
+   displayField: 'vg',
+   queryMode: 'local',
+   editable: false,
+   listConfig: {
+   loadingText: gettext('Scanning...'),
+   listeners: {
+   // hack: call setHeight to show scroll bars correctly
+   refresh: function(list) {
+   var lh = PVE.Utils.gridLineHeigh();
+   var count = store.getCount();
+   list.setHeight(lh * ((count  10) ? 10 : count));
+   }
+   }
+   }
+   });
+
+   me.callParent();
+}
+});
+
 Ext.define('PVE.storage.ZFSPoolInputPanel', {
 extend: 'PVE.panel.InputPanel',
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] ZfsDirPlugin

2015-01-23 Thread Wolfgang Link
This is the refactoring of ZfsPlugin and implements ZFSDirPlugin.
In-Reply-To: 


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 9/9] zfs: move and refactor code

2015-01-23 Thread Wolfgang Link
copy and modify create_base and refactor clone_image

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSDirPlugin.pm |   43 +++
 PVE/Storage/ZFSPlugin.pm|   13 +
 2 files changed, 44 insertions(+), 12 deletions(-)

diff --git a/PVE/Storage/ZFSDirPlugin.pm b/PVE/Storage/ZFSDirPlugin.pm
index 9081d01..fa5a070 100644
--- a/PVE/Storage/ZFSDirPlugin.pm
+++ b/PVE/Storage/ZFSDirPlugin.pm
@@ -421,6 +421,49 @@ sub deactivate_volume {
 return 1;
 }
 
+sub clone_image {
+my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
+
+$snap ||= '__base__';
+
+my ($vtype, $basename, $basevmid, undef, undef, $isBase) =
+$class-parse_volname($volname);
+
+die clone_image only works on base images\n if !$isBase;
+
+my $name = $class-zfs_find_free_diskname($storeid, $scfg, $vmid);
+
+warn clone $volname: $basename to $name\n;
+
+$class-zfs_request($scfg, undef, 'clone', 
$scfg-{pool}/$basename\@$snap, $scfg-{pool}/$name);
+
+return $name;
+}
+
+sub create_base {
+my ($class, $storeid, $scfg, $volname) = @_;
+
+my $snap = '__base__';
+
+my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
+$class-parse_volname($volname);
+
+die create_base not possible with base image\n if $isBase;
+
+my $newname = $name;
+$newname =~ s/^vm-/base-/;
+
+my $newvolname = $basename ? $basename/$newname : $newname;
+
+$class-zfs_request($scfg, undef, 'rename', $scfg-{pool}/$name, 
$scfg-{pool}/$newname);
+
+my $running  = undef; #fixme : is create_base always offline ?
+
+$class-volume_snapshot($scfg, $storeid, $newname, $snap, $running);
+
+return $newvolname;
+}
+
 sub volume_has_feature {
 my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
 
diff --git a/PVE/Storage/ZFSPlugin.pm b/PVE/Storage/ZFSPlugin.pm
index 28600dc..afead22 100644
--- a/PVE/Storage/ZFSPlugin.pm
+++ b/PVE/Storage/ZFSPlugin.pm
@@ -254,18 +254,7 @@ sub create_base {
 sub clone_image {
 my ($class, $scfg, $storeid, $volname, $vmid, $snap) = @_;
 
-$snap ||= '__base__';
-
-my ($vtype, $basename, $basevmid, undef, undef, $isBase) =
-$class-parse_volname($volname);
-
-die clone_image only works on base images\n if !$isBase;
-
-my $name = $class-zfs_find_free_diskname($storeid, $scfg, $vmid);
-
-warn clone $volname: $basename to $name\n;
-
-$class-zfs_request($scfg, undef, 'clone', 
$scfg-{pool}/$basename\@$snap, $scfg-{pool}/$name);
+my $name  = $class-SUPER::clone_image($class, $scfg, $storeid, $volname, 
$vmid, $snap);
 
 my $guid = $class-zfs_create_lu($scfg, $name);
 $class-zfs_add_lun_mapping_entry($scfg, $name, $guid);
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 3/9] zfs: copy free_image

2015-01-23 Thread Wolfgang Link
modify it for ZFSDirPlugin.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSDirPlugin.pm |   11 +++
 1 file changed, 11 insertions(+)

diff --git a/PVE/Storage/ZFSDirPlugin.pm b/PVE/Storage/ZFSDirPlugin.pm
index da53525..e3c1a9d 100644
--- a/PVE/Storage/ZFSDirPlugin.pm
+++ b/PVE/Storage/ZFSDirPlugin.pm
@@ -173,6 +173,17 @@ sub alloc_image {
 
 }
 
+sub free_image {
+my ($class, $storeid, $scfg, $volname, $isBase) = @_;
+
+my (undef, $name, undef) = $class-parse_volname($volname);
+
+eval { $class-zfs_delete_zvol($scfg, $name); };
+die $@ if $@;
+
+return undef;
+}
+
 sub zfs_get_pool_stats {
 my ($class, $scfg) = @_;
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 7/9] zfs: move code

2015-01-23 Thread Wolfgang Link
move activate_volume and deactivate_volume from ZFSPlugin to ZFSDirPlugin

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSDirPlugin.pm |9 +
 PVE/Storage/ZFSPlugin.pm|   10 --
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/PVE/Storage/ZFSDirPlugin.pm b/PVE/Storage/ZFSDirPlugin.pm
index 1782c04..7086c33 100644
--- a/PVE/Storage/ZFSDirPlugin.pm
+++ b/PVE/Storage/ZFSDirPlugin.pm
@@ -389,5 +389,14 @@ sub volume_snapshot_delete {
 $class-zfs_request($scfg, undef, 'destroy', 
$scfg-{pool}/$volname\@$snap);
 }
 
+sub activate_volume {
+my ($class, $storeid, $scfg, $volname, $exclusive, $cache) = @_;
+return 1;
+}
+
+sub deactivate_volume {
+my ($class, $storeid, $scfg, $volname, $exclusive, $cache) = @_;
+return 1;
+}
 
 1;
diff --git a/PVE/Storage/ZFSPlugin.pm b/PVE/Storage/ZFSPlugin.pm
index d7d5c6c..8f4c098 100644
--- a/PVE/Storage/ZFSPlugin.pm
+++ b/PVE/Storage/ZFSPlugin.pm
@@ -311,16 +311,6 @@ sub deactivate_storage {
 return 1;
 }
 
-sub activate_volume {
-my ($class, $storeid, $scfg, $volname, $exclusive, $cache) = @_;
-return 1;
-}
-
-sub deactivate_volume {
-my ($class, $storeid, $scfg, $volname, $exclusive, $cache) = @_;
-return 1;
-}
-
 sub volume_resize {
 my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 5/9] zfs: ZFSDirPlugin add method path

2015-01-23 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSDirPlugin.pm |   16 
 1 file changed, 16 insertions(+)

diff --git a/PVE/Storage/ZFSDirPlugin.pm b/PVE/Storage/ZFSDirPlugin.pm
index fb7a1a2..260e328 100644
--- a/PVE/Storage/ZFSDirPlugin.pm
+++ b/PVE/Storage/ZFSDirPlugin.pm
@@ -132,6 +132,22 @@ sub parse_volname {
 
 # virtual zfs methods (subclass can overwrite them)
 
+sub path {
+my ($class, $scfg, $volname) = @_;
+
+my ($vtype, $name, $vmid) = $class-parse_volname($volname);
+
+my $path = '';
+
+if($vtype eq images){
+   $path = /dev/zvol/$scfg-{pool}/$volname;
+} else {
+   die $vtype is not allowed in ZFSDir!;
+}
+
+return ($path, $vmid, $vtype);
+}
+
 sub zfs_request {
 my ($class, $scfg, $timeout, $method, @params) = @_;
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 6/9] zfs: fix missing return in alloc_image

2015-01-23 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSDirPlugin.pm |1 +
 1 file changed, 1 insertion(+)

diff --git a/PVE/Storage/ZFSDirPlugin.pm b/PVE/Storage/ZFSDirPlugin.pm
index 260e328..1782c04 100644
--- a/PVE/Storage/ZFSDirPlugin.pm
+++ b/PVE/Storage/ZFSDirPlugin.pm
@@ -187,6 +187,7 @@ sub alloc_image {
 
 $class-zfs_create_zvol($scfg, $name, $size);
 
+return $name;
 }
 
 sub free_image {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] why disk count less than 6?

2015-02-02 Thread Wolfgang Link

Hi,

This is limited to 6 disks because of the GUI installer,
but you can add more disks later by using the CLI.
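
For example (only a sketch; the pool layout and device names are made up,
adapt them to your setup):

  # add another mirrored vdev to an existing pool
  zpool add rpool mirror /dev/disk/by-id/ata-DISK7 /dev/disk/by-id/ata-DISK8

  # or turn a single-disk vdev into a mirror
  zpool attach rpool /dev/disk/by-id/ata-DISK1 /dev/disk/by-id/ata-DISK7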

Regards

On 02/02/2015 06:24 AM, lyt_yudi wrote:

hi, PVE Developers,
 why is the disk count limited to less than 6?
 why can't all disks be selected for zfs_raid_setup?
……
sub get_zfs_raid_setup {

 my $filesys = $config_options-{filesys};

 my $dev_name_hash = {};

 my $devlist = [];
 my $bootdevlist = [];
 for (my $i = 0; $i  6; $i++) {
if (my $hd = $config_options-{disksel$i}) {
my ($disk, $devname, $size, $model) = @$hd;
die device '$devname' is used more than once\n
if $dev_name_hash-{$devname};
$dev_name_hash-{$devname} = $hd;
push @$devlist, $hd;
}
 }

 my $diskcount = scalar(@$devlist);
 die $filesys need at least one device\n if $diskcount  1;
…...

Thanks a lot!


lyt_yudi
lyt_y...@icloud.com





___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] ZfsPoolPlugin: fix add disks

2015-02-02 Thread Wolfgang Link
Now it is possible to add a disk to a VM on rpool.
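
(For reference: before this change, zvols directly below a pool named
'rpool' were always skipped; now every zvol whose name matches the expected
naming scheme is listed, e.g. an illustrative 'rpool/vm-100-disk-1'.)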

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 10da7f7..629802e 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -98,12 +98,8 @@ sub zfs_parse_zvol_list {
my $name = pop @parts;
my $pool = join('/', @parts);
 
-   if ($pool !~ /^rpool$/) {
-   next unless $name =~ m!^(\w+)-(\d+)-(\w+)-(\d+)$!;
-   $name = $pool . '/' . $name;
-   } else {
-   next;
-   }
+   next unless $name =~ m!^(\w+)-(\d+)-(\w+)-(\d+)$!;
+   $name = $pool . '/' . $name;
 
$zvol-{pool} = $pool;
$zvol-{name} = $name;
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 6/6] Gui storage: change input method

2015-01-26 Thread Wolfgang Link
Change the input method for ZFS Pool in Add: ZFS Storage from a plain text field to a combobox.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/storage/ZFSPoolEdit.js |   37 
 1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/www/manager/storage/ZFSPoolEdit.js 
b/www/manager/storage/ZFSPoolEdit.js
index 3ee1a7a..372f70a 100644
--- a/www/manager/storage/ZFSPoolEdit.js
+++ b/www/manager/storage/ZFSPoolEdit.js
@@ -11,17 +11,17 @@ Ext.define('PVE.storage.ZFSPoolSelector', {
 
var store = Ext.create('Ext.data.Store', {
autoLoad: {}, // true,
-   fields: [ 'vg', 'size', 'free' ],
+   fields: [ 'pool', 'size', 'free' ],
proxy: {
type: 'pve',
-   url: '/api2/json/nodes/' + me.nodename + '/scan/lvm'
+   url: '/api2/json/nodes/' + me.nodename + '/scan/zfs'
}
});
 
Ext.apply(me, {
store: store,
-   valueField: 'vg',
-   displayField: 'vg',
+   valueField: 'pool',
+   displayField: 'pool',
queryMode: 'local',
editable: false,
listConfig: {
@@ -72,17 +72,30 @@ Ext.define('PVE.storage.ZFSPoolInputPanel', {
fieldLabel: 'ID',
vtype: 'StorageId',
allowBlank: false
-   },
-   {
-   xtype: me.create ? 'textfield' : 'displayfield',
-   name: 'pool',
-   height: 22, // hack: set same height as text fields
-   value: '',
-   fieldLabel: gettext('Pool'),
-   allowBlank: false
}
];
 
+   var zfspoolnameField = Ext.createWidget(me.create ? 'textfield' : 
'displayfield', {
+   height: 22, // hack: set same height as text fields
+   name: 'poolname',
+   hidden: !!me.create,
+   disabled: !!me.create,
+   value: '',
+   fieldLabel: gettext('ZFS Pool'),
+   allowBlank: false
+   });
+   
+   if (me.create) {
+   var zfspoolField = Ext.create('PVE.storage.ZFSPoolSelector', {
+   name: 'pool',
+   fieldLabel: gettext('ZFS Pool'),
+   allowBlank: false
+   });
+   me.column1.push(zfspoolField);
+   }
+
+   me.column1.push(zfspoolnameField);
+
me.column2 = [
{
xtype: 'pvecheckbox',
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Fix: root can now be disabled in GUI.

2015-01-06 Thread Wolfgang Link

Signed-off-by: Wolfgang Link wolfg...@linksystems.org
---
 PVE/AccessControl.pm |7 ---
 1 file changed, 7 deletions(-)

diff --git a/PVE/AccessControl.pm b/PVE/AccessControl.pm
index db85d08..29dba39 100644
--- a/PVE/AccessControl.pm
+++ b/PVE/AccessControl.pm
@@ -356,8 +356,6 @@ sub check_user_enabled {
 
 return 1 if $data-{enable};
 
-return 1 if $username eq 'root@pam'; # root is always enabled
-
 die user '$username' is disabled\n if !$noerr;
  
 return undef;
@@ -694,11 +692,6 @@ sub userconfig_force_defaults {
 foreach my $r (keys %$special_roles) {
$cfg-{roles}-{$r} = $special_roles-{$r};
 }
-
-# fixme: remove 'root' group (not required)?
-
-# add root user 
-$cfg-{users}-{'root@pam'}-{enable} = 1;
 }
 
 sub parse_user_config {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] bug 597: hotplug fix

2015-02-10 Thread Wolfgang Link
Wrap params in double quotes so spaces are accepted.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm |4 
 1 file changed, 4 insertions(+)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 7045c14..9b4e137 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -4248,6 +4248,10 @@ sub vm_qmp_command {
 sub vm_human_monitor_command {
 my ($vmid, $cmdline) = @_;
 
+#This is necessary, so Human-Monitor accept spaces. 
+$cmdline =~ s/(file=rbd:)/\$1/g;
+$cmdline .=\;
+
 my $res;
 
 my $cmd = {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] zfspoolplugin: fix volume_resize

2015-02-10 Thread Wolfgang Link
Copy the method volume_resize from ZFSPlugin.pm to ZFSPoolPlugin.pm
and refactor volume_resize.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPlugin.pm |7 ---
 PVE/Storage/ZFSPoolPlugin.pm |   10 ++
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/PVE/Storage/ZFSPlugin.pm b/PVE/Storage/ZFSPlugin.pm
index 43cfcd8..77394b9 100644
--- a/PVE/Storage/ZFSPlugin.pm
+++ b/PVE/Storage/ZFSPlugin.pm
@@ -299,11 +299,12 @@ sub free_image {
 
 sub volume_resize {
 my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
+
+my $new_size = $class-SUPER::volume_resize($scfg, $storeid, $volname, 
$size, $running);
 
-my $new_size = ($size/1024);
-
-$class-zfs_request($scfg, undef, 'set', 'volsize=' . $new_size . 'k', 
$scfg-{pool}/$volname);
 $class-zfs_resize_lu($scfg, $volname, $new_size);
+
+return $new_size;
 }
 
 sub volume_snapshot_rollback {
diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 83dbc7f..754f29f 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -489,6 +489,16 @@ sub create_base {
 return $newvolname;
 }
 
+sub volume_resize {
+my ($class, $scfg, $storeid, $volname, $size, $running) = @_;
+
+my $new_size = ($size/1024);
+
+$class-zfs_request($scfg, undef, 'set', 'volsize=' . $new_size . 'k', 
$scfg-{pool}/$volname);
+
+return $new_size;
+}
+
 sub volume_has_feature {
 my ($class, $scfg, $feature, $storeid, $volname, $snapname, $running) = @_;
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] bug 597: hotplug fix

2015-02-10 Thread Wolfgang Link
Wrap params in double quotes so spaces are accepted.
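
To illustrate the effect (the path is invented), the command string sent to
the human monitor now looks like:

  drive_add auto "file=/mnt/my storage/images/100/vm-100-disk-1.qcow2,if=none,id=drive-virtio1"

so an embedded space no longer splits the drive specification.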

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 556bbb7..59441e1 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -3384,7 +3384,7 @@ sub qemu_driveadd {
 my ($storecfg, $vmid, $device) = @_;
 
 my $drive = print_drive_full($storecfg, $vmid, $device);
-my $ret = vm_human_monitor_command($vmid, drive_add auto $drive);
+my $ret = vm_human_monitor_command($vmid, drive_add auto \$drive\);
 
 # If the command succeeds qemu prints: OK
 return 1 if $ret =~ m/OK/s;
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 2/2] QemuServer: remove useless var

2015-02-11 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm |1 -
 1 file changed, 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 6df375e..45d522d 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -5621,7 +5621,6 @@ sub snapshot_rollback {
return if drive_is_cdrom($drive);
 
my $volid = $drive-{file};
-   my $device = drive-$ds;
 
PVE::Storage::volume_snapshot_rollback($storecfg, $volid, $snapname);
 });
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 1/2] QemuServer: check snapshot befor rollback

2015-02-11 Thread Wolfgang Link
This checks whether it is possible to roll back a snapshot before the VM
is shut down and locked.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm |   18 ++
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 5d7f9d7..6df375e 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -5550,13 +5550,23 @@ sub snapshot_rollback {
 
 my $storecfg = PVE::Storage::config();
 
-my $updatefn = sub {
+my $conf = load_config($vmid);
 
-   my $conf = load_config($vmid);
+die you can't rollback if vm is a template\n if is_template($conf);
 
-   die you can't rollback if vm is a template\n if is_template($conf);
+$snap = $conf-{snapshots}-{$snapname};
 
-   $snap = $conf-{snapshots}-{$snapname};
+foreach_drive($snap, sub {
+   my ($ds, $drive) = @_;
+
+   return if drive_is_cdrom($drive);
+
+   my $volid = $drive-{file};
+
+   PVE::Storage::volume_rollback_is_possibel($storecfg, $volid, $snapname);
+});
+
+my $updatefn = sub {
 
die snapshot '$snapname' does not exist\n if !defined($snap);
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH] zfs: auto import after reboot

2015-02-09 Thread Wolfgang Link


On 02/09/2015 12:07 PM, Dietmar Maurer wrote:

On 02/03/2015 12:59 PM, Wolfgang Link wrote:

@@ -419,6 +419,14 @@ sub volume_snapshot_rollback {
sub activate_storage {
  my ($class, $storeid, $scfg, $cache) = @_;
+
+my @param = ('-o', 'name', '-H');
+
+my $text = zfs_request($class, $scfg, undef, 'zpool_list', @param);
+
+if ($text !~ $scfg-{pool}) {
+run_command(zpool import -d /dev/disk/by-id/ -a);
+}
  return 1;
  }


activate_storage is inherted by ZFSPlugin, so we cannot do that! So 
please

overwrite it inside ZFSPlugin.pm.



Further down in this patch I override it in ZFSPlugin.pm.

Is this wrong?

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] add vmname to vm remove msg

2015-02-12 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/openvz/Config.js |4 +++-
 www/manager/qemu/Config.js   |4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/www/manager/openvz/Config.js b/www/manager/openvz/Config.js
index 8589f22..17f40b5 100644
--- a/www/manager/openvz/Config.js
+++ b/www/manager/openvz/Config.js
@@ -15,6 +15,8 @@ Ext.define('PVE.openvz.Config', {
throw no VM ID specified;
}
 
+   var vmname = me.pveSelNode.data.name;
+
var caps = Ext.state.Manager.get('GuiCap');
 
var base_url = '/nodes/' + nodename + '/openvz/' + vmid;
@@ -88,7 +90,7 @@ Ext.define('PVE.openvz.Config', {
text: gettext('Remove'),
disabled: !caps.vms['VM.Allocate'],
dangerous: true,
-   confirmMsg: Ext.String.format(gettext('Are you sure you want to 
remove VM {0}? This will permanently erase all VM data.'), vmid),
+   confirmMsg: Ext.String.format(gettext('Are you sure you want to 
remove VM {0} ('+vmname+')? This will permanently erase all VM data.'), vmid),
handler: function() {
PVE.Utils.API2Request({
url: base_url,
diff --git a/www/manager/qemu/Config.js b/www/manager/qemu/Config.js
index 1d1c8a2..ed2dd24 100644
--- a/www/manager/qemu/Config.js
+++ b/www/manager/qemu/Config.js
@@ -15,6 +15,8 @@ Ext.define('PVE.qemu.Config', {
throw no VM ID specified;
}
 
+   var vmname = me.pveSelNode.data.name;
+
var caps = Ext.state.Manager.get('GuiCap');
 
var base_url = '/nodes/' + nodename + /qemu/ + vmid;
@@ -97,7 +99,7 @@ Ext.define('PVE.qemu.Config', {
text: gettext('Remove'),
disabled: !caps.vms['VM.Allocate'],
dangerous: true,
-   confirmMsg: Ext.String.format(gettext('Are you sure you want to 
remove VM {0}? This will permanently erase all VM data.'), vmid),
+   confirmMsg: Ext.String.format(gettext('Are you sure you want to 
remove VM {0} ('+vmname+')? This will permanently erase all VM data.'), vmid),
handler: function() {
PVE.Utils.API2Request({
url: base_url,
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] ZFS tool for asynchronous replica

2015-02-15 Thread Wolfgang Link

Do you use this on Linux or on BSD/SunOS?

On 02/13/2015 01:44 PM, Pablo Ruiz wrote:
I am using this same one in production on a few machines w/o an issue.
Also, around Google you will find a port to bash instead of ksh
(which in fact requires changing no more than 10 lines).


Sometimes when a piece of software has no recent releases, it does not mean it is
not maintained, but that it requires no more changes to do its duty
as expected. ;)


On Fri, Feb 13, 2015 at 10:12 AM, Wolfgang Link w.l...@proxmox.com wrote:


Does anybody know an open source tool for asynchronous replication with ZFS?
I found this one, but it seems to be no longer active.
http://www.bolthole.com/solaris/zrep/


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel




___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 2/2] change var name to prevent side effects.

2015-02-16 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 5cbd1b2..b9b3bf0 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -179,16 +179,18 @@ sub alloc_image {
 die illegal name '$name' - sould be 'vm-$vmid-*'\n
 if $name  $name !~ m/^vm-$vmid-/;
 
-$name = $class-zfs_find_free_diskname($storeid, $scfg, $vmid) if !$name;
-
-$class-zfs_create_zvol($scfg, $name, $size);
+my $volname = $name;
+ 
+$volname = $class-zfs_find_free_diskname($storeid, $scfg, $vmid) if 
!$volname;
 
-my $devname = /dev/zvol/$scfg-{pool}/$name;
+$class-zfs_create_zvol($scfg, $volname, $size);
+
+my $devname = /dev/zvol/$scfg-{pool}/$volname;
 
 run_command(udevadm trigger --subsystem-match block);
 system(udevadm settle --timeout 10 --exit-if-exists=${devname});
 
-return $name;
+return $volname;
 }
 
 sub free_image {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 1/2] Bug-fix alloc_image in ZFSPlugin.pm

2015-02-16 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPlugin.pm |6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage/ZFSPlugin.pm b/PVE/Storage/ZFSPlugin.pm
index f020985..e7acfb8 100644
--- a/PVE/Storage/ZFSPlugin.pm
+++ b/PVE/Storage/ZFSPlugin.pm
@@ -270,9 +270,11 @@ sub alloc_image {
 die illegal name '$name' - sould be 'vm-$vmid-*'\n
 if $name  $name !~ m/^vm-$vmid-/;
 
-my $volname = $class-zfs_find_free_diskname($storeid, $scfg, $vmid) if 
!$name;
+my $volname = $name;
+
+$volname = $class-zfs_find_free_diskname($storeid, $scfg, $vmid) if 
!$volname;
 
-$class-zfs_create_zvol($scfg, $name, $size);
+$class-zfs_create_zvol($scfg, $volname, $size);
  
 my $guid = $class-zfs_create_lu($scfg, $volname);
 $class-zfs_add_lun_mapping_entry($scfg, $volname, $guid);
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] prohibit ZFSPool multi-node assignment

2015-02-16 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/storage/ZFSPoolEdit.js |6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/www/manager/storage/ZFSPoolEdit.js 
b/www/manager/storage/ZFSPoolEdit.js
index b0a17b2..f4baf75 100644
--- a/www/manager/storage/ZFSPoolEdit.js
+++ b/www/manager/storage/ZFSPoolEdit.js
@@ -113,10 +113,8 @@ Ext.define('PVE.storage.ZFSPoolInputPanel', {
xtype: 'PVE.form.NodeSelector',
name: 'nodes',
fieldLabel: gettext('Nodes'),
-   emptyText: gettext('All') + ' (' +
-   gettext('No restrictions') +')',
-   multiSelect: true,
-   autoSelect: false
+   multiSelect: false,
+   autoSelect: true
});
}
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V3] Bug Fix 602

2015-03-05 Thread Wolfgang Link
Now zfs will wait up to 5 seconds if the error message is 'dataset is busy'.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   23 +--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 5cbd1b2..999e43e 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -166,7 +166,7 @@ sub zfs_request {
 $msg .= $line\n;
 };
 
-run_command($cmd, outfunc = $output, timeout = $timeout);
+run_command($cmd, errmsg = ERROR, outfunc = $output, timeout = 
$timeout);
 
 return $msg;
 }
@@ -291,7 +291,26 @@ sub zfs_create_zvol {
 sub zfs_delete_zvol {
 my ($class, $scfg, $zvol) = @_;
 
-$class-zfs_request($scfg, undef, 'destroy', '-r', $scfg-{pool}/$zvol);
+my $ret;
+eval {$ret = $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol);};
+$ret = $@ if $@;
+
+if ($ret =~ m/^ERROR:(.*)/) {
+
+   if ($ret =~ m/.*: dataset is busy.*/){
+
+   for(my $i = 0; $ret  $i  5; $i++){
+   sleep(1);
+   
+   eval {$ret =  $class-zfs_request($scfg, undef, 'destroy', 
'-r', $scfg-{pool}/$zvol);};
+   $ret = $@ if $@;
+   }
+
+   die $ret if $ret;
+   } else {
+   die $ret;
+   }
+}
 }
 
 sub zfs_list_zvol {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] fix rpcinfo path

2015-03-03 Thread Wolfgang Link
The rpcbind package in Jessie changed the path of rpcinfo
from /usr/bin/rpcinfo to /usr/sbin/rpcinfo.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/NFSPlugin.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/Storage/NFSPlugin.pm b/PVE/Storage/NFSPlugin.pm
index 9ba68a7..79a7730 100644
--- a/PVE/Storage/NFSPlugin.pm
+++ b/PVE/Storage/NFSPlugin.pm
@@ -168,7 +168,7 @@ sub check_connection {
 my $server = $scfg-{server};
 
 # test connection to portmapper
-my $cmd = ['/usr/bin/rpcinfo', '-p', $server];
+my $cmd = ['/usr/sbin/rpcinfo', '-p', $server];
 
 eval {
run_command($cmd, timeout = 2, outfunc = sub {}, errfunc = sub {});
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V2] Bug Fix 602

2015-03-03 Thread Wolfgang Link
Now zfs will wait up to 5 seconds if the error message is 'dataset is busy'.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   28 ++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 5cbd1b2..0f666b0 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -166,7 +166,16 @@ sub zfs_request {
 $msg .= $line\n;
 };
 
-run_command($cmd, outfunc = $output, timeout = $timeout);
+if ($method eq destroy) {
+
+   eval {run_command($cmd, errmsg = 1, outfunc = $output, timeout = 
$timeout);};
+   
+   if(my $err = $@) { 
+   return ERROR $err; 
+   }
+} else {
+   run_command($cmd, outfunc = $output, timeout = $timeout); 
+}
 
 return $msg;
 }
@@ -291,7 +300,22 @@ sub zfs_create_zvol {
 sub zfs_delete_zvol {
 my ($class, $scfg, $zvol) = @_;
 
-$class-zfs_request($scfg, undef, 'destroy', '-r', $scfg-{pool}/$zvol);
+my $ret = $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol);
+
+if ($ret =~ m/^ERROR (.*)/) {
+
+   if ($ret =~ m/.*dataset is busy.*/){
+
+   for(my $i = 0; $ret  $i  5; $i++){
+   sleep(1);
+   $ret =  $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol);
+   }
+
+   die $ret if $ret;
+   } else {
+   die $ret;
+   }
+}
 }
 
 sub zfs_list_zvol {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Bug Fix 602

2015-02-26 Thread Wolfgang Link
Now zfs will wait up to 5 seconds if the error message is 'dataset is busy'.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   27 ++-
 1 file changed, 26 insertions(+), 1 deletion(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 5cbd1b2..9609dd1 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -291,7 +291,32 @@ sub zfs_create_zvol {
 sub zfs_delete_zvol {
 my ($class, $scfg, $zvol) = @_;
 
-$class-zfs_request($scfg, undef, 'destroy', '-r', $scfg-{pool}/$zvol);
+open(STDERR, /tmp/zfslog.txt);
+eval {  $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol)};
+close STDERR;
+if ($@) {
+   my $error = $@;
+   open(my $fh, ,/tmp/zfslog.txt);
+
+   $/ = undef;
+   my $msg = $fh;
+   close $fh; 
+
+   unlink /tmp/zfslog.txt;
+
+   if ($msg =~ m/dataset is busy/) {
+
+   for(my $i = 0; $error  $i  5; $i++){
+   sleep(1);
+   eval {  $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol)};
+   $error = $@;
+   }
+   die print $msg.$error if $error;
+   } else {
+   die $msg.$error;
+   }
+}
+unlink /tmp/zfslog.txt;
 }
 
 sub zfs_list_zvol {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V4] Bug Fix 602

2015-03-05 Thread Wolfgang Link
Now zfs will wait up to 5 seconds if the error message is 'dataset is busy'.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   18 --
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 5cbd1b2..d8721a4 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -166,7 +166,7 @@ sub zfs_request {
 $msg .= $line\n;
 };
 
-run_command($cmd, outfunc = $output, timeout = $timeout);
+run_command($cmd, errmsg = ERROR, outfunc = $output, timeout = 
$timeout);
 
 return $msg;
 }
@@ -291,7 +291,21 @@ sub zfs_create_zvol {
 sub zfs_delete_zvol {
 my ($class, $scfg, $zvol) = @_;
 
-$class-zfs_request($scfg, undef, 'destroy', '-r', $scfg-{pool}/$zvol);
+my $ret = ;
+
+for(my $i = 0; $ret  $i  6; $i++){
+
+   eval {$ret =  $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol);};
+   if ($ret = $@) {
+   if ($ret =~ m/^ERROR:(.*): dataset is busy.*/) {
+   sleep(1);
+   } else {
+   die $ret;
+   }
+   }
+}
+
+die $ret if $ret;
 }
 
 sub zfs_list_zvol {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V5] Bug Fix #602: now zfs will wait up to 5 seconds if the error message is 'dataset is busy'

2015-03-06 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   17 +++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 5cbd1b2..30efe58 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -166,7 +166,7 @@ sub zfs_request {
 $msg .= $line\n;
 };
 
-run_command($cmd, outfunc = $output, timeout = $timeout);
+run_command($cmd, errmsg = ZFS ERROR, outfunc = $output, timeout = 
$timeout);
 
 return $msg;
 }
@@ -291,7 +291,20 @@ sub zfs_create_zvol {
 sub zfs_delete_zvol {
 my ($class, $scfg, $zvol) = @_;
 
-$class-zfs_request($scfg, undef, 'destroy', '-r', $scfg-{pool}/$zvol);
+for (my $i = 0; $i  5; $i++) {
+
+   eval { $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol); };
+   if (my $err = $@) {
+   if ($err =~ m/^ZFS ERROR:(.*): dataset is busy.*/) {
+   sleep(1);
+   die $err if $i == 4; 
+   } else {
+   die $err;
+   }
+   } else {
+   last;
+   }
+}
 }
 
 sub zfs_list_zvol {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V6] Bug Fix #602: now zfs will wait up to 5 seconds if the error message is 'dataset is busy'

2015-03-06 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   17 +++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 5cbd1b2..30efe58 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -166,7 +166,7 @@ sub zfs_request {
 $msg .= $line\n;
 };
 
-run_command($cmd, outfunc = $output, timeout = $timeout);
+run_command($cmd, errmsg = ZFS ERROR, outfunc = $output, timeout = 
$timeout);
 
 return $msg;
 }
@@ -291,7 +291,20 @@ sub zfs_create_zvol {
 sub zfs_delete_zvol {
 my ($class, $scfg, $zvol) = @_;
 
-$class-zfs_request($scfg, undef, 'destroy', '-r', $scfg-{pool}/$zvol);
+for (my $i = 0; $i  5; $i++) {
+
+   eval { $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol); };
+   if (my $err = $@) {
+   if ($err =~ m/^ZFS ERROR:(.*): dataset is busy.*/) {
+   sleep(1);
+   die $err if $i == 4; 
+   } else {
+   die $err;
+   }
+   } else {
+   last;
+   }
+}
 }
 
 sub zfs_list_zvol {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V6] Bug Fix #602: now zfs will wait up to 5 seconds if the error message is 'dataset is busy'

2015-03-06 Thread Wolfgang Link
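
With run_command's errmsg prefix, the retry loop expects a failed destroy to
die with something along the lines of (illustrative values):

  zfs error: cannot destroy 'rpool/vm-100-disk-1': dataset is busy

which is what the m/^zfs error:(.*): dataset is busy.*/ match below relies on.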

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm |   20 ++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 5cbd1b2..d187e23 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -166,7 +166,7 @@ sub zfs_request {
 $msg .= $line\n;
 };
 
-run_command($cmd, outfunc = $output, timeout = $timeout);
+run_command($cmd, errmsg = zfs error, outfunc = $output, timeout = 
$timeout);
 
 return $msg;
 }
@@ -291,7 +291,23 @@ sub zfs_create_zvol {
 sub zfs_delete_zvol {
 my ($class, $scfg, $zvol) = @_;
 
-$class-zfs_request($scfg, undef, 'destroy', '-r', $scfg-{pool}/$zvol);
+my $err;
+
+for (my $i = 0; $i  6; $i++) {
+
+   eval { $class-zfs_request($scfg, undef, 'destroy', '-r', 
$scfg-{pool}/$zvol); };
+   if ($err = $@) {
+   if ($err =~ m/^zfs error:(.*): dataset is busy.*/) {
+   sleep(1);
+   } else {
+   die $err;
+   }
+   } else {
+   last;
+   }
+}
+
+die $err if $err;
 }
 
 sub zfs_list_zvol {
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] implement offline migration on zfs

2015-04-24 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage.pm |   29 +
 1 file changed, 29 insertions(+)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index e46bc77..6fe0953 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -490,6 +490,35 @@ sub storage_migrate {
} else {
die $errstr - target type '$tcfg-{type}' not implemented\n;
}
+} elsif ($scfg-{type} eq 'zfspool') {
+
+   if($tcfg-{type} eq 'zfspool'){
+
+   die $errstr - pool on target has not same name as source!if 
$tcfg-{pool} ne $scfg-{pool};
+
+   my $zfspath = $volid ;
+   $zfspath =~ /(vm\-\d+\-disk\-\d+)/;
+   $zfspath = $scfg-{pool}\/$1;
+
+   my $snap = zfs snapshot $zfspath\@mig;
+
+   my $send = zfs send -v $zfspath\@mig \| ssh root\@$target_host zfs 
recv $zfspath;
+
+   my $destroy_target = ssh root\@$target_host zfs destroy 
$zfspath\@mig;
+   run_command($snap);
+   eval{
+   run_command($send);
+   };
+   my $err;
+   if ($err = $@){
+   run_command(zfs destroy $zfspath\@mig);
+   die $err;
+   } 
+   run_command($destroy_target);
+
+   } else {
+   die $errstr - target type $tcfg-{type} is not valid\n;
+   }
 } else {
die $errstr - source type '$scfg-{type}' not implemented\n;
 }
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] implement offline migration on zfs

2015-04-24 Thread Wolfgang Link
This implements offline migration on ZFS, using ssh and zfs send/receive.
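
Roughly, the command sequence run by storage_migrate is (volume name
invented for illustration):

  zfs snapshot rpool/vm-100-disk-1@__migration__
  zfs send -v rpool/vm-100-disk-1@__migration__ | ssh root@target zfs recv rpool/vm-100-disk-1
  ssh root@target zfs destroy rpool/vm-100-disk-1@__migration__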

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] implement offline migration on zfs

2015-04-24 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuMigrate.pm |4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index af04ea5..f6eb3f5 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -248,10 +248,10 @@ sub sync_disks {
my $scfg =  PVE::Storage::storage_config($self-{storecfg}, $sid);
 
die can't migrate '$volid' - storage type '$scfg-{type}' not 
supported\n
-   if $scfg-{type} ne 'dir';
+   if (!($scfg-{type} eq 'dir' || $scfg-{type} eq 'zfspool')  
(!$sharedvm));
 
# if file, check if a backing file exist
-   if (($scfg-{type} eq 'dir')  (!$sharedvm)) {
+   if (!($scfg-{type} eq 'dir' || $scfg-{type} eq 'zfspool')  
(!$sharedvm)) {
my (undef, undef, undef, $parent) = 
PVE::Storage::volume_size_info($self-{storecfg}, $volid, 1);
die can't migrate '$volid' as it's a clone of '$parent' if 
$parent;
}
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] remove running from Storage and check it in QemuServer

2015-04-30 Thread Wolfgang Link
It is better to check whether a VM is running in QemuServer than in Storage.
For the Storage there is no difference whether it is running or not.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage.pm   | 4 ++--
 PVE/Storage/ISCSIDirectPlugin.pm | 2 +-
 PVE/Storage/LVMPlugin.pm | 2 +-
 PVE/Storage/Plugin.pm| 4 +---
 PVE/Storage/RBDPlugin.pm | 4 +---
 PVE/Storage/SheepdogPlugin.pm| 4 +---
 PVE/Storage/ZFSPoolPlugin.pm | 2 +-
 7 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index b542ee6..92c7d14 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -162,13 +162,13 @@ sub volume_rollback_is_possible {
 }
 
 sub volume_snapshot {
-my ($cfg, $volid, $snap, $running) = @_;
+my ($cfg, $volid, $snap) = @_;
 
 my ($storeid, $volname) = parse_volume_id($volid, 1);
 if ($storeid) {
 my $scfg = storage_config($cfg, $storeid);
 my $plugin = PVE::Storage::Plugin-lookup($scfg-{type});
-return $plugin-volume_snapshot($scfg, $storeid, $volname, $snap, 
$running);
+return $plugin-volume_snapshot($scfg, $storeid, $volname, $snap);
 } elsif ($volid =~ m|^(/.+)$|  -e $volid) {
 die snapshot file/device '$volid' is not possible\n;
 } else {
diff --git a/PVE/Storage/ISCSIDirectPlugin.pm b/PVE/Storage/ISCSIDirectPlugin.pm
index c957ade..763c482 100644
--- a/PVE/Storage/ISCSIDirectPlugin.pm
+++ b/PVE/Storage/ISCSIDirectPlugin.pm
@@ -205,7 +205,7 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 die volume snapshot is not possible on iscsi device;
 }
 
diff --git a/PVE/Storage/LVMPlugin.pm b/PVE/Storage/LVMPlugin.pm
index 1688bb5..19eb78c 100644
--- a/PVE/Storage/LVMPlugin.pm
+++ b/PVE/Storage/LVMPlugin.pm
@@ -456,7 +456,7 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
 die lvm snapshot is not implemented;
 }
diff --git a/PVE/Storage/Plugin.pm b/PVE/Storage/Plugin.pm
index 5b72b07..f119068 100644
--- a/PVE/Storage/Plugin.pm
+++ b/PVE/Storage/Plugin.pm
@@ -641,12 +641,10 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
 die can't snapshot this image format\n if $volname !~ m/\.(qcow2|qed)$/;
 
-return 1 if $running;
-
 my $path = $class-filesystem_path($scfg, $volname);
 
 my $cmd = ['/usr/bin/qemu-img', 'snapshot','-c', $snap, $path];
diff --git a/PVE/Storage/RBDPlugin.pm b/PVE/Storage/RBDPlugin.pm
index 2c45a68..878fa16 100644
--- a/PVE/Storage/RBDPlugin.pm
+++ b/PVE/Storage/RBDPlugin.pm
@@ -510,9 +510,7 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
-
-return 1 if $running;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
 my ($vtype, $name, $vmid) = $class-parse_volname($volname);
 
diff --git a/PVE/Storage/SheepdogPlugin.pm b/PVE/Storage/SheepdogPlugin.pm
index 3e2c126..e358f9e 100644
--- a/PVE/Storage/SheepdogPlugin.pm
+++ b/PVE/Storage/SheepdogPlugin.pm
@@ -389,9 +389,7 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
-
-return 1 if $running;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
 my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
$class-parse_volname($volname);
diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 39fc348..1064869 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -415,7 +415,7 @@ sub volume_size_info {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
 $class-zfs_request($scfg, undef, 'snapshot', 
$scfg-{pool}/$volname\@$snap);
 }
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] Snapshot: move check running from Storage to QemuServer

2015-04-30 Thread Wolfgang Link
Hi all,

I think the check whether we can take a snapshot should not be done in the Storage layer.
IMHO it should be done in QemuServer.

Is there any reason to keep it where it is?


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] remove running from Storage and check it in QemuServer

2015-04-30 Thread Wolfgang Link
It is better to check whether a VM is running in QemuServer than in Storage.
For the Storage there is no difference whether it is running or not.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm | 21 -
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 708b208..39aff42 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -31,6 +31,8 @@ use PVE::QMPClient;
 use PVE::RPCEnvironment;
 use Time::HiRes qw(gettimeofday);
 
+my $snap_storage = {zfspool = 1, rbd = 1, zfs = 1, sheepdog = 1};
+
 my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
 
 # Note about locking: we use flock on the config file protect
@@ -3777,7 +3779,7 @@ sub qemu_volume_snapshot {
 
 my $running = check_running($vmid);
 
-return if !PVE::Storage::volume_snapshot($storecfg, $volid, $snap, 
$running);
+PVE::Storage::volume_snapshot($storecfg, $volid, $snap) if 
storage_support_snapshop($volid, $storecfg);
 
 return if !$running;
 
@@ -5772,6 +5774,23 @@ my $savevm_wait = sub {
 }
 };
 
+sub storage_support_snapshot {
+my ($volid, $storecfg) = @_;
+
+my $storage_name = PVE::Storage::parse_volume_id($volid);
+
+my $ret = undef;  
+if ($snap_storage-{$storecfg-{ids}-{$storage_name}-{type}} ){
+   $ret = 1;
+}
+
+if ($volid =~ m/\.(qcow2|qed)$/){
+   $ret = 1;
+}
+
+return $ret;
+}
+
 sub snapshot_create {
 my ($vmid, $snapname, $save_vmstate, $comment) = @_;
 
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V2] implement offline migration on zfs

2015-04-27 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage.pm |   29 +
 1 file changed, 29 insertions(+)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index e46bc77..4e15df8 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -490,6 +490,35 @@ sub storage_migrate {
} else {
die $errstr - target type '$tcfg-{type}' not implemented\n;
}
+} elsif ($scfg-{type} eq 'zfspool') {
+
+   if($tcfg-{type} eq 'zfspool'){
+
+   die $errstr - pool on target has not same name as source!if 
$tcfg-{pool} ne $scfg-{pool};
+
+   my ( undef, $volname) = parse_volname($cfg, $volid);
+
+   my $zfspath = $scfg-{pool}\/$volname;
+
+   my $snap = zfs snapshot $zfspath\@__migration__;
+
+   my $send = zfs send -v $zfspath\@__migration__ \| ssh 
root\@$target_host zfs recv $zfspath;
+
+   my $destroy_target = ssh root\@$target_host zfs destroy 
$zfspath\@__migration__;
+   run_command($snap);
+   eval{
+   run_command($send);
+   };
+   my $err;
+   if ($err = $@){
+   run_command(zfs destroy $zfspath\@__migration__);
+   die $err;
+   } 
+   run_command($destroy_target);
+
+   } else {
+   die $errstr - target type $tcfg-{type} is not valid\n;
+   }
 } else {
die $errstr - source type '$scfg-{type}' not implemented\n;
 }
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] Following tests on PvE 4.0 Jessie

2015-05-04 Thread Wolfgang Link

Hi,

No, the procedure to create a cluster is the same and should work.

The second one looks like a bug. I will have a look at it.


On 05/04/2015 10:04 AM, Moula BADJI wrote:

hi,

1 - I cannot create the cluster. Perhaps the procedure has changed.

root@pve-ceph4:/home/moula# pvecm status
Quorum information
--
Date: Mon May  4 07:52:31 2015
Quorum provider:  corosync_votequorum
Nodes:1
Node ID:  0x0001
Ring ID:  12
Quorate:  Yes

Votequorum information
--
Expected votes:   1
Highest expected: 1
Total votes:  1
Quorum:   1
Flags:Quorate

Membership information
--
 Nodeid  Votes Name
0x0001  1 192.168.100.40 (local)


and at the another node :

root@pve-ceph6:/home/moula# pvecm add 192.168.100.40
root@192.168.100.40's password:
root@192.168.100.40's password:
root@192.168.100.40's password:
unable to copy ssh ID
root@pve-ceph6:/home/moula# pvecm nodes
Cannot initialize CMAP service

2- Upload isos in the directory data:

With the Btrfs format I can upload up to 2 GB and no more; on another node
with the XFS format, no more than one megabyte.


Subsequently added later.

thank's

Moula from Kabylie.

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel




___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] Following tests on PvE 4.0 Jessie

2015-05-04 Thread Wolfgang Link



On 05/04/2015 02:53 PM, Eneko Lacunza wrote:

Hi Moula,

On 04/05/15 14:29, Moula BADJI wrote:

On 04/05/2015 10:40, Alexandre DERUMIER wrote:

root@pve-ceph6:/home/moula# pvecm add 192.168.100.40
root@192.168.100.40's password:
root@192.168.100.40's password:
root@192.168.100.40's password:
unable to copy ssh ID

Something seems to be wrong here.

Are you sure you have entered the password correctly?



I remade the same installation. I have the same problems.

Alexandre, my password is not wrong.

You're being asked for the password 3 times, and then the key copy fails. If
you're really sure that the password is OK (you must input the
password for the 192.168.100.40 host), then there's some kind of
problem with the SSH server on that host (I'd say that's unrelated to Proxmox?)


Cheers
Eneko


Do you allow root login in sshd_config?
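
(For reference, the relevant directive would be something like

  PermitRootLogin yes

in /etc/ssh/sshd_config on the node being added to, followed by a restart of
the ssh service.)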

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH_V2] remove running from Storage and check it in QemuServer

2015-05-06 Thread Wolfgang Link
It is better to check whether a VM is running in QemuServer than in Storage.
For the Storage layer there is no difference whether it is running or not.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm | 29 +++--
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 708b208..9a4e2ee 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -31,6 +31,8 @@ use PVE::QMPClient;
 use PVE::RPCEnvironment;
 use Time::HiRes qw(gettimeofday);
 
+my $qemu_snap_storage = {rbd => 1, sheepdog => 1};
+
 my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
 
 # Note about locking: we use flock on the config file protect
@@ -3777,12 +3779,11 @@ sub qemu_volume_snapshot {
 
 my $running = check_running($vmid);
 
-    return if !PVE::Storage::volume_snapshot($storecfg, $volid, $snap, $running);
-
-    return if !$running;
-
-    vm_mon_cmd($vmid, "snapshot-drive", device => $deviceid, name => $snap);
-
+    if ($running && do_snapshots_with_qemu($storecfg, $volid)){
+	vm_mon_cmd($vmid, "snapshot-drive", device => $deviceid, name => $snap);
+    } else {
+	PVE::Storage::volume_snapshot($storecfg, $volid, $snap);
+    }
 }
 
 sub qemu_volume_snapshot_delete {
@@ -5772,6 +5773,22 @@ my $savevm_wait = sub {
 }
 };
 
+sub do_snapshots_with_qemu {
+    my ($storecfg, $volid) = @_;
+
+    my $storage_name = PVE::Storage::parse_volume_id($volid);
+
+    if ($qemu_snap_storage->{$storecfg->{ids}->{$storage_name}->{type}} ){
+	return 1;
+    }
+
+    if ($volid =~ m/\.(qcow2|qed)$/){
+	return 1;
+    }
+
+    return undef;
+}
+
 sub snapshot_create {
 my ($vmid, $snapname, $save_vmstate, $comment) = @_;
 
-- 
2.1.4
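
To make the new decision path easier to follow, here is a hedged sketch of how the helper classifies volumes; the volume IDs and storage names below are made up, only the storage types and file extensions matter:

    # returns 1     -> snapshot via the QEMU monitor ("snapshot-drive")
    # returns undef -> snapshot via the storage plugin
    do_snapshots_with_qemu($storecfg, 'cephstore:vm-100-disk-1');        # rbd storage type -> 1
    do_snapshots_with_qemu($storecfg, 'local:100/vm-100-disk-1.qcow2');  # qcow2 image      -> 1
    do_snapshots_with_qemu($storecfg, 'lvmstore:vm-100-disk-1');         # anything else    -> undef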


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] remove running from Storage and check it in QemuServer

2015-05-06 Thread Wolfgang Link
It is better to check whether a VM is running in QemuServer than in Storage.
For the Storage layer there is no difference whether it is running or not.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage.pm   | 4 ++--
 PVE/Storage/ISCSIDirectPlugin.pm | 2 +-
 PVE/Storage/LVMPlugin.pm | 2 +-
 PVE/Storage/Plugin.pm| 4 +---
 PVE/Storage/RBDPlugin.pm | 4 +---
 PVE/Storage/SheepdogPlugin.pm| 4 +---
 PVE/Storage/ZFSPoolPlugin.pm | 2 +-
 7 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index b542ee6..92c7d14 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -162,13 +162,13 @@ sub volume_rollback_is_possible {
 }
 
 sub volume_snapshot {
-my ($cfg, $volid, $snap, $running) = @_;
+my ($cfg, $volid, $snap) = @_;
 
 my ($storeid, $volname) = parse_volume_id($volid, 1);
 if ($storeid) {
 my $scfg = storage_config($cfg, $storeid);
 	my $plugin = PVE::Storage::Plugin->lookup($scfg->{type});
-	return $plugin->volume_snapshot($scfg, $storeid, $volname, $snap, $running);
+	return $plugin->volume_snapshot($scfg, $storeid, $volname, $snap);
     } elsif ($volid =~ m|^(/.+)$| && -e $volid) {
 	die "snapshot file/device '$volid' is not possible\n";
 } else {
diff --git a/PVE/Storage/ISCSIDirectPlugin.pm b/PVE/Storage/ISCSIDirectPlugin.pm
index c957ade..763c482 100644
--- a/PVE/Storage/ISCSIDirectPlugin.pm
+++ b/PVE/Storage/ISCSIDirectPlugin.pm
@@ -205,7 +205,7 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
     die "volume snapshot is not possible on iscsi device";
 }
 
diff --git a/PVE/Storage/LVMPlugin.pm b/PVE/Storage/LVMPlugin.pm
index 1688bb5..19eb78c 100644
--- a/PVE/Storage/LVMPlugin.pm
+++ b/PVE/Storage/LVMPlugin.pm
@@ -456,7 +456,7 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
     die "lvm snapshot is not implemented";
 }
diff --git a/PVE/Storage/Plugin.pm b/PVE/Storage/Plugin.pm
index 5b72b07..f119068 100644
--- a/PVE/Storage/Plugin.pm
+++ b/PVE/Storage/Plugin.pm
@@ -641,12 +641,10 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
     die "can't snapshot this image format\n" if $volname !~ m/\.(qcow2|qed)$/;
 
-return 1 if $running;
-
     my $path = $class->filesystem_path($scfg, $volname);
 
 my $cmd = ['/usr/bin/qemu-img', 'snapshot','-c', $snap, $path];
diff --git a/PVE/Storage/RBDPlugin.pm b/PVE/Storage/RBDPlugin.pm
index 2c45a68..878fa16 100644
--- a/PVE/Storage/RBDPlugin.pm
+++ b/PVE/Storage/RBDPlugin.pm
@@ -510,9 +510,7 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
-
-return 1 if $running;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
     my ($vtype, $name, $vmid) = $class->parse_volname($volname);
 
diff --git a/PVE/Storage/SheepdogPlugin.pm b/PVE/Storage/SheepdogPlugin.pm
index 3e2c126..e358f9e 100644
--- a/PVE/Storage/SheepdogPlugin.pm
+++ b/PVE/Storage/SheepdogPlugin.pm
@@ -389,9 +389,7 @@ sub volume_resize {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
-
-return 1 if $running;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
 my ($vtype, $name, $vmid, $basename, $basevmid, $isBase) =
 	$class->parse_volname($volname);
diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index 39fc348..1064869 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -415,7 +415,7 @@ sub volume_size_info {
 }
 
 sub volume_snapshot {
-my ($class, $scfg, $storeid, $volname, $snap, $running) = @_;
+my ($class, $scfg, $storeid, $volname, $snap) = @_;
 
     $class->zfs_request($scfg, undef, 'snapshot', "$scfg->{pool}/$volname\@$snap");
 }
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] remove running from Storage and check it in QemuServer

2015-05-06 Thread Wolfgang Link
It is better to check whether a VM is running in QemuServer than in Storage.
For the Storage layer there is no difference whether it is running or not.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuServer.pm | 29 +++--
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 708b208..9a4e2ee 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -31,6 +31,8 @@ use PVE::QMPClient;
 use PVE::RPCEnvironment;
 use Time::HiRes qw(gettimeofday);
 
+my $qemu_snap_storage = {rbd => 1, sheepdog => 1};
+
 my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
 
 # Note about locking: we use flock on the config file protect
@@ -3777,12 +3779,11 @@ sub qemu_volume_snapshot {
 
 my $running = check_running($vmid);
 
-    return if !PVE::Storage::volume_snapshot($storecfg, $volid, $snap, $running);
-
-    return if !$running;
-
-    vm_mon_cmd($vmid, "snapshot-drive", device => $deviceid, name => $snap);
-
+    if ($running && do_snapshots_with_qemu($storecfg, $volid)){
+	vm_mon_cmd($vmid, "snapshot-drive", device => $deviceid, name => $snap);
+    } else {
+	PVE::Storage::volume_snapshot($storecfg, $volid, $snap);
+    }
 }
 
 sub qemu_volume_snapshot_delete {
@@ -5772,6 +5773,22 @@ my $savevm_wait = sub {
 }
 };
 
+sub do_snapshots_with_qemu {
+    my ($storecfg, $volid) = @_;
+
+    my $storage_name = PVE::Storage::parse_volume_id($volid);
+
+    if ($qemu_snap_storage->{$storecfg->{ids}->{$storage_name}->{type}} ){
+	return 1;
+    }
+
+    if ($volid =~ m/\.(qcow2|qed)$/){
+	return 1;
+    }
+
+    return undef;
+}
+
 sub snapshot_create {
 my ($vmid, $snapname, $save_vmstate, $comment) = @_;
 
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Bug-fix 617: change command arrangement

2015-04-07 Thread Wolfgang Link
ovs-vsctl del-port flushes the tc settings,
so we have to change the order in which the network is set up.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 pve-bridge |5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/pve-bridge b/pve-bridge
index 1b9801d..a924a7f 100755
--- a/pve-bridge
+++ b/pve-bridge
@@ -32,8 +32,9 @@ die "unable to parse network config '$netid'\n" if !$net;
 
 PVE::Network::tap_create($iface, $net->{bridge});
 
-PVE::Network::tap_rate_limit($iface, $net->{rate}) if $net->{rate};
-
+# if ovs is used on this bridge, all traffic control settings will be flushed
 PVE::Network::tap_plug($iface, $net->{bridge}, $net->{tag}, $net->{firewall});
 
+PVE::Network::tap_rate_limit($iface, $net->{rate}) if $net->{rate};
+
+
 exit 0;
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] update link to proxmox subscription

2015-04-07 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 www/manager/Utils.js |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/manager/Utils.js b/www/manager/Utils.js
index c293b06..228dda1 100644
--- a/www/manager/Utils.js
+++ b/www/manager/Utils.js
@@ -68,7 +68,7 @@ Ext.define('PVE.Utils', { statics: {
'p': gettext('Premium')
 },
 
-    noSubKeyHtml: 'You do not have a valid subscription for this server. Please visit <a target="_blank" href="http://www.proxmox.com/products/proxmox-ve/subscription-service-plans">www.proxmox.com</a> to get a list of available options.',
+    noSubKeyHtml: 'You do not have a valid subscription for this server. Please visit <a target="_blank" href="https://www.proxmox.com/en/proxmox-ve/pricing">www.proxmox.com</a> to get a list of available options.',
 
 kvm_ostypes: {
other: gettext('Other OS types'),
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Bug-Fix: 618 correct typo

2015-04-08 Thread Wolfgang Link

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/QemuMigrate.pm |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index a49cdcc..af04ea5 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -247,7 +247,7 @@ sub sync_disks {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg =  PVE::Storage::storage_config($self-{storecfg}, $sid);
 
-	die "can't migrate '$volid' - storagy type '$scfg->{type}' not supported\n"
+	die "can't migrate '$volid' - storage type '$scfg->{type}' not supported\n"
 	    if $scfg->{type} ne 'dir';
 
# if file, check if a backing file exist
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Extent man description for network settings

2015-06-22 Thread Wolfgang Link
From: w.l...@proxmox.com Wolfgang Link

There was no source except the source code where the user could see the possible
options.

Signed-off-by: w.l...@proxmox.com Wolfgang Link
---
 src/PVE/LXC.pm | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 11788cd..c3842ac 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -596,7 +596,12 @@ for (my $i = 0; $i < $MAX_LXC_NETWORKS; $i++) {
     $confdesc->{"net$i"} = {
 	optional => 1,
 	type => 'string', format => 'pve-lxc-network',
-	description => "Specifies network interfaces for the container.",
+	description => "Specifies network interfaces for the container.\n\n".
+	    "The string should have the following format:\n\n".
+	    "-net[0-9] bridge=<vmbrNumber>[,hwaddr=<MAC>]\n".
+	    "[,mtu=<Number>][,name=<String>][,ip=<IPv4Format/CIDR>]\n".
+	    "[,ip6=<IPv6Format/CIDR>][,gw=<GatewayIPv4>]\n".
+	    "[,gw6=<GatewayIPv6>][,firewall=[1|0]][,tag=<VlanNo>]",
 };
 }
 
-- 
2.1.4
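
A concrete example of a string in this format (all values below are made up; the bridge must exist on the host):

    net0: name=eth0,bridge=vmbr0,hwaddr=36:3B:91:4A:C3:9D,mtu=1500,ip=192.168.1.100/24,gw=192.168.1.1,firewall=1,tag=10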


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Fix ZFSPoolPlugin path for subvol

2015-06-22 Thread Wolfgang Link
Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 PVE/Storage/ZFSPoolPlugin.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index cae598d..6b095f1 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -150,7 +150,7 @@ sub path {
     if ($vtype eq "images") {
 	if ($volname =~ m/^subvol-/) {
 	    # fixme: we currently assume standard mount point?!
-	    $path = "$scfg->{pool}/$volname";
+	    $path = "/$scfg->{pool}/$volname";
 	} else {
 	    $path = "/dev/zvol/$scfg->{pool}/$volname";
 	}
-- 
2.1.4
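
To illustrate what this changes, assume a storage whose pool property is "tank" (names below are examples): a container subvolume is a ZFS filesystem reachable at its mountpoint, while a VM disk is a zvol exposed as a block device, so path() now returns

    subvol-100-disk-1  ->  /tank/subvol-100-disk-1        (was tank/subvol-100-disk-1, a relative path)
    vm-100-disk-1      ->  /dev/zvol/tank/vm-100-disk-1   (unchanged)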


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] correct typo

2015-06-22 Thread Wolfgang Link
From: w.l...@proxmox.com Wolfgang Link

Signed-off-by: w.l...@proxmox.com Wolfgang Link
---
 src/PVE/LXC.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 068b46f..11788cd 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -228,7 +228,7 @@ sub parse_lxc_option {
 
 my $parser = $valid_lxc_keys-{$name};
 
-    die "inavlid key '$name'\n" if !defined($parser);
+    die "invalid key '$name'\n" if !defined($parser);
 
 if ($parser eq '1') {
return $value;  
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] Proxmox VE 4.0 beta1 released!

2015-06-23 Thread Wolfgang Link

There are no official Ceph packages for Jessie at the moment,
but they will come soon.

On 06/23/2015 12:00 PM, Kamil Trzciński wrote:

The pve-test repository is broken: missing for example ceph-common.

On Tue, Jun 23, 2015 at 8:39 AM, Martin Maurer mar...@proxmox.com 
mailto:mar...@proxmox.com wrote:


Hi all,

We are proud to announce the release of the first beta of our
Proxmox VE 4.x family - based on the great Debian Jessie.

Please help us reaching the final release date by testing this
beta and by providing feedback.

A big Thank-you to our active community for all feedback, testing,
bug reporting and patch submissions.

So what's new in Proxmox VE 4.0 beta1?

New Proxmox VE HA Manager for High Availability Clusters

The brand new HA manager dramatically simplifies HA setups – it
works with a few clicks on the GUI, test it! And it eliminates the
need for external fencing devices; watchdog fencing is
automatically configured.

Linux Containers (lxc)

The new container solution for Proxmox VE will be fully integrated
into our frameworks, e.g. this includes also our storage plugins.
And it works with all modern and latest Linux kernels.

DRBD9

We already included the brand new DRBD9 stable release, so it's
ready for testing!

New wiki articles and release notes

http://pve.proxmox.com/wiki/Roadmap#Proxmox_VE_4.0_beta1
http://pve.proxmox.com/wiki/Proxmox_VE_4.x_Cluster
http://pve.proxmox.com/wiki/High_Availability_Cluster_4.x
http://pve.proxmox.com/wiki/Linux_Container
http://pve.proxmox.com/wiki/DRBD9
http://pve.proxmox.com/wiki/Install_Proxmox_VE_on_Debian_Jessie

Download
http://www.proxmox.com/en/downloads
Alternate ISO download:
http://download.proxmox.com/iso/

Bugtracker
https://bugzilla.proxmox.com

FAQ
* All beta1 installations can be updated to 4.0 stable without any
problem
* Currently there is no upgrade path from 3.4 to 4.0 beta1, the
upgrade script will be provided later
* LXC is not yet feature complete, work in progress
* Ceph packages for Jessie are not yet available

-- 
Best Regards,


Martin Maurer
Proxmox VE project leader

mar...@proxmox.com mailto:mar...@proxmox.com
http://www.proxmox.com

___
pve-devel mailing list
pve-devel@pve.proxmox.com mailto:pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel




--
Kamil Trzciński

ayu...@ayufan.eu mailto:ayu...@ayufan.eu
www.ayufan.eu http://www.ayufan.eu


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] Proxmox VE 4.0 beta1 released!

2015-06-23 Thread Wolfgang Link

You can make a backup (use gzip)
and then copy the backup file to /var/lib/vz/template/cache/.
Now you can create a new CT using the backup as a template.
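
A rough command sequence, assuming an existing OpenVZ container 100 on the 3.x node and a 4.0 node called pve4 (IDs, host name and paths are examples):

    # on the 3.x node: back up the OpenVZ container with gzip compression
    vzdump 100 --compress gzip --dumpdir /tmp
    # copy the archive to the 4.0 node so it shows up as a container template
    scp /tmp/vzdump-openvz-100-*.tar.gz root@pve4:/var/lib/vz/template/cache/
    # on the 4.0 node: create a new CT from that template, via the GUI or e.g.
    pct create 101 local:vztmpl/vzdump-openvz-100-<timestamp>.tar.gz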

On 06/23/2015 11:08 AM, Charlie wrote:

Great work!

How are migrations of current OpenVZ containers to LXC? Plug-and-play, I
assume?

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel




___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 1/4] code cleanup and man

2015-06-11 Thread Wolfgang Link
Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 pve-zsync | 54 +-
 1 file changed, 29 insertions(+), 25 deletions(-)

diff --git a/pve-zsync b/pve-zsync
index 3c134ce..cd2a228 100644
--- a/pve-zsync
+++ b/pve-zsync
@@ -362,7 +362,7 @@ sub format_job {
 my ($job, $line) = @_;
     my $text = "";
 
-    if ($job->{state} eq "stoped") {
+    if ($job->{state} eq "stopped") {
 	$text = "#";
 }
 if ($line) {
@@ -508,7 +508,7 @@ sub sync {
my ($source, $dest, $job, $param, $date) = @_;
 
 	($source->{old_snap},$source->{last_snap}) = snapshot_get($source, $dest, $param->{maxsnap}, $param->{name});
-   print Dumper $source, $dest;
+
eval{
snapshot_add($source, $dest, $param-{name}, $date);
 
@@ -888,7 +888,7 @@ sub disable_job {
 my ($param) = @_;
 
 my $job = get_job($param);
-    $job->{state} = "stoped";
+    $job->{state} = "stopped";
 update_state($job);
 update_cron($job);
 }
@@ -1122,65 +1122,65 @@ pve-zsync help cmd [OPTIONS]
 
 pve-zsync create -dest string -source string [OPTIONS]
 
-  Create a sync Job
+Create a sync Job
 
-  -dest  string
+-dest  string
 
the destination target is like [IP]:Pool[/Path]
 
-  -limit integer
+-limit integer
 
max sync speed in kBytes/s, default unlimited
 
-  -maxsnap   string
+-maxsnap   string
 
	how many snapshots will be kept before being erased, default 1
 
-  -name  string
+-name  string
 
name of the sync job, if not set it is default
 
-  -skip  boolean
+-skip  boolean
 
if this flag is set it will skip the first sync
 
-  -sourcestring
+-sourcestring
 
the source can be an VMID or [IP:]ZFSPool[/Path]
 
 pve-zsync destroy -source string [OPTIONS]
 
-  remove a sync Job from the scheduler
+remove a sync Job from the scheduler
 
-  -name  string
+-name  string
 
name of the sync job, if not set it is default
 
-  -sourcestring
+-sourcestring
 
 the source can be an  VMID or [IP:]ZFSPool[/Path]
 
 pve-zsync disable -source string [OPTIONS]
 
-  pause a sync job
+pause a sync job
 
-  -name  string
+-name  string
 
name of the sync job, if not set it is default
 
-  -sourcestring
+-sourcestring
 
 the source can be an  VMID or [IP:]ZFSPool[/Path]
 
 pve-zsync enable -source string [OPTIONS]
 
-  enable a syncjob and reset error
+enable a syncjob and reset error
 
-  -name  string
+-name  string
 
name of the sync job, if not set it is default
 
-  -sourcestring
+-sourcestring
 
 the source can be an  VMID or [IP:]ZFSPool[/Path]
 pve-zsync list
@@ -1203,16 +1203,16 @@ pve-zsync sync -dest string -source string [OPTIONS]
 
max sync speed in kBytes/s, default unlimited
 
- -maxsnap   integer
+-maxsnap   integer
 
	how many snapshots will be kept before being erased, default 1
 
- -name  string
+-name  string
 
name of the sync job, if not set it is default.
	It is only necessary if the scheduler already contains this source.
 
-  -sourcestring
+-sourcestring
 
the source can be an VMID or [IP:]ZFSPool[/Path]
 
@@ -1233,10 +1233,14 @@ add sync job from local VM to remote ZFS Server
 pve-zsync create -source=100 -dest=192.168.1.2:zfspool
 
 =head1 IMPORTANT FILES
+ 
+Cron jobs are stored at /etc/cron.d/pve-zsync
+
+The VM config get copied on the destination machine to  /var/pve-zsync/
+
+The config is stored at /var/pve-zsync/
 
-Where the cron jobs are stored/etc/cron.d/pve-zsync
-Where the VM config get copied on the destination machine /var/pve-zsync/
-Where the config is stored/var/pve-zsync/
+=head1 COPYRIGHT AND DISCLAIMER
 
 Copyright (C) 2007-2015 Proxmox Server Solutions GmbH
 
-- 
2.1.4
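
A slightly fuller usage example of the options documented above (host, pool and job name are made up):

    # keep the last 7 snapshots and give the job a name, so several jobs per source can coexist
    pve-zsync create -source=100 -dest=192.168.1.2:tank/backup -maxsnap=7 -name=nightly
    # pause the job and re-enable it later
    pve-zsync disable -source=100 -name=nightly
    pve-zsync enable -source=100 -name=nightly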


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 4/4] fix incremental sync

2015-06-11 Thread Wolfgang Link
The wrong snapshot was being used as the base of the incremental send.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 pve-zsync | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pve-zsync b/pve-zsync
index b458826..2575527 100644
--- a/pve-zsync
+++ b/pve-zsync
@@ -797,7 +797,7 @@ sub send_image {
     $cmd .= "-v " if $param->{verbose};
 
     if($source->{last_snap} && snapshot_exist($source ,$dest, $param->{method})) {
-	$cmd .= "-i $source->{all}\@$source->{old_snap} $source->{all}\@$source->{new_snap} ";
+	$cmd .= "-i $source->{all}\@$source->{last_snap} $source->{all}\@$source->{new_snap} ";
 } else {
 	$cmd .= "$source->{all}\@$source->{new_snap} ";
 }
-- 
2.1.4
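
In terms of the generated zfs command, this means the incremental stream is now based on the last snapshot that already exists on both sides (last_snap) rather than the oldest local one (old_snap); dataset, host and snapshot names below are placeholders:

    zfs send -i tank/vm-100-disk-1@<last_snap> tank/vm-100-disk-1@<new_snap> | ssh root@192.168.1.2 zfs recv tank/vm-100-disk-1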


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] pve-zsync

2015-06-11 Thread Wolfgang Link
These are some fixes for the pve-zsync tool

___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 2/4] Add dependencies

2015-06-11 Thread Wolfgang Link
Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 control.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/control.in b/control.in
index e56ab8b..12bd95e 100644
--- a/control.in
+++ b/control.in
@@ -3,7 +3,7 @@ Version: @@VERSION@@-@@PKGRELEASE@@
 Section: perl
 Priority: optional
 Architecture: @@ARCH@@
-Depends: perl (= 5.6.0-16)
+Depends: perl (= 5.6.0-16), cstream, libcatmandu-importer-getjson-perl
 Maintainer: Proxmox Support Team supp...@proxmox.com
 Description: Proxmox VE storage management library
  This package contains the Proxmox VE ZFS sync Tool.
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 1/4] code cleanup and man

2015-06-12 Thread Wolfgang Link
Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 pve-zsync | 54 +-
 1 file changed, 29 insertions(+), 25 deletions(-)

diff --git a/pve-zsync b/pve-zsync
index 3c134ce..cd2a228 100644
--- a/pve-zsync
+++ b/pve-zsync
@@ -362,7 +362,7 @@ sub format_job {
 my ($job, $line) = @_;
     my $text = "";
 
-    if ($job->{state} eq "stoped") {
+    if ($job->{state} eq "stopped") {
 	$text = "#";
 }
 if ($line) {
@@ -508,7 +508,7 @@ sub sync {
my ($source, $dest, $job, $param, $date) = @_;
 
 	($source->{old_snap},$source->{last_snap}) = snapshot_get($source, $dest, $param->{maxsnap}, $param->{name});
-   print Dumper $source, $dest;
+
eval{
snapshot_add($source, $dest, $param-{name}, $date);
 
@@ -888,7 +888,7 @@ sub disable_job {
 my ($param) = @_;
 
 my $job = get_job($param);
-    $job->{state} = "stoped";
+    $job->{state} = "stopped";
 update_state($job);
 update_cron($job);
 }
@@ -1122,65 +1122,65 @@ pve-zsync help cmd [OPTIONS]
 
 pve-zsync create -dest string -source string [OPTIONS]
 
-  Create a sync Job
+Create a sync Job
 
-  -dest  string
+-dest  string
 
the destination target is like [IP]:Pool[/Path]
 
-  -limit integer
+-limit integer
 
max sync speed in kBytes/s, default unlimited
 
-  -maxsnap   string
+-maxsnap   string
 
	how many snapshots will be kept before being erased, default 1
 
-  -name  string
+-name  string
 
name of the sync job, if not set it is default
 
-  -skip  boolean
+-skip  boolean
 
if this flag is set it will skip the first sync
 
-  -sourcestring
+-sourcestring
 
the source can be an VMID or [IP:]ZFSPool[/Path]
 
 pve-zsync destroy -source string [OPTIONS]
 
-  remove a sync Job from the scheduler
+remove a sync Job from the scheduler
 
-  -name  string
+-name  string
 
name of the sync job, if not set it is default
 
-  -sourcestring
+-sourcestring
 
 the source can be an  VMID or [IP:]ZFSPool[/Path]
 
 pve-zsync disable -source string [OPTIONS]
 
-  pause a sync job
+pause a sync job
 
-  -name  string
+-name  string
 
name of the sync job, if not set it is default
 
-  -sourcestring
+-sourcestring
 
 the source can be an  VMID or [IP:]ZFSPool[/Path]
 
 pve-zsync enable -source string [OPTIONS]
 
-  enable a syncjob and reset error
+enable a syncjob and reset error
 
-  -name  string
+-name  string
 
name of the sync job, if not set it is default
 
-  -sourcestring
+-sourcestring
 
 the source can be an  VMID or [IP:]ZFSPool[/Path]
 pve-zsync list
@@ -1203,16 +1203,16 @@ pve-zsync sync -dest string -source string [OPTIONS]
 
max sync speed in kBytes/s, default unlimited
 
- -maxsnap   integer
+-maxsnap   integer
 
	how many snapshots will be kept before being erased, default 1
 
- -name  string
+-name  string
 
name of the sync job, if not set it is default.
	It is only necessary if the scheduler already contains this source.
 
-  -sourcestring
+-sourcestring
 
the source can be an VMID or [IP:]ZFSPool[/Path]
 
@@ -1233,10 +1233,14 @@ add sync job from local VM to remote ZFS Server
 pve-zsync create -source=100 -dest=192.168.1.2:zfspool
 
 =head1 IMPORTANT FILES
+ 
+Cron jobs are stored at /etc/cron.d/pve-zsync
+
+The VM config get copied on the destination machine to  /var/pve-zsync/
+
+The config is stored at /var/pve-zsync/
 
-Where the cron jobs are stored/etc/cron.d/pve-zsync
-Where the VM config get copied on the destination machine /var/pve-zsync/
-Where the config is stored/var/pve-zsync/
+=head1 COPYRIGHT AND DISCLAIMER
 
 Copyright (C) 2007-2015 Proxmox Server Solutions GmbH
 
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 2/4] fix incremental sync

2015-06-12 Thread Wolfgang Link
The wrong snapshot was being used as the base of the incremental send.

Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 pve-zsync | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pve-zsync b/pve-zsync
index cd2a228..0023aad 100644
--- a/pve-zsync
+++ b/pve-zsync
@@ -796,7 +796,7 @@ sub send_image {
     $cmd .= "-v " if $param->{verbose};
 
     if($source->{last_snap} && snapshot_exist($source ,$dest, $param->{method})) {
-	$cmd .= "-i $source->{all}\@$source->{old_snap} $source->{all}\@$source->{new_snap} ";
+	$cmd .= "-i $source->{all}\@$source->{last_snap} $source->{all}\@$source->{new_snap} ";
 } else {
 	$cmd .= "$source->{all}\@$source->{new_snap} ";
 }
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 4/4] bump version 1.5-2

2015-06-12 Thread Wolfgang Link
Signed-off-by: Wolfgang Link w.l...@proxmox.com
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 905deda..5c31a28 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ RELEASE=3.4
 
 VERSION=1.5
 PACKAGE=pve-zsync
-PKGREL=1
+PKGREL=2
 
 DESTDIR=
 PREFIX=/usr
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PVE-User] Proxmox VE ZFS replication manager released (pve-zsync)

2015-06-30 Thread Wolfgang Link

There were some issues in earlier releases, but they are fixed in 0.6.4!
https://github.com/zfsonlinux/zfs/releases/tag/zfs-0.6.4


On 06/30/2015 02:08 PM, Angel Docampo wrote:

Hi there!

Is it based on zfs send-receive? I thought it was buggy on linux... 
perhaps it was on 0.6.3?


Anyway, that's a great feature, thank you!

:)

On 30/06/15 12:19, Martin Maurer wrote:

Hi all,

We just released the brand new Proxmox VE ZFS replication manager
(pve-zsync)!

This CLI tool synchronizes your virtual machine (virtual disks and VM
configuration) or directory stored on ZFS between two servers - very
useful for backup and replication tasks.

A big Thank-you to our active community for all feedback, testing, bug
reporting and patch submissions.

Documentation
http://pve.proxmox.com/wiki/PVE-zsync

Git
https://git.proxmox.com/?p=pve-zsync.git;a=summary

Bugtracker
https://bugzilla.proxmox.com/



--


*Angel Docampo
*
*Datalab Tecnologia, s.a.*
Castillejos, 352 - 08025 Barcelona
Tel. 93 476 69 14 - Ext: 114
Mob. 670.299.381



___
pve-user mailing list
pve-u...@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-user


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Fix Typo

2015-08-17 Thread Wolfgang Link
---
 src/PVE/HA/CRM.pm |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/PVE/HA/CRM.pm b/src/PVE/HA/CRM.pm
index 9feda05..4196bba 100644
--- a/src/PVE/HA/CRM.pm
+++ b/src/PVE/HA/CRM.pm
@@ -16,7 +16,7 @@ use PVE::HA::Manager;
 my $valid_states = {
     wait_for_quorum => "cluster is not quorate, waiting",
     master => "quorate, and we got the ha_manager lock",
-    lost_manager_lock => "we lost the ha_manager lock (watchgog active)",
+    lost_manager_lock => "we lost the ha_manager lock (watchdog active)",
     slave => "quorate, but we do not own the ha_manager lock",
 };
 
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Bugfix 684: Fix Typo

2015-08-17 Thread Wolfgang Link
---
 www/manager/ha/Fencing.js |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/manager/ha/Fencing.js b/www/manager/ha/Fencing.js
index f6bd365..1238fbc 100644
--- a/www/manager/ha/Fencing.js
+++ b/www/manager/ha/Fencing.js
@@ -16,7 +16,7 @@ Ext.define('PVE.ha.FencingView', {
viewConfig: {
trackOver: false,
deferEmptyText: false,
-   emptyText: 'Use watchgog based fencing.'
+   emptyText: 'Use watchdog based fencing.'
},
columns: [
{
-- 
1.7.10.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Bugfix 517: improve error message

2015-08-17 Thread Wolfgang Link
Get an understandable message
if someone tries to resize a qcow2 image on which one or more snapshots reside
---
 PVE/API2/Qemu.pm | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 24a066f..5caaf1a 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -2648,6 +2648,12 @@ __PACKAGE__->register_method({
 
 	my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
 
+	my (undef, undef, undef, undef, undef, undef, $format) =
+	    PVE::Storage::parse_volname($storecfg, $drive->{file});
+
+	die "can't resize volume: $disk if snapshot exists\n"
+	    if %{$conf->{snapshots}} && $format eq 'qcow2';
+
my $volid = $drive-{file};
 
die disk '$disk' has no associated volume\n if !$volid;
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] correct storage content type

2015-08-24 Thread Wolfgang Link
---
 www/manager/lxc/CreateWizard.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/manager/lxc/CreateWizard.js b/www/manager/lxc/CreateWizard.js
index 77df991..d037316 100644
--- a/www/manager/lxc/CreateWizard.js
+++ b/www/manager/lxc/CreateWizard.js
@@ -18,7 +18,7 @@ Ext.define('PVE.lxc.CreateWizard', {
var storagesel = Ext.create('PVE.form.StorageSelector', {
name: 'storage',
fieldLabel: gettext('Storage'),
-   storageContent: 'images,rootdir',
+   storageContent: 'rootdir',
autoSelect: true,
allowBlank: false
});
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] fix bug #690: add container content type to RBD storage, at content selector on GUI

2015-08-24 Thread Wolfgang Link
---
 www/manager/storage/RBDEdit.js | 17 +++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/www/manager/storage/RBDEdit.js b/www/manager/storage/RBDEdit.js
index 0170ba4..66d91bc 100644
--- a/www/manager/storage/RBDEdit.js
+++ b/www/manager/storage/RBDEdit.js
@@ -6,7 +6,6 @@ Ext.define('PVE.storage.RBDInputPanel', {
 
if (me.create) {
values.type = 'rbd';
-values.content = 'images';
 
} else {
delete values.storage;
@@ -65,8 +64,17 @@ Ext.define('PVE.storage.RBDInputPanel', {
checked: true,
uncheckedValue: 0,
fieldLabel: gettext('Enable')
+   },
+   {
+   xtype: 'pveContentTypeSelector',
+   cts: ['images', 'rootdir'],
+   fieldLabel: gettext('Content'),
+   name: 'content',
+   value: ['images', 'rootdir'],
+   multiSelect: true,
+   allowBlank: false
}
-   ];
+   ];
 
if (me.create || me.storageId !== 'local') {
me.column2.unshift({
@@ -117,6 +125,11 @@ Ext.define('PVE.storage.RBDEdit', {
 me.load({
 success:  function(response, options) {
 var values = response.result.data;
+
+   var ctypes = values.content || '';
+
+   values.content = ctypes.split(',');
+
 if (values.nodes) {
 values.nodes = values.nodes.split(',');
 }
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] Bugfix 688: if vm is not owner of this disk remove from config

2015-08-20 Thread Wolfgang Link
---
 PVE/QemuServer.pm | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 5ba8e1c..7115007 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -4038,6 +4038,9 @@ sub try_deallocate_drive {
 	    if $used_paths->{$path};
PVE::Storage::vdisk_free($storecfg, $volid);
return 1;
+   } else {
+   # If vm is not owner of this disk remove from config
+   return 1;
}
 }
 
-- 
2.1.4


___
pve-devel mailing list
pve-devel@pve.proxmox.com
http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel

