Re: [pve-devel] [PATCH container] Fix pct skiplock

2018-03-13 Thread Thomas Lamprecht
On 03/13/2018 05:20 PM, Alwin Antreich wrote:
> The method vm_start sets an environment variable that is not picked up
> anymore by systemd. This patch keeps the environment variable and
> introduces a skiplock file that is picked up by the
> lxc-pve-prestart-hook.
> 
> Signed-off-by: Alwin Antreich 
> ---
>  src/PVE/LXC.pm| 9 -
>  src/lxc-pve-prestart-hook | 5 -
>  2 files changed, 12 insertions(+), 2 deletions(-)
> 
> diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
> index 7adbcd1..2e3e4ca 100644
> --- a/src/PVE/LXC.pm
> +++ b/src/PVE/LXC.pm
> @@ -1545,7 +1545,14 @@ sub vm_start {
>  
>  update_lxc_config($vmid, $conf);
>  
> -local $ENV{PVE_SKIPLOCK}=1 if $skiplock;
> +if ($skiplock) {
> + # to stay compatible with old behaviour
> + local $ENV{PVE_SKIPLOCK}=1;

if it never gets passed along at all, there's no point in trying
to stay compatible, is there?

> +
> + my $file = "/var/lib/lxc/$vmid/skiplock";

use /run/lxc/... or /run/pve-container (the former exists already),
those are guaranteed to be tmpfs, so a reboot clears possible
left-over flag files
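
For illustration, creating the flag under such a tmpfs-backed runtime
directory could look roughly like this (a minimal sketch only, not part
of the patch; the directory handling and the flag file name are
assumptions):

    use File::Path qw(make_path);

    my $flag_dir = '/run/pve-container';       # tmpfs, cleared on reboot
    my $flag_fn = "$flag_dir/skiplock-$vmid";  # hypothetical file name

    make_path($flag_dir);  # no-op if the directory already exists
    open(my $fh, '>', $flag_fn) or die "failed to create $flag_fn: $!\n";
    close($fh);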

> + open(my $fh, '>', $file) || die "failed to open $file for writing: $!\n";
> + close($fh);
> +}
>  
>  my $cmd = ['systemctl', 'start', "pve-container\@$vmid"];
>  
> diff --git a/src/lxc-pve-prestart-hook b/src/lxc-pve-prestart-hook
> index fd29423..abe61aa 100755
> --- a/src/lxc-pve-prestart-hook
> +++ b/src/lxc-pve-prestart-hook
> @@ -57,13 +57,16 @@ __PACKAGE__->register_method ({
>   return undef if $param->{name} !~ m/^\d+$/;
>  
>   my $vmid = $param->{name};
> + my $file = "/var/lib/lxc/$vmid/skiplock";

$file is very ambiguous, maybe $skiplock_flag_fn?

> + my $skiplock = $ENV{PVE_SKIPLOCK} || 1 if -e $file;

don't do that, it's quite confusing: what does the if apply to?
If it applies to the whole assignment, then you could omit the
$ENV{PVE_SKIPLOCK} part...

Do either

my $skiplock = $ENV{PVE_SKIPLOCK} || -e $file;

or maybe even better:
my $skiplock = $ENV{PVE_SKIPLOCK}; # backward compatibility
$skiplock = 1 if -e $file;

Or if you say systemd does not pick this up, just omit the $ENV
part completely?


> + unlink $file if -e $file;
>  
>   PVE::Cluster::check_cfs_quorum(); # only start if we have quorum
>  
>   return undef if ! -f PVE::LXC::Config->config_file($vmid);
>  
>   my $conf = PVE::LXC::Config->load_config($vmid);
> - if (!$ENV{PVE_SKIPLOCK} && !PVE::LXC::Config->has_lock($conf, 'mounted')) {
> + if (!$skiplock && !PVE::LXC::Config->has_lock($conf, 'mounted')) {
>   PVE::LXC::Config->check_lock($conf);
>   }
>  
> 


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH container] Fix pct skiplock

2018-03-13 Thread Alwin Antreich
The method vm_start sets an environment variable that is not picked up
anymore by systemd. This patch keeps the environment variable and
introduces a skiplock file that is picked up by the
lxc-pve-prestart-hook.

Signed-off-by: Alwin Antreich 
---
 src/PVE/LXC.pm| 9 -
 src/lxc-pve-prestart-hook | 5 -
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index 7adbcd1..2e3e4ca 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -1545,7 +1545,14 @@ sub vm_start {
 
 update_lxc_config($vmid, $conf);
 
-local $ENV{PVE_SKIPLOCK}=1 if $skiplock;
+if ($skiplock) {
+   # to stay compatible with old behaviour
+   local $ENV{PVE_SKIPLOCK}=1;
+
+   my $file = "/var/lib/lxc/$vmid/skiplock";
+   open(my $fh, '>', $file) || die "failed to open $file for writing: $!\n";
+   close($fh);
+}
 
 my $cmd = ['systemctl', 'start', "pve-container\@$vmid"];
 
diff --git a/src/lxc-pve-prestart-hook b/src/lxc-pve-prestart-hook
index fd29423..abe61aa 100755
--- a/src/lxc-pve-prestart-hook
+++ b/src/lxc-pve-prestart-hook
@@ -57,13 +57,16 @@ __PACKAGE__->register_method ({
return undef if $param->{name} !~ m/^\d+$/;
 
my $vmid = $param->{name};
+   my $file = "/var/lib/lxc/$vmid/skiplock";
+   my $skiplock = $ENV{PVE_SKIPLOCK} || 1 if -e $file;
+   unlink $file if -e $file;
 
PVE::Cluster::check_cfs_quorum(); # only start if we have quorum
 
return undef if ! -f PVE::LXC::Config->config_file($vmid);
 
my $conf = PVE::LXC::Config->load_config($vmid);
-   if (!$ENV{PVE_SKIPLOCK} && !PVE::LXC::Config->has_lock($conf, 'mounted')) {
+   if (!$skiplock && !PVE::LXC::Config->has_lock($conf, 'mounted')) {
PVE::LXC::Config->check_lock($conf);
}
 
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [RFC edk2-firmware] Initial commit

2018-03-13 Thread Thomas Lamprecht
Currently builds OVMF in a similar fashion to the Debian package; we
just focus on an x86_64 build for now, disable secure boot (it was
disabled in our binary images from pve-qemu as well, as it needs a
static build of a specific version of OpenSSL, a bit of a headache)
and add our logo for the boot splash.

Signed-off-by: Thomas Lamprecht 
---
 .gitignore   |   4 +++
 .gitmodules  |   3 +++
 Makefile |  47 
 debian/Logo.bmp  | Bin 0 -> 49078 bytes
 debian/changelog |   6 +
 debian/clean |   4 +++
 debian/compat|   1 +
 debian/control   |  21 +++
 debian/copyright |  29 
 debian/pve-edk2-firmware.install |   3 +++
 debian/pve-edk2-firmware.links   |   1 +
 debian/rules |  56 +++
 edk2 |   1 +
 13 files changed, 176 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 .gitmodules
 create mode 100644 Makefile
 create mode 100644 debian/Logo.bmp
 create mode 100644 debian/changelog
 create mode 100644 debian/clean
 create mode 100644 debian/compat
 create mode 100644 debian/control
 create mode 100644 debian/copyright
 create mode 100644 debian/pve-edk2-firmware.install
 create mode 100644 debian/pve-edk2-firmware.links
 create mode 100755 debian/rules
 create mode 16 edk2

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000..a4c231b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+*.deb
+*.buildinfo
+*.changes
+edk2.build
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000..e24b385
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "edk2"]
+   path = edk2
+   url = ../mirror_edk2
diff --git a/Makefile b/Makefile
new file mode 100644
index 000..831dcff
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,47 @@
+PACKAGE=pve-edk2-firmware
+VER=1.$(shell date -d @$$(dpkg-parsechangelog -S timestamp) '+%Y%m%d')
+PKGREL=1
+
+SRCDIR=edk2
+BUILDDIR=${SRCDIR}.build
+
+GITVERSION:=$(shell git rev-parse HEAD)
+
+DEB=${PACKAGE}_${VER}-${PKGREL}_all.deb
+
+all: ${DEB}
+   @echo ${DEB}
+
+.PHONY: deb
+deb: ${DEB}
+${DEB}: | submodule
+   rm -rf ${BUILDDIR}
+   cp -rpa ${SRCDIR} ${BUILDDIR}
+   cp -a debian ${BUILDDIR}
+   echo "git clone git://git.proxmox.com/git/pve-guest-fw-edk2.git\\ngit 
checkout ${GITVERSION}" > ${BUILDDIR}/debian/SOURCE
+   cd ${BUILDDIR}; dpkg-buildpackage -rfakeroot -b -uc -us
+   lintian ${DEB}
+   @echo ${DEB}
+
+.PHONY: submodule
+submodule:
+   test -f "${SRCDIR}/Readme.md" || git submodule update --init
+
+.PHONY: update_modules
+update_modules: submodule
+   git submodule foreach 'git pull --ff-only origin master'
+
+.PHONY: upload
+upload: ${DEB}
+   tar cf - ${DEB}|ssh -X repo...@repo.proxmox.com -- upload --product pmg,pve --dist stretch
+
+.PHONY: distclean
+distclean: clean
+
+.PHONY: clean
+clean:
+   rm -rf *~ debian/*~ *.deb ${BUILDDIR} *.changes *.dsc *.buildinfo
+
+.PHONY: dinstall
+dinstall: ${DEB}
+   dpkg -i ${DEB}
diff --git a/debian/Logo.bmp b/debian/Logo.bmp
new file mode 100644
index ..4c46dd84772b32fa5c70e31d2483d66850495dc3
GIT binary patch
literal 49078
[base85-encoded binary patch data truncated]

[pve-devel] [RFC pve-qemu] drop OVMF binaries

2018-03-13 Thread Thomas Lamprecht
we moved this out to the pve-edk2-firmware package, which allows us
to build OVMF from source rather than tracking binary files.

This breaks older qemu-server versions, so add a Breaks entry to d/control

Signed-off-by: Thomas Lamprecht 
---
 debian/Logo.bmp  | Bin 49078 -> 0 bytes
 debian/OVMF_CODE-pure-efi.fd | Bin 1966080 -> 0 bytes
 debian/OVMF_README.txt   |  11 ---
 debian/OVMF_VARS-pure-efi.fd | Bin 131072 -> 0 bytes
 debian/control   |   1 +
 debian/pve-qemu-kvm.install  |   4 
 6 files changed, 1 insertion(+), 15 deletions(-)
 delete mode 100644 debian/Logo.bmp
 delete mode 100644 debian/OVMF_CODE-pure-efi.fd
 delete mode 100644 debian/OVMF_README.txt
 delete mode 100644 debian/OVMF_VARS-pure-efi.fd

diff --git a/debian/Logo.bmp b/debian/Logo.bmp
deleted file mode 100644
index 4c46dd84772b32fa5c70e31d2483d66850495dc3..
diff --git a/debian/OVMF_CODE-pure-efi.fd b/debian/OVMF_CODE-pure-efi.fd
deleted file mode 100644
index 807676da68a1daf80611a026c8d68ddb39d9be2d..
diff --git a/debian/OVMF_README.txt b/debian/OVMF_README.txt
deleted file mode 100644
index 7085e29..000
diff --git a/debian/OVMF_VARS-pure-efi.fd b/debian/OVMF_VARS-pure-efi.fd
deleted file mode 100644
index 3b8bb9b61b0278fa5ac258b1a5b4460d1f681d16..
diff --git a/debian/control b/debian/control
index d2c40ff..4b6715b 100644
--- a/debian/control
+++ b/debian/control
@@ -60,6 +60,7 @@ Replaces: pve-kvm,
   pve-qemu-kvm-2.6.18,
   qemu-system-x86,
   qemu-utils,
+Breaks: qemu-server (<= 5.0-23)
 Description: Full virtualization on x86 hardware
  Using KVM, one can run multiple virtual PCs, each running unmodified Linux or
  Windows images. Each virtual machine has private virtualized hardware: a
diff --git a/debian/pve-qemu-kvm.install b/debian/pve-qemu-kvm.install
index 09cad29..30949e5 100644
--- a/debian/pve-qemu-kvm.install
+++ b/debian/pve-qemu-kvm.install
@@ -2,7 +2,3 @@
 vma usr/bin/
 debian/kvm-ifup etc/kvm/
 debian/kvm-ifdown etc/kvm/
-
-#install ovmf uefi rom
-debian/OVMF_CODE-pure-efi.fd usr/share/kvm/
-debian/OVMF_VARS-pure-efi.fd usr/share/kvm/
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH qemu-server] use pve-edk2-firmware to load ovmf

2018-03-13 Thread Thomas Lamprecht
We're able to just change its path, as we use the FD_SIZE_2MB option
to build OVMF in the new pve-edk2-firmware package, which was the
earlier implicit size of our currently used OVMF blob.

Incoming live migrations have their OVMF_CODE image in a RAMBlock[1],
and only load the new image on a warm reboot, or, naturally, a full
stop-start cycle.

Signed-off-by: Thomas Lamprecht 
---

If this gets applied already, we need a version bump so that the
pve-qemu 'Breaks: qemu-server (<= 5.0-23)' works.

This can be seen as a RFC for now...

 PVE/QemuServer.pm | 4 ++--
 debian/control| 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index a26b2ab..2d27b74 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -38,8 +38,8 @@ use Time::HiRes qw(gettimeofday);
 use File::Copy qw(copy);
 use URI::Escape;
 
-my $OVMF_CODE = '/usr/share/kvm/OVMF_CODE-pure-efi.fd';
-my $OVMF_VARS = '/usr/share/kvm/OVMF_VARS-pure-efi.fd';
+my $OVMF_CODE = '/usr/share/OVMF/OVMF_CODE.fd';
+my $OVMF_VARS = '/usr/share/OVMF/OVMF_VARS.fd';
 
 my $qemu_snap_storage = {rbd => 1, sheepdog => 1};
 
diff --git a/debian/control b/debian/control
index b8aa4ed..cba1016 100644
--- a/debian/control
+++ b/debian/control
@@ -32,6 +32,7 @@ Depends: dbus,
  pve-ha-manager,
  pve-qemu-kvm (>= 2.9.1-9),
  socat,
+ pve-edk2-firmware,
  ${perl:Depends},
  ${shlibs:Depends},
 Description: Qemu Server Tools
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH storage 03/11] Add helper function for extract CIFS credentials.

2018-03-13 Thread Thomas Lamprecht
On 03/13/2018 03:11 PM, Wolfgang Link wrote:
> ---
>  PVE/API2/Storage/Config.pm | 16 
>  1 file changed, 16 insertions(+)
> 
> diff --git a/PVE/API2/Storage/Config.pm b/PVE/API2/Storage/Config.pm
> index aa8c931..fa8d4de 100755
> --- a/PVE/API2/Storage/Config.pm
> +++ b/PVE/API2/Storage/Config.pm
> @@ -12,6 +12,7 @@ use HTTP::Status qw(:constants);
>  use Storable qw(dclone);
>  use PVE::JSONSchema qw(get_standard_option);
>  use PVE::RPCEnvironment;
> +use Term::ReadLine;
>  
please do not use Term::ReadLine anymore, rather use Wolfgang B.'s
nice PTY module, i.e.:

use PVE::PTY;
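
With that, the whole helper below would shrink to something like this
(a rough sketch of the suggested change, not tested):

    my $extract_cifs_credentials = sub {
	my ($param) = @_;

	my $password = extract_param($param, 'password');
	$password = PVE::PTY::read_password('Enter password: ')
	    if !defined($password);

	return "password=$password\n";
    };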

>  use PVE::RESTHandler;
>  
> @@ -36,6 +37,21 @@ my $api_storage_config = sub {
>  return $scfg;
>  };
>  
> +my $extract_cifs_credentials = sub {
> +my ($param) = @_;
> +
> +my $password = extract_param($param, 'password');
> +
> +if (!defined($password)) {
> + my $term = new Term::ReadLine ('pvesm');
> + my $attribs = $term->Attribs;
> + $attribs->{redisplay_function} = $attribs->{shadow_redisplay};
> + $password = $term->readline('Enter password: ');

$password = PVE::PTY::read_password('Enter password: ') if !defined($password);

> +}
> +
> +return "password=$password\n";
> +};
> +
>  __PACKAGE__->register_method ({
>  name => 'index', 
>  path => '',
> 


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 08/11] Register CIFSPlugin in the storage plugin system.

2018-03-13 Thread Wolfgang Link
---
 PVE/Storage.pm| 2 ++
 PVE/Storage/Plugin.pm | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index 143ed2e..6a2b40b 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -25,6 +25,7 @@ use PVE::Storage::DirPlugin;
 use PVE::Storage::LVMPlugin;
 use PVE::Storage::LvmThinPlugin;
 use PVE::Storage::NFSPlugin;
+use PVE::Storage::CIFSPlugin;
 use PVE::Storage::ISCSIPlugin;
 use PVE::Storage::RBDPlugin;
 use PVE::Storage::SheepdogPlugin;
@@ -42,6 +43,7 @@ PVE::Storage::DirPlugin->register();
 PVE::Storage::LVMPlugin->register();
 PVE::Storage::LvmThinPlugin->register();
 PVE::Storage::NFSPlugin->register();
+PVE::Storage::CIFSPlugin->register();
 PVE::Storage::ISCSIPlugin->register();
 PVE::Storage::RBDPlugin->register();
 PVE::Storage::SheepdogPlugin->register();
diff --git a/PVE/Storage/Plugin.pm b/PVE/Storage/Plugin.pm
index 6f72cee..64d4e2f 100644
--- a/PVE/Storage/Plugin.pm
+++ b/PVE/Storage/Plugin.pm
@@ -332,7 +332,7 @@ sub parse_config {
$d->{content} = $def->{content}->[1] if !$d->{content};
}
 
> -   if ($type eq 'iscsi' || $type eq 'nfs' || $type eq 'rbd' || $type eq 'sheepdog' || $type eq 'iscsidirect' || $type eq 'glusterfs' || $type eq 'zfs' || $type eq 'drbd') {
> +   if ($type eq 'iscsi' || $type eq 'nfs' || $type eq 'cifs' || $type eq 'rbd' || $type eq 'sheepdog' || $type eq 'iscsidirect' || $type eq 'glusterfs' || $type eq 'zfs' || $type eq 'drbd') {
$d->{shared} = 1;
}
 }
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 10/11] Add cifsscan to API

2018-03-13 Thread Wolfgang Link
---
 PVE/API2/Storage/Scan.pm | 51 
 1 file changed, 51 insertions(+)

diff --git a/PVE/API2/Storage/Scan.pm b/PVE/API2/Storage/Scan.pm
index 11d139f..e9ce18e 100644
--- a/PVE/API2/Storage/Scan.pm
+++ b/PVE/API2/Storage/Scan.pm
@@ -45,6 +45,7 @@ __PACKAGE__->register_method ({
{ method => 'glusterfs' },
{ method => 'usb' },
{ method => 'zfs' },
+   { method => 'cifs' },
];
 
return $res;
@@ -121,6 +122,56 @@ __PACKAGE__->register_method ({
return $data;
 }});
 
+__PACKAGE__->register_method ({
+name => 'cifsscan',
+path => 'cifs',
+method => 'GET',
+description => "Scan remote CIFS server.",
+protected => 1,
+proxyto => "node",
+permissions => {
+   check => ['perm', '/storage', ['Datastore.Allocate']],
+},
+parameters => {
+   additionalProperties => 0,
+   properties => {
+   node => get_standard_option('pve-node'),
+   server => { type => 'string', format => 'pve-storage-server' },
+   username => { type => 'string', optional => 1 },
+   password => { type => 'string', optional => 1 },
+   domain => { type => 'string', optional => 1 },
+   },
+},
+returns => {
+   type => 'array',
+   items => {
+   type => "object",
+   properties => {
+   share => { type => 'string'},
+   description => { type => 'string'},
+   },
+   },
+},
+code => sub {
+   my ($param) = @_;
+
+   my $server = $param->{server};
+
+   my $username = $param->{username};
+   my $password = $param->{password};
+   my $domain = $param->{domain};
+
+   my $res = PVE::Storage::scan_cifs($server, $username, $password, $domain);
+
+   my $data = [];
+   foreach my $k (keys %$res) {
+   next if $k =~ m/NT_STATUS_/;
+   push @$data, { share => $k, description => $res->{$k} };
+   }
+
+   return $data;
+}});
+
 # Note: GlusterFS currently does not have an equivalent of showmount.
 # As workaround, we simply use nfs showmount. 
 # see http://www.gluster.org/category/volumes/
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 06/11] Add remove cifs in API call.

2018-03-13 Thread Wolfgang Link
---
 PVE/API2/Storage/Config.pm | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/PVE/API2/Storage/Config.pm b/PVE/API2/Storage/Config.pm
index f274593..d6d752a 100755
--- a/PVE/API2/Storage/Config.pm
+++ b/PVE/API2/Storage/Config.pm
@@ -310,6 +310,12 @@ __PACKAGE__->register_method ({
die "can't remove storage - storage is used as base of another 
storage\n"
if PVE::Storage::storage_is_used($cfg, $storeid);
 
+   my $cred_file = '/etc/pve/priv/'.$storeid.'.cred';
+
+   unlink $cred_file
+   if ($cfg->{ids}->{$storeid}->{type} eq 'cifs') &&
+   (-e $cred_file);
+
if ($scfg->{type} eq 'rbd' && !defined($scfg->{monhost})) {
	my $ceph_storage_keyring = "/etc/pve/priv/ceph/${storeid}.keyring";
if (-f $ceph_storage_keyring) {
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 02/11] Add CIFS Storage Plugin.

2018-03-13 Thread Wolfgang Link
This plugin uses the NFS plugin as a template.
We only support SMB versions 2 and 3.
Version 3 is the default and must be overridden through the config.
---
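For illustration, a cifs entry in /etc/pve/storage.cfg could then look
roughly like this (a hedged example only, derived from the options()
definition in this patch; the storage name and values are made up, and
smbversion shows overriding the 3.0 default):

    cifs: mycifs
	path /mnt/pve/mycifs
	server 192.168.1.10
	share backup
	username backupuser
	smbversion 2.0
	content backup,iso
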
 PVE/Storage/CIFSPlugin.pm | 215 ++
 PVE/Storage/Makefile  |   2 +-
 2 files changed, 216 insertions(+), 1 deletion(-)
 create mode 100644 PVE/Storage/CIFSPlugin.pm

diff --git a/PVE/Storage/CIFSPlugin.pm b/PVE/Storage/CIFSPlugin.pm
new file mode 100644
index 000..5cf8509
--- /dev/null
+++ b/PVE/Storage/CIFSPlugin.pm
@@ -0,0 +1,215 @@
+package PVE::Storage::CIFSPlugin;
+
+use strict;
+use warnings;
+use Net::IP;
+use PVE::Tools qw(run_command);
+use PVE::ProcFSTools;
+use File::Path;
+use PVE::Storage::Plugin;
+use PVE::JSONSchema qw(get_standard_option);
+
+use base qw(PVE::Storage::Plugin);
+
+# CIFS helper functions
+
+sub cifs_is_mounted {
+my ($server, $share, $mountpoint, $mountdata) = @_;
+
+$server = "[$server]" if Net::IP::ip_is_ipv6($server);
+my $source = "//${server}/$share";
+$mountdata = PVE::ProcFSTools::parse_proc_mounts() if !$mountdata;
+
+return $mountpoint if grep {
+   $_->[2] =~ /^cifs/ &&
+   $_->[0] =~ m|^\Q$source\E/?$| &&
+   $_->[1] eq $mountpoint
+} @$mountdata;
+return undef;
+}
+
+sub get_cred_file {
+my ($storeid) = @_;
+
+my $cred_file = '/etc/pve/priv/'.$storeid.'.cred';
+
+if (-e $cred_file) {
+   return $cred_file;
+}
+return undef;
+}
+
+sub cifs_mount {
+my ($server, $share, $mountpoint, $storeid, $smbver, $user, $domain) = @_;
+
+$server = "[$server]" if Net::IP::ip_is_ipv6($server);
+my $source = "//${server}/$share";
+
+my $cmd = ['/bin/mount', '-t', 'cifs', $source, $mountpoint, '-o', 'soft', '-o'];
+
+if (my $cred_file = get_cred_file($storeid)) {
+   push @$cmd, "username=$user", '-o', "credentials=$cred_file";
+   push @$cmd, '-o', "domain=$domain" if defined($domain);
+} else {
+   push @$cmd, 'guest,username=guest';
+}
+
+push @$cmd, '-o', defined($smbver) ? "vers=$smbver" : "vers=3.0";
+
+run_command($cmd, errmsg => "mount error");
+}
+
+# Configuration
+
+sub type {
+return 'cifs';
+}
+
+sub plugindata {
+return {
+   content => [ { images => 1, rootdir => 1, vztmpl => 1, iso => 1,
+  backup => 1}, { images => 1 }],
+   format => [ { raw => 1, qcow2 => 1, vmdk => 1 } , 'raw' ],
+};
+}
+
+sub properties {
+return {
+   share => {
+   description => "CIFS share.",
+   type => 'string',
+   },
+   password => {
+   description => "Password for CIFS share.",
+   type => 'string',
+   maxLength => 256,
+   },
+   domain => {
+   description => "CIFS domain.",
+   type => 'string',
+   optional => 1,
+   maxLength => 256,
+   },
+   smbversion => {
+   description => "",
+   type => 'string',
+   optional => 1,
+   },
+};
+}
+
+sub options {
+return {
+   path => { fixed => 1 },
+   server => { fixed => 1 },
+   share => { fixed => 1 },
+   nodes => { optional => 1 },
+   disable => { optional => 1 },
+   maxfiles => { optional => 1 },
+   content => { optional => 1 },
+   format => { optional => 1 },
+   username => { optional => 1 },
+   password => { optional => 1},
+   domain => { optional => 1},
+   smbversion => { optional => 1},
+};
+}
+
+
+sub check_config {
+my ($class, $sectionId, $config, $create, $skipSchemaCheck) = @_;
+
+$config->{path} = "/mnt/pve/$sectionId" if $create && !$config->{path};
+
+return $class->SUPER::check_config($sectionId, $config, $create, $skipSchemaCheck);
+}
+
+# Storage implementation
+
+sub status {
+my ($class, $storeid, $scfg, $cache) = @_;
+
+$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
+   if !$cache->{mountdata};
+
+my $path = $scfg->{path};
+my $server = $scfg->{server};
+my $share = $scfg->{share};
+
+return undef
+   if !cifs_is_mounted($server, $share, $path, $cache->{mountdata});
+
+return $class->SUPER::status($storeid, $scfg, $cache);
+}
+
+sub activate_storage {
+my ($class, $storeid, $scfg, $cache) = @_;
+
+$cache->{mountdata} = PVE::ProcFSTools::parse_proc_mounts()
+   if !$cache->{mountdata};
+
+my $path = $scfg->{path};
+my $server = $scfg->{server};
+my $share = $scfg->{share};
+
+if (!cifs_is_mounted($server, $share, $path, $cache->{mountdata})) {
+
+   mkpath $path;
+
+   die "unable to activate storage '$storeid' - " .
+   "directory '$path' does not exist\n" if ! -d $path;
+
+   cifs_mount($server, $share, $path, $storeid, $scfg->{smbversion},
+   $scfg->{username}, $scfg->{domain});
+}
+
+$class->SUPER::activate_storage($storeid, $scfg, $cache);
+}
+
+sub deactivate_storage {
+my ($class, $storeid, $scfg, $cache) = @_;
+
+$cache->{mountdata} 

[pve-devel] [PATCH storage 09/11] Add cifsscan.

2018-03-13 Thread Wolfgang Link
---
 PVE/Storage.pm | 35 +++
 1 file changed, 35 insertions(+)

diff --git a/PVE/Storage.pm b/PVE/Storage.pm
index 6a2b40b..4140a99 100755
--- a/PVE/Storage.pm
+++ b/PVE/Storage.pm
@@ -1116,6 +1116,41 @@ sub scan_nfs {
 return $res;
 }
 
+sub scan_cifs {
+my ($server_in, $user, $password, $domain) = @_;
+
+my $server;
+if (!($server = resolv_server ($server_in))) {
+   die "unable to resolve address for server '${server_in}'\n";
+}
+
+# we only support Windows greater than 2012 for cifsscan, so use smb3
+my $cmd = ['/usr/bin/smbclient', '-m', 'smb3', '-d', '0', '-L', $server];
+if (defined($user)) {
+   die "password is required" if !defined($password);
+   push @$cmd, '-U', "$user\%$password";
+   push @$cmd, '-W', $domain if defined($domain);
+} else {
+   push @$cmd, '-N';
+}
+
+my $res = {};
+run_command($cmd,
+   outfunc => sub {
+   my $line = shift;
+   if ($line =~ m/(\S+)\s*Disk\s*(\S*)/) {
+   $res->{$1} = $2;
+   } elsif ($line =~ m/(NT_STATUS_(\S*))/) {
+   $res->{$1} = '';
+   }
+   },
+   errfunc => sub {},
+   noerr => 1
+);
+
+return $res;
+}
+
 sub scan_zfs {
 
 my $cmd = ['zfs',  'list', '-t', 'filesystem', '-H', '-o', 'name,avail,used'];
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 11/11] Add cifsscan to CLI

2018-03-13 Thread Wolfgang Link
---
 PVE/CLI/pvesm.pm | 13 +
 1 file changed, 13 insertions(+)

diff --git a/PVE/CLI/pvesm.pm b/PVE/CLI/pvesm.pm
index 9455595..e6c37ea 100755
--- a/PVE/CLI/pvesm.pm
+++ b/PVE/CLI/pvesm.pm
@@ -331,6 +331,19 @@ our $cmddef = {
 printf "%-${maxlen}s %s\n", $rec->{path}, 
$rec->{options};
 }
 }],
+cifsscan => [ "PVE::API2::Storage::Scan", 'cifsscan', ['server'],
+{ node => $nodename }, sub  {
+my $res = shift;
+
+my $maxlen = 0;
+foreach my $rec (@$res) {
+my $len = length ($rec->{share});
+$maxlen = $len if $len > $maxlen;
+}
+foreach my $rec (@$res) {
+printf "%-${maxlen}s %s\n", $rec->{share}, $rec->{description};
+}
+}],
 glusterfsscan => [ "PVE::API2::Storage::Scan", 'glusterfsscan', ['server'],
 { node => $nodename }, sub  {
 my $res = shift;
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 05/11] Add cifs in create API call.

2018-03-13 Thread Wolfgang Link
In this patch the nodes will be deleted if the nodes parameter comes with an
empty string.
We need this in the GUI when updating the nodes in the config, to reset them.

If we do not erase the empty hash, the storage online check would be skipped.
Also the password and user would not be verified.
---
 PVE/API2/Storage/Config.pm | 28 +++-
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/PVE/API2/Storage/Config.pm b/PVE/API2/Storage/Config.pm
index a321204..f274593 100755
--- a/PVE/API2/Storage/Config.pm
+++ b/PVE/API2/Storage/Config.pm
@@ -154,6 +154,14 @@ __PACKAGE__->register_method ({
my $type = extract_param($param, 'type');
my $storeid = extract_param($param, 'storage');
 
+   # prevent an empty nodelist.
+   # FIXME: section config create should never need an empty entity.
+
+   delete $param->{nodes} if !$param->{nodes};
+
+   my $user_data = &$extract_cifs_credentials($param)
+   if $type eq 'cifs' && $param->{username};
+
if ($param->{portal}) {
$param->{portal} = PVE::Storage::resolv_portal($param->{portal});
}
@@ -205,11 +213,21 @@ __PACKAGE__->register_method ({
die "failed to copy ceph authx keyring for storage 
'$storeid': $err\n";
}
}
-
-   # try to activate if enabled on local node,
-   # we only do this to detect errors/problems sooner
-   if (PVE::Storage::storage_check_enabled($cfg, $storeid, undef, 1)) {
-   PVE::Storage::activate_storage($cfg, $storeid);
+   # create a password file in /etc/pve/priv,
+   # this file is used as a cert_file at mount time.
+   my $cred_file = &$set_cifs_credentials($user_data, $storeid)
+   if defined($user_data);
+
+   eval {
+   # try to activate if enabled on local node,
+   # we only do this to detect errors/problems sooner
+   if (PVE::Storage::storage_check_enabled($cfg, $storeid, undef, 1)) {
+   PVE::Storage::activate_storage($cfg, $storeid);
+   }
+   };
+   if(my $err = $@) {
+   unlink $cred_file if defined($cred_file);
+   die $err;
}
 
PVE::Storage::write_config($cfg);
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager] Add cifs storage plugin

2018-03-13 Thread Wolfgang Link
---
 www/manager6/Makefile|   1 +
 www/manager6/Utils.js|   2 +
 www/manager6/dc/StorageView.js   |  11 ++
 www/manager6/storage/CIFSEdit.js | 285 +++
 4 files changed, 299 insertions(+)
 create mode 100644 www/manager6/storage/CIFSEdit.js

diff --git a/www/manager6/Makefile b/www/manager6/Makefile
index e0c46557..b83adfb0 100644
--- a/www/manager6/Makefile
+++ b/www/manager6/Makefile
@@ -150,6 +150,7 @@ JSSRC= \
storage/Browser.js  \
storage/DirEdit.js  \
storage/NFSEdit.js  \
+   storage/CIFSEdit.js \
storage/GlusterFsEdit.js\
storage/IScsiEdit.js\
storage/LVMEdit.js  \
diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js
index 87699e56..330822d7 100644
--- a/www/manager6/Utils.js
+++ b/www/manager6/Utils.js
@@ -391,6 +391,8 @@ Ext.define('PVE.Utils', { utilities: {
return Proxmox.Utils.directoryText;
} else if (value === 'nfs') {
return 'NFS';
+   } else if (value === 'cifs') {
+   return 'CIFS';
} else if (value === 'glusterfs') {
return 'GlusterFS';
} else if (value === 'lvm') {
diff --git a/www/manager6/dc/StorageView.js b/www/manager6/dc/StorageView.js
index f09733a5..ba6faafb 100644
--- a/www/manager6/dc/StorageView.js
+++ b/www/manager6/dc/StorageView.js
@@ -42,6 +42,8 @@ Ext.define('PVE.dc.StorageView', {
editor = 'PVE.storage.DirEdit';
} else if (type === 'nfs') {
editor = 'PVE.storage.NFSEdit';
+   } else if (type === 'cifs') {
+   editor = 'PVE.storage.CIFSEdit';
} else if (type === 'glusterfs') {
editor = 'PVE.storage.GlusterFsEdit';
} else if (type === 'lvm') {
@@ -134,6 +136,15 @@ Ext.define('PVE.dc.StorageView', {
}
},
{
+   text:  PVE.Utils.format_storage_type('cifs'),
+   iconCls: 'fa fa-fw fa-building',
+   handler: function() {
+   var win = Ext.create('PVE.storage.CIFSEdit', {});
+   win.on('destroy', reload);
+   win.show();
+   }
+   },
+   {
text: PVE.Utils.format_storage_type('iscsi'),
iconCls: 'fa fa-fw fa-building',
handler: function() {
diff --git a/www/manager6/storage/CIFSEdit.js b/www/manager6/storage/CIFSEdit.js
new file mode 100644
index ..61e99380
--- /dev/null
+++ b/www/manager6/storage/CIFSEdit.js
@@ -0,0 +1,285 @@
+Ext.define('PVE.storage.CIFSScan', {
+extend: 'Ext.form.field.ComboBox',
+alias: 'widget.pveCIFSScan',
+
+queryParam: 'server',
+
+valueField: 'share',
+displayField: 'share',
+matchFieldWidth: false,
+listConfig: {
+   loadingText: gettext('Scanning...'),
+   width: 350
+},
+doRawQuery: function() {
+},
+
+onTriggerClick: function() {
+   var me = this;
+
+   if (!me.queryCaching || me.lastQuery !== me.cifsServer) {
+   me.store.removeAll();
+   }
+
+   delete me.store.getProxy().setExtraParams({});
+   if (me.cifsUsername && me.cifsPassword) {
+   me.store.getProxy().setExtraParam('username', me.cifsUsername);
+   me.store.getProxy().setExtraParam('password', me.cifsPassword);
+   }
+
+   if (me.cifsDomain) {
+   me.store.getProxy().setExtraParam('domain', me.cifsDomain);
+   }
+
+   me.allQuery = me.cifsServer;
+
+   me.callParent();
+},
+
+setServer: function(server) {
+   var me = this;
+
+   me.cifsServer = server;
+},
+
+setUsername: function(username) {
+   var me = this;
+
+   me.cifsUsername = username;
+},
+
+setPassword: function(password) {
+   var me = this;
+
+   me.cifsPassword = password;
+},
+
+setDomain: function(domain) {
+   var me = this;
+
+   me.cifsDomain = domain;
+},
+
+initComponent : function() {
+   var me = this;
+
+   if (!me.nodename) {
+   me.nodename = 'localhost';
+   }
+
+   var store = Ext.create('Ext.data.Store', {
+   fields: ['description', 'share'],
+   proxy: {
+   type: 'proxmox',
+   url: '/api2/json/nodes/' + me.nodename + '/scan/cifs'
+   }
+   });
+   store.sort('share', 'ASC');
+
+   Ext.apply(me, {
+   store: store
+   });
+
+   me.callParent();
+  

[pve-devel] [PATCH storage 07/11] Add CIFS dependencies in package management.

2018-03-13 Thread Wolfgang Link
---
 debian/control | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/debian/control b/debian/control
index 04812bb..ffaefff 100644
--- a/debian/control
+++ b/debian/control
@@ -24,6 +24,8 @@ Depends: cstream,
  smartmontools,
  thin-provisioning-tools,
  udev,
+cifs-utils,
+smbclient,
  ${perl:Depends},
 Description: Proxmox VE storage management library
  This package contains the storage management library used by Proxmox VE.
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] CIFS Plugin

2018-03-13 Thread Wolfgang Link
This is a storage plugin for cifs/smb network storage.
It is tested with Windows Server2012R2, Server2016, Debian9 with samba 4.5
and CentOS6 with samba 3.6.


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 01/11] Remove pool with -f parameter.

2018-03-13 Thread Wolfgang Link
The test pool will not be removed if we do not force it.
---
 test/run_test_zfspoolplugin.pl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/run_test_zfspoolplugin.pl b/test/run_test_zfspoolplugin.pl
index bda8348..f6218b1 100755
--- a/test/run_test_zfspoolplugin.pl
+++ b/test/run_test_zfspoolplugin.pl
@@ -2643,7 +2643,7 @@ sub setup_zpool {
 sub clean_up_zpool {
 
 eval {
-   run_command("zpool destroy $subvol");
+   run_command("zpool destroy -f $subvol");
 };
 if ($@) {
warn $@;}
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 03/11] Add helper function for extract CIFS credentials.

2018-03-13 Thread Wolfgang Link
---
 PVE/API2/Storage/Config.pm | 16 
 1 file changed, 16 insertions(+)

diff --git a/PVE/API2/Storage/Config.pm b/PVE/API2/Storage/Config.pm
index aa8c931..fa8d4de 100755
--- a/PVE/API2/Storage/Config.pm
+++ b/PVE/API2/Storage/Config.pm
@@ -12,6 +12,7 @@ use HTTP::Status qw(:constants);
 use Storable qw(dclone);
 use PVE::JSONSchema qw(get_standard_option);
 use PVE::RPCEnvironment;
+use Term::ReadLine;
 
 use PVE::RESTHandler;
 
@@ -36,6 +37,21 @@ my $api_storage_config = sub {
 return $scfg;
 };
 
+my $extract_cifs_credentials = sub {
+my ($param) = @_;
+
+my $password = extract_param($param, 'password');
+
+if (!defined($password)) {
+   my $term = new Term::ReadLine ('pvesm');
+   my $attribs = $term->Attribs;
+   $attribs->{redisplay_function} = $attribs->{shadow_redisplay};
+   $password = $term->readline('Enter password: ');
+}
+
+return "password=$password\n";
+};
+
 __PACKAGE__->register_method ({
 name => 'index', 
 path => '',
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage 04/11] Add set CIFS credentials.

2018-03-13 Thread Wolfgang Link
---
 PVE/API2/Storage/Config.pm | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/PVE/API2/Storage/Config.pm b/PVE/API2/Storage/Config.pm
index fa8d4de..a321204 100755
--- a/PVE/API2/Storage/Config.pm
+++ b/PVE/API2/Storage/Config.pm
@@ -52,6 +52,17 @@ my $extract_cifs_credentials = sub {
 return "password=$password\n";
 };
 
+my $set_cifs_credentials = sub {
+my ($user_data, $storeid) = @_;
+
+my $cred_path = '/etc/pve/priv/';
+
+my $cred_file = $cred_path.$storeid.".cred";
+PVE::Tools::file_set_contents($cred_file, $user_data);
+
+return $cred_file;
+};
+
 __PACKAGE__->register_method ({
 name => 'index', 
 path => '',
-- 
2.11.0


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH qemu] build: use 3.0 source format

2018-03-13 Thread Dietmar Maurer
I don't really like this. Why not simply:

 %:
dh $@ --with quilt



> diff --git a/debian/source/format b/debian/source/format
> new file mode 100644
> index 000..163aaf8
> --- /dev/null
> +++ b/debian/source/format
> @@ -0,0 +1 @@
> +3.0 (quilt)
> -- 
> 2.14.2
> 
> 
> ___
> pve-devel mailing list
> pve-devel@pve.proxmox.com
> https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH qemu] build: use 3.0 source format

2018-03-13 Thread Fabian Grünbichler
instead of manually including and calling quilt. resulting binary debs
are identical.

Signed-off-by: Fabian Grünbichler 
---
note: the rules file could probably benefit from further cleanup, but this was
trivial and easily verifiable ;)

 debian/rules | 8 ++--
 debian/source/format | 1 +
 2 files changed, 3 insertions(+), 6 deletions(-)
 create mode 100644 debian/source/format

diff --git a/debian/rules b/debian/rules
index 4209404..02fcce8 100755
--- a/debian/rules
+++ b/debian/rules
@@ -20,10 +20,6 @@ ARCH ?= $(shell dpkg-architecture -qDEB_HOST_GNU_CPU)
 PACKAGE=pve-qemu-kvm
 destdir := $(CURDIR)/debian/$(PACKAGE)
 
-ifneq "$(wildcard /usr/share/quilt/quilt.make)" ""
-include /usr/share/quilt/quilt.make
-endif
-
 CFLAGS = -Wall
 
 ifneq (,$(findstring noopt,$(DEB_BUILD_OPTIONS)))
@@ -48,7 +44,7 @@ config.status: configure
--enable-virtfs --disable-libnfs \
--disable-guest-agent --disable-guest-agent-msi
 
-build: patch build-stamp
+build: build-stamp
 
 build-stamp:  config.status
dh_testdir
@@ -60,7 +56,7 @@ build-stamp:  config.status
 
touch $@
 
-clean: unpatch
+clean:
dh_testdir
dh_testroot
rm -f build-stamp
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 000..163aaf8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] Cloud Init Questions

2018-03-13 Thread Wolfgang Bumiller
On Tue, Mar 13, 2018 at 11:27:31AM +0100, Dominik Csapak wrote:
> Hi,
> 
> since the cloud init patches got recently applied, i had a few questions for
> anyone actually using cloud init
> 
> currently the instance-id is the hash of the config (afaics)
> so we trigger the cloud-init code for a new instance
> each time we have a differing config
> 
> isn't this a little too much? e.g. the ssh host key gets regenerated,
> the sudoers file gets an additional entry (ok this is stupid on the cloud
> init side, but still),...

For reference - IIRC we initially did this because back when we started
it was the only way to get it to actually read the updated config when
changing something.

> 
> would it not be better to use the vmid for this?

If cloud-init in the versions usually found on current images
(debian-stable etc.) actually behaves, then I'm all for it. Otherwise we
have to find some other way to deal with things like cloudinit's ssh
server key regeneration behavior...

> (e.g. having a cloud init vm which i clone would trigger that code
> only once after cloning, which is exactly what i want?)
> 
> since i am working on the gui portion for this, are
> there any special requests from anyone for the gui?
> 
> should we include the password setting mechanism on the gui?
> (i would rather not)

For containers we currently only deal with passwords at create-time.
Since we cannot really go that way with VMs we may have to accept that
this needs to be part of the GUI. But even if not, we probably do want
the ability to *remove* it via the GUI.
Personally I wouldn't recommend using this option anyway, since you
either have plaintext passwords in the config (because "older" versions
(such as the one included in debian-stable) don't support pre-hashed
passwords), or pre-hashed passwords - which, when not rate limited by
some server/client protocol, are pretty easily cracked anyway...
So this is mostly an option for private servers or test setups IMHO.

Also for reference: we did consider a password option for `qm start`
(iow. without storing it in the config) but the disk image would still
be stored on a storage other users may have read-access to, in order to
create the ISO image read by cloud-init at run-time...
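
To make the instance-id trade-off discussed above concrete, roughly
(an illustrative sketch only, not the actual implementation; the
config-to-string step and the Digest::SHA usage are assumptions):

    use Digest::SHA qw(sha1_hex);

    # current behaviour (roughly): any config change yields a new
    # instance-id, so cloud-init redoes its per-instance setup
    # (ssh host keys, sudoers entry, ...)
    my $instance_id = sha1_hex($cloudinit_config_string);

    # discussed alternative: stable per VM, so only the first boot
    # after create/clone counts as a new instance
    my $instance_id_alt = $vmid;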

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH REBASED v4 container 0/5] lxc clone/move volume patches

2018-03-13 Thread Dietmar Maurer
applied all 5 patches

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH cluster 2/2] fix tainted input in backup_cfs_database

2018-03-13 Thread Dietmar Maurer
applied

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH cluster 1/2] refactor backup_cfs_database

2018-03-13 Thread Dietmar Maurer
applied

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH cluster 1/2] pvecm: check if APIClient exception code is defined

2018-03-13 Thread Dietmar Maurer
applied

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH cluster 2/2] api: add fork worker ID for create and join

2018-03-13 Thread Dietmar Maurer
applied

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH cluster] cluster join: ensure updatecerts gets called on quorate cluster

2018-03-13 Thread Dietmar Maurer
applied

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH manager 2/2] postinst: ensure pve-daily-update timer gets started on transition

2018-03-13 Thread Thomas Lamprecht
Signed-off-by: Thomas Lamprecht 
Acked-by: Fabian Grünbichler 
---
 debian/postinst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/debian/postinst b/debian/postinst
index 4f8b158e..f1153fb3 100755
--- a/debian/postinst
+++ b/debian/postinst
@@ -93,6 +93,8 @@ case "$1" in
# remove old/unused init.d files
OLD_INITD_FILES="pvebanner pvenetcommit pve-manager pvedaemon 
pveproxy pvestatd spiceproxy"
for f in ${OLD_INITD_FILES}; do rm -f "/etc/init.d/$f"; done
+
+   deb-systemd-invoke start pve-daily-update.timer
fi
 fi
 ;;
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH manager 0/8] further build/postinst cleanups

2018-03-13 Thread Thomas Lamprecht
applied, with the two follow up patches we discussed

Thomas Lamprecht (2):
  postinst: handle masked units
  postinst: ensure pve-daily-update timer gets started on transition

 debian/postinst | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH manager 1/2] postinst: handle masked units

2018-03-13 Thread Thomas Lamprecht
check if a unit is masked before starting/restarting/reloading it,
as otherwise we get pretty ugly error messages during upgrade.

as "deb-systemd-helper --quiet was-enabled" differs from the
"systemctl is-enabled" behaviour, the former returns true for masked
units while the latter does not, we have to manually call systemctl,
circumventing the deb helper.

Signed-off-by: Thomas Lamprecht 
Acked-by: Fabian Grünbichler 
---
 debian/postinst | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/debian/postinst b/debian/postinst
index 58736a15..4f8b158e 100755
--- a/debian/postinst
+++ b/debian/postinst
@@ -71,7 +71,9 @@ case "$1" in
else
dh_action="start"
fi
-   deb-systemd-invoke $dh_action "$unit"
+   if systemctl -q is-enabled "$unit"; then
+   deb-systemd-invoke $dh_action "$unit"
+   fi
done
 fi
 
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [RFC qemu-server] cloud-init: make cipassword interactive on the CLI

2018-03-13 Thread Dietmar Maurer
applied

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [RFC common] cli: more generic interactive parameter definition

2018-03-13 Thread Dietmar Maurer
applied

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] Cloud Init Questions

2018-03-13 Thread Dominik Csapak

Hi,

since the cloud init patches got recently applied, i had a few questions
for anyone actually using cloud init


currently the instance-id is the hash of the config (afaics)
so we trigger the cloud-init code for a new instance
each time we have a differing config

isn't this a little too much? e.g. the ssh host key gets regenerated,
the sudoers file gets an additional entry (ok this is stupid on the 
cloud init side, but still),...


would it not be better to use the vmid for this?
(e.g. having a cloud init vm which i clone would trigger that code
only once after cloning, which is exactly what i want?)

since i am working on the gui portion for this, are
there any special requests from anyone for the gui?

should we include the password setting mechanism on the gui?
(i would rather not)

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 6/8] postinst: switch to /bin/sh

2018-03-13 Thread Fabian Grünbichler
we don't use anything bash specific in our postinst, and this way lintian
should warn us about any bashisms we introduce.

Signed-off-by: Fabian Grünbichler 
---
 debian/postinst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/debian/postinst b/debian/postinst
index a648ec39..58736a15 100755
--- a/debian/postinst
+++ b/debian/postinst
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 
 # Abort if any command returns an error value 
 set -e
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [RFC manager 8/8] build: remove empty WWWROOT dir

2018-03-13 Thread Fabian Grünbichler
seems to be a remnant of the Apache days..

Signed-off-by: Fabian Grünbichler 
---
it's empty, and I did not find anything using it..

 www/manager6/Makefile | 1 -
 defines.mk| 1 -
 2 files changed, 2 deletions(-)

diff --git a/www/manager6/Makefile b/www/manager6/Makefile
index 2688fd0e..e0c46557 100644
--- a/www/manager6/Makefile
+++ b/www/manager6/Makefile
@@ -204,7 +204,6 @@ all: pvemanagerlib.js
 
 .PHONY: install 
 install: pvemanagerlib.js
-   install -d ${WWWBASEDIR}/root
install -d ${WWWJSDIR}
install -m 0644 pvemanagerlib.js ${WWWJSDIR}
 
diff --git a/defines.mk b/defines.mk
index 997491d3..8b54fd2a 100644
--- a/defines.mk
+++ b/defines.mk
@@ -14,7 +14,6 @@ HARADIR=${DESTDIR}/usr/share/cluster
 DOCDIR=${DESTDIR}/usr/share/doc/${PACKAGE}
 PODDIR=${DESTDIR}/usr/share/doc/${PACKAGE}/pod
 WWWBASEDIR=${DESTDIR}/usr/share/${PACKAGE}
-WWWROOTDIR=${WWWBASEDIR}/root
 WWWIMAGEDIR=${WWWBASEDIR}/images
 WWWTOUCHDIR=${WWWBASEDIR}/touch
 WWWCSSDIR=${WWWBASEDIR}/css
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 3/8] postinst: refactor service/timer handling

2018-03-13 Thread Fabian Grünbichler
reduce code duplication, and reload-or-restart timers just like service
units instead of just starting them.

Signed-off-by: Fabian Grünbichler 
---
 debian/postinst | 33 +
 1 file changed, 9 insertions(+), 24 deletions(-)

diff --git a/debian/postinst b/debian/postinst
index 685f3c28..ebce2536 100755
--- a/debian/postinst
+++ b/debian/postinst
@@ -54,43 +54,28 @@ case "$1" in
 
 # same as dh_systemd_enable (code copied)
 
-for timer in pvesr pve-daily-update; do
-   deb-systemd-helper unmask $timer.timer >/dev/null || true
+UNITS="pvedaemon.service pveproxy.service spiceproxy.service 
pvestatd.service pvebanner.service pvesr.timer pve-daily-update.timer"
+NO_RESTART_UNITS="pvenetcommit.service pve-guests.service"
 
-   # was-enabled defaults to true, so new installations run enable.
-   if deb-systemd-helper --quiet was-enabled $timer.timer; then
-   # Enables the unit on first installation, creates new
-   # symlinks on upgrades if the unit file has changed.
-   deb-systemd-helper enable $timer.timer >/dev/null || true
-   else
-   # Update the statefile to add new symlinks (if any), which need to 
be
-   # cleaned up on purge. Also remove old symlinks.
-   deb-systemd-helper update-state $timer.timer >/dev/null || true
-   fi
-done
-
-for service in pvedaemon pveproxy spiceproxy pvestatd pvebanner pvenetcommit pve-guests; do
-   deb-systemd-helper unmask $service.service >/dev/null || true
+for unit in ${UNITS} ${NO_RESTART_UNITS}; do
+   deb-systemd-helper unmask "$unit" >/dev/null || true
 
# was-enabled defaults to true, so new installations run enable.
-   if deb-systemd-helper --quiet was-enabled $service.service; then
+   if deb-systemd-helper --quiet was-enabled "$unit"; then
# Enables the unit on first installation, creates new
# symlinks on upgrades if the unit file has changed.
-   deb-systemd-helper enable $service.service >/dev/null || true
+   deb-systemd-helper enable "$unit" >/dev/null || true
else
# Update the statefile to add new symlinks (if any), which need to 
be
# cleaned up on purge. Also remove old symlinks.
-   deb-systemd-helper update-state $service.service >/dev/null || true
+   deb-systemd-helper update-state "$unit" >/dev/null || true
fi
 done
 
 if test ! -e /proxmox_install_mode; then
-
-   for service in pvedaemon pveproxy spiceproxy pvestatd; do
-   deb-systemd-invoke reload-or-restart $service
+   for unit in ${UNITS}; do
+   deb-systemd-invoke reload-or-restart "$unit"
done
-
-   deb-systemd-invoke start pvesr.timer pve-daily-update.timer >/dev/null || true
 fi
 
 if test -z "$2"; then
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 4/8] postinst: unify version-dependent cleanup

2018-03-13 Thread Fabian Grünbichler
putting this into one place is better for readability

Signed-off-by: Fabian Grünbichler 
---
 debian/postinst | 35 +--
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/debian/postinst b/debian/postinst
index ebce2536..f2ac0ab2 100755
--- a/debian/postinst
+++ b/debian/postinst
@@ -31,15 +31,6 @@ case "$1" in
 
 mkdir /etc/pve 2>/dev/null || true
 
-if dpkg --compare-versions "$2" '<=' '5.1-47'; then
-   # remove cron update job, superseded by systemd timer
-   rm -f /etc/cron.d/pveupdate
-
-   # remove old/unused init.d files
-   OLD_INITD_FILES="pvebanner pvenetcommit pve-manager pvedaemon pveproxy pvestatd spiceproxy"
-   for f in ${OLD_INITD_FILES}; do rm -f "/etc/init.d/$f"; done
-fi
-
 if test ! -e /var/lib/pve-manager/apl-info/download.proxmox.com; then
mkdir -p /var/lib/pve-manager/apl-info
	cp /usr/share/doc/pve-manager/aplinfo.dat /var/lib/pve-manager/apl-info/download.proxmox.com
@@ -78,15 +69,23 @@ case "$1" in
done
 fi
 
-if test -z "$2"; then
-  : # no old version, nothing to do
-else
-  # "$2" is the most recently configured version
-  if dpkg --compare-versions "$2" '<=' '5.0-23'; then
-# 5.0-23 temporarily reverted the removal of the startcom CA in
-# ca-certificates; we've since switched to let's encrypt
-update-ca-certificates >/dev/null 2>&1
-  fi
+if test -n "$2"; then
+   # "$2" is the most recently configured version
+
+   if dpkg --compare-versions "$2" '<=' '5.0-23'; then
+   # 5.0-23 temporarily reverted the removal of the startcom CA in
+   # ca-certificates; we've since switched to let's encrypt
+   update-ca-certificates >/dev/null 2>&1
+   fi
+
+   if dpkg --compare-versions "$2" '<=' '5.1-47'; then
+   # remove cron update job, superseded by systemd timer
+   rm -f /etc/cron.d/pveupdate
+
+   # remove old/unused init.d files
+   OLD_INITD_FILES="pvebanner pvenetcommit pve-manager pvedaemon pveproxy pvestatd spiceproxy"
+   for f in ${OLD_INITD_FILES}; do rm -f "/etc/init.d/$f"; done
+   fi
 fi
 ;;
 
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 2/8] postinst: actually enable and start pveupdate.timer

2018-03-13 Thread Fabian Grünbichler
Signed-off-by: Fabian Grünbichler 
---
 debian/postinst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/debian/postinst b/debian/postinst
index 9f2b95a7..685f3c28 100755
--- a/debian/postinst
+++ b/debian/postinst
@@ -54,7 +54,7 @@ case "$1" in
 
 # same as dh_systemd_enable (code copied)
 
-for timer in pvesr; do
+for timer in pvesr pve-daily-update; do
deb-systemd-helper unmask $timer.timer >/dev/null || true
 
# was-enabled defaults to true, so new installations run enable.
@@ -90,7 +90,7 @@ case "$1" in
deb-systemd-invoke reload-or-restart $service
done
 
-   deb-systemd-invoke start pvesr.timer >/dev/null || true
+   deb-systemd-invoke start pvesr.timer pve-daily-update.timer >/dev/null || true
 fi
 
 if test -z "$2"; then
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 7/8] build: use git rev-parse for GITVERSION

2018-03-13 Thread Fabian Grünbichler
Signed-off-by: Fabian Grünbichler 
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 532833aa..6cf32a95 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,7 @@ DESTDIR=
 SUBDIRS = aplinfo PVE bin www
 
 ARCH:=$(shell dpkg-architecture -qDEB_BUILD_ARCH)
-GITVERSION:=$(shell cat .git/refs/heads/master)
+GITVERSION:=$(shell git rev-parse HEAD)
 
 DEB=${PACKAGE}_${VERSION}-${PACKAGERELEASE}_${ARCH}.deb
 
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 1/8] postinst: cleanup trigger code

2018-03-13 Thread Fabian Grünbichler
reload-or-try-restart works just fine even if the unit is stopped or
disabled

Signed-off-by: Fabian Grünbichler 
---
 debian/postinst | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/debian/postinst b/debian/postinst
index 62ea8903..9f2b95a7 100755
--- a/debian/postinst
+++ b/debian/postinst
@@ -18,11 +18,10 @@ case "$1" in
 test -f /etc/pve/local/pve-ssl.pem || exit 0;
 test -e /proxmox_install_mode && exit 0;
 
-# Note: reload-or-try-restart fails if service is not active
-systemctl --quiet is-active pvedaemon.service && deb-systemd-invoke reload-or-try-restart pvedaemon.service
-systemctl --quiet is-active pvestatd.service && deb-systemd-invoke reload-or-try-restart pvestatd.service
-systemctl --quiet is-active pveproxy.service && deb-systemd-invoke reload-or-try-restart pveproxy.service
-systemctl --quiet is-active spiceproxy.service && deb-systemd-invoke reload-or-try-restart spiceproxy.service
+deb-systemd-invoke reload-or-try-restart pvedaemon.service
+deb-systemd-invoke reload-or-try-restart pvestatd.service
+deb-systemd-invoke reload-or-try-restart pveproxy.service
+deb-systemd-invoke reload-or-try-restart spiceproxy.service
 
 exit 0;;
 
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 5/8] postinst: start/restart units like dh_start

2018-03-13 Thread Fabian Grünbichler
with an added "reload-or" for pvedaemon/pveproxy/spiceproxy, which
dh_start unfortunately does not yet support

Signed-off-by: Fabian Grünbichler 
---
 debian/postinst | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/debian/postinst b/debian/postinst
index f2ac0ab2..a648ec39 100755
--- a/debian/postinst
+++ b/debian/postinst
@@ -64,8 +64,14 @@ case "$1" in
 done
 
 if test ! -e /proxmox_install_mode; then
+   # modeled after code generated by dh_start
for unit in ${UNITS}; do
-   deb-systemd-invoke reload-or-restart "$unit"
+   if test -n "$2"; then
+   dh_action="reload-or-try-restart";
+   else
+   dh_action="start"
+   fi
+   deb-systemd-invoke $dh_action "$unit"
done
 fi
 
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 0/8] further build/postinst cleanups

2018-03-13 Thread Fabian Grünbichler
this series follows up on the recent build/packaging cleanup series by Thomas
Lamprecht, adds some missing bits and refactors the postinst some more to make
it short/easier to read

also included are two entirely unrelated nit fixes for the build it self

Fabian Grünbichler (8):
  postinst: cleanup trigger code
  postinst: actually enable and start pveupdate.timer
  postinst: refactor service/timer handling
  postinst: unify version-dependent cleanup
  postinst: start/restart units like dh_start
  postinst: switch to /bin/sh
  build: use git rev-parse for GITVERSION
  build: remove empty WWWROOT dir

 Makefile  |  2 +-
 www/manager6/Makefile |  1 -
 debian/postinst   | 85 ++-
 defines.mk|  1 -
 4 files changed, 38 insertions(+), 51 deletions(-)

-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH] increase zfs default timeout to 30sec

2018-03-13 Thread Thomas Lamprecht
On 03/13/2018 09:53 AM, Lauri Tirkkonen wrote:
> On Tue, Mar 13 2018 09:45:18 +0100, Fabian Grünbichler wrote:
>> On Mon, Mar 12, 2018 at 04:06:47PM +0200, Lauri Tirkkonen wrote:
>>> busy pools can easily take more than 5 seconds for eg. zfs create
>>> ---
>>>  PVE/Storage/ZFSPoolPlugin.pm | 2 +-
>>>  1 file changed, 1 insertion(+), 1 deletion(-)
>>>
>>> diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
>>> index e864a58..7ba035f 100644
>>> --- a/PVE/Storage/ZFSPoolPlugin.pm
>>> +++ b/PVE/Storage/ZFSPoolPlugin.pm
>>> @@ -167,7 +167,7 @@ sub path {
>>>  sub zfs_request {
>>>  my ($class, $scfg, $timeout, $method, @params) = @_;
>>>  
>>> -my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 5;
>>> +my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 30;
>>
>> unfortunately, this is not the way to solve this. our API has a timeout
>> per request, so things that (can) take a long time should run in a
>> worker (the API then returns the ID of the task, which can be queried
>> over the API to retrieve status and/or output).
> 
> This did solve our immediate issue: adding a disk to an existing VM from
> the web UI failed with a timeout when the pool had some load on it. (A
> side note: SIGKILLing the zfs process won't make it exit any faster if
> it's waiting in kernel, which it likely is).
> 
> Obviously you are free to solve the underlying issue(s) any way you
> wish; we just wanted to share our fix. 5 seconds just isn't enough.

What Fabian meant with:
> [...] our API has a timeout per request, [...]

is that our API already has a 30 second timeout per request/response,
so using the full 30 seconds here as well can be problematic.

As a quick, easy improvement we could probably increase it from
5 to, say, 20-25 seconds - still below the API timeout, but nonetheless
several times higher than now.
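
For illustration, that compromise would be a one-line change in zfs_request
(a sketch only, reusing the line from the posted patch, with 25s picked as an
example from that range):

    # keep the synchronous default below the ~30s API request timeout
    my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 25;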

cheers,
Thomas


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] avoiding VMID reuse

2018-03-13 Thread Lauri Tirkkonen
On Tue, Mar 13 2018 10:06:41 +0100, Thomas Lamprecht wrote:
> On 03/12/2018 03:13 PM, Lauri Tirkkonen wrote:
> > Hi,
> > 
> > we'd like to keep VMIDs always unique per node, so that we can use zfs
> > for backups more easily (ie. avoid dataset name collisions; granted,
> > this still leaves room for collisions if disks are removed and added to
> > a VM). We've implemented the following (crappy) PoC that accomplishes
> > just this by storing the highest allocated VMID and returning that for
> > /cluster/nextid.
> > 
> 
> Sorry if I misunderstood you but VMIDs are already _guaranteed_ to be
> unique cluster wide, so also unique per node?

I'll try to clarify: if I create a VM that gets assigned vmid 100, and
use zfs for storage, its first disk image is a dataset called
<pool>/vm-100-disk-1. If I then later remove vmid 100 and create
another new VM, /cluster/nextid will suggest that the new vmid should
be 100, and its disk image will also be called vm-100-disk-1. We're
backing up our disk images using zfs snapshots and sends (on other ZFS
hosts too, not just PVE), so it's quite bad if we reuse a name for a
completely different dataset - it'll require manual admin intervention.
So, we want to avoid using any vmid that has been used in the past.
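
To illustrate the collision (a sketch with made-up example values; the real
plugin derives dataset names along these lines):

    # dataset names depend only on the VMID and a per-VM disk counter,
    # so a reused VMID reproduces an old dataset name exactly
    my ($pool, $vmid) = ('rpool/data', 100);   # example values
    my $dataset = "$pool/vm-$vmid-disk-1";     # -> rpool/data/vm-100-disk-1

A snapshot/send based backup keyed on that dataset name would then mix two
unrelated guests.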

> Your approach, allowing different nodes from a cluster to alloc
> the same VMID, also should not work: our clustered configuration
> file system (pmxcfs) does not allow this, i.e. no VMID.conf
> file can exist multiple times in the qemu-server and lxc folders
> (i.e., the folders holding CT/VM configuration files).

Right. We're not actually running PVE in a cluster configuration, so I
might've been a little confused there - if the VMIDs are unique in the
cluster anyway, then the storage for the counter shouldn't be under
"local/", I suppose.
___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] avoiding VMID reuse

2018-03-13 Thread Thomas Lamprecht
Hi,

On 03/12/2018 03:13 PM, Lauri Tirkkonen wrote:
> Hi,
> 
> we'd like to keep VMIDs always unique per node, so that we can use zfs
> for backups more easily (ie. avoid dataset name collisions; granted,
> this still leaves room for collisions if disks are removed and added to
> a VM). We've implemented the following (crappy) PoC that accomplishes
> just this by storing the highest allocated VMID and returning that for
> /cluster/nextid.
> 

Sorry if I misunderstood you but VMIDs are already _guaranteed_ to be
unique cluster wide, so also unique per node?

Your approach, allowing different nodes from a cluster to alloc
the same VMID, also should not work: our clustered configuration
file system (pmxcfs) does not allow this, i.e. no VMID.conf
file can exist multiple times in the qemu-server and lxc folders
(i.e., the folders holding CT/VM configuration files).

If your problem is that you request multiple VMIDs at the same
time, and thus run into a race, then take a look at:

https://pve.proxmox.com/pipermail/pve-devel/2016-October/023440.html

I tried to address this when I attempted to make the VMID optional on
VM/CT create. Making it optional won't happen, as it violates the
"POST only once" principle and brings you into hell's kitchen.

But the reserve-VMID part may still make some sense, at least as an idea.

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH] increase zfs default timeout to 30sec

2018-03-13 Thread Lauri Tirkkonen
On Tue, Mar 13 2018 09:45:18 +0100, Fabian Grünbichler wrote:
> On Mon, Mar 12, 2018 at 04:06:47PM +0200, Lauri Tirkkonen wrote:
> > busy pools can easily take more than 5 seconds for eg. zfs create
> > ---
> >  PVE/Storage/ZFSPoolPlugin.pm | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> > 
> > diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
> > index e864a58..7ba035f 100644
> > --- a/PVE/Storage/ZFSPoolPlugin.pm
> > +++ b/PVE/Storage/ZFSPoolPlugin.pm
> > @@ -167,7 +167,7 @@ sub path {
> >  sub zfs_request {
> >  my ($class, $scfg, $timeout, $method, @params) = @_;
> >  
> > -my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 5;
> > +my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 30;
> 
> unfortunately, this is not the way to solve this. our API has a timeout
> per request, so things that (can) take a long time should run in a
> worker (the API then returns the ID of the task, which can be queried
> over the API to retrieve status and/or output).

This did solve our immediate issue: adding a disk to an existing VM from
the web UI failed with a timeout when the pool had some load on it. (A
side note: SIGKILLing the zfs process won't make it exit any faster if
it's waiting in kernel, which it likely is).

Obviously you are free to solve the underlying issue(s) any way you
wish; we just wanted to share our fix. 5 seconds just isn't enough.
___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH] increase zfs default timeout to 30sec

2018-03-13 Thread Fabian Grünbichler
first off, thanks for the patch!

if you haven't already, please send a signed CLA[1] to
off...@proxmox.com, and include a "Signed-off-by" tag in your
commits/patches!

On Mon, Mar 12, 2018 at 04:06:47PM +0200, Lauri Tirkkonen wrote:
> busy pools can easily take more than 5 seconds for eg. zfs create
> ---
>  PVE/Storage/ZFSPoolPlugin.pm | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
> index e864a58..7ba035f 100644
> --- a/PVE/Storage/ZFSPoolPlugin.pm
> +++ b/PVE/Storage/ZFSPoolPlugin.pm
> @@ -167,7 +167,7 @@ sub path {
>  sub zfs_request {
>  my ($class, $scfg, $timeout, $method, @params) = @_;
>  
> -my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 5;
> +my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 30;

unfortunately, this is not the way to solve this. our API has a timeout
per request, so things that (can) take a long time should run in a
worker (the API then returns the ID of the task, which can be queried
over the API to retrieve status and/or output).

one long-standing issue in this area is actually listing the content of
storages, which can take quite a while with some storage technologies
(e.g. Ceph, ZFS, ..). there were some ideas on how to solve this
(add an async flag to the involved API calls that allows opting into the
worker behaviour, and cache the content client-side in our web
interface), but no implementation so far.

maybe a similar approach can be taken for allocating and freeing images
(of course without the caching ;))?

guest create/restore/migrate already all run inside workers, so this
issue should not trigger there..

@Thomas, Dominik:

I am not sure what would need to be done to accommodate such an async
flag for API2::Storage::Content POST and DELETEs? the CLI side is
trivial since workers run sync there anyway..
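
For the sake of discussion, a very rough sketch of the opt-in idea - the
'async' parameter name and the worker type are invented here, none of this
exists yet:

    # sketch only: fragments of a register_method() schema/code pair
    async => {
        type => 'boolean',
        optional => 1,
        default => 0,
        description => "Run the (slow) storage operation as a background task.",
    },
    ...
    code => sub {
        my ($param) = @_;
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();

        my $realcmd = sub { ... };   # the actual allocate/free operation

        # opt-in: return a task UPID the client can poll instead of blocking
        return $rpcenv->fork_worker('imgdel', undef, $authuser, $realcmd)
            if $param->{async};

        $realcmd->();
        return undef;
    },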

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] avoiding VMID reuse

2018-03-13 Thread Lauri Tirkkonen
Hi,

we'd like to keep VMIDs always unique per node, so that we can use zfs
for backups more easily (ie. avoid dataset name collisions; granted,
this still leaves room for collisions if disks are removed and added to
a VM). We've implemented the following (crappy) PoC that accomplishes
just this by storing the highest allocated VMID and returning that for
/cluster/nextid.

The patches are across four repositories, so I'm attaching four files
named by repository.
diff --git a/data/PVE/Cluster.pm b/data/PVE/Cluster.pm
index b49fd94..fea0639 100644
--- a/data/PVE/Cluster.pm
+++ b/data/PVE/Cluster.pm
@@ -996,6 +996,19 @@ sub log_msg {
syslog("err", "writing cluster log failed: $@") if $@;
 }
 
+sub alloc_vmid {
+    my ($vmid) = @_;
+    check_vmid_unused($vmid);
+    open(my $fh, '+<', '/etc/pve/local/nextid');
+    my $next = 0 + readline($fh);
+    if ($vmid >= $next) {
+        $next = 1 + $vmid;
+        seek($fh, 0, 0);
+        print $fh "$next";
+    }
+    close($fh);
+}
+
 sub check_vmid_unused {
 my ($vmid, $noerr) = @_;
 
diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm
index 733826e..9accfdb 100644
--- a/src/PVE/API2/LXC.pm
+++ b/src/PVE/API2/LXC.pm
@@ -337,6 +337,7 @@ __PACKAGE__->register_method({
die "Could not reserve ID $vmid, already taken\n" if $@;
}
 
+   PVE::Cluster::alloc_vmid($vmid);
PVE::Cluster::check_cfs_quorum();
my $vollist = [];
 
diff --git a/PVE/API2/Cluster.pm b/PVE/API2/Cluster.pm
index 7f38e61c..590f4fb7 100644
--- a/PVE/API2/Cluster.pm
+++ b/PVE/API2/Cluster.pm
@@ -505,11 +505,13 @@ __PACKAGE__->register_method({
raise_param_exc({ vmid => "VM $vmid already exists" });
}
 
-   for (my $i = 100; $i < 10000; $i++) {
-   return $i if !defined($idlist->{$i});
-   }
-
-   die "unable to get any free VMID\n";
+   open(my $fh, '<', '/etc/pve/local/nextid');
+   my $i = 0 + readline($fh);
+   close($fh);
+   if (defined($idlist->{$i})) {
+       die "unable to get any free VMID\n";
+   }
+   return $i;
 }});
 
 1;
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 0174feb..358d539 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -558,6 +558,7 @@ __PACKAGE__->register_method({
 
# test after locking
PVE::Cluster::check_vmid_unused($vmid);
+   PVE::Cluster::alloc_vmid($vmid);
 
# ensure no old replication state are exists
PVE::ReplicationState::delete_guest_states($vmid);
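
As an aside, a slightly more defensive variant of the alloc_vmid PoC above
(same file layout, only error checks added; a real version would also need
something like PVE::Cluster::cfs_lock_file around it, since two concurrent
allocations can still race):

    sub alloc_vmid {
        my ($vmid) = @_;

        check_vmid_unused($vmid);

        # like the PoC, this assumes the file already exists
        my $path = '/etc/pve/local/nextid';
        open(my $fh, '+<', $path) or die "unable to open $path - $!\n";
        my $next = 0 + (readline($fh) // 0);   # treat an empty file as 0
        if ($vmid >= $next) {
            $next = $vmid + 1;
            seek($fh, 0, 0) or die "seek on $path failed - $!\n";
            print $fh $next;
        }
        close($fh) or die "close on $path failed - $!\n";
    }
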
___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH] increase zfs default timeout to 30sec

2018-03-13 Thread Lauri Tirkkonen
busy pools can easily take more than 5 seconds for eg. zfs create
---
 PVE/Storage/ZFSPoolPlugin.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/Storage/ZFSPoolPlugin.pm b/PVE/Storage/ZFSPoolPlugin.pm
index e864a58..7ba035f 100644
--- a/PVE/Storage/ZFSPoolPlugin.pm
+++ b/PVE/Storage/ZFSPoolPlugin.pm
@@ -167,7 +167,7 @@ sub path {
 sub zfs_request {
 my ($class, $scfg, $timeout, $method, @params) = @_;
 
-my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 5;
+my $default_timeout = PVE::RPCEnvironment->is_worker() ? 60*60 : 30;
 
 my $cmd = [];
 
-- 
2.16.2

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH qemu-server 1/3] use default hostname when none is set

2018-03-13 Thread Thomas Lamprecht
On 03/12/2018 04:25 PM, Dominik Csapak wrote:
> use "VM$vmid" like we do in a container
> 

the series looks OK to me, but I'll let Wolfgang/Dietmar handle this -
CloudInit is fully theirs ;)

Reviewed-by: Thomas Lamprecht 

Nit: could you please add a tag in front of the commit message
for a series like this, which affects a specific subsystem of a repo,
e.g.:

cloudinit: use default hostname when none is set

It helps a lot when looking through the git log later, where you do
not always have the file change statistics available like here; and
even here it helps, because one knows from the very first words what
the patch is about :)
(this doesn't always make sense, but here it'd work great)

> Signed-off-by: Dominik Csapak 
> ---
>  PVE/QemuServer/Cloudinit.pm | 12 ++--
>  1 file changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/PVE/QemuServer/Cloudinit.pm b/PVE/QemuServer/Cloudinit.pm
> index dc55d2d..31f8462 100644
> --- a/PVE/QemuServer/Cloudinit.pm
> +++ b/PVE/QemuServer/Cloudinit.pm
> @@ -63,8 +63,8 @@ sub get_cloudinit_format {
>  }
>  
>  sub get_hostname_fqdn {
> -my ($conf) = @_;
> -my $hostname = $conf->{name};
> +my ($conf, $vmid) = @_;
> +my $hostname = $conf->{name} // "VM$vmid";
>  my $fqdn;
>  if ($hostname =~ /\./) {
>   $fqdn = $hostname;
> @@ -96,9 +96,9 @@ sub get_dns_conf {
>  }
>  
>  sub cloudinit_userdata {
> -my ($conf) = @_;
> +my ($conf, $vmid) = @_;
>  
> -my ($hostname, $fqdn) = get_hostname_fqdn($conf);
> +my ($hostname, $fqdn) = get_hostname_fqdn($conf, $vmid);
>  
>  my $content = "#cloud-config\n";
>  
> @@ -198,7 +198,7 @@ EOF
>  sub generate_configdrive2 {
>  my ($conf, $vmid, $drive, $volname, $storeid) = @_;
>  
> -my $user_data = cloudinit_userdata($conf);
> +my $user_data = cloudinit_userdata($conf, $vmid);
>  my $network_data = configdrive2_network($conf);
>  
>  my $digest_data = $user_data . $network_data;
> @@ -363,7 +363,7 @@ sub nocloud_metadata {
>  sub generate_nocloud {
>  my ($conf, $vmid, $drive, $volname, $storeid) = @_;
>  
> -my $user_data = cloudinit_userdata($conf);
> +my $user_data = cloudinit_userdata($conf, $vmid);
>  my $network_data = nocloud_network($conf);
>  
>  my $digest_data = $user_data . $network_data;
> 
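
The fallback in the patch is just Perl's defined-or operator; spelled out
with example values:

    my $conf = {};                                 # no 'name' configured
    my $vmid = 100;
    my $hostname = $conf->{name} // "VM$vmid";     # -> "VM100"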


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH qemu-server] remove legacy vm_monitor_command

2018-03-13 Thread Thomas Lamprecht
We introduced our QMP socket with commit
c971c4f2213524f27125f558978a428b53628f34 (29.05.2012)

Already tried to remove this with commit
7b7c6d1b5dcee25e1194d4b8a0219bd5c31a5639 (13.07.2012)

But reverted that to allow migration of VMs still using the old
monitor to ones which had already switched over to the new QMP one,
in commit dab36e1ee924be0efab3f85937c23910b456f4b9 (17.08.2012);
see bug #242 for reference.

This was all done and released in PVE 2.2. As no migration between
nodes differing by more than one major version is possible, we can
finally remove this code for good.

Signed-off-by: Thomas Lamprecht 
---
 PVE/QemuServer.pm | 79 ---
 1 file changed, 79 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index a26b2ab..5462d07 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -4169,81 +4169,6 @@ sub __read_avail {
 return $res;
 }
 
-# old code, only used to shutdown old VM after update
-sub vm_monitor_command {
-my ($vmid, $cmdstr, $nocheck) = @_;
-
-my $res;
-
-eval {
-   die "VM $vmid not running\n" if !check_running($vmid, $nocheck);
-
-   my $sname = "${var_run_tmpdir}/$vmid.mon";
-
-   my $sock = IO::Socket::UNIX->new( Peer => $sname ) ||
-   die "unable to connect to VM $vmid socket - $!\n";
-
-   my $timeout = 3;
-
-   # hack: migrate sometime blocks the monitor (when migrate_downtime
-   # is set)
-   if ($cmdstr =~ m/^(info\s+migrate|migrate\s)/) {
-   $timeout = 60*60; # 1 hour
-   }
-
-   # read banner;
-   my $data = __read_avail($sock, $timeout);
-
-   if ($data !~ m/^QEMU\s+(\S+)\s+monitor\s/) {
-   die "got unexpected qemu monitor banner\n";
-   }
-
-   my $sel = new IO::Select;
-   $sel->add($sock);
-
-   if (!scalar(my @ready = $sel->can_write($timeout))) {
-   die "monitor write error - timeout";
-   }
-
-   my $fullcmd = "$cmdstr\r";
-
-   # syslog('info', "VM $vmid monitor command: $cmdstr");
-
-   my $b;
-   if (!($b = $sock->syswrite($fullcmd)) || ($b != length($fullcmd))) {
-   die "monitor write error - $!";
-   }
-
-   return if ($cmdstr eq 'q') || ($cmdstr eq 'quit');
-
-   $timeout = 20;
-
-   if ($cmdstr =~ m/^(info\s+migrate|migrate\s)/) {
-   $timeout = 60*60; # 1 hour
-   } elsif ($cmdstr =~ m/^(eject|change)/) {
-   $timeout = 60; # note: cdrom mount command is slow
-   }
-   if ($res = __read_avail($sock, $timeout)) {
-
-   my @lines = split("\r?\n", $res);
-
-   shift @lines if $lines[0] !~ m/^unknown command/; # skip echo
-
-   $res = join("\n", @lines);
-   $res .= "\n";
-   }
-};
-
-my $err = $@;
-
-if ($err) {
-   syslog("err", "VM $vmid monitor command failed - $err");
-   die $err;
-}
-
-return $res;
-}
-
 sub qemu_block_resize {
 my ($vmid, $deviceid, $storecfg, $volid, $size) = @_;
 
@@ -5049,10 +4974,6 @@ sub vm_qmp_command {
my $qmpclient = PVE::QMPClient->new();
 
$res = $qmpclient->cmd($vmid, $cmd, $timeout);
-   } elsif (-e "${var_run_tmpdir}/$vmid.mon") {
-   die "can't execute complex command on old monitor - stop/start your 
vm to fix the problem\n"
-   if scalar(%{$cmd->{arguments}});
-   vm_monitor_command($vmid, $cmd->{execute}, $nocheck);
} else {
die "unable to open monitor socket\n";
}
-- 
2.14.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel