[pve-devel] [PATCH zfsonlinux 2/6] Use installed python3

2020-05-13 Thread Stoiko Ivanov
From: Antonio Russo 

adapted from debian-upstream [0] commit
594e747e14f3051513ea499c40c17cadb5d0e92b

[0] https://salsa.debian.org/zfsonlinux-team/zfs.git

Signed-off-by: Stoiko Ivanov 
---
 .../patches/0007-Use-installed-python3.patch  | 60 +++
 debian/patches/series |  1 +
 debian/rules  |  1 +
 3 files changed, 62 insertions(+)
 create mode 100644 debian/patches/0007-Use-installed-python3.patch

diff --git a/debian/patches/0007-Use-installed-python3.patch 
b/debian/patches/0007-Use-installed-python3.patch
new file mode 100644
index ..59769d63
--- /dev/null
+++ b/debian/patches/0007-Use-installed-python3.patch
@@ -0,0 +1,60 @@
+From  Mon Sep 17 00:00:00 2001
+From: Antonio Russo 
+Date: Tue, 5 May 2020 22:15:16 -0600
+Subject: [PATCH] Use installed python3
+
+---
+ debian/patches/0004-prefer-python3-tests.patch  | 0
+ .../functional/cli_root/zfs_program/zfs_program_json.ksh| 6 +++---
+ .../tests/functional/rsend/send_encrypted_files.ksh | 2 +-
+ .../tests/functional/rsend/send_realloc_dnode_size.ksh  | 2 +-
+ 4 files changed, 5 insertions(+), 5 deletions(-)
+ create mode 100644 debian/patches/0004-prefer-python3-tests.patch
+
+diff --git a/debian/patches/0004-prefer-python3-tests.patch 
b/debian/patches/0004-prefer-python3-tests.patch
+new file mode 100644
+index 0..e69de29bb
+diff --git 
a/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh 
b/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh
+index 3788543b0..c7ee4ae9a 100755
+--- 
a/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh
 
b/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh
+@@ -100,10 +100,10 @@ typeset -a pos_cmds_out=(
+ # the same as the input and the --sort-keys option was added.  Detect when
+ # --sort-keys is supported and apply the option to ensure the expected order.
+ #
+-if python -m json.tool --sort-keys <<< "{}"; then
+-  JSON_TOOL_CMD="python -m json.tool --sort-keys"
++if python3 -m json.tool --sort-keys <<< "{}"; then
++  JSON_TOOL_CMD="python3 -m json.tool --sort-keys"
+ else
+-  JSON_TOOL_CMD="python -m json.tool"
++  JSON_TOOL_CMD="python3 -m json.tool"
+ fi
+ 
+ typeset -i cnt=0
+diff --git a/tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh 
b/tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh
+index 6288178f8..d4475b369 100755
+--- a/tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh
 b/tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh
+@@ -87,7 +87,7 @@ log_must xattrtest -f 10 -x 3 -s 32768 -r -k -p 
/$TESTPOOL/$TESTFS2/xattrsadir
+ # ZoL issue #7432
+ log_must zfs set compression=on xattr=sa $TESTPOOL/$TESTFS2
+ log_must touch /$TESTPOOL/$TESTFS2/attrs
+-log_must eval "python -c 'print \"a\" * 4096' | \
++log_must eval "python3 -c 'print \"a\" * 4096' | \
+   attr -s bigval /$TESTPOOL/$TESTFS2/attrs"
+ log_must zfs set compression=off xattr=on $TESTPOOL/$TESTFS2
+ 
+diff --git 
a/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh 
b/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh
+index 12a72fa09..aceec7880 100755
+--- a/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh
 b/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh
+@@ -93,7 +93,7 @@ log_must zfs snapshot $POOL/fs@c
+ # 4. Create an empty file and add xattrs to it to exercise reclaiming a
+ #dnode that requires more than 1 slot for its bonus buffer (Zol #7433)
+ log_must zfs set compression=on xattr=sa $POOL/fs
+-log_must eval "python -c 'print \"a\" * 512' | attr -s bigval /$POOL/fs/attrs"
++log_must eval "python3 -c 'print \"a\" * 512' | attr -s bigval 
/$POOL/fs/attrs"
+ log_must zfs snapshot $POOL/fs@d
+ 
+ # 5. Generate initial and incremental streams
diff --git a/debian/patches/series b/debian/patches/series
index 8fe3840a..33a76ef1 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -4,3 +4,4 @@
 0004-import-with-d-dev-disk-by-id-in-scan-service.patch
 0005-Enable-zed-emails.patch
 0006-dont-symlink-zed-scripts.patch
+0007-Use-installed-python3.patch
diff --git a/debian/rules b/debian/rules
index bb44c376..47fdb669 100755
--- a/debian/rules
+++ b/debian/rules
@@ -33,6 +33,7 @@ override_dh_auto_configure:
  --with-zfsexecdir=/usr/lib/zfs-linux \
  --enable-systemd \
  --enable-pyzfs \
+ --with-python=python3 \
  --with-systemdunitdir=/lib/systemd/system \
  --with-systemdpresetdir=/lib/systemd/system-preset \
  --with-systemdgeneratordir=/lib/systemd/system-generators \
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH zfsonlinux 3/6] Add dependencies for zfs-test

2020-05-13 Thread Stoiko Ivanov
From: Antonio Russo 

nfs-kernel-server is required for some tests

Signed-off-by: Stoiko Ivanov 
---
 debian/control | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/debian/control b/debian/control
index d9196716..62ab6221 100644
--- a/debian/control
+++ b/debian/control
@@ -185,6 +185,7 @@ Package: zfs-test
 Section: contrib/admin
 Architecture: linux-any
 Depends: acl,
+ attr,
  bc,
  fio,
  ksh,
@@ -198,6 +199,7 @@ Depends: acl,
  zfsutils-linux (>=${binary:Version}),
  ${misc:Depends},
  ${shlibs:Depends}
+Recommends: nfs-kernel-server
 Breaks: zfsutils-linux (<= 0.7.9-2)
 Replaces: zfsutils-linux (<= 0.7.9-2)
 Conflicts: zutils
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH zfsonlinux 1/6] update submodule and patches for zfs-0.8.4

2020-05-13 Thread Stoiko Ivanov
Signed-off-by: Stoiko Ivanov 
---
 .../0004-import-with-d-dev-disk-by-id-in-scan-service.patch   | 4 ++--
 upstream  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/debian/patches/0004-import-with-d-dev-disk-by-id-in-scan-service.patch 
b/debian/patches/0004-import-with-d-dev-disk-by-id-in-scan-service.patch
index e1f3360c..4ad0db30 100644
--- a/debian/patches/0004-import-with-d-dev-disk-by-id-in-scan-service.patch
+++ b/debian/patches/0004-import-with-d-dev-disk-by-id-in-scan-service.patch
@@ -14,10 +14,10 @@ Signed-off-by: Thomas Lamprecht 
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/etc/systemd/system/zfs-import-scan.service.in 
b/etc/systemd/system/zfs-import-scan.service.in
-index 4aae9f06e..ec5c45e63 100644
+index 278f937fe..e8c502a33 100644
 --- a/etc/systemd/system/zfs-import-scan.service.in
 +++ b/etc/systemd/system/zfs-import-scan.service.in
-@@ -11,7 +11,7 @@ ConditionPathExists=!@sysconfdir@/zfs/zpool.cache
+@@ -12,7 +12,7 @@ ConditionPathExists=!@sysconfdir@/zfs/zpool.cache
  [Service]
  Type=oneshot
  RemainAfterExit=yes
diff --git a/upstream b/upstream
index 9bb3d57b..6b18d7df 16
--- a/upstream
+++ b/upstream
@@ -1 +1 @@
-Subproject commit 9bb3d57b03e6916a2d38574420a2934b8827b3fb
+Subproject commit 6b18d7df3772cffa6469b00866d7eae585b912ae
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH zfsonlinux 6/6] Remove the unnecessary --with systemd dh option.

2020-05-13 Thread Stoiko Ivanov
adapted from debian-upstream [0] commit
53276c973c5e69f75b43371a6c94adc5d9dcfec0
(the systemd sequence is enabled by default since debhelper v10 - see
debhelper(7))

[0] https://salsa.debian.org/zfsonlinux-team/zfs.git

Signed-off-by: Stoiko Ivanov 
---
 debian/rules | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/debian/rules b/debian/rules
index 47fdb669..623cc610 100755
--- a/debian/rules
+++ b/debian/rules
@@ -10,7 +10,7 @@ SPHINX_BUILD = $(shell dpkg -L python3-sphinx | grep 
"sphinx-build$$")
 export DEB_BUILD_MAINT_OPTIONS = hardening=+all
 
 %:
-   dh $@ --with autoreconf,python3,systemd,sphinxdoc --parallel
+   dh $@ --with autoreconf,python3,sphinxdoc --parallel
 
 adapt_meta_file:
@# Embed the downstream version in the module.
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH zfsonlinux 0/6] update zfs to 0.8.4

2020-05-13 Thread Stoiko Ivanov
The pull request for zfs 0.8.4 got merged yesterday - see [0].
For the greatest part it contains compatibility fixes for newer kernels
(5.5 - 5.7)

Commit e1b0704568b1674f47d43786180613e585aea2cf made dpkg-buildpackage
fail because dh_missing was confused about the new location of
'etc/zfs/zfs-functions' and 'etc/default/zfs' (they now exist both in
the source tree as well as in DESTDIR=debian/tmp). The merge request
for debian-upstream - see [1] - addressed the issue in a similar way as my
initial workaround during testing - so I went ahead and cherry-picked theirs.

Additionally I took the time to pick and adapt a few commits from
debian-upstream to keep them synchronized (patches 2-4).

Testing was done by letting a kernel with the proposed patchset run on my
workstation and various VMs for more than a week. Additionally a user,
requesting some performance related patches to ZFS, ran a kernel containing
(an earlier version of) the proposed patchset along with 2 performance-related
commits - see [2].


[0] https://github.com/openzfs/zfs/pull/10209
[1] https://salsa.debian.org/zfsonlinux-team/zfs/-/merge_requests/23
[2] https://bugzilla.proxmox.com/show_bug.cgi?id=2666

Antonio Russo (3):
  Use installed python3
  Add dependencies for zfs-test
  Refresh debian/not-installed

Stoiko Ivanov (3):
  update submodule and patches for zfs-0.8.4
  Bump Standards-Version to 4.5.0 (no change).
  Remove the unnecessary --with systemd dh option.

 debian/control|  4 +-
 debian/not-installed  |  2 +
 ...ith-d-dev-disk-by-id-in-scan-service.patch |  4 +-
 .../patches/0007-Use-installed-python3.patch  | 60 +++
 debian/patches/series |  1 +
 debian/rules  |  3 +-
 upstream  |  2 +-
 7 files changed, 71 insertions(+), 5 deletions(-)
 create mode 100644 debian/patches/0007-Use-installed-python3.patch

-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH zfsonlinux 5/6] Bump Standards-Version to 4.5.0 (no change).

2020-05-13 Thread Stoiko Ivanov
Compared the entries in the Debian Policy Manual's upgrade checklist [0]

inspired by debian-upstream [1] commit d756d10a40607bd2b9599aa959b2aa1738551e72

[0] https://www.debian.org/doc/debian-policy/upgrading-checklist.html
[1] https://salsa.debian.org/zfsonlinux-team/zfs.git

Signed-off-by: Stoiko Ivanov 
---
 debian/control | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/debian/control b/debian/control
index 62ab6221..422b2c52 100644
--- a/debian/control
+++ b/debian/control
@@ -14,7 +14,7 @@ Build-Depends: debhelper (>= 10~),
python3-all-dev,
uuid-dev,
zlib1g-dev
-Standards-Version: 4.1.2
+Standards-Version: 4.5.0
 Homepage: https://www.zfsonlinux.org/
 Vcs-Git: https://git.proxmox.com/git/zfsonlinux.git
 Vcs-Browser: https://git.proxmox.com/?p=zfsonlinux.git;a=summary
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH zfsonlinux 4/6] Refresh debian/not-installed

2020-05-13 Thread Stoiko Ivanov
From: Antonio Russo 

/etc/default/zfs and /etc/zfs/zfs-functions are now installed by the
makefiles.  Continue to install them directly as before, but do not
--fail-missing because of them.

adapted from debian-upstream [0] commit
9a594875114fe186aebba2776b14817ab7f272ae

[0] https://salsa.debian.org/zfsonlinux-team/zfs.git

Signed-off-by: Stoiko Ivanov 
---
 debian/not-installed | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/debian/not-installed b/debian/not-installed
index 89061916..db0e5603 100644
--- a/debian/not-installed
+++ b/debian/not-installed
@@ -1,5 +1,6 @@
 usr/share/zfs/enum-extract.pl
 usr/share/zfs/zfs-helpers.sh
+etc/default/zfs
 etc/init.d
 etc/sudoers.d
 etc/zfs/vdev_id.conf.alias.example
@@ -9,3 +10,4 @@ etc/zfs/vdev_id.conf.sas_switch.example
 etc/zfs/vdev_id.conf.scsi.example
 usr/lib/dracut
 usr/share/zfs/enum-extract.pl
+etc/zfs/zfs-functions
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


Re: [pve-devel] [PATCH qemu-server 1/2] add support for nvme emulation

2020-05-13 Thread Stefan Reiter

Gave both patches a spin and they work fine on first glance.

We should probably assign a fixed PCI bus/addr to the NVMe devices 
though (same as we do for SCSI and AHCI hardware with print_pci_addr 
somewhere in the depths of config_to_command).


On 5/13/20 5:36 PM, Oguz Bektas wrote:

now we can add nvme drives;

nvme0: local-lvm:vm-103-disk-0,size=32G

max number is 8

Signed-off-by: Oguz Bektas 
---
  PVE/QemuServer.pm   | 20 +---
  PVE/QemuServer/Drive.pm | 21 +
  2 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index dcf05df..441d209 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -406,7 +406,7 @@ EODESC
optional => 1,
type => 'string', format => 'pve-qm-bootdisk',
description => "Enable booting from specified disk.",
-   pattern => '(ide|sata|scsi|virtio)\d+',
+   pattern => '(ide|sata|scsi|virtio|nvme)\d+',
  },
  smp => {
optional => 1,
@@ -1424,7 +1424,11 @@ sub print_drivedevice_full {
$device .= ",rotation_rate=1";
}
$device .= ",wwn=$drive->{wwn}" if $drive->{wwn};
-
+} elsif ($drive->{interface} eq 'nvme') {
+   my $maxdev = $PVE::QemuServer::Drive::MAX_NVME_DISKS;


$maxdev is not used anywhere?


+   my $path = $drive->{file};
+   $drive->{serial} = "$drive->{interface}$drive->{index}"; # serial is 
mandatory for nvme
+   $device = "nvme,drive=drive-$drive->{interface}$drive->{index}";
  } elsif ($drive->{interface} eq 'ide' || $drive->{interface} eq 'sata') {
my $maxdev = ($drive->{interface} eq 'sata') ? 
$PVE::QemuServer::Drive::MAX_SATA_DISKS : 2;
my $controller = int($drive->{index} / $maxdev);

>
> [...]

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH 0/2] nvme emulation

2020-05-13 Thread Oguz Bektas
add nvme emulation support for disks.

qemu-server:

Oguz Bektas (1):
  add support for nvme emulation

 PVE/QemuServer.pm   | 20 +---
 PVE/QemuServer/Drive.pm | 21 +
 2 files changed, 38 insertions(+), 3 deletions(-)

pve-manager:

Oguz Bektas (1):
  gui: add nvme as a bus type for creating disks

 www/manager6/Utils.js   |   3 ++-
 www/manager6/form/BusTypeSelector.js|   2 ++
 www/manager6/form/ControllerSelector.js |   4 ++--
 www/manager6/qemu/.Snapshot.js.swp  | Bin 0 -> 12288 bytes
 www/manager6/qemu/CloudInit.js  |   4 ++--
 www/mobile/QemuSummary.js   |   2 +-
 6 files changed, 9 insertions(+), 6 deletions(-)
 create mode 100644 www/manager6/qemu/.Snapshot.js.swp


-- 
2.20.1

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH manager 2/2] gui: add nvme as a bus type for creating disks

2020-05-13 Thread Oguz Bektas
add nvme to the bus list and relevant spots in gui

Signed-off-by: Oguz Bektas 
---
 www/manager6/Utils.js   |   3 ++-
 www/manager6/form/BusTypeSelector.js|   2 ++
 www/manager6/form/ControllerSelector.js |   4 ++--
 www/manager6/qemu/.Snapshot.js.swp  | Bin 0 -> 12288 bytes
 www/manager6/qemu/CloudInit.js  |   4 ++--
 www/mobile/QemuSummary.js   |   2 +-
 6 files changed, 9 insertions(+), 6 deletions(-)
 create mode 100644 www/manager6/qemu/.Snapshot.js.swp

diff --git a/www/manager6/Utils.js b/www/manager6/Utils.js
index 0cce81d4..47b6e5c1 100644
--- a/www/manager6/Utils.js
+++ b/www/manager6/Utils.js
@@ -26,7 +26,7 @@ Ext.define('PVE.Utils', { utilities: {
 
 toolkit: undefined, // (extjs|touch), set inside Toolkit.js
 
-bus_match: /^(ide|sata|virtio|scsi)\d+$/,
+bus_match: /^(ide|sata|virtio|scsi|nvme)\d+$/,
 
 log_severity_hash: {
0: "panic",
@@ -1286,6 +1286,7 @@ Ext.define('PVE.Utils', { utilities: {
ide: 4,
sata: 6,
scsi: 31,
+   nvme: 8,
virtio: 16,
 },
 
diff --git a/www/manager6/form/BusTypeSelector.js 
b/www/manager6/form/BusTypeSelector.js
index 04643e77..c65eba79 100644
--- a/www/manager6/form/BusTypeSelector.js
+++ b/www/manager6/form/BusTypeSelector.js
@@ -15,6 +15,8 @@ Ext.define('PVE.form.BusTypeSelector', {
 
me.comboItems.push(['scsi', 'SCSI']);
 
+   me.comboItems.push(['nvme', 'NVME']);
+
me.callParent();
 }
 });
diff --git a/www/manager6/form/ControllerSelector.js 
b/www/manager6/form/ControllerSelector.js
index 89ecdf4a..0cea5fce 100644
--- a/www/manager6/form/ControllerSelector.js
+++ b/www/manager6/form/ControllerSelector.js
@@ -37,7 +37,7 @@ Ext.define('PVE.form.ControllerSelector', {
 
me.vmconfig = Ext.apply({}, vmconfig);
 
-   var clist = ['ide', 'virtio', 'scsi', 'sata'];
+   var clist = ['ide', 'virtio', 'scsi', 'sata', 'nvme'];
var bussel = me.down('field[name=controller]');
var deviceid = me.down('field[name=deviceid]');
 
@@ -47,7 +47,7 @@ Ext.define('PVE.form.ControllerSelector', {
deviceid.setValue(2);
return;
}
-   clist = ['ide', 'scsi', 'sata'];
+   clist = ['ide', 'scsi', 'sata', 'nvme'];
} else  {
// in most cases we want to add a disk to the same controller
// we previously used
diff --git a/www/manager6/qemu/.Snapshot.js.swp 
b/www/manager6/qemu/.Snapshot.js.swp
new file mode 100644
index 
..bcfd26a5a863605108667b951d6d8f3c9b3afa10
GIT binary patch
literal 12288
zcmeI%%?g4*5Ww-VdldBoR_EM4Abf-9&?)M^kyt@xsR^rIYYqaK~YH
zhw*olUG^4}sqH#_Nk`pGpYd=-4t{*e^Iy@|8~tlNU%Q%uYP^Y5aQ3=8z1O~XQc04i
z?0uw;-Y!https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH qemu-server 1/2] add support for nvme emulation

2020-05-13 Thread Oguz Bektas
now we can add nvme drives;

nvme0: local-lvm:vm-103-disk-0,size=32G

max number is 8

Signed-off-by: Oguz Bektas 
---
 PVE/QemuServer.pm   | 20 +---
 PVE/QemuServer/Drive.pm | 21 +
 2 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index dcf05df..441d209 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -406,7 +406,7 @@ EODESC
optional => 1,
type => 'string', format => 'pve-qm-bootdisk',
description => "Enable booting from specified disk.",
-   pattern => '(ide|sata|scsi|virtio)\d+',
+   pattern => '(ide|sata|scsi|virtio|nvme)\d+',
 },
 smp => {
optional => 1,
@@ -1424,7 +1424,11 @@ sub print_drivedevice_full {
$device .= ",rotation_rate=1";
}
$device .= ",wwn=$drive->{wwn}" if $drive->{wwn};
-
+} elsif ($drive->{interface} eq 'nvme') {
+   my $maxdev = $PVE::QemuServer::Drive::MAX_NVME_DISKS;
+   my $path = $drive->{file};
+   $drive->{serial} = "$drive->{interface}$drive->{index}"; # serial is 
mandatory for nvme
+   $device = "nvme,drive=drive-$drive->{interface}$drive->{index}";
 } elsif ($drive->{interface} eq 'ide' || $drive->{interface} eq 'sata') {
my $maxdev = ($drive->{interface} eq 'sata') ? 
$PVE::QemuServer::Drive::MAX_SATA_DISKS : 2;
my $controller = int($drive->{index} / $maxdev);
@@ -2157,7 +2161,7 @@ sub parse_vm_config {
} else {
$key = 'ide2' if $key eq 'cdrom';
my $fmt = $confdesc->{$key}->{format};
-   if ($fmt && $fmt =~ /^pve-qm-(?:ide|scsi|virtio|sata)$/) {
+   if ($fmt && $fmt =~ /^pve-qm-(?:ide|scsi|virtio|sata|nvme)$/) {
my $v = parse_drive($key, $value);
if (my $volid = filename_to_volume_id($vmid, $v->{file}, 
$v->{media})) {
$v->{file} = $volid;
@@ -3784,7 +3788,17 @@ sub vm_deviceplug {
warn $@ if $@;
die $err;
 }
+} elsif ($deviceid =~ m/^(nvme)(\d+)$/) {
+
+qemu_driveadd($storecfg, $vmid, $device);
 
+   my $devicefull = print_drivedevice_full($storecfg, $conf, $vmid, 
$device, $arch, $machine_type);
+   eval { qemu_deviceadd($vmid, $devicefull); };
+   if (my $err = $@) {
+   eval { qemu_drivedel($vmid, $deviceid); };
+   warn $@ if $@;
+   die $err;
+}
 } elsif ($deviceid =~ m/^(net)(\d+)$/) {
 
return undef if !qemu_netdevadd($vmid, $conf, $arch, $device, 
$deviceid);
diff --git a/PVE/QemuServer/Drive.pm b/PVE/QemuServer/Drive.pm
index f84333f..b8a553a 100644
--- a/PVE/QemuServer/Drive.pm
+++ b/PVE/QemuServer/Drive.pm
@@ -27,6 +27,7 @@ 
PVE::JSONSchema::register_standard_option('pve-qm-image-format', {
 
 my $MAX_IDE_DISKS = 4;
 my $MAX_SCSI_DISKS = 31;
+my $MAX_NVME_DISKS = 8;
 my $MAX_VIRTIO_DISKS = 16;
 our $MAX_SATA_DISKS = 6;
 our $MAX_UNUSED_DISKS = 256;
@@ -275,6 +276,20 @@ my $scsidesc = {
 };
 PVE::JSONSchema::register_standard_option("pve-qm-scsi", $scsidesc);
 
+my $nvme_fmt = {
+%drivedesc_base,
+%ssd_fmt,
+%wwn_fmt,
+};
+
+my $nvmedesc = {
+optional => 1,
+type => 'string', format => $nvme_fmt,
+description => "Use volume as NVME disk (n is 0 to " . ($MAX_NVME_DISKS 
-1) . ").",
+};
+
+PVE::JSONSchema::register_standard_option("pve-qm-nvme", $nvmedesc);
+
 my $sata_fmt = {
 %drivedesc_base,
 %ssd_fmt,
@@ -364,6 +379,11 @@ for (my $i = 0; $i < $MAX_SCSI_DISKS; $i++)  {
 $drivedesc_hash->{"scsi$i"} = $scsidesc;
 }
 
+for (my $i = 0; $i < $MAX_NVME_DISKS; $i++)  {
+$drivedesc_hash->{"nvme$i"} = $nvmedesc;
+}
+
+
 for (my $i = 0; $i < $MAX_VIRTIO_DISKS; $i++)  {
 $drivedesc_hash->{"virtio$i"} = $virtiodesc;
 }
@@ -380,6 +400,7 @@ sub valid_drive_names {
 (map { "scsi$_" } (0 .. ($MAX_SCSI_DISKS - 1))),
 (map { "virtio$_" } (0 .. ($MAX_VIRTIO_DISKS - 1))),
 (map { "sata$_" } (0 .. ($MAX_SATA_DISKS - 1))),
+(map { "nvme$_" } (0 .. ($MAX_NVME_DISKS - 1))),
 'efidisk0');
 }
 
-- 
2.20.1

___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH storage] Fix #2737: Can't call method "mode"

2020-05-13 Thread Alwin Antreich
on an undefined value at /usr/share/perl5/PVE/Storage/Plugin.pm line 928

This error message crops up when a file is deleted after getting the
file list and before the loop passed the file entry.

Signed-off-by: Alwin Antreich 
---
 PVE/Storage/Plugin.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/Storage/Plugin.pm b/PVE/Storage/Plugin.pm
index e9da403..cec136e 100644
--- a/PVE/Storage/Plugin.pm
+++ b/PVE/Storage/Plugin.pm
@@ -925,7 +925,7 @@ my $get_subdir_files = sub {
 
my $st = File::stat::stat($fn);
 
-   next if S_ISDIR($st->mode);
+   next if (!$st || S_ISDIR($st->mode));
 
my $info;
 
-- 
2.26.2


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH kernel] config: make vfio-pci and {x, e, o, u}hci modules

2020-05-13 Thread Thomas Lamprecht
Since 5.4 Ubuntu makes vfio-pci builtin to allow vfio-pci to be bound
to certain devices during boot, preventing other drivers from binding
them. https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1770845

Part of why this was done was because it mirrored Arch Linux's choice
in the past, but there were some issues and Arch Linux went back to
modules again but this time making xhci_hcd and similar ?hci systems
modules too. Tries to mirror the values used in the current Arch
Linux config [0].

[0]: 
https://git.archlinux.org/svntogit/packages.git/tree/trunk/config?h=packages/linux=96121a8d6468c7067eb3759f6ca5e82f5f914f38

Signed-off-by: Thomas Lamprecht 
---
 debian/rules | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/debian/rules b/debian/rules
index d344d4c1cc42..57afae938834 100755
--- a/debian/rules
+++ b/debian/rules
@@ -46,6 +46,21 @@ PVE_CONFIG_OPTS= \
 -d CONFIG_MEMCG_DISABLED \
 -e CONFIG_MEMCG_SWAP_ENABLED \
 -e CONFIG_HYPERV \
+-m CONFIG_VFIO_IOMMU_TYPE1 \
+-m CONFIG_VFIO_VIRQFD \
+-m CONFIG_VFIO \
+-m CONFIG_VFIO_PCI \
+-m CONFIG_USB_XHCI_HCD \
+-m CONFIG_USB_XHCI_PCI \
+-m CONFIG_USB_EHCI_HCD \
+-m CONFIG_USB_EHCI_PCI \
+-m CONFIG_USB_EHCI_HCD_PLATFORM \
+-m CONFIG_USB_OHCI_HCD \
+-m CONFIG_USB_OHCI_HCD_PCI \
+-m CONFIG_USB_OHCI_HCD_PLATFORM \
+-d CONFIG_USB_OHCI_HCD_SSB \
+-m CONFIG_USB_UHCI_HCD \
+-d CONFIG_USB_SL811_HCD_ISO \
 -e CONFIG_MEMCG_KMEM \
 -d CONFIG_DEFAULT_CFQ \
 -e CONFIG_DEFAULT_DEADLINE \
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH docs 2/2] pvecm: whitespace fixup

2020-05-13 Thread Aaron Lauterer
Signed-off-by: Aaron Lauterer 
---
 pvecm.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pvecm.adoc b/pvecm.adoc
index 2866582..2cdb92b 100644
--- a/pvecm.adoc
+++ b/pvecm.adoc
@@ -61,7 +61,7 @@ Requirements
 
 * Date and time have to be synchronized.
 
-* SSH tunnel on TCP port 22 between nodes is used. 
+* SSH tunnel on TCP port 22 between nodes is used.
 
 * If you are interested in High Availability, you need to have at
   least three nodes for reliable quorum. All nodes should have the
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH docs 1/2] fix #2526: network config: change underscore to dash

2020-05-13 Thread Aaron Lauterer
The network config examples use the underscore but our tooling
generates the configs with dashes.

Signed-off-by: Aaron Lauterer 
---

 pve-network.adoc | 80 
 pvecm.adoc   |  6 ++--
 2 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/pve-network.adoc b/pve-network.adoc
index 8f3af0d..1c5ae17 100644
--- a/pve-network.adoc
+++ b/pve-network.adoc
@@ -161,9 +161,9 @@ iface vmbr0 inet static
 address 192.168.10.2
 netmask 255.255.255.0
 gateway 192.168.10.1
-bridge_ports eno1
-bridge_stp off
-bridge_fd 0
+bridge-ports eno1
+bridge-stp off
+bridge-fd 0
 
 
 Virtual machines behave as if they were directly connected to the
@@ -209,9 +209,9 @@ auto vmbr0
 iface vmbr0 inet static
 address  203.0.113.17
 netmask  255.255.255.248
-bridge_ports none
-bridge_stp off
-bridge_fd 0
+bridge-ports none
+bridge-stp off
+bridge-fd 0
 
 
 
@@ -239,9 +239,9 @@ auto vmbr0
 iface vmbr0 inet static
 address  10.10.10.1
 netmask  255.255.255.0
-bridge_ports none
-bridge_stp off
-bridge_fd 0
+bridge-ports none
+bridge-stp off
+bridge-fd 0
 
 post-up   echo 1 > /proc/sys/net/ipv4/ip_forward
 post-up   iptables -t nat -A POSTROUTING -s '10.10.10.0/24' -o eno1 -j 
MASQUERADE
@@ -357,18 +357,18 @@ iface bond0 inet static
   slaves eno1 eno2
   address  192.168.1.2
   netmask  255.255.255.0
-  bond_miimon 100
-  bond_mode 802.3ad
-  bond_xmit_hash_policy layer2+3
+  bond-miimon 100
+  bond-mode 802.3ad
+  bond-xmit-hash-policy layer2+3
 
 auto vmbr0
 iface vmbr0 inet static
 address  10.10.10.2
 netmask  255.255.255.0
 gateway  10.10.10.1
-bridge_ports eno1
-bridge_stp off
-bridge_fd 0
+bridge-ports eno1
+bridge-stp off
+bridge-fd 0
 
 
 
@@ -389,18 +389,18 @@ iface eno2 inet manual
 auto bond0
 iface bond0 inet manual
   slaves eno1 eno2
-  bond_miimon 100
-  bond_mode 802.3ad
-  bond_xmit_hash_policy layer2+3
+  bond-miimon 100
+  bond-mode 802.3ad
+  bond-xmit-hash-policy layer2+3
 
 auto vmbr0
 iface vmbr0 inet static
 address  10.10.10.2
 netmask  255.255.255.0
 gateway  10.10.10.1
-bridge_ports bond0
-bridge_stp off
-bridge_fd 0
+bridge-ports bond0
+bridge-stp off
+bridge-fd 0
 
 
 
@@ -474,15 +474,15 @@ iface vmbr0v5 inet static
 address  10.10.10.2
 netmask  255.255.255.0
 gateway  10.10.10.1
-bridge_ports eno1.5
-bridge_stp off
-bridge_fd 0
+bridge-ports eno1.5
+bridge-stp off
+bridge-fd 0
 
 auto vmbr0
 iface vmbr0 inet manual
-bridge_ports eno1
-bridge_stp off
-bridge_fd 0
+bridge-ports eno1
+bridge-stp off
+bridge-fd 0
 
 
 
@@ -502,10 +502,10 @@ iface vmbr0.5 inet static
 
 auto vmbr0
 iface vmbr0 inet manual
-bridge_ports eno1
-bridge_stp off
-bridge_fd 0
-bridge_vlan_aware yes
+bridge-ports eno1
+bridge-stp off
+bridge-fd 0
+bridge-vlan-aware yes
 
 
 The next example is the same setup but a bond is used to
@@ -523,9 +523,9 @@ iface eno2 inet manual
 auto bond0
 iface bond0 inet manual
   slaves eno1 eno2
-  bond_miimon 100
-  bond_mode 802.3ad
-  bond_xmit_hash_policy layer2+3
+  bond-miimon 100
+  bond-mode 802.3ad
+  bond-xmit-hash-policy layer2+3
 
 iface bond0.5 inet manual
 
@@ -534,15 +534,15 @@ iface vmbr0v5 inet static
 address  10.10.10.2
 netmask  255.255.255.0
 gateway  10.10.10.1
-bridge_ports bond0.5
-bridge_stp off
-bridge_fd 0
+bridge-ports bond0.5
+bridge-stp off
+bridge-fd 0
 
 auto vmbr0
 iface vmbr0 inet manual
-bridge_ports bond0
-bridge_stp off
-bridge_fd 0
+bridge-ports bond0
+bridge-stp off
+bridge-fd 0
 
 
 
diff --git a/pvecm.adoc b/pvecm.adoc
index 4bf2f59..2866582 100644
--- a/pvecm.adoc
+++ b/pvecm.adoc
@@ -1256,9 +1256,9 @@ iface vmbr0 inet static
 address 192.X.Y.57
 netmask 255.255.250.0
 gateway 192.X.Y.1
-bridge_ports eno1
-bridge_stp off
-bridge_fd 0
+bridge-ports eno1
+bridge-stp off
+bridge-fd 0
 
 # cluster network
 auto eno2
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] [PATCH qemu-server] sync_disks: fix check

2020-05-13 Thread Fabian Ebner
Signed-off-by: Fabian Ebner 
---

The real issue is that the shared volumes are scanned here and
that happens in the scan_volids call above. I'll try to address
that as part of the sync_disks cleanup I'm working on.

 PVE/QemuMigrate.pm | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index b729940..f6baeda 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -519,7 +519,9 @@ sub sync_disks {
PVE::QemuConfig->foreach_volume($conf, sub {
my ($key, $drive) = @_;
return if $key eq 'efidisk0'; # skip efidisk, will be handled later
-   return if !defined($local_volumes->{$key}); # only update sizes for 
local volumes
+
+   my $volid = $drive->{file};
+   return if !defined($local_volumes->{$volid}); # only update sizes 
for local volumes
 
my ($updated, $old_size, $new_size) = 
PVE::QemuServer::Drive::update_disksize($drive, $volid_hash);
if (defined($updated)) {
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel


[pve-devel] applied: [PATCH network] api: fix reload all UPID handling

2020-05-13 Thread Thomas Lamprecht
run_command returns the exit code, we need to parse out the UPID from
the outfunc.

Signed-off-by: Thomas Lamprecht 
---
 PVE/API2/Network/SDN.pm | 32 
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/PVE/API2/Network/SDN.pm b/PVE/API2/Network/SDN.pm
index 4af2b6f..3f497fc 100644
--- a/PVE/API2/Network/SDN.pm
+++ b/PVE/API2/Network/SDN.pm
@@ -3,16 +3,17 @@ package PVE::API2::Network::SDN;
 use strict;
 use warnings;
 
-use PVE::SafeSyslog;
-use PVE::Tools;
 use PVE::Cluster qw(cfs_lock_file cfs_read_file cfs_write_file);
+use PVE::Exception qw(raise_param_exc);
+use PVE::JSONSchema qw(get_standard_option);
 use PVE::RESTHandler;
 use PVE::RPCEnvironment;
-use PVE::JSONSchema qw(get_standard_option);
-use PVE::Exception qw(raise_param_exc);
+use PVE::SafeSyslog;
+use PVE::Tools qw(run_command);
+
+use PVE::API2::Network::SDN::Controllers;
 use PVE::API2::Network::SDN::Vnets;
 use PVE::API2::Network::SDN::Zones;
-use PVE::API2::Network::SDN::Controllers;
 
 use base qw(PVE::RESTHandler);
 
@@ -68,8 +69,14 @@ __PACKAGE__->register_method({
 my $create_reload_network_worker = sub {
 my ($nodename) = @_;
 
-#fixme: how to proxy to final node ?
-my $upid = PVE::Tools::run_command(['pvesh', 'set', 
"/nodes/$nodename/network"]);
+# FIXME: how to proxy to final node ?
+my $upid;
+run_command(['pvesh', 'set', "/nodes/$nodename/network"], outfunc => sub {
+   my $line = shift;
+   if ($line =~ /^["']?(UPID:[^\s"']+)["']?$/) {
+   $upid = $1;
+   }
+});
 #my $upid = PVE::API2::Network->reload_network_config(node => $nodename});
 my $res = PVE::Tools::upid_decode($upid);
 
@@ -101,13 +108,14 @@ __PACKAGE__->register_method ({
 $rpcenv->{type} = 'priv'; # to start tasks in background
PVE::Cluster::check_cfs_quorum();
my $nodelist = PVE::Cluster::get_nodelist();
-   foreach my $node (@$nodelist) {
-
-   my $pid;
-   eval { $pid = &$create_reload_network_worker($node); };
+   for my $node (@$nodelist) {
+   my $pid = eval { $create_reload_network_worker->($node) };
warn $@ if $@;
-   next if !$pid;
}
+
+   # FIXME: use libpve-apiclient (like in cluster join) to create
+   # tasks and monitor the tasks.
+
return;
 };
 
-- 
2.20.1


___
pve-devel mailing list
pve-devel@pve.proxmox.com
https://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel