[pve-devel] [PATCH v2] mixin/CBind: allow binding sub elements

2018-02-08 Thread Thomas Lamprecht
allow accessing sub-elements of configuration items for binding.

E.g., if the following configuration is present:

foo: {
bar: true
}

one could do:

xtype: 'displayfield',
cbind: {
value: '{foo.bar}'
}
[...]

This mirrors the ability of ExtJS's 'bind', which also supports this.
The implementation is simple and mirrors the surrounding code.
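
For illustration, a minimal usage sketch (assuming the mixin's usual
'cbindData' configuration object; the component and data names are
examples, not part of the patch):

    Ext.define('MyPanel', {
        extend: 'Ext.panel.Panel',
        mixins: ['Proxmox.Mixin.CBind'],

        // configuration object the cbind templates resolve against
        cbindData: {
            foo: {
                bar: true
            }
        },

        items: [{
            xtype: 'displayfield',
            cbind: {
                value: '{foo.bar}',  // resolved via the new sub-element branch
                hidden: '{!foo.bar}' // a leading '!' negates the resolved value
            }
        }]
    });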

Signed-off-by: Thomas Lamprecht 
---

changes v1 -> v2:
* fix regex to allow any number of dots, even with only one letter between
  them (see the informal checks below)
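
To make that concrete, a few informal checks against the fixed pattern
(derived from the patch, not part of it):

    var re = /^\{(!)?([a-z_][a-z0-9_]*(\.[a-z_][a-z0-9_]*)+)\}$/i;

    re.test('{foo.bar}');  // true
    re.test('{a.b.c}');    // true  - the v1 grouping only matched a single dot
    re.test('{!foo.bar}'); // true  - capture group 1 triggers negation
    re.test('{foo}');      // false - plain keys use the existing branch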


 mixin/CBind.js | 13 +
 1 file changed, 13 insertions(+)

diff --git a/mixin/CBind.js b/mixin/CBind.js
index 48bc7f4..ecf30e2 100644
--- a/mixin/CBind.js
+++ b/mixin/CBind.js
@@ -41,6 +41,19 @@ Ext.define('Proxmox.Mixin.CBind', {
if (match[1]) cvalue = !cvalue;
obj[prop] = cvalue;
found = true;
+   } else if (match = /^\{(!)?([a-z_][a-z0-9_]*(\.[a-z_][a-z0-9_]*)+)\}$/i.exec(cdata)) {
+   var keys = match[2].split('.');
+   var cvalue = getConfigValue(keys.shift());
+   keys.forEach(function(k) {
+   if (k in cvalue) {
+   cvalue = cvalue[k];
+   } else {
+   throw "unable to get cbind data for '" + match[2] + 
"'";
+   }
+   });
+   if (match[1]) cvalue = !cvalue;
+   obj[prop] = cvalue;
+   found = true;
} else {
obj[prop] = cdata.replace(/{([a-z_][a-z0-9_]*)\}/ig, function(match, cname) {
var cvalue = getConfigValue(cname);
-- 
2.14.2




[pve-devel] [WIP manager] WIP: dc/cluster ui for cluster join, create and information

2018-02-08 Thread Thomas Lamprecht
This is a WIP mock-up of a possible cluster management component for
our WebUI. It's *not* final and not intended for review - though if you
see something obviously wrong, it won't hurt to tell me.

I still plan to improve the visuals a bit, making a clear tab
separation between the fully manual and the assisted join.

The assisted join should display some additional info, e.g., the name
of the cluster we're joining, its IP type, how many rings it has
configured, ...

I sent this mainly to make testing the backend part easier than using
the API only.

Oh, and I found out that I missed adding

protected => 1,

for the cluster create API call (POST /cluster/config) in v6 of the
backend part. That's easy to fix for testing, though, so I'm waiting
with a v7 :)
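
For API-only testing, note that the assisted join below expects the
serialized info to be base64-encoded JSON. A sketch of producing such a
blob (the field names match what recomputeSerializedInfo() reads; the
encode counterpart and the values are assumptions for testing):

    var joinInfo = {
        ipAddress: '192.0.2.10',
        fingerprint: 'AB:CD:EF:...'
    };
    // paste this into the 'serializedinfo' textarea
    var serialized = Ext.util.Base64.encode(Ext.JSON.encode(joinInfo));

    // the controller reverses it:
    var decoded = Ext.JSON.decode(Ext.util.Base64.decode(serialized), true);
    // decoded.ipAddress -> '192.0.2.10', decoded.fingerprint -> 'AB:CD:EF:...'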

cheers,


Signed-off-by: Thomas Lamprecht 
---
 www/manager6/Makefile  |   1 +
 www/manager6/dc/Cluster.js | 456 +
 www/manager6/dc/Config.js  |  13 +-
 3 files changed, 466 insertions(+), 4 deletions(-)
 create mode 100644 www/manager6/dc/Cluster.js

diff --git a/www/manager6/Makefile b/www/manager6/Makefile
index aec62613..e2145085 100644
--- a/www/manager6/Makefile
+++ b/www/manager6/Makefile
@@ -187,6 +187,7 @@ JSSRC= \
dc/SecurityGroups.js\
dc/Config.js\
dc/NodeView.js  \
+   dc/Cluster.js   \
Workspace.js
 
 lint: ${JSSRC}
diff --git a/www/manager6/dc/Cluster.js b/www/manager6/dc/Cluster.js
new file mode 100644
index ..40c1e07b
--- /dev/null
+++ b/www/manager6/dc/Cluster.js
@@ -0,0 +1,456 @@
+/*jslint confusion: true*/
+Ext.define('pve-cluster-nodes', {
+extend: 'Ext.data.Model',
+fields: [
+   'node', { type: 'integer', name: 'nodeid' }, 'ring0_addr', 'ring1_addr',
+   { type: 'integer', name: 'quorum_votes' }
+],
+proxy: {
+type: 'proxmox',
+   url: "/api2/json/cluster/config/nodes"
+},
+idProperty: 'nodeid'
+});
+
+Ext.define('pve-cluster-info', {
+extend: 'Ext.data.Model',
+proxy: {
+type: 'proxmox',
+   url: "/api2/json/cluster/config/join"
+}
+});
+
+Ext.define('PVE.ClusterCreateWindow', {
+extend: 'Proxmox.window.Edit',
+xtype: 'pveClusterCreateWindow',
+
+title: gettext('Create Cluster'),
+width: 800,
+
+method: 'POST',
+url: '/cluster/config',
+
+isCreate: true,
+subject: gettext('Cluster'),
+showProgress: true,
+
+items: [
+   {
+   xtype: 'textfield',
+   fieldLabel: gettext('Cluster Name'),
+   name: 'clustername'
+   },
+   {
+   xtype: 'proxmoxtextfield',
+   fieldLabel: gettext('Ring 0 Address'),
+   emptyText: gettext("IP resolved by node's hostname"),
+   name: 'ring0_addr',
+   skipEmptyText: true
+   }
+]
+});
+
+Ext.define('PVE.ClusterJoinNodeWindow', {
+extend: 'Proxmox.window.Edit',
+xtype: 'pveClusterJoinNodeWindow',
+
+title: gettext('Cluster Join'),
+width: 800,
+
+method: 'POST',
+url: '/cluster/config/join',
+
+isCreate: true,
+submitText: gettext('Join'),
+
+viewModel: {
+   parent: null,
+   data: {
+   peerIP: '',
+   peerFP: '',
+   ringsNeeded: 1,
+   ipVersion: 'any',
+   useSerializedInfo: true
+   }
+},
+
+controller: {
+   xclass: 'Ext.app.ViewController',
+   control: {
+   'radiofield[name=inputtype]': {
+   change: 'onInputTypeChange'
+   },
+   'textarea[name=serializedinfo]': {
+   change: 'recomputeSerializedInfo',
+   disable: 'clearOnDisable'
+   },
+   'proxmoxtextfield': {
+   disable: 'clearOnDisable'
+   },
+   'textfield': {
+   disable: 'clearOnDisable'
+   }
+   },
+   clearOnDisable: function(field) {
+   field.reset();
+   },
+   onInputTypeChange: function(field, value) {
+   var vm = this.getViewModel();
+   var auto = (field.inputValue === 'auto');
+   vm.set('useSerializedInfo', !auto);
+   },
+   recomputeSerializedInfo: function(field, value) {
+   var jsons = Ext.util.Base64.decode(value);
+   var joinInfo = Ext.JSON.decode(jsons, true);
+
+   if (joinInfo === null) {
+   return;
+   }
+
+   var vm = this.getViewModel();
+   vm.set('peerIP', joinInfo.ipAddress);
+   vm.set('peerFP', joinInfo.fingerprint);
+   }
+},
+
+items: [{
+   xtype: 'inputpanel',
+   column1: [
+   {
+   xtype: 'radiofield',
+   name: 'inputtype',
+   submitValue: false,
+   inputValue: 'auto',
+   boxLabel: gettext('Use encoded 

[pve-devel] [PATCH widget-toolkit] mixin/CBind: allow binding sub elements

2018-02-08 Thread Thomas Lamprecht
allow accessing sub-elements of configuration items for binding.

E.g., if the following configuration is present:

foo: {
bar: true
}

one could do:

xtype: 'displayfield',
cbind: {
value: '{foo.bar}'
}
[...]

This mirrors the ability of ExtJS's 'bind', which also supports this.
The implementation is simple and mirrors the surrounding code.

Signed-off-by: Thomas Lamprecht 
---
 mixin/CBind.js | 13 +
 1 file changed, 13 insertions(+)

diff --git a/mixin/CBind.js b/mixin/CBind.js
index 48bc7f4..4eb9c97 100644
--- a/mixin/CBind.js
+++ b/mixin/CBind.js
@@ -41,6 +41,19 @@ Ext.define('Proxmox.Mixin.CBind', {
if (match[1]) cvalue = !cvalue;
obj[prop] = cvalue;
found = true;
+   } else if (match = /^\{(!)?([a-z_][a-z0-9_]*\.[a-z_][a-z0-9_]*)+\}$/i.exec(cdata)) {
+   var keys = match[2].split('.');
+   var cvalue = getConfigValue(keys.shift());
+   keys.forEach(function(k) {
+   if (k in cvalue) {
+   cvalue = cvalue[k];
+   } else {
+   throw "unable to get cbind data for '" + match[2] + 
"'";
+   }
+   });
+   if (match[1]) cvalue = !cvalue;
+   obj[prop] = cvalue;
+   found = true;
} else {
obj[prop] = cdata.replace(/{([a-z_][a-z0-9_]*)\}/ig, function(match, cname) {
var cvalue = getConfigValue(cname);
-- 
2.14.2




[pve-devel] [RFC PATCH] add kiosk option to vm

2018-02-08 Thread Dominik Csapak
this adds a 'kiosk' option, which enables qemu's snapshot mode: all
writes to the disks are redirected to a temporary file, so that when
you stop the qemu process, the disks are back in their original state

Signed-off-by: Dominik Csapak 
---
the only possible pitfall with this mode is that the temporary writes
go to a file under (I think) /var/tmp, which in our case means the root
partition could fill up. But since we can already enable this option
per disk, I guess this is not an issue
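
As a minimal sketch of the effect (the config hash and command prefix
are illustrative, not the full config_to_command() output):

    # with kiosk set, qemu's '-snapshot' flag is appended, so all disk
    # writes go to temporary files dropped when the process exits
    my $conf = { kiosk => 1 };
    my $cmd = ['kvm', '-id', '100'];    # illustrative prefix only
    push @$cmd, '-snapshot' if $conf->{kiosk};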

 PVE/QemuServer.pm | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 20d6682..28a0f19 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -532,6 +532,12 @@ EODESCR
description => "Select BIOS implementation.",
default => 'seabios',
 },
+kiosk => {
+   optional => 1,
+   type => 'boolean',
+   default => 0,
+   description => "If activated, writes to the disks of the VM will be 
temporary, and will be lost when stopping the VM."
+}
 };
 
 # what about other qemu settings ?
@@ -3401,6 +3407,10 @@ sub config_to_command {
}
 }
 
+if ($conf->{kiosk}) {
+   push @$cmd, '-snapshot';
+}
+
 # add custom args
 if ($conf->{args}) {
my $aa = PVE::Tools::split_args($conf->{args});
-- 
2.11.0




Re: [pve-devel] applied: [PATCH qemu-server 1/3] correct description of disks 'snapshot' flag

2018-02-08 Thread Wolfgang Bumiller
applied the other two of the series, but replaced this one with:

-   description => "Whether the drive should be included when making 
snapshots.",
+   description => "Controls qemu's snapshot mode feature."
+   . " If activated, changes made to the disk are temporary and will"
+   . " be discarded when the VM is shutdown.",

On Thu, Feb 08, 2018 at 12:09:22PM +0100, Dominik Csapak wrote:
> it has nothing to do with our snapshots, but with
> the qemu snapshot mode of the disk (see the man page)
> 
> Signed-off-by: Dominik Csapak 
> ---
>  PVE/QemuServer.pm | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
> index 2738f11..4c04668 100644
> --- a/PVE/QemuServer.pm
> +++ b/PVE/QemuServer.pm
> @@ -753,7 +753,7 @@ my %drivedesc_base = (
>  },
>  snapshot => {
>   type => 'boolean',
> - description => "Whether the drive should be included when making snapshots.",
> + description => "Snapshot mode for the disk to use.",
>   optional => 1,
>  },
>  cache => {
> -- 
> 2.11.0



Re: [pve-devel] [PATCH qemu-server 2/3] use special case for 'snapshot' disk parameter

2018-02-08 Thread Dominik Csapak

I forgot: this fixes #1662

On 02/08/2018 12:09 PM, Dominik Csapak wrote:

since qemu expects on|off (not 1|0),
we have to handle it differently

Signed-off-by: Dominik Csapak 
---
  PVE/QemuServer.pm | 9 -
  1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4c04668..0011018 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -1622,10 +1622,17 @@ sub print_drive_full {
 }
  
  my $opts = '';

-my @qemu_drive_options = qw(heads secs cyls trans media format cache snapshot rerror werror aio discard);
+my @qemu_drive_options = qw(heads secs cyls trans media format cache rerror werror aio discard);
  foreach my $o (@qemu_drive_options) {
$opts .= ",$o=$drive->{$o}" if $drive->{$o};
  }
+
+# snapshot only accepts on|off
+if (defined($drive->{snapshot})) {
+   my $v = $drive->{snapshot} ? 'on' : 'off';
+   $opts .= ",snapshot=$v";
+}
+
  foreach my $type (['', '-total'], [_rd => '-read'], [_wr => '-write']) {
my ($dir, $qmpname) = @$type;
if (my $v = $drive->{"mbps$dir"}) {






[pve-devel] [PATCH qemu-server 2/3] use special case for 'snapshot' disk parameter

2018-02-08 Thread Dominik Csapak
since qemu expects on|off (not 1|0),
we have to handle it differently
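
A minimal standalone sketch of the special case (the drive hash is
illustrative):

    # snapshot is stored as a boolean (1|0) in the config, but the -drive
    # parameter wants on|off, so it can't use the generic option loop
    my $drive = { snapshot => 0 };

    my $opts = '';
    if (defined($drive->{snapshot})) {
        my $v = $drive->{snapshot} ? 'on' : 'off';
        $opts .= ",snapshot=$v";
    }
    # $opts is now ',snapshot=off'; the generic "if $drive->{$o}" check
    # would have dropped the explicit 0 entirely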

Signed-off-by: Dominik Csapak 
---
 PVE/QemuServer.pm | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 4c04668..0011018 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -1622,10 +1622,17 @@ sub print_drive_full {
}
 
 my $opts = '';
-my @qemu_drive_options = qw(heads secs cyls trans media format cache snapshot rerror werror aio discard);
+my @qemu_drive_options = qw(heads secs cyls trans media format cache rerror werror aio discard);
 foreach my $o (@qemu_drive_options) {
$opts .= ",$o=$drive->{$o}" if $drive->{$o};
 }
+
+# snapshot only accepts on|off
+if (defined($drive->{snapshot})) {
+   my $v = $drive->{snapshot} ? 'on' : 'off';
+   $opts .= ",snapshot=$v";
+}
+
 foreach my $type (['', '-total'], [_rd => '-read'], [_wr => '-write']) {
my ($dir, $qmpname) = @$type;
if (my $v = $drive->{"mbps$dir"}) {
-- 
2.11.0




[pve-devel] [PATCH qemu-server 3/3] append option to drive if the option is defined

2018-02-08 Thread Dominik Csapak
if the value was '0', we did not append the option to the drive,
resulting in a wrong command line whenever the qemu default for that
option is not '0'
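
A short illustration of the pitfall ('someopt' is a made-up option name;
the real candidates are in @qemu_drive_options below):

    my $drive = { someopt => 0 };    # option explicitly set to 0

    my $opts = '';
    $opts .= ",someopt=$drive->{someopt}" if $drive->{someopt};          # never appended: 0 is false
    $opts .= ",someopt=$drive->{someopt}" if defined($drive->{someopt}); # appends ',someopt=0'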

Signed-off-by: Dominik Csapak 
---
 PVE/QemuServer.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 0011018..39a8916 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -1624,7 +1624,7 @@ sub print_drive_full {
 my $opts = '';
 my @qemu_drive_options = qw(heads secs cyls trans media format cache rerror werror aio discard);
 foreach my $o (@qemu_drive_options) {
-   $opts .= ",$o=$drive->{$o}" if $drive->{$o};
+   $opts .= ",$o=$drive->{$o}" if defined($drive->{$o});
 }
 
 # snapshot only accepts on|off
-- 
2.11.0




[pve-devel] [PATCH qemu-server 1/3] correct description of disks 'snapshot' flag

2018-02-08 Thread Dominik Csapak
it has nothing to do with our snapshots, but with
the qemu snapshot mode of the disk (see the man page)

Signed-off-by: Dominik Csapak 
---
 PVE/QemuServer.pm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 2738f11..4c04668 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -753,7 +753,7 @@ my %drivedesc_base = (
 },
 snapshot => {
type => 'boolean',
-   description => "Whether the drive should be included when making 
snapshots.",
+   description => "Snapshot mode for the disk to use.",
optional => 1,
 },
 cache => {
-- 
2.11.0




Re: [pve-devel] is pve-qemu upgrade planned soon ?

2018-02-08 Thread Alexandre DERUMIER
Thanks Wolfgang !

>>I already rebased the patches but we didn't get around to doing much 
>>testing yet. You can find the branches on my github if you want to get a 
>>jump on it: 

>>https://github.com/Blub/qemu pve-next (qemu-2.11 pve patch set on top) 
>>https://github.com/Blub/qemu pve-next-patched (above with the extra/ patches) 


I'll try to test them next week.


>>(And by now I was hoping to have a 2.11.1 in sight to go directly to 
>>that ;-) but yeah, we can start moving there soon ) 

I also agree with waiting for 2.11.1.


I'll keep you posted.

Alexandre

----- Original Message -----
From: "Wolfgang Bumiller" 
To: "aderumier" 
Cc: "pve-devel" 
Sent: Thursday, 8 February 2018 11:02:09
Subject: Re: [pve-devel] is pve-qemu upgrade planned soon ?

I already rebased the patches but we didn't get around to doing much 
testing yet. You can find the branches on my github if you want to get a 
jump on it: 

https://github.com/Blub/qemu pve-next (qemu-2.11 pve patch set on top) 
https://github.com/Blub/qemu pve-next-patched (above with the extra/ patches) 

Another reason was meltdown & spectre and handling & testing of the added 
machine models, setting the pcid flag etc. That seemed like less of a 
hassle to do with an otherwise already tested version (especially since 
they're then the same in pve 5 & 4). 

(And by now I was hoping to have a 2.11.1 in sight to go directly to 
that ;-) but yeah, we can start moving there soon ) 

On Wed, Feb 07, 2018 at 06:36:34PM +0100, Alexandre DERUMIER wrote: 
> Hi, 
> 
> is it planned to update pve-qemu to 2.10/ 2.11 soon ? 
> 
> 
> I'm currently testing disk migration through nbd again, and it seems to be 
> buggy since qemu 2.9 (random hangs on long disk copies). 
> https://bugs.launchpad.net/ubuntu/+source/qemu/+bug/1711602 
> 
> 
> They are patches on qemu 2.10 
> http://lists.nongnu.org/archive/html/qemu-devel/2017-08/msg04513.html 
> 
> I'll try to backport them to 2.9 if needed. 
> 
> 
> Alexandre 



Re: [pve-devel] is pve-qemu upgrade planned soon ?

2018-02-08 Thread Wolfgang Bumiller
I already rebased the patches but we didn't get around to doing much
testing yet. You can find the branches on my github if you want to get a
jump on it:

https://github.com/Blub/qemu pve-next (qemu-2.11 pve patch set on top)
https://github.com/Blub/qemu pve-next-patched (above with the extra/ patches)

Another reason was meltdown & spectre and handling & testing of the added
machine models, setting the pcid flag etc. That seemed like less of a
hassle to do with an otherwise already tested version (especially since
they're then the same in pve 5 & 4).

(And by now I was hoping to have a 2.11.1 in sight to go directly to
that ;-) but yeah, we can start moving there soon )

On Wed, Feb 07, 2018 at 06:36:34PM +0100, Alexandre DERUMIER wrote:
> Hi,
> 
> is it planned to update pve-qemu to 2.10/ 2.11 soon ?
> 
> 
> I'm currently testing disk migration through nbd again, and it seems to be 
> buggy since qemu 2.9 (random hangs on long disk copies).
> https://bugs.launchpad.net/ubuntu/+source/qemu/+bug/1711602
> 
> 
> They are patches on qemu 2.10
> http://lists.nongnu.org/archive/html/qemu-devel/2017-08/msg04513.html
> 
> I'll try to backport them to 2.9 if needed.
> 
> 
> Alexandre



[pve-devel] applied: [PATCH qemu 0/1] fix qemu 2.9 drive mirroring to nbd target

2018-02-08 Thread Wolfgang Bumiller
applied, thanks

They could have cherry-picked the two function-rename & error-handling
commits in between, then they wouldn't have had to resolve the conflicts
they had to resolve ;-) (It wouldn't have saved them from auditing the
result, though, so I guess it doesn't matter much - but for future
fixups...)

On Wed, Feb 07, 2018 at 11:40:34PM +0100, Alexandre Derumier wrote:
> Qemu 2.9 have implemented coroutine for drive mirroring
> and it's currently buggy with nbd target.
> 
> When I test with big volumes (100-200G), mirroring always dies
> after a few minutes.
> 
> This has been fixed in qemu 2.10.
> 
> These patches come from qemu-kvm-ev-2.9.0-16.el7_4.11.1
> https://cbs.centos.org/koji/buildinfo?buildID=21003
> (already backported from 2.10 to 2.9)
> 
> 
> 
> Alexandre Derumier (1):
>   add nbd patches to fix qemu 2.9 drive mirroring to nbd target
> 
>  ...bd-make-it-thread-safe-fix-qcow2-over-nbd.patch | 136 +
>  .../extra/0036-kvm-nbd-strict-nbd_wr_syncv.patch   |  66 +++
> ...read_sync-and-friends-return-0-on-success.patch | 620 +
>  .../extra/0038-kvm-nbd-make-nbd_drop-public.patch  | 151 +
>  ...get-rid-of-nbd_negotiate_read-and-friends.patch | 292 ++
>  ...-Fix-regression-when-server-sends-garbage.patch | 153 +
>  ...fix-build-failure-in-nbd_read_reply_entry.patch |  55 ++
>  ...avoid-spurious-qio_channel_yield-re-entry.patch | 184 ++
>  ...-avoid-read_reply_co-entry-if-send-failed.patch | 160 ++
>  ...s-improve-nbd-fault-injector.py-startup-p.patch |  61 ++
>  ...s-test-NBD-over-UNIX-domain-sockets-in-08.patch | 454 +++
>  ...lient-nbd_co_send_request-fix-return-code.patch |  45 ++
>  debian/patches/series  |  13 +
>  13 files changed, 2390 insertions(+)
>  create mode 100644 debian/patches/extra/0035-kvm-nbd-make-it-thread-safe-fix-qcow2-over-nbd.patch
>  create mode 100644 debian/patches/extra/0036-kvm-nbd-strict-nbd_wr_syncv.patch
>  create mode 100644 debian/patches/extra/0037-kvm-nbd-read_sync-and-friends-return-0-on-success.patch
>  create mode 100644 debian/patches/extra/0038-kvm-nbd-make-nbd_drop-public.patch
>  create mode 100644 debian/patches/extra/0039-kvm-nbd-server-get-rid-of-nbd_negotiate_read-and-friends.patch
>  create mode 100644 debian/patches/extra/0040-kvm-nbd-client-Fix-regression-when-server-sends-garbage.patch
>  create mode 100644 debian/patches/extra/0041-kvm-fix-build-failure-in-nbd_read_reply_entry.patch
>  create mode 100644 debian/patches/extra/0042-kvm-nbd-client-avoid-spurious-qio_channel_yield-re-entry.patch
>  create mode 100644 debian/patches/extra/0043-kvm-nbd-client-avoid-read_reply_co-entry-if-send-failed.patch
>  create mode 100644 debian/patches/extra/0044-kvm-qemu-iotests-improve-nbd-fault-injector.py-startup-p.patch
>  create mode 100644 debian/patches/extra/0045-kvm-qemu-iotests-test-NBD-over-UNIX-domain-sockets-in-08.patch
>  create mode 100644 debian/patches/extra/0046-kvm-block-nbd-client-nbd_co_send_request-fix-return-code.patch
> 
> -- 
> 2.11.0



[pve-devel] [PATCH qemu 1/1] fix qemu 2.9 drive mirroring to nbd target

2018-02-08 Thread Alexandre Derumier

cherry-picked from qemu-kvm-ev-2.9.0-16.el7_4.11.1
https://cbs.centos.org/koji/buildinfo?buildID=21003

Tue Jun 13 2017 Miroslav Rezanina  - rhev-2.9.0-10.el7

- kvm-nbd-make-it-thread-safe-fix-qcow2-over-nbd.patch [bz#1454582]

Tue Aug 15 2017 Miroslav Rezanina  - rhev-2.9.0-16.el7_4.4
- kvm-nbd-strict-nbd_wr_syncv.patch [bz#1467509]
- kvm-nbd-read_sync-and-friends-return-0-on-success.patch [bz#1467509]
- kvm-nbd-make-nbd_drop-public.patch [bz#1467509]
- kvm-nbd-server-get-rid-of-nbd_negotiate_read-and-friends.patch [bz#1467509]

Mon Oct 09 2017 Miroslav Rezanina  - rhev-2.9.0-16.el7_4.9
- kvm-nbd-client-Fix-regression-when-server-sends-garbage.patch [bz#1495474]
- kvm-fix-build-failure-in-nbd_read_reply_entry.patch [bz#1495474]
- kvm-nbd-client-avoid-spurious-qio_channel_yield-re-entry.patch [bz#1495474]
- kvm-nbd-client-avoid-read_reply_co-entry-if-send-failed.patch [bz#1495474]
- kvm-qemu-iotests-improve-nbd-fault-injector.py-startup-p.patch [bz#1495474]
- kvm-qemu-iotests-test-NBD-over-UNIX-domain-sockets-in-08.patch [bz#1495474]
- kvm-block-nbd-client-nbd_co_send_request-fix-return-code.patch [bz#1495474]
- Resolves: bz#1495474
---
 ...bd-make-it-thread-safe-fix-qcow2-over-nbd.patch | 136 +
 .../extra/0036-kvm-nbd-strict-nbd_wr_syncv.patch   |  66 +++
 ...read_sync-and-friends-return-0-on-success.patch | 620 +
 .../extra/0038-kvm-nbd-make-nbd_drop-public.patch  | 151 +
 ...get-rid-of-nbd_negotiate_read-and-friends.patch | 292 ++
 ...-Fix-regression-when-server-sends-garbage.patch | 153 +
 ...fix-build-failure-in-nbd_read_reply_entry.patch |  55 ++
 ...avoid-spurious-qio_channel_yield-re-entry.patch | 184 ++
 ...-avoid-read_reply_co-entry-if-send-failed.patch | 160 ++
 ...s-improve-nbd-fault-injector.py-startup-p.patch |  61 ++
 ...s-test-NBD-over-UNIX-domain-sockets-in-08.patch | 454 +++
 ...lient-nbd_co_send_request-fix-return-code.patch |  45 ++
 debian/patches/series  |  13 +
 13 files changed, 2390 insertions(+)
 create mode 100644 debian/patches/extra/0035-kvm-nbd-make-it-thread-safe-fix-qcow2-over-nbd.patch
 create mode 100644 debian/patches/extra/0036-kvm-nbd-strict-nbd_wr_syncv.patch
 create mode 100644 debian/patches/extra/0037-kvm-nbd-read_sync-and-friends-return-0-on-success.patch
 create mode 100644 debian/patches/extra/0038-kvm-nbd-make-nbd_drop-public.patch
 create mode 100644 debian/patches/extra/0039-kvm-nbd-server-get-rid-of-nbd_negotiate_read-and-friends.patch
 create mode 100644 debian/patches/extra/0040-kvm-nbd-client-Fix-regression-when-server-sends-garbage.patch
 create mode 100644 debian/patches/extra/0041-kvm-fix-build-failure-in-nbd_read_reply_entry.patch
 create mode 100644 debian/patches/extra/0042-kvm-nbd-client-avoid-spurious-qio_channel_yield-re-entry.patch
 create mode 100644 debian/patches/extra/0043-kvm-nbd-client-avoid-read_reply_co-entry-if-send-failed.patch
 create mode 100644 debian/patches/extra/0044-kvm-qemu-iotests-improve-nbd-fault-injector.py-startup-p.patch
 create mode 100644 debian/patches/extra/0045-kvm-qemu-iotests-test-NBD-over-UNIX-domain-sockets-in-08.patch
 create mode 100644 debian/patches/extra/0046-kvm-block-nbd-client-nbd_co_send_request-fix-return-code.patch

diff --git a/debian/patches/extra/0035-kvm-nbd-make-it-thread-safe-fix-qcow2-over-nbd.patch b/debian/patches/extra/0035-kvm-nbd-make-it-thread-safe-fix-qcow2-over-nbd.patch
new file mode 100644
index 000..6b9df0d
--- /dev/null
+++ b/debian/patches/extra/0035-kvm-nbd-make-it-thread-safe-fix-qcow2-over-nbd.patch
@@ -0,0 +1,136 @@
+From 689e5170df5dedec420d78d375e780f3968e Mon Sep 17 00:00:00 2001
+From: Eric Blake 
+Date: Sun, 11 Jun 2017 03:30:07 +0200
+Subject: [PATCH 04/13] nbd: make it thread-safe, fix qcow2 over nbd
+
+RH-Author: Eric Blake 
+Message-id: <20170611033007.399-1-ebl...@redhat.com>
+Patchwork-id: 75581
+O-Subject: [RHEV-7.4 qemu-kvm-rhev PATCH] nbd: make it thread-safe, fix qcow2 over nbd
+Bugzilla: 1454582
+RH-Acked-by: Laurent Vivier 
+RH-Acked-by: Max Reitz 
+RH-Acked-by: Jeffrey Cody 
+
+From: Paolo Bonzini 
+
+NBD is not thread safe, because it accesses s->in_flight without
+a CoMutex.  Fixing this will be required for multiqueue.
+CoQueue doesn't have spurious wakeups but, when another coroutine can
+run between qemu_co_queue_next's wakeup and qemu_co_queue_wait's
+re-locking of the mutex, the wait condition can become false and
+a loop is necessary.
+
+In fact, it turns out that the loop is necessary even without this
+multi-threaded scenario.  A particular sequence of coroutine wakeups
+is happening ~80% of the time when starting a guest with qcow2 image
+served over NBD (i.e. qemu-nbd --format=raw, and QEMU's -drive option
+has -format=qcow2).  This patch fixes that issue too.
+