[pve-devel] applied: [PATCH cluster] updatecerts: create base directories of observed files

2020-04-30 Thread Thomas Lamprecht
This replaces the random hacks where we do some hail-mary mkdir in a
writer or the like, to ensure that the directory structure exists and
we can write safely.

Doing this more centrally in pmxcfs itself would be safer, but it's
too late in the release cycle to do that now.

Chicken out if pmxcfs is not mounted, we don't want to trash its
(future) mountpoint..

Signed-off-by: Thomas Lamprecht 
---

as discussed with Fabian off-list

 data/PVE/CLI/pvecm.pm |  1 +
 data/PVE/Cluster.pm   | 17 +
 2 files changed, 18 insertions(+)

diff --git a/data/PVE/CLI/pvecm.pm b/data/PVE/CLI/pvecm.pm
index 4b52dee..b381f4f 100755
--- a/data/PVE/CLI/pvecm.pm
+++ b/data/PVE/CLI/pvecm.pm
@@ -569,6 +569,7 @@ __PACKAGE__->register_method ({
# no-good for ExecStartPost as it fails the whole service in this case
PVE::Tools::run_fork_with_timeout(30, sub {
	PVE::Cluster::Setup::updatecerts_and_ssh($param->@{qw(force silent)});
+   PVE::Cluster::prepare_observed_file_basedirs();
});
 
return undef;
diff --git a/data/PVE/Cluster.pm b/data/PVE/Cluster.pm
index f40215a..e73f3aa 100644
--- a/data/PVE/Cluster.pm
+++ b/data/PVE/Cluster.pm
@@ -5,6 +5,7 @@ use warnings;
 
 use Encode;
 use File::stat qw();
+use File::Path qw(make_path);
 use JSON;
 use Net::SSLeay;
 use POSIX qw(ENOENT);
@@ -74,6 +75,22 @@ my $observed = {
 'virtual-guest/cpu-models.conf' => 1,
 };
 
+sub prepare_observed_file_basedirs {
+
+    if (!check_cfs_is_mounted(1)) {
+   warn "pmxcfs isn't mounted (/etc/pve), chickening out..\n";
+   return;
+}
+
+for my $f (sort keys %$observed) {
+   next if $f !~ m!^(.*)/[^/]+$!;
+   my $dir = "$basedir/$1";
+	next if -e $dir; # can also be a link, so just use the -e existence check
+	print "creating directory '$dir' for observed files\n";
+   make_path($dir);
+}
+}
+
 sub base_dir {
 return $basedir;
 }
-- 
2.20.1
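
As an aside, the dirname-extraction regex used above can be illustrated
with a tiny standalone snippet (the example path 'priv/lock/file1' is
made up; $basedir is assumed to be /etc/pve):

    #!/usr/bin/perl
    use strict;
    use warnings;

    for my $f ('priv/lock/file1', 'virtual-guest/cpu-models.conf', 'datacenter.cfg') {
        if ($f =~ m!^(.*)/[^/]+$!) {
            # $1 is everything before the last slash, i.e. the base directory
            print "would make_path('/etc/pve/$1')\n";
        } else {
            print "'$f' lives directly in the base directory, nothing to create\n";
        }
    }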




[pve-devel] [PATCH manager] ui: fix missing htmlEncodes

2020-04-30 Thread Dominik Csapak
usernames can include special characters, so we have
to escape them

Signed-off-by: Dominik Csapak 
---
 www/manager6/Workspace.js  | 2 +-
 www/manager6/dc/ACLView.js | 2 +-
 www/manager6/dc/GroupView.js   | 1 +
 www/manager6/dc/Log.js | 2 ++
 www/manager6/dc/PermissionView.js  | 3 ++-
 www/manager6/dc/TFAEdit.js | 1 +
 www/manager6/dc/Tasks.js   | 1 +
 www/manager6/dc/TokenEdit.js   | 1 +
 www/manager6/dc/TokenView.js   | 4 ++--
 www/manager6/dc/UserEdit.js| 1 +
 www/manager6/dc/UserView.js| 4 ++--
 www/manager6/form/GroupSelector.js | 1 +
 www/manager6/form/TokenSelector.js | 1 +
 www/manager6/form/UserSelector.js  | 1 +
 www/manager6/window/Settings.js| 2 +-
 15 files changed, 19 insertions(+), 8 deletions(-)
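
Before the individual diffs, a quick sketch of what the escaping
prevents; the username value is made up, Ext.String.htmlEncode is the
ExtJS helper used throughout this patch:

    // a hostile (or merely unusual) username, e.g. synced from an external realm
    let userName = '<img src=x onerror=alert(1)>@pve';

    // rendered raw, setText() or a column renderer would inject the tag into
    // the DOM; encoded, it is displayed literally:
    console.log(Ext.String.htmlEncode(userName));
    // -> &lt;img src=x onerror=alert(1)&gt;@pve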

diff --git a/www/manager6/Workspace.js b/www/manager6/Workspace.js
index 01b462c7..a95b88d7 100644
--- a/www/manager6/Workspace.js
+++ b/www/manager6/Workspace.js
@@ -182,7 +182,7 @@ Ext.define('PVE.StdWorkspace', {
 updateUserInfo: function() {
var me = this;
var ui = me.query('#userinfo')[0];
-   ui.setText(Proxmox.UserName || '');
+   ui.setText(Ext.String.htmlEncode(Proxmox.UserName || ''));
ui.updateLayout();
 },
 
diff --git a/www/manager6/dc/ACLView.js b/www/manager6/dc/ACLView.js
index d0efe22e..24fd67d9 100644
--- a/www/manager6/dc/ACLView.js
+++ b/www/manager6/dc/ACLView.js
@@ -118,7 +118,7 @@ Ext.define('PVE.dc.ACLView', {
return '@' + ugid;
}
 
-   return ugid;
+   return Ext.String.htmlEncode(ugid);
};
 
var columns = [
diff --git a/www/manager6/dc/GroupView.js b/www/manager6/dc/GroupView.js
index c40c5ba1..960ad114 100644
--- a/www/manager6/dc/GroupView.js
+++ b/www/manager6/dc/GroupView.js
@@ -92,6 +92,7 @@ Ext.define('PVE.dc.GroupView', {
header: gettext('Users'),
sortable: false,
dataIndex: 'users',
+   renderer: Ext.String.htmlEncode,
flex: 1
}
],
diff --git a/www/manager6/dc/Log.js b/www/manager6/dc/Log.js
index 48ce272e..fa58c08a 100644
--- a/www/manager6/dc/Log.js
+++ b/www/manager6/dc/Log.js
@@ -68,6 +68,7 @@ Ext.define('PVE.dc.Log', {
{ 
header: gettext("User name"), 
dataIndex: 'user',
+   renderer: Ext.String.htmlEncode,
width: 150
},
{ 
@@ -79,6 +80,7 @@ Ext.define('PVE.dc.Log', {
{ 
header: gettext("Message"), 
dataIndex: 'msg',
+   renderer: Ext.String.htmlEncode,
flex: 1   
}
],
diff --git a/www/manager6/dc/PermissionView.js b/www/manager6/dc/PermissionView.js
index 483ab015..cc582261 100644
--- a/www/manager6/dc/PermissionView.js
+++ b/www/manager6/dc/PermissionView.js
@@ -140,7 +140,8 @@ Ext.define('PVE.dc.PermissionView', {
 height: 600,
 layout: 'fit',
 cbind: {
-   title: '{userid} - ' + gettext('Granted Permissions'),
+   title: (get) => Ext.String.htmlEncode(get('userid')) +
+   ` - ${gettext('Granted Permissions')}`,
 },
 items: [{
xtype: 'pveUserPermissionGrid',
diff --git a/www/manager6/dc/TFAEdit.js b/www/manager6/dc/TFAEdit.js
index bf51b8c9..3aada4cd 100644
--- a/www/manager6/dc/TFAEdit.js
+++ b/www/manager6/dc/TFAEdit.js
@@ -376,6 +376,7 @@ Ext.define('PVE.window.TFAEdit', {
{
xtype: 'displayfield',
fieldLabel: gettext('User name'),
+   renderer: Ext.String.htmlEncode,
cbind: {
value: '{userid}'
}
diff --git a/www/manager6/dc/Tasks.js b/www/manager6/dc/Tasks.js
index a011fe4f..b1441a72 100644
--- a/www/manager6/dc/Tasks.js
+++ b/www/manager6/dc/Tasks.js
@@ -101,6 +101,7 @@ Ext.define('PVE.dc.Tasks', {
{
header: gettext("User name"),
dataIndex: 'user',
+   renderer: Ext.String.htmlEncode,
width: 150
},
{
diff --git a/www/manager6/dc/TokenEdit.js b/www/manager6/dc/TokenEdit.js
index cdb5d911..13f1dff8 100644
--- a/www/manager6/dc/TokenEdit.js
+++ b/www/manager6/dc/TokenEdit.js
@@ -41,6 +41,7 @@ Ext.define('PVE.dc.TokenEdit', {
},
name: 'userid',
value: Proxmox.UserName,
+   renderer: Ext.String.htmlEncode,
fieldLabel: gettext('User'),
},
{
diff --git a/www/manager6/dc/TokenView.js b/www/manager6/dc/TokenView.js
index c81d5f2f..69c60569 100644
--- a/www/manager6/dc/TokenView.js
+++ b/www/manager6/dc/TokenView.

[pve-devel] [PATCH widget-toolkit] add missing htmlEncodes

2020-04-30 Thread Dominik Csapak
usernames can include special characters, so we have
to escape them

Signed-off-by: Dominik Csapak 
---
 window/TaskViewer.js | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/window/TaskViewer.js b/window/TaskViewer.js
index 347542e..bbbf716 100644
--- a/window/TaskViewer.js
+++ b/window/TaskViewer.js
@@ -127,6 +127,7 @@ Ext.define('Proxmox.window.TaskViewer', {
},
user: {
header: gettext('User name'),
+   renderer: Ext.String.htmlEncode,
required: true
},
node: {
@@ -146,7 +147,8 @@ Ext.define('Proxmox.window.TaskViewer', {
renderer: Proxmox.Utils.render_timestamp
},
upid: {
-   header: gettext('Unique task ID')
+   header: gettext('Unique task ID'),
+   renderer: Ext.String.htmlEncode,
}
};
 
-- 
2.20.1




[pve-devel] applied: [PATCH to pve-docs] Also mentioning FC-based storage in pve

2020-04-30 Thread Thomas Lamprecht
On 4/29/20 5:30 PM, Andreas Steinel wrote:
> Signed-off-by: Andreas Steinel 
> ---
>  pvesm.adoc | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/pvesm.adoc b/pvesm.adoc
> index 5340c3d..b76ce87 100644
> --- a/pvesm.adoc
> +++ b/pvesm.adoc
> @@ -84,8 +84,8 @@ data to different nodes.
>  
>  ^1^: On file based storages, snapshots are possible with the 'qcow2' format.
>  
> -^2^: It is possible to use LVM on top of an iSCSI storage. That way
> -you get a `shared` LVM storage.
> +^2^: It is possible to use LVM on top of an iSCSI or FC-based storage.
> +That way you get a `shared` LVM storage.
>  
>  
>  Thin Provisioning
> 

Applied, much thanks!



Re: [pve-devel] [PATCH docs v2] add documentation for ldap syncing

2020-04-30 Thread Aaron Lauterer
Sorry, I didn't get to read v1 in time. Some nits regarding grammar, 
reading flow and such inline:


Other than those:
Reviewed-By: Aaron Lauterer 

On 4/30/20 2:27 PM, Dominik Csapak wrote:

explaining the main Requirements and limitations, as well as the
most important sync options

Signed-off-by: Dominik Csapak 
---
changes from v1:
* incorporated suggestions from Alwin, thanks :)
* re-worded the sentence about limitations to specify the character
   limitations of user.cfg
  pveum.adoc | 47 +++
  1 file changed, 47 insertions(+)

diff --git a/pveum.adoc b/pveum.adoc
index c89d4b8..80b3385 100644
--- a/pveum.adoc
+++ b/pveum.adoc
@@ -170,6 +170,53 @@ A server and authentication domain need to be specified. Like with
  ldap an optional fallback server, optional port, and SSL
  encryption can be configured.
  
+[[pveum_ldap_sync]]

+Syncing LDAP-based realms
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to sync users and groups for LDAP based realms using
+  pveum sync 
+or in the `Authentication` panel of the GUI to the user.cfg.



The last part `to the user.cfg` makes this sentence somewhat cumbersome 
and hard to read.


Without knowing the full technical details, would it be okay to rewrite 
it to:


"... or in the `Authentication` panel of the GUI. Users and groups are 
synced to the `user.cfg`".


Or maybe even drop the mention of the user.cfg completely?


+
+Requirements and limitations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The `bind_dn` is used to query the users and groups, so this account


s/, so this/. This/


+should be able to see all desired entries.


s/should be able to see/needs access to/

though this is just a preference of mine and not something that really 
should be changed.



+
+The fields which represent the names of the users and groups can be configured
+via the `user_attr` and `group_name_attr` respectively. Only entries which
+adhere to the usual character limitations of the user.cfg are synced.


is there another chapter explaining the character limitations in the 
user.cfg to which we could link?



+
+Groups are synced with `-$realm` attached to the name, to avoid naming
+conflicts. Please make sure that a sync does not overwrite manually created
+groups.
+
+Options
+^^^^^^^
+
+The main options for syncing are:
+
+* `dry-run`: No data is written to the config. This is useful if you want to
+  see which users and groups would get synced to the user.cfg. This is set
+  when you click `Preview` in the GUI.
+
+* `enable-new`: If set, the newly synced users are enabled and can login.
+  The default is `true`.
+
+* `full`: If set, the sync uses the LDAP Directory as a source of truth,
+  overwriting information set manually in the user.cfg and deleting users
+  and groups which are not returned. If not set, only new data is


possibly
s/returned/present in the LDAP/
to make it clearer?


+  written to the config, and no stale users are deleted.
+
+* `purge`: If set, sync removes all corresponding ACLs when removing users
+  and groups. This is only useful with the option `full`.
+
+* `scope`: The scope of what to sync. It can be either `users`, `groups` or
+  `both`.
+
+These options are either set as parameters or as defaults, via the
+realm option `sync-defaults-options`.
  
  [[pveum_tfa_auth]]

  Two-factor authentication





[pve-devel] [PATCH docs v2] add documentation for ldap syncing

2020-04-30 Thread Dominik Csapak
explaining the main Requirements and limitations, as well as the
most important sync options

Signed-off-by: Dominik Csapak 
---
changes from v1:
* incorporated suggestions from Alwin, thanks :)
* re-worded the sentence about limitations to specify the character
  limitations of user.cfg
 pveum.adoc | 47 +++
 1 file changed, 47 insertions(+)

diff --git a/pveum.adoc b/pveum.adoc
index c89d4b8..80b3385 100644
--- a/pveum.adoc
+++ b/pveum.adoc
@@ -170,6 +170,53 @@ A server and authentication domain need to be specified. Like with
 ldap an optional fallback server, optional port, and SSL
 encryption can be configured.
 
+[[pveum_ldap_sync]]
+Syncing LDAP-based realms
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to sync users and groups for LDAP based realms using
+  pveum sync 
+or in the `Authentication` panel of the GUI to the user.cfg.
+
+Requirements and limitations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The `bind_dn` is used to query the users and groups, so this account
+should be able to see all desired entries.
+
+The fields which represent the names of the users and groups can be configured
+via the `user_attr` and `group_name_attr` respectively. Only entries which
+adhere to the usual character limitations of the user.cfg are synced.
+
+Groups are synced with `-$realm` attached to the name, to avoid naming
+conflicts. Please make sure that a sync does not overwrite manually created
+groups.
+
+Options
+^^^^^^^
+
+The main options for syncing are:
+
+* `dry-run`: No data is written to the config. This is useful if you want to
+  see which users and groups would get synced to the user.cfg. This is set
+  when you click `Preview` in the GUI.
+
+* `enable-new`: If set, the newly synced users are enabled and can login.
+  The default is `true`.
+
+* `full`: If set, the sync uses the LDAP Directory as a source of truth,
+  overwriting information set manually in the user.cfg and deleting users
+  and groups which are not returned. If not set, only new data is
+  written to the config, and no stale users are deleted.
+
+* `purge`: If set, sync removes all corresponding ACLs when removing users
+  and groups. This is only useful with the option `full`.
+
+* `scope`: The scope of what to sync. It can be either `users`, `groups` or
+  `both`.
+
+These options are either set as parameters or as defaults, via the
+realm option `sync-defaults-options`.
 
 [[pveum_tfa_auth]]
 Two-factor authentication
-- 
2.20.1
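
For readers of the archive, a hedged example of how these options might
be combined on the CLI; the realm name is hypothetical and the boolean
flag syntax is assumed to follow the usual PVE convention:

    # preview what a full sync of users and groups would change, without
    # writing anything to user.cfg (equivalent to 'Preview' in the GUI)
    pveum sync example-ldap --scope both --full 1 --dry-run 1

    # apply it for real, also dropping the ACLs of users/groups that get removed
    pveum sync example-ldap --scope both --full 1 --purge 1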




Re: [pve-devel] [PATCH docs] add documentation for ldap syncing

2020-04-30 Thread Alwin Antreich
My suggestions inline.

On Thu, Apr 30, 2020 at 01:14:27PM +0200, Dominik Csapak wrote:
> explaining the main Requirements and limitations, as well as the
> most important sync options
> 
> Signed-off-by: Dominik Csapak 
> ---
>  pveum.adoc | 47 +++
>  1 file changed, 47 insertions(+)
> 
> diff --git a/pveum.adoc b/pveum.adoc
> index c89d4b8..5881fa9 100644
> --- a/pveum.adoc
> +++ b/pveum.adoc
> @@ -170,6 +170,53 @@ A server and authentication domain need to be specified. Like with
>  ldap an optional fallback server, optional port, and SSL
>  encryption can be configured.
>  
> +[[pveum_ldap_sync]]
> +Syncing LDAP-based realms
> +~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +It is possible to sync users and groups for ldap based realms using
s/ldap/LDAP

> +  pveum sync 
> +or in the `Authentication` panel of the GUI to the user.cfg.
> +
> +Requirements and limitations
> +^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> +
> +The `bind_dn` will be used to query the users and groups, so this account
> +should be able to see all desired entries.
s/will be/is/

> +
> +The names of the users and groups (configurable via `user_attr` and
> +`group_name_attr` respectively) have to adhere to the limitations of usual
> +users and groups in the config.
For me, this is hard to read. It may be better in two sentences. And
what does it mean, adhere to the limitations?

eg:
The user and group names have to adhere to the limitation of the
configuration.  Configurable via `user_attr` and `group_name_attr`
respectively.

> +
> +Groups will be synced with `-$realm` attached to the name, to avoid naming
s/will be/are/

> +conflicts. Please make sure that a sync does not overwrite manually created
> +groups.
> +
> +Options
> +^^^^^^^
> +
> +The main options for syncing are:
> +
> +* `dry-run`: No data will actually be synced. This is useful if you want to
> +  see which users and groups would get synced to the user.cfg. This is set
> +  when you click `Preview` in the GUI.
s/will actually/is/

> +
> +* `enable-new`: If set, the newly synced users are enabled and can login.
> +  The default is `true`.
> +
> +* `full`: If set, the sync usses the LDAP Directory as source of truth,
s/usses/uses/
s/as source/as a source/

> +  overwriting information set manually in the user.cfg and deleting users
> +  and groups which were not returned. If not set, only new data
s/were not returned/are not returned/

> +  will be written to the config, and no stale users will be deleted.
s/will be/is/

> +
> +* `purge`: If set, sync removes all corresponding ACLs when removing users
> +  and groups. This is only useful with the option `full`.
> +
> +* `scope`: The scope of what to sync. Can be either `users`, `groups` or
s/Can be/It can be/

> +  `both`.
> +
> +These options either to be set either as parameters, or as defaults, via the
These options are either set as parameters or as defaults, via the

> +realm option `sync-defaults-options`.
>  
>  [[pveum_tfa_auth]]
>  Two-factor authentication
> -- 
> 2.20.1



Re: [pve-devel] [PATCH manager 2/3] Allow setting targetstorage for offline migration

2020-04-30 Thread Dominik Csapak




On 4/30/20 1:15 PM, Fabian Ebner wrote:

Let's make this one an RFC ;)
The problem is that offline migration with target storage might not 
always work depending on supported export/import formats. Then users 
might start an offline migration, which then fails after a few disks 
have already been copied.
If we had a check for the export/import formats before starting 
migration, that would be an improvement. But currently the erroring out
happens on a per-disk basis inside storage_migrate.


So I'm not sure anymore if this is an improvement. If not, and if patch 
#3 is fine, I'll send a v2 without this one.




if we cannot produce a warning/error beforehand, I would abstain
from allowing it in the GUI (for now)

we had a similar issue with local disks, which is the
reason most of this panel exists in the first place

having the user wait for an error late into the migration
is really not nice and frustrating for the user

but for 3/3 I wanted to send a very similar patch myself,
so that one is ack'ed by me



Re: [pve-devel] [PATCH manager 1/3] Don't show empty parentheses when size is not known

2020-04-30 Thread Dominik Csapak

one comment inline

On 4/30/20 12:59 PM, Fabian Ebner wrote:

The size of VM state files and the size of unused disks not
referenced by any snapshot is not saved in the VM configuration,
so it's not available here either.

Signed-off-by: Fabian Ebner 
---
  www/manager6/window/Migrate.js | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/manager6/window/Migrate.js b/www/manager6/window/Migrate.js
index 61bc6a49..9fc66a9b 100644
--- a/www/manager6/window/Migrate.js
+++ b/www/manager6/window/Migrate.js
@@ -269,7 +269,7 @@ Ext.define('PVE.window.Migrate', {
migration['with-local-disks'] = 1;
migration.preconditions.push({
 			text:'Migration with local disk might take long: ' + disk.volid
-			    +' (' + PVE.Utils.render_size(disk.size) + ')',
+			    + (disk.size ? ' (' + PVE.Utils.render_size(disk.size) + ')' : ''),


we are already deeply nested here and the line is already very long,
so I would rather put the text above in a variable
(and maybe use template strings)



severity: 'warning'
});
}
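
A rough sketch of what that could look like (the helper variable name
is invented here):

    let sizeHint = disk.size ? ` (${PVE.Utils.render_size(disk.size)})` : '';
    migration.preconditions.push({
	text: `Migration with local disk might take long: ${disk.volid}${sizeHint}`,
	severity: 'warning',
    });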





[pve-devel] [PATCH manager] ui: dc/SyncWindow: add help button

2020-04-30 Thread Dominik Csapak
with link to the LDAP Syncing section of the documentation

Signed-off-by: Dominik Csapak 
---
 www/manager6/dc/SyncWindow.js | 8 
 1 file changed, 8 insertions(+)

diff --git a/www/manager6/dc/SyncWindow.js b/www/manager6/dc/SyncWindow.js
index e85e5b88..9355c551 100644
--- a/www/manager6/dc/SyncWindow.js
+++ b/www/manager6/dc/SyncWindow.js
@@ -21,6 +21,7 @@ Ext.define('PVE.dc.SyncWindow', {
},
'button': {
click: function(btn) {
+   if (btn.reference === 'help_btn') return;
this.sync_realm(btn.reference === 'preview_btn');
},
},
@@ -144,6 +145,13 @@ Ext.define('PVE.dc.SyncWindow', {
 ],
 
 buttons: [
+   {
+   xtype: 'proxmoxHelpButton',
+   reference: 'help_btn',
+   onlineHelp: 'pveum_ldap_sync',
+   hidden: false,
+   },
+   '->',
{
text: gettext('Preview'),
reference: 'preview_btn',
-- 
2.20.1




[pve-devel] [PATCH docs] add documentation for ldap syncing

2020-04-30 Thread Dominik Csapak
explaining the main Requirements and limitations, as well as the
most important sync options

Signed-off-by: Dominik Csapak 
---
 pveum.adoc | 47 +++
 1 file changed, 47 insertions(+)

diff --git a/pveum.adoc b/pveum.adoc
index c89d4b8..5881fa9 100644
--- a/pveum.adoc
+++ b/pveum.adoc
@@ -170,6 +170,53 @@ A server and authentication domain need to be specified. Like with
 ldap an optional fallback server, optional port, and SSL
 encryption can be configured.
 
+[[pveum_ldap_sync]]
+Syncing LDAP-based realms
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is possible to sync users and groups for ldap based realms using
+  pveum sync 
+or in the `Authentication` panel of the GUI to the user.cfg.
+
+Requirements and limitations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The `bind_dn` will be used to query the users and groups, so this account
+should be able to see all desired entries.
+
+The names of the users and groups (configurable via `user_attr` and
+`group_name_attr` respectively) have to adhere to the limitations of usual
+users and groups in the config.
+
+Groups will be synced with `-$realm` attached to the name, to avoid naming
+conflicts. Please make sure that a sync does not overwrite manually created
+groups.
+
+Options
+^^^^^^^
+
+The main options for syncing are:
+
+* `dry-run`: No data will actually be synced. This is useful if you want to
+  see which users and groups would get synced to the user.cfg. This is set
+  when you click `Preview` in the GUI.
+
+* `enable-new`: If set, the newly synced users are enabled and can login.
+  The default is `true`.
+
+* `full`: If set, the sync usses the LDAP Directory as source of truth,
+  overwriting information set manually in the user.cfg and deleting users
+  and groups which were not returned. If not set, only new data
+  will be written to the config, and no stale users will be deleted.
+
+* `purge`: If set, sync removes all corresponding ACLs when removing users
+  and groups. This is only useful with the option `full`.
+
+* `scope`: The scope of what to sync. Can be either `users`, `groups` or
+  `both`.
+
+These options either to be set either as parameters, or as defaults, via the
+realm option `sync-defaults-options`.
 
 [[pveum_tfa_auth]]
 Two-factor authentication
-- 
2.20.1




Re: [pve-devel] [PATCH manager 2/3] Allow setting targetstorage for offline migration

2020-04-30 Thread Fabian Ebner

Let's make this one an RFC ;)
The problem is that offline migration with target storage might not 
always work depending on supported export/import formats. Then users 
might start an offline migration, which then fails after a few disks 
have already been copied.
If we had a check for the export/import formats before starting 
migration, that would be an improvement. But currently the erroring out
happens on a per-disk basis inside storage_migrate.


So I'm not sure anymore if this is an improvement. If not, and if patch 
#3 is fine, I'll send a v2 without this one.


On 30.04.20 12:59, Fabian Ebner wrote:

Signed-off-by: Fabian Ebner 
---
  www/manager6/window/Migrate.js | 12 ++--
  1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/www/manager6/window/Migrate.js b/www/manager6/window/Migrate.js
index 9fc66a9b..20e057ad 100644
--- a/www/manager6/window/Migrate.js
+++ b/www/manager6/window/Migrate.js
@@ -44,13 +44,6 @@ Ext.define('PVE.window.Migrate', {
return gettext('Offline');
}
},
-   setStorageselectorHidden: function(get) {
-   if (get('migration.with-local-disks') && get('running')) {
-   return false;
-   } else {
-   return true;
-   }
-   },
setLocalResourceCheckboxHidden: function(get) {
if (get('running') || !get('migration.hasLocalResources') ||
Proxmox.UserName !== 'root@pam') {
@@ -126,8 +119,7 @@ Ext.define('PVE.window.Migrate', {
if (vm.get('migration.with-local-disks')) {
params['with-local-disks'] = 1;
}
-	//only submit targetstorage if vm is running, storage migration to different storage is only possible online
-   if (vm.get('migration.with-local-disks') && vm.get('running')) {
+   if (vm.get('migration.with-local-disks')) {
params.targetstorage = values.targetstorage;
}
  
@@ -352,7 +344,7 @@ Ext.define('PVE.window.Migrate', {

fieldLabel: gettext('Target storage'),
storageContent: 'images',
bind: {
-   hidden: '{setStorageselectorHidden}'
+   hidden: '{!migration.with-local-disks}',
}
},
{





[pve-devel] [PATCH manager 3/3] Allow setting no target storage and make it default

2020-04-30 Thread Fabian Ebner
so the current disk locations can be preserved even if
there are multiple local disks. And users don't have to
manually select the current storage if there is only one
local disk.

Signed-off-by: Fabian Ebner 
---

Not too happy about the "use current layout" text. Maybe
somebody has a better idea.

 www/manager6/window/Migrate.js | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/www/manager6/window/Migrate.js b/www/manager6/window/Migrate.js
index 20e057ad..8f3cf1c2 100644
--- a/www/manager6/window/Migrate.js
+++ b/www/manager6/window/Migrate.js
@@ -119,7 +119,7 @@ Ext.define('PVE.window.Migrate', {
if (vm.get('migration.with-local-disks')) {
params['with-local-disks'] = 1;
}
-   if (vm.get('migration.with-local-disks')) {
+   if (vm.get('migration.with-local-disks') && values.targetstorage) {
params.targetstorage = values.targetstorage;
}
 
@@ -343,6 +343,9 @@ Ext.define('PVE.window.Migrate', {
name: 'targetstorage',
fieldLabel: gettext('Target storage'),
storageContent: 'images',
+   allowBlank: true,
+   autoSelect: false,
+   emptyText: 'use current layout',
bind: {
hidden: '{!migration.with-local-disks}',
}
-- 
2.20.1




[pve-devel] [PATCH manager 1/3] Don't show empty parentheses when size is not known

2020-04-30 Thread Fabian Ebner
The size of VM state files and the size of unused disks not
referenced by any snapshot is not saved in the VM configuration,
so it's not available here either.

Signed-off-by: Fabian Ebner 
---
 www/manager6/window/Migrate.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/manager6/window/Migrate.js b/www/manager6/window/Migrate.js
index 61bc6a49..9fc66a9b 100644
--- a/www/manager6/window/Migrate.js
+++ b/www/manager6/window/Migrate.js
@@ -269,7 +269,7 @@ Ext.define('PVE.window.Migrate', {
migration['with-local-disks'] = 1;
migration.preconditions.push({
 			text:'Migration with local disk might take long: ' + disk.volid
-			    +' (' + PVE.Utils.render_size(disk.size) + ')',
+			    + (disk.size ? ' (' + PVE.Utils.render_size(disk.size) + ')' : ''),
severity: 'warning'
});
}
-- 
2.20.1




[pve-devel] [PATCH manager 2/3] Allow setting targetstorage for offline migration

2020-04-30 Thread Fabian Ebner
Signed-off-by: Fabian Ebner 
---
 www/manager6/window/Migrate.js | 12 ++--
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/www/manager6/window/Migrate.js b/www/manager6/window/Migrate.js
index 9fc66a9b..20e057ad 100644
--- a/www/manager6/window/Migrate.js
+++ b/www/manager6/window/Migrate.js
@@ -44,13 +44,6 @@ Ext.define('PVE.window.Migrate', {
return gettext('Offline');
}
},
-   setStorageselectorHidden: function(get) {
-   if (get('migration.with-local-disks') && get('running')) {
-   return false;
-   } else {
-   return true;
-   }
-   },
setLocalResourceCheckboxHidden: function(get) {
if (get('running') || !get('migration.hasLocalResources') ||
Proxmox.UserName !== 'root@pam') {
@@ -126,8 +119,7 @@ Ext.define('PVE.window.Migrate', {
if (vm.get('migration.with-local-disks')) {
params['with-local-disks'] = 1;
}
-	//only submit targetstorage if vm is running, storage migration to different storage is only possible online
-   if (vm.get('migration.with-local-disks') && vm.get('running')) {
+   if (vm.get('migration.with-local-disks')) {
params.targetstorage = values.targetstorage;
}
 
@@ -352,7 +344,7 @@ Ext.define('PVE.window.Migrate', {
fieldLabel: gettext('Target storage'),
storageContent: 'images',
bind: {
-   hidden: '{setStorageselectorHidden}'
+   hidden: '{!migration.with-local-disks}',
}
},
{
-- 
2.20.1




[pve-devel] [PATCH qemu-server] migrate: skip rescan for efidisk and shared volumes

2020-04-30 Thread Dominik Csapak
we really only want to rescan the disk size of the disks we actually
need, and those are only the local disks (for which we have to allocate
the correct size on the target)

also we want to always skip the efidisk, since we get the wanted
size after the loop, and this produced a confusing log line
(for details why we do not want the 'real' size,
see commit 818ce80ec1a89c4abee61145c858b9323180e31b)

Signed-off-by: Dominik Csapak 
---
 PVE/QemuMigrate.pm | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 7644922..f116add 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -518,6 +518,9 @@ sub sync_disks {
my $volid_hash = PVE::QemuServer::scan_volids($storecfg, $vmid);
PVE::QemuConfig->foreach_volume($conf, sub {
my ($key, $drive) = @_;
+   return if $key eq 'efidisk0'; # skip efidisk, will be handled later
+	return if !defined($local_volumes->{$key}); # only update sizes for local volumes
+
	my ($updated, $old_size, $new_size) = PVE::QemuServer::Drive::update_disksize($drive, $volid_hash);
if (defined($updated)) {
$conf->{$key} = PVE::QemuServer::print_drive($updated);
-- 
2.20.1




[pve-devel] [PATCH v2 firewall] fix #2686: don't add arp-ip-src filter for dhcp

2020-04-30 Thread Mira Limbeck
When the IPFilter setting is enabled and the container has DHCP
configured on an interface, no 'arp-ip-src' filter should be added, as
we don't have an IP address.
Previously, '--arp-ip-src dhcp' was passed to ebtables, which led to an error.

Signed-off-by: Mira Limbeck 
---
v2:
 - changed regex to a simple string comparison

 src/PVE/Firewall.pm | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/PVE/Firewall.pm b/src/PVE/Firewall.pm
index d22b15a..250a642 100644
--- a/src/PVE/Firewall.pm
+++ b/src/PVE/Firewall.pm
@@ -3904,7 +3904,9 @@ sub compile_ebtables_filter {
# ebtables changes this to a .0/MASK network but we just
# want the address here, no network - see #2193
$ip =~ s|/(\d+)$||;
-   push @$arpfilter, $ip;
+   if ($ip ne 'dhcp') {
+   push @$arpfilter, $ip;
+   }
}
generate_tap_layer2filter($ruleset, $iface, $macaddr, 
$vmfw_conf, $vmid, $arpfilter);
}
-- 
2.20.1




Re: [pve-devel] [PATCH firewall] fix #2686: don't add arp-ip-src filter for dhcp

2020-04-30 Thread Mira Limbeck

On 4/30/20 12:00 PM, Mira Limbeck wrote:

When the IPFilter setting is enabled and the container has DHCP
configured on an interface, no 'arp-ip-src' filter should be added for
this interface, as we don't have an IP address.

Previously, '--arp-ip-src dhcp' was passed to ebtables, which led to an error.

Signed-off-by: Mira Limbeck 
---
  src/PVE/Firewall.pm | 4 +++-
  1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/PVE/Firewall.pm b/src/PVE/Firewall.pm
index d22b15a..19f1400 100644
--- a/src/PVE/Firewall.pm
+++ b/src/PVE/Firewall.pm
@@ -3904,7 +3904,9 @@ sub compile_ebtables_filter {
# ebtables changes this to a .0/MASK network but we just
# want the address here, no network - see #2193
$ip =~ s|/(\d+)$||;
-   push @$arpfilter, $ip;
+   if ($ip !~ m/^dhcp$/) {
With the anchored regex we could simply change that to $ip ne 'dhcp', 
will send a v2.

+   push @$arpfilter, $ip;
+   }
}
generate_tap_layer2filter($ruleset, $iface, $macaddr, 
$vmfw_conf, $vmid, $arpfilter);
}




[pve-devel] [PATCH firewall] fix #2686: don't add arp-ip-src filter for dhcp

2020-04-30 Thread Mira Limbeck
When the IPFilter setting is enabled and the container has DHCP
configured on an interface, no 'arp-ip-src' filter should be added for
this interface, as we don't have an IP address.

Previously, '--arp-ip-src dhcp' was passed to ebtables, which led to an error.

Signed-off-by: Mira Limbeck 
---
 src/PVE/Firewall.pm | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/PVE/Firewall.pm b/src/PVE/Firewall.pm
index d22b15a..19f1400 100644
--- a/src/PVE/Firewall.pm
+++ b/src/PVE/Firewall.pm
@@ -3904,7 +3904,9 @@ sub compile_ebtables_filter {
# ebtables changes this to a .0/MASK network but we just
# want the address here, no network - see #2193
$ip =~ s|/(\d+)$||;
-   push @$arpfilter, $ip;
+   if ($ip !~ m/^dhcp$/) {
+   push @$arpfilter, $ip;
+   }
}
generate_tap_layer2filter($ruleset, $iface, $macaddr, 
$vmfw_conf, $vmid, $arpfilter);
}
-- 
2.20.1




Re: [pve-devel] Same migration IP on multiple devices

2020-04-30 Thread Harald Leithner
Hi,

seems the problem still exists in the current version
(pve-manager/6.1-8/806edfe1 (running kernel: 5.3.18-3-pve))

Did you have time to check this again?

thx

Harald

Am 26.11.2019 um 15:22 schrieb Harald Leithner:
> Hi,
> 
> good to know and thx for the fix. I think the same is true for creating
> a ceph mon from the gui.
> 
> You get an error message complaining about multiple IPs if you create
> a ceph mon. This can be overridden on the console with --mon-address,
> but maybe it should be fixed too.
> 
> also your fix will not work (untested) because the last proxmox update
> introduced the same check in 2 other files.
> 
> Files I patch:
> /usr/share/perl5/PVE/Cluster.pm
> /usr/share/perl5/PVE/CLI/pvecm.pm
> /usr/share/perl5/PVE/QemuServer.pm
> 
> thx
> 
> Harald
> 
> Am 26.11.2019 um 13:55 schrieb Thomas Lamprecht:
>> Hi,
>>
>> On 11/26/19 1:39 PM, Harald Leithner wrote:
>>> one of my 3 nodes clusters has a full mesh. While trying to migrate a VM
>>> I get the error message "could not get migration ip: multiple IP address
>>> configured for "
>>>
>>> This only happens when the migration IP is configured on more then one
>>> device.
>>>
>>> This is the suggested configuration for a full mesh at
>>> https://pve.proxmox.com/wiki/Full_Mesh_Network_for_Ceph_Server
>>>
>>> Removing the check from the source works without problems, is there
>>> anything I miss?
>>>
>>
>> No, you're right, this is a legitimate case which got caught by the
>> check by mistake. Instead of removing it, I refined it to ignore
>> multiple addresses as long as they're identical to each other.
>>
>> Much thanks for reporting!
>>
>> cheers,
>> Thomas
>>
> 
> 
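
The refinement Thomas describes presumably boils down to deduplicating
the candidate addresses before the uniqueness check; a minimal sketch
under that assumption, not the actual PVE code:

    use List::Util qw(uniq); # uniq needs List::Util >= 1.45

    # in a full mesh the same migration IP is configured on several interfaces
    my @candidates = ('10.10.10.1', '10.10.10.1', '10.10.10.1');
    my @ips = uniq @candidates;

    die "could not get migration ip: multiple, distinct IP addresses configured\n"
	if scalar(@ips) > 1;
    my $migration_ip = $ips[0]; # fine: all duplicates were identical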

-- 
ITronic

Harald Leithner
Wiedner Hauptstraße 120/5.1, 1050 Wien, Austria
Tel: +43-1-545 0 604
Mobil: +43-699-123 78 4 78
Mail: leith...@itronic.at | itronic.at





[pve-devel] [PATCH v2 container] create_vm: avoid premature write_config caused by update_pct_config

2020-04-30 Thread Fabian Ebner
by moving the write_config calls from vmconfig_*_pending to their
call sites. The single other call site for update_pct_config in
update_vm is also adapted. The first write_config call in
vmconfig_hotplug_pending was redundant as the code between it and
the second write_config call shouldn't be able to die. This allowed
dropping the $changes variable as well.

The update_pct_config call lead to a write_config call and so the
configuration file was created before it was intended to be created.

When the CFS is updated in between the write_config call and the
PVE::Cluster::check_vmid_unused call in create_and_lock_config,
the container file would already exist and so creation would
fail after writing out a basically empty config.
(Meaning this is necessary for the proposed "update CFS after
obtaining a configuration file lock" changes)

Even worse, a race was possible for two containers created with the
same ID at the same time:
Assuming the initial PVE::Cluster::check_vmid_unused check in the
parameter verification passes for both create_vm calls, the later one
would potentially overwrite the earlier configuration file with its
update_pct_config call.

Additionally, the file read for $old_config was always the one written
by update_pct_config. Meaning that for a create_vm call with force=1,
already existing old volumes were not removed.

Signed-off-by: Fabian Ebner 
---

Changes from v1:
* instead of re-ordering, move the write_config
  calls to the (grand-)parent call sites.

There are now two places where write_config is immediately followed
by load_config. I didn't want to change the semantics for the
non-create_vm call sites, so I left them there. Are there any
possible side effects? Otherwise, the load_config calls could be
dropped as a follow-up. Note that the third place in lxc-pve-poststop-hook
has no load_config call.

 src/PVE/API2/LXC/Config.pm |  1 +
 src/PVE/LXC.pm |  1 +
 src/PVE/LXC/Config.pm  | 10 --
 src/lxc-pve-poststop-hook  |  5 -
 4 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/src/PVE/API2/LXC/Config.pm b/src/PVE/API2/LXC/Config.pm
index 42e16d1..73fec36 100644
--- a/src/PVE/API2/LXC/Config.pm
+++ b/src/PVE/API2/LXC/Config.pm
@@ -204,6 +204,7 @@ __PACKAGE__->register_method({
my $running = PVE::LXC::check_running($vmid);
 
	my $errors = PVE::LXC::Config->update_pct_config($vmid, $conf, $running, $param, \@delete, \@revert);
+   PVE::LXC::Config->write_config($vmid, $conf);
$conf = PVE::LXC::Config->load_config($vmid);
 
PVE::LXC::update_lxc_config($vmid, $conf);
diff --git a/src/PVE/LXC.pm b/src/PVE/LXC.pm
index efbd1d9..2b41ae6 100644
--- a/src/PVE/LXC.pm
+++ b/src/PVE/LXC.pm
@@ -2173,6 +2173,7 @@ sub vm_start {
 if (scalar(keys %{$conf->{pending}})) {
my $storecfg = PVE::Storage::config();
PVE::LXC::Config->vmconfig_apply_pending($vmid, $conf, $storecfg);
+   PVE::LXC::Config->write_config($vmid, $conf);
$conf = PVE::LXC::Config->load_config($vmid); # update/reload
 }
 
diff --git a/src/PVE/LXC/Config.pm b/src/PVE/LXC/Config.pm
index 1443f87..dcc8755 100644
--- a/src/PVE/LXC/Config.pm
+++ b/src/PVE/LXC/Config.pm
@@ -1161,19 +1161,13 @@ sub vmconfig_hotplug_pending {
$errors->{$opt} = "unable to hotplug $opt: $msg";
 };
 
-my $changes;
 foreach my $opt (sort keys %{$conf->{pending}}) { # add/change
next if $selection && !$selection->{$opt};
if ($LXC_FASTPLUG_OPTIONS->{$opt}) {
$conf->{$opt} = delete $conf->{pending}->{$opt};
-   $changes = 1;
}
 }
 
-if ($changes) {
-   $class->write_config($vmid, $conf);
-}
-
 my $cgroup = PVE::LXC::CGroup->new($vmid);
 
 # There's no separate swap size to configure, there's memory and "total"
@@ -1258,8 +1252,6 @@ sub vmconfig_hotplug_pending {
delete $conf->{pending}->{$opt};
}
 }
-
-$class->write_config($vmid, $conf);
 }
 
 sub vmconfig_apply_pending {
@@ -1316,8 +1308,6 @@ sub vmconfig_apply_pending {
$conf->{$opt} = delete $conf->{pending}->{$opt};
}
 }
-
-$class->write_config($vmid, $conf);
 }
 
 my $rescan_volume = sub {
diff --git a/src/lxc-pve-poststop-hook b/src/lxc-pve-poststop-hook
index 1dba48c..2683ae2 100755
--- a/src/lxc-pve-poststop-hook
+++ b/src/lxc-pve-poststop-hook
@@ -51,7 +51,10 @@ PVE::LXC::Tools::lxc_hook('post-stop', 'lxc', sub {
 
 my $config_updated = 0;
 if ($conf->{pending}) {
-	eval { PVE::LXC::Config->vmconfig_apply_pending($vmid, $conf, $storage_cfg) };
+	eval {
+	    PVE::LXC::Config->vmconfig_apply_pending($vmid, $conf, $storage_cfg);
+   PVE::LXC::Config->write_config($vmid, $conf);
+   };
warn "$@" if $@;
PVE::LXC::update_lxc_config($vmid, $conf);
$config_updated = 1;
-- 
2.20.1



[pve-devel] [PATCH manager] NodeConfig: ensure locked context has current view

2020-04-30 Thread Fabian Grünbichler
similar to the recent changes for pve-guest-common - we start each API
call with a cfs_update, but while we were waiting for the flock another
R-M-W cycle might have happened, so we need to refresh after obtaining
the lock.

Signed-off-by: Fabian Grünbichler 
---

Notes:
there's only a single API call using this, so it's pretty straightforward.

patch generated on-top of ACME patch set

 PVE/NodeConfig.pm | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/PVE/NodeConfig.pm b/PVE/NodeConfig.pm
index a0921558..6a95d7aa 100644
--- a/PVE/NodeConfig.pm
+++ b/PVE/NodeConfig.pm
@@ -50,7 +50,13 @@ sub write_config {
 }
 
 sub lock_config {
-my ($node, $code, @param) = @_;
+my ($node, $realcode, @param) = @_;
+
+# make sure configuration file is up-to-date
+my $code = sub {
+   PVE::Cluster::cfs_update();
+   $realcode->(@_);
+};
 
 my $res = lock_file($node_config_lock, 10, $code, @param);
 
-- 
2.20.1
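
For orientation, a hedged sketch of how a caller would use this wrapper;
load_config is assumed from the surrounding module, and the modified key
is just an example:

    PVE::NodeConfig::lock_config($nodename, sub {
	# cfs_update() has just run inside the lock, so this read is current
	my $conf = PVE::NodeConfig::load_config($nodename);
	$conf->{description} = 'updated under lock'; # example modification
	PVE::NodeConfig::write_config($nodename, $conf);
    });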




Re: [pve-devel] applied: [PATCH qemu-server 1/2] migrate: only stop NBD if we got a NBD url from the target

2020-04-30 Thread Mira Limbeck

On 4/30/20 10:13 AM, Thomas Lamprecht wrote:

On 4/30/20 10:04 AM, Mira Limbeck wrote:

On 4/30/20 8:41 AM, Thomas Lamprecht wrote:

On 4/30/20 8:35 AM, Fabian Grünbichler wrote:

On April 29, 2020 4:24 pm, Thomas Lamprecht wrote:

Signed-off-by: Thomas Lamprecht 
---

This was rather quickly assembled to fix an obvious issue, some in depth look
at this would be nice, @Fabi or @Fabian :)

LGTM!


It really should be OK, but I'm still a bit wondering why 
"$self->{storage_migration}"
was true in my case, a VM with only a single disk on ceph..
Not that the reason for this can have other fallout.

I'm wondering that too. I tried reproducing it with qemu-server 6.1-14 and 
could not (using an external ceph cluster as storage).

Did you test it with qemu-server 6.1-15 or an even newer version? Was the disk 
in $local_volumes for some reason?


Fabian investigated and got the reason, see:
https://pve.proxmox.com/pipermail/pve-devel/2020-April/043272.html

Yes, sorry for the noise, I haven't completely caught up with
everything yet. Reading up on it atm.




Re: [pve-devel] applied: [PATCH qemu-server 1/2] migrate: only stop NBD if we got a NBD url from the target

2020-04-30 Thread Thomas Lamprecht
On 4/30/20 10:04 AM, Mira Limbeck wrote:
> On 4/30/20 8:41 AM, Thomas Lamprecht wrote:
>> On 4/30/20 8:35 AM, Fabian Grünbichler wrote:
>>> On April 29, 2020 4:24 pm, Thomas Lamprecht wrote:
 Signed-off-by: Thomas Lamprecht 
 ---

 This was rather quickly assembled to fix an obvious issue, some in depth 
 look
 at this would be nice, @Fabi or @Fabian :)
>>> LGTM!
>>>
>> It really should be OK, but I'm still a bit wondering why 
>> "$self->{storage_migration}"
>> was true in my case, a VM with only a single disk on ceph..
>> Not that the reason for this can have other fallout.
> 
> I'm wondering that too. I tried reproducing it with qemu-server 6.1-14 and 
> could not (using an external ceph cluster as storage).
> 
> Did you test it with qemu-server 6.1-15 or an even newer version? Was the 
> disk in $local_volumes for some reason?
> 

Fabian investigated and got the reason, see:
https://pve.proxmox.com/pipermail/pve-devel/2020-April/043272.html




Re: [pve-devel] applied: [PATCH qemu-server 1/2] migrate: only stop NBD if we got a NBD url from the target

2020-04-30 Thread Mira Limbeck

On 4/30/20 8:41 AM, Thomas Lamprecht wrote:

On 4/30/20 8:35 AM, Fabian Grünbichler wrote:

On April 29, 2020 4:24 pm, Thomas Lamprecht wrote:

Signed-off-by: Thomas Lamprecht 
---

This was rather quickly assembled to fix an obvious issue, some in depth look
at this would be nice, @Fabi or @Fabian :)

LGTM!


It really should be OK, but I'm still a bit wondering why 
"$self->{storage_migration}"
was true in my case, a VM with only a single disk on ceph..
Not that the reason for this can have other fallout.


I'm wondering that too. I tried reproducing it with qemu-server 6.1-14 
and could not (using an external ceph cluster as storage).


Did you test it with qemu-server 6.1-15 or an even newer version? Was 
the disk in $local_volumes for some reason?





  PVE/QemuMigrate.pm | 5 -
  1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 7472e9d..7644922 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -750,6 +750,7 @@ sub phase2 {
my $targetdrive = $3;
$targetdrive =~ s/drive-//g;
  
+	$self->{stopnbd} = 1;

$self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
$self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
	} elsif ($line =~ m!^storage migration listens on nbd:unix:(/run/qemu-server/(\d+)_nbd\.migrate):exportname=(\S+) volume:(\S+)$!) {
@@ -760,6 +761,7 @@ sub phase2 {
my $targetdrive = $3;
$targetdrive =~ s/drive-//g;
  
+	$self->{stopnbd} = 1;

$self->{target_drive}->{$targetdrive}->{drivestr} = $drivestr;
$self->{target_drive}->{$targetdrive}->{nbd_uri} = $nbd_uri;
push @$tunnel_addr, "$nbd_unix_addr:$nbd_unix_addr";
@@ -1177,7 +1179,8 @@ sub phase3_cleanup {
  $self->switch_replication_job_target() if $self->{replicated_volumes};
  
  if ($self->{livemigration}) {

-   if ($self->{storage_migration}) {
+   if ($self->{stopnbd}) {
+	    $self->log('info', "stopping NBD storage migration server on target.");
# stop nbd server on remote vm - requirement for resume since 2.9
my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
  
--

2.20.1




[pve-devel] applied: [PATCH docs] installation: improve section for creating USB drive from Windows

2020-04-30 Thread Thomas Lamprecht
For those who don't even have the attention span to read four short and
straightforwardly written sentences..

Co-authored-by: Aaron Lauterer 
Signed-off-by: Thomas Lamprecht 
---
 pve-installation-media.adoc | 19 +--
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/pve-installation-media.adoc b/pve-installation-media.adoc
index 2c2eb0c..154d296 100644
--- a/pve-installation-media.adoc
+++ b/pve-installation-media.adoc
@@ -104,14 +104,21 @@ increase the write speed.
 Instructions for Windows
 
 
-Download Rufus from https://rufus.ie/. Either install it or use the portable
-version. Select the destination drive and the {pve} ISO file.
+Using Etcher
+
 
-Once you click 'Start' a dialog asking to download a different version of GRUB
-will show up. Click 'No'. In the next dialog select the 'DD' mode.
+Etcher works out of the box. Download Etcher from https://etcher.io. It will
+guide you through the process of selecting the ISO and your USB Drive.
 
-An alternative to Rufus is Etcher. Download Etcher from https://etcher.io. It
-will guide you through the process of selecting the ISO and your USB Drive.
+Using Rufus
+^^^
+
+Rufus is a more lightweight alternative. Download it from https://rufus.ie/.
+Either install Rufus or use the portable version. Select the destination drive
+and the {pve} ISO file.
+
+IMPORTANT: Once you click 'Start' a dialog asking to download a different version of
+GRUB will show up. Click 'No'. In the next dialog select the 'DD' mode.
 
 ifdef::wiki[]
 Boot your Server from the USB Flash Drive
-- 
2.20.1




Re: [pve-devel] [PATCH container] create_vm: fix order of config creation/reading/locking

2020-04-30 Thread Fabian Ebner

On 30.04.20 08:59, Fabian Grünbichler wrote:

On April 29, 2020 11:58 am, Fabian Ebner wrote:

The update_pct_config call leads to a write_config call and so the
configuration file was created before it was intended to be created.

When the CFS is updated in between the write_config call and the
PVE::Cluster::check_vmid_unused call in create_and_lock_config,
the container file would already exist and so creation would
fail after writing out a basically empty config.
(Meaning this is necessary for the proposed "update CFS after
obtaining a configuration file lock" changes)

Even worse, a race was possible for two containers created with the
same ID at the same time:
Assuming the initial PVE::Cluster::check_vmid_unused check in the
parameter verification passes for both create_vm calls, the later one
would potentially overwrite the earlier configuration file with its
update_pct_config call.

Additionally, the file read for $old_config was always the one written
by update_pct_config. Meaning that for a create_vm call with force=1,
already existing old volumes were not removed.

Signed-off-by: Fabian Ebner 


hmm, update_pct_config there was not intended to write out the config,
but just to fill the $conf hash.

three alternative approaches that would return to the original semantics:
1 skip hotplug/apply pending if pending section is empty (should always
  be the case in this code path)
2 add explicit parameter to skip
3 drop the hotplug/apply pending calls in update_pct_config, add them to
  the update_vm API endpoint instead.

I'd prefer 3 - 1 - 2 in that order ;)

3 would be called with the same semantics in both call sites (create_vm
and update_vm): here are add/delete/revert parameters, here's a conf
hash; merge those operating solely on the conf hash.



Ok. I'll send a v2 with approach number 3.


---

Note that this should be applied before [0] to avoid temporary
(further ;)) breakage of container creation.

[0]: https://pve.proxmox.com/pipermail/pve-devel/2020-April/043155.html

  src/PVE/API2/LXC.pm | 9 -
  1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/PVE/API2/LXC.pm b/src/PVE/API2/LXC.pm
index 148ba6a..46d2fd7 100644
--- a/src/PVE/API2/LXC.pm
+++ b/src/PVE/API2/LXC.pm
@@ -334,10 +334,6 @@ __PACKAGE__->register_method({
# check/activate default storage
&$check_and_activate_storage($storage) if !defined($mp_param->{rootfs});
  
-	PVE::LXC::Config->update_pct_config($vmid, $conf, 0, $no_disk_param);

-
-   $conf->{unprivileged} = 1 if $unprivileged;
-
my $emsg = $restore ? "unable to restore CT $vmid -" : "unable to create CT 
$vmid -";
  
  	eval { PVE::LXC::Config->create_and_lock_config($vmid, $force) };

@@ -346,8 +342,11 @@ __PACKAGE__->register_method({
my $code = sub {
my $old_conf = PVE::LXC::Config->load_config($vmid);
my $was_template;
-
my $vollist = [];
+
+	PVE::LXC::Config->update_pct_config($vmid, $conf, 0, $no_disk_param);
+   $conf->{unprivileged} = 1 if $unprivileged;
+
eval {
my $orig_mp_param; # only used if $restore
if ($restore) {
--
2.20.1




[pve-devel] [PATCH qemu-server] migrate: don't accidentally take NBD code paths

2020-04-30 Thread Fabian Grünbichler
by avoiding auto-vivification of $self->{online_local_volumes} via
iteration. Most code paths don't care whether it's undef or a reference
to an empty list, but this caused the (already fixed) bug of calling
nbd_stop without having started an NBD server in the first place.

Signed-off-by: Fabian Grünbichler 
---
This technically makes the previous NBD stop related patches no longer
needed - but let's keep them until we clean up the whole volume handling
properly to avoid falling into this trap again.
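
For context, a self-contained demonstration of the Perl pitfall the
patch works around (not qemu-server code):

    use strict;
    use warnings;

    my $self = {};

    # merely iterating over a dereferenced, undefined hash slot creates it:
    foreach my $volid (@{ $self->{online_local_volumes} }) { }

    # the slot now holds a reference to an empty array - and references are
    # truthy, so later checks like "if ($self->{online_local_volumes})" pass
    print defined($self->{online_local_volumes}) ? "vivified\n" : "still undef\n";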

 PVE/QemuMigrate.pm | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 7644922..d9b104c 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -713,10 +713,14 @@ sub phase2 {
 $input .= "nbd_protocol_version: $nbd_protocol_version\n";
 
 my $number_of_online_replicated_volumes = 0;
-foreach my $volid (@{$self->{online_local_volumes}}) {
-   next if !$self->{replicated_volumes}->{$volid};
-   $number_of_online_replicated_volumes++;
-   $input .= "replicated_volume: $volid\n";
+
+# prevent auto-vivification
+if ($self->{online_local_volumes}) {
+   foreach my $volid (@{$self->{online_local_volumes}}) {
+   next if !$self->{replicated_volumes}->{$volid};
+   $number_of_online_replicated_volumes++;
+   $input .= "replicated_volume: $volid\n";
+   }
 }
 
 my $target_replicated_volumes = {};
-- 
2.20.1

