[pve-devel] [PATCH] memory hotplug option is not hotpluggable
Signed-off-by: Alexandre Derumier aderum...@odiso.com --- PVE/QemuServer.pm |9 ++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm index 52b1a1c..556bbb7 100644 --- a/PVE/QemuServer.pm +++ b/PVE/QemuServer.pm @@ -3746,7 +3746,6 @@ sub set_migration_caps { my $fast_plug_option = { 'name' = 1, -'hotplug' = 1, 'onboot' = 1, 'shares' = 1, 'startup' = 1, @@ -3790,7 +3789,9 @@ sub vmconfig_hotplug_pending { foreach my $opt (@delete) { next if $selection !$selection-{$opt}; eval { - if ($opt eq 'tablet') { + if ($opt eq 'hotplug') { + die skip\n if ($conf-{hotplug} =~ /memory/); + } elsif ($opt eq 'tablet') { die skip\n if !$hotplug_features-{usb}; if ($defaults-{tablet}) { vm_deviceplug($storecfg, $conf, $vmid, $opt); @@ -3834,7 +3835,9 @@ sub vmconfig_hotplug_pending { next if $selection !$selection-{$opt}; my $value = $conf-{pending}-{$opt}; eval { - if ($opt eq 'tablet') { + if ($opt eq 'hotplug') { + die skip\n if ($value =~ /memory/) || ($value !~ /memory/ $conf-{hotplug} =~ /memory/); + } elsif ($opt eq 'tablet') { die skip\n if !$hotplug_features-{usb}; if ($value == 1) { vm_deviceplug($storecfg, $conf, $vmid, $opt); -- 1.7.10.4 ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] [PATCH] memory hotplug option is not hotpluggable
applied, thanks! ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] BUG: Firewall Alias not resolve correct !!!
*So, is that not possible to use numbers in the alias?* Thanks for the bug report. I just committed a fix for that: https://git.proxmox.com/?p=pve-firewall.git;a=commitdiff;h=04f5088f6b7338732c8071904b9be44f564b6c94 - Dietmar ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] [PATCH 3/5] api2 : node : add migrate_all
Signed-off-by: Alexandre Derumier aderum...@odiso.com --- PVE/API2/Nodes.pm | 102 + 1 file changed, 102 insertions(+) diff --git a/PVE/API2/Nodes.pm b/PVE/API2/Nodes.pm index 839fba3..9cb05f0 100644 --- a/PVE/API2/Nodes.pm +++ b/PVE/API2/Nodes.pm @@ -190,6 +190,7 @@ __PACKAGE__-register_method({ }, method = 'GET', proxyto = 'node', +proxyto = 'node', protected = 1, # openvz /proc entries are only readable by root description = Get user_beancounters failcnt for all active containers., parameters = { @@ -1277,6 +1278,7 @@ __PACKAGE__-register_method ({ path = 'startall', method = 'POST', protected = 1, +proxyto = 'node', description = Start all VMs and containers (when onboot=1)., parameters = { additionalProperties = 0, @@ -1403,6 +1405,7 @@ __PACKAGE__-register_method ({ path = 'stopall', method = 'POST', protected = 1, +proxyto = 'node', description = Stop all VMs and Containers., parameters = { additionalProperties = 0, @@ -1467,6 +1470,105 @@ __PACKAGE__-register_method ({ }}); +my $create_migrate_worker = sub { +my ($nodename, $type, $vmid, $target) = @_; + +my $upid; +if ($type eq 'openvz') { + my $online = PVE::OpenVZ::check_running($vmid) ? 1 : 0; + print STDERR Migrating CT $vmid\n; + $upid = PVE::API2::OpenVZ-migrate_vm({node = $nodename, vmid = $vmid, target = $target, + online = $online }); +} elsif ($type eq 'qemu') { + my $online = PVE::QemuServer::check_running($vmid, 1) ? 
1 : 0; + print STDERR Migrating VM $vmid\n; + $upid = PVE::API2::Qemu-migrate_vm({node = $nodename, vmid = $vmid, target = $target, + online = $online }); +} else { + die unknown VM type '$type'\n; +} + +my $res = PVE::Tools::upid_decode($upid); + +return $res-{pid}; +}; + +__PACKAGE__-register_method ({ +name = 'migrateall', +path = 'migrateall', +method = 'POST', +proxyto = 'node', +protected = 1, +description = Migrate all VMs and Containers., +parameters = { + additionalProperties = 0, + properties = { + node = get_standard_option('pve-node'), +target = get_standard_option('pve-node', { description = Target node. }), +maxworkers = { +description = Max parralel migration job., +type = 'integer', +minimum = 1 +}, + }, +}, +returns = { + type = 'string', +}, +code = sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv-get_user(); + + my $nodename = $param-{node}; + $nodename = PVE::INotify::nodename() if $nodename eq 'localhost'; + +my $target = $param-{target}; +my $maxWorkers = $param-{maxworkers}; + + my $code = sub { + + $rpcenv-{type} = 'priv'; # to start tasks in background + + my $migrateList = $get_start_stop_list($nodename); + + foreach my $order (sort {$b = $a} keys %$migrateList) { + my $vmlist = $migrateList-{$order}; + my $workers = {}; + foreach my $vmid (sort {$b = $a} keys %$vmlist) { + my $d = $vmlist-{$vmid}; + my $pid; + eval { $pid = $create_migrate_worker($nodename, $d-{type}, $vmid, $target); }; + warn $@ if $@; + next if !$pid; + + $workers-{$pid} = 1; + while (scalar(keys %$workers) = $maxWorkers) { + foreach my $p (keys %$workers) { + if (!PVE::ProcFSTools::check_process_running($p)) { + delete $workers-{$p}; + } + } + sleep(1); + } + } + while (scalar(keys %$workers)) { + foreach my $p (keys %$workers) { + if (!PVE::ProcFSTools::check_process_running($p)) { + delete $workers-{$p}; + } + } + sleep(1); + } + } + return; + }; + + return $rpcenv-fork_worker('migrateall', undef, $authuser, $code); + 
+}}); + package PVE::API2::Nodes; use strict; -- 1.7.10.4 ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] [PATCH 4/5] add migrate_all form
Signed-off-by: Alexandre Derumier aderum...@odiso.com --- www/manager/Makefile |1 + www/manager/window/MigrateAll.js | 84 ++ 2 files changed, 85 insertions(+) create mode 100644 www/manager/window/MigrateAll.js diff --git a/www/manager/Makefile b/www/manager/Makefile index 6ff03cb..9ffe3b4 100644 --- a/www/manager/Makefile +++ b/www/manager/Makefile @@ -113,6 +113,7 @@ JSSRC= \ node/Config.js \ qemu/StatusView.js \ window/Migrate.js \ + window/MigrateAll.js\ qemu/Monitor.js \ qemu/Summary.js \ qemu/OSTypeEdit.js \ diff --git a/www/manager/window/MigrateAll.js b/www/manager/window/MigrateAll.js new file mode 100644 index 000..fb677f5 --- /dev/null +++ b/www/manager/window/MigrateAll.js @@ -0,0 +1,84 @@ +Ext.define('PVE.window.MigrateAll', { +extend: 'Ext.window.Window', + +resizable: false, + +migrate: function(target, maxworkers) { + var me = this; + PVE.Utils.API2Request({ + params: { target: target, maxworkers: maxworkers}, + url: '/nodes/' + me.nodename + '/' + /migrateall, + waitMsgTarget: me, + method: 'POST', + failure: function(response, opts) { + Ext.Msg.alert('Error', response.htmlStatus); + }, + success: function(response, options) { + var upid = response.result.data; + + var win = Ext.create('PVE.window.TaskViewer', { + upid: upid + }); + win.show(); + me.close(); + } + }); +}, + +initComponent : function() { + var me = this; + + if (!me.nodename) { + throw no node name specified; + } + + me.formPanel = Ext.create('Ext.form.Panel', { + bodyPadding: 10, + border: false, + fieldDefaults: { + labelWidth: 100, + anchor: '100%' + }, + items: [ + { + xtype: 'PVE.form.NodeSelector', + name: 'target', + fieldLabel: 'Target node', + allowBlank: false, + onlineValidator: true + }, + { + xtype: 'numberfield', + name: 'maxworkers', + minValue: 1, + maxValue: 100, + value: 1, + fieldLabel: 'Parallel jobs', + allowBlank: false + }, + ] + }); + + var form = me.formPanel.getForm(); + + var submitBtn = Ext.create('Ext.Button', { + text: 'Migrate', + handler: function() { 
+ var values = form.getValues(); + me.migrate(values.target, values.maxworkers); + } + }); + + Ext.apply(me, { + title: Migrate All VMs, + width: 350, + modal: true, + layout: 'auto', + border: false, + items: [ me.formPanel ], + buttons: [ submitBtn ] + }); + + me.callParent(); +} +}); -- 1.7.10.4 ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] pve-manager : vms maintenance (migrateall|stopall|startall)
This is an old patch series that we have been using at work for 2 years now. I received a request for this during last week's training session ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] [PATCH 2/5] api2: node : startall : add force option
force start if onboot = 0 Signed-off-by: Alexandre Derumier aderum...@odiso.com --- PVE/API2/Nodes.pm | 11 +-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/PVE/API2/Nodes.pm b/PVE/API2/Nodes.pm index 5ffb277..839fba3 100644 --- a/PVE/API2/Nodes.pm +++ b/PVE/API2/Nodes.pm @@ -1282,6 +1282,11 @@ __PACKAGE__-register_method ({ additionalProperties = 0, properties = { node = get_standard_option('pve-node'), + force = { + optional = 1, + type = 'boolean', + description = force if onboot=0., + }, }, }, returns = { @@ -1296,6 +1301,8 @@ __PACKAGE__-register_method ({ my $nodename = $param-{node}; $nodename = PVE::INotify::nodename() if $nodename eq 'localhost'; + my $force = $param-{force}; + my $code = sub { $rpcenv-{type} = 'priv'; # to start tasks in background @@ -1305,8 +1312,8 @@ __PACKAGE__-register_method ({ last if PVE::Cluster::check_cfs_quorum($i != 0 ? 1 : 0); sleep(1); } - - my $startList = $get_start_stop_list($nodename, 1); + my $autostart = $force ? undef : 1; + my $startList = $get_start_stop_list($nodename, $autostart); # Note: use numeric sorting with = foreach my $order (sort {$a = $b} keys %$startList) { -- 1.7.10.4 ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] [PATCH 1/5] add stopall/startall vm buttons
Signed-off-by: Alexandre Derumier aderum...@odiso.com --- www/manager/node/Config.js | 33 - 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/www/manager/node/Config.js b/www/manager/node/Config.js index c351ffd..0992e15 100644 --- a/www/manager/node/Config.js +++ b/www/manager/node/Config.js @@ -29,6 +29,37 @@ Ext.define('PVE.node.Config', { }); }; + var startallvmBtn = Ext.create('PVE.button.Button', { + text: gettext('Start All VMs'), + confirmMsg: Ext.String.format(gettext(Do you really want to start all Vms on node {0}?), nodename), + handler: function() { + PVE.Utils.API2Request({ + params: { force: 1 }, + url: '/nodes/' + nodename + '/startall', + method: 'POST', + waitMsgTarget: me, + failure: function(response, opts) { + Ext.Msg.alert('Error', response.htmlStatus); + } + }); + } + }); + + var stopallvmBtn = Ext.create('PVE.button.Button', { + text: gettext('Stop All VMs'), + confirmMsg: Ext.String.format(gettext(Do you really want to stop all Vms on node {0}?), nodename), + handler: function() { + PVE.Utils.API2Request({ + url: '/nodes/' + nodename + '/stopall', + method: 'POST', + waitMsgTarget: me, + failure: function(response, opts) { + Ext.Msg.alert('Error', response.htmlStatus); + } + }); + } + }); + var restartBtn = Ext.create('PVE.button.Button', { text: gettext('Restart'), disabled: !caps.nodes['Sys.PowerMgmt'], @@ -60,7 +91,7 @@ Ext.define('PVE.node.Config', { title: gettext('Node') + ' + nodename + ', hstateid: 'nodetab', defaults: { statusStore: me.statusStore }, - tbar: [ restartBtn, shutdownBtn, shellBtn ] + tbar: [ startallvmBtn, stopallvmBtn, restartBtn, shutdownBtn, shellBtn ] }); if (caps.nodes['Sys.Audit']) { -- 1.7.10.4 ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] [PATCH 5/5] add migration_all button
Signed-off-by: Alexandre Derumier aderum...@odiso.com --- www/manager/node/Config.js | 13 - 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/www/manager/node/Config.js b/www/manager/node/Config.js index 0992e15..7e0d04a 100644 --- a/www/manager/node/Config.js +++ b/www/manager/node/Config.js @@ -60,6 +60,17 @@ Ext.define('PVE.node.Config', { } }); + var migrateallvmBtn = Ext.create('PVE.button.Button', { + text: gettext('Migrate All VMs'), + handler: function() { +var win = Ext.create('PVE.window.MigrateAll', { +nodename: nodename, +}); +win.show(); +me.mon(win, 'close', me.reload, me); + } + }); + var restartBtn = Ext.create('PVE.button.Button', { text: gettext('Restart'), disabled: !caps.nodes['Sys.PowerMgmt'], @@ -91,7 +102,7 @@ Ext.define('PVE.node.Config', { title: gettext('Node') + ' + nodename + ', hstateid: 'nodetab', defaults: { statusStore: me.statusStore }, - tbar: [ startallvmBtn, stopallvmBtn, restartBtn, shutdownBtn, shellBtn ] + tbar: [ startallvmBtn, stopallvmBtn, migrateallvmBtn, restartBtn, shutdownBtn, shellBtn ] }); if (caps.nodes['Sys.Audit']) { -- 1.7.10.4 ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] [PATCH] enable debug package for pve-cluster
On Mon, 9 Feb 2015 19:07:31 +0100 (CET) Dietmar Maurer diet...@proxmox.com wrote: So there must be something wrong with that clog data structure. Will dig deeper tomorrow. Looks like an instance of the struct is freed too early so that the cast fails. -- Hilsen/Regards Michael Rasmussen Get my public GnuPG keys: michael at rasmussen dot cc http://pgp.mit.edu:11371/pks/lookup?op=getsearch=0xD3C9A00E mir at datanom dot net http://pgp.mit.edu:11371/pks/lookup?op=getsearch=0xE501F51C mir at miras dot org http://pgp.mit.edu:11371/pks/lookup?op=getsearch=0xE3E80917 -- /usr/games/fortune -es says: A thing is not necessarily true because a man dies for it. -- Oscar Wilde, The Portrait of Mr. W. H. pgp40v90Ukda6.pgp Description: OpenPGP digital signature ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] [PATCH] enable debug package for pve-cluster
Am 09.02.2015 um 17:43 schrieb Dietmar Maurer: I re-compiled the package with same compiler and libs, so maybe it generate the same code. Running addr2line: # addr2line -e /usr/bin/pmxcfs 0041a730 /home/dietmar/pve2-devel/pve-cluster/build/src/logger.c:314 what IP did you get at the second crash? Here are some more lines: pmxcfs[4156]: segfault at 21341873 ip 0041aa9c sp 7f7057a3f930 error 4 in pmxcfs[40+25000] pmxcfs[37420]: segfault at 634824b0 ip 0041aa9c sp 7f865d60f930 error 4 in pmxcfs[40+25000] pmxcfs[26353]: segfault at 630ce86c ip 0041a730 sp 7fd9c146f3b8 error 4 in pmxcfs[40+25000] pmxcfs[1972]: segfault at 6283b6ac ip 0041a730 sp 7fd697c6f3b8 error 4 in pmxcfs[40+25000] pmxcfs[4272]: segfault at 213c379f ip 0041a730 sp 7f22b4c943b8 error 4 in pmxcfs[40+25000] pmxcfs[4162]: segfault at 2196483f ip 0041a730 sp 7f83afdc23b8 error 4 in pmxcfs[40+25000] The ip is always: 41a730 or 41aa9c Stefan ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] Debian Appliance Builder exec: cd: not found
I am trying to build an appliance that requires some packages to be built from source. When I try to change the working directory with : dab exec cd /usr/local/src I get : /sbin/defenv: 10: exec: cd: not found If I use: dab exec /bin/sh -c cd /usr/local/src; pwd it returns: / I am trying to change the working directory so I can run make && make install. What is the correct way to do this? ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] crashes of pmxcfs
Hi Dietmar, i can build a debug package on my own to debug this issue. I just don't know how to pass -DDEBUG to gcc with your makefile / configure script. Can you tell me? Stefan Am 09.02.2015 um 11:30 schrieb Stefan Priebe - Profihost AG: Am 09.02.2015 um 08:50 schrieb Stefan Priebe - Profihost AG: Am 07.02.2015 um 15:55 schrieb Dietmar Maurer: THis won't help as my build and your build will differ. SO the kernel message a have from the crash will not be applyable with gdb to my debug symbols. Sure, you also need to reproduce the bug :-( I guess we should really provide a debug package for such problems. Yeah then it's not neccessary to reproduce that one. We can just grab the line where it happen. Stefan Today i had the same crash again. pmxcfs was not running anymore caused by a segfault. Stefan ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] [PATCH] zfs: auto import after reboot
On 02/03/2015 12:59 PM, Wolfgang Link wrote: @@ -419,6 +419,14 @@ sub volume_snapshot_rollback { sub activate_storage { my ($class, $storeid, $scfg, $cache) = @_; + +my @param = ('-o', 'name', '-H'); + +my $text = zfs_request($class, $scfg, undef, 'zpool_list', @param); + +if ($text !~ $scfg-{pool}) { + run_command(zpool import -d /dev/disk/by-id/ -a); +} return 1; } activate_storage is inherted by ZFSPlugin, so we cannot do that! So please overwrite it inside ZFSPlugin.pm. ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] crashes of pmxcfs
Am 09.02.2015 um 12:24 schrieb Alexandre DERUMIER: I just don't know how to pass -DDEBUG to gcc with your makefile / configure script. I think it need to be done in debian/rules override_dh_auto_configure: ./autogen.sh ./configure --prefix=/usr sure but it does not help - it seems the configure and or autogen scripts are not prepared for debugging symbols ;-( I know how to debug but never created them my self. configure does not know enable-debug Stefan - Mail original - De: Stefan Priebe s.pri...@profihost.ag À: dietmar diet...@proxmox.com, pve-devel pve-devel@pve.proxmox.com Envoyé: Lundi 9 Février 2015 12:20:14 Objet: Re: [pve-devel] crashes of pmxcfs Hi Dietmar, i can build a debug package on my own to debug this issue. I just don't know how to pass -DDEBUG to gcc with your makefile / configure script. Can you tell me? Stefan Am 09.02.2015 um 11:30 schrieb Stefan Priebe - Profihost AG: Am 09.02.2015 um 08:50 schrieb Stefan Priebe - Profihost AG: Am 07.02.2015 um 15:55 schrieb Dietmar Maurer: THis won't help as my build and your build will differ. SO the kernel message a have from the crash will not be applyable with gdb to my debug symbols. Sure, you also need to reproduce the bug :-( I guess we should really provide a debug package for such problems. Yeah then it's not neccessary to reproduce that one. We can just grab the line where it happen. Stefan Today i had the same crash again. pmxcfs was not running anymore caused by a segfault. Stefan ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] [PATCH] zfs: auto import after reboot
On 02/09/2015 12:24 PM, Wolfgang Link wrote: ctivate_storage is inherted by ZFSPlugin, so we cannot do that! So please overwrite it inside ZFSPlugin.pm. ahead in this patch I override it in ZFSPlugin.pm Sorry, you are correct! I just applied your patch. ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] [PATCH] enable debug package for pve-cluster
Signed-off-by: Stefan Priebe s.pri...@profihost.ag --- debian/control |5 + debian/rules |6 ++ 2 files changed, 11 insertions(+) diff --git a/debian/control b/debian/control index b4375a6..1be793a 100644 --- a/debian/control +++ b/debian/control @@ -12,3 +12,8 @@ Description: Cluster Infrastructure for Proxmox Virtual Environment This package contains the Cluster Infrastructure for the Proxmox Virtual Environment, namely a distributed filesystem to store configuration data on all nodes. + +Package: pve-cluster-dbg +Architecture: any +Description: debug Cluster Infrastructure for Proxmox Virtual Environment + diff --git a/debian/rules b/debian/rules index a433c17..8cae8e7 100755 --- a/debian/rules +++ b/debian/rules @@ -12,6 +12,8 @@ BUILDROOT := $(shell pwd) %: dh $@ +override_dh_strip: + dh_strip --dbg-package=pve-cluster-dbg override_dh_auto_configure: @@ -19,6 +21,10 @@ override_dh_auto_configure: ./configure --prefix=/usr +override_dh_auto_install: + + make -j1 install DESTDIR=../debian/pve-cluster + override_dh_install: dh_install -- 1.7.10.4 ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] [PATCH] enable debug package for pve-cluster
Signed-off-by: Stefan Priebe s.pri...@profihost.ag --- debian/control |5 + debian/rules |6 ++ 2 files changed, 11 insertions(+) diff --git a/debian/control b/debian/control index b4375a6..1be793a 100644 --- a/debian/control +++ b/debian/control @@ -12,3 +12,8 @@ Description: Cluster Infrastructure for Proxmox Virtual Environment This package contains the Cluster Infrastructure for the Proxmox Virtual Environment, namely a distributed filesystem to store configuration data on all nodes. + +Package: pve-cluster-dbg +Architecture: any +Description: debug Cluster Infrastructure for Proxmox Virtual Environment + diff --git a/debian/rules b/debian/rules index a433c17..8cae8e7 100755 --- a/debian/rules +++ b/debian/rules @@ -12,6 +12,8 @@ BUILDROOT := $(shell pwd) %: dh $@ +override_dh_strip: + dh_strip --dbg-package=pve-cluster-dbg override_dh_auto_configure: @@ -19,6 +21,10 @@ override_dh_auto_configure: ./configure --prefix=/usr +override_dh_auto_install: + + make -j1 install DESTDIR=../debian/pve-cluster + override_dh_install: dh_install -- 1.7.10.4 ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] crashes of pmxcfs
I just don't know how to pass -DDEBUG to gcc with your makefile / configure script. I think it need to be done in debian/rules override_dh_auto_configure: ./autogen.sh ./configure --prefix=/usr - Mail original - De: Stefan Priebe s.pri...@profihost.ag À: dietmar diet...@proxmox.com, pve-devel pve-devel@pve.proxmox.com Envoyé: Lundi 9 Février 2015 12:20:14 Objet: Re: [pve-devel] crashes of pmxcfs Hi Dietmar, i can build a debug package on my own to debug this issue. I just don't know how to pass -DDEBUG to gcc with your makefile / configure script. Can you tell me? Stefan Am 09.02.2015 um 11:30 schrieb Stefan Priebe - Profihost AG: Am 09.02.2015 um 08:50 schrieb Stefan Priebe - Profihost AG: Am 07.02.2015 um 15:55 schrieb Dietmar Maurer: THis won't help as my build and your build will differ. SO the kernel message a have from the crash will not be applyable with gdb to my debug symbols. Sure, you also need to reproduce the bug :-( I guess we should really provide a debug package for such problems. Yeah then it's not neccessary to reproduce that one. We can just grab the line where it happen. Stefan Today i had the same crash again. pmxcfs was not running anymore caused by a segfault. Stefan ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
Re: [pve-devel] [PATCH] zfs: auto import after reboot
On 02/09/2015 12:07 PM, Dietmar Maurer wrote: On 02/03/2015 12:59 PM, Wolfgang Link wrote: @@ -419,6 +419,14 @@ sub volume_snapshot_rollback { sub activate_storage { my ($class, $storeid, $scfg, $cache) = @_; + +my @param = ('-o', 'name', '-H'); + +my $text = zfs_request($class, $scfg, undef, 'zpool_list', @param); + +if ($text !~ $scfg-{pool}) { +run_command(zpool import -d /dev/disk/by-id/ -a); +} return 1; } activate_storage is inherted by ZFSPlugin, so we cannot do that! So please overwrite it inside ZFSPlugin.pm. ahead in this patch I override it in ZFSPlugin.pm Is this wrong? ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel
[pve-devel] [PATCH] memory form : hotplug improvements
we need to change memory field step dynamically, and also map values to dimm size. Fixme : apply same behaviour for maxmemory field Signed-off-by: Alexandre Derumier aderum...@odiso.com --- www/manager/qemu/HardwareView.js |4 ++ www/manager/qemu/MemoryEdit.js | 112 +++--- 2 files changed, 107 insertions(+), 9 deletions(-) diff --git a/www/manager/qemu/HardwareView.js b/www/manager/qemu/HardwareView.js index 99a7157..ba986c4 100644 --- a/www/manager/qemu/HardwareView.js +++ b/www/manager/qemu/HardwareView.js @@ -111,6 +111,9 @@ Ext.define('PVE.qemu.HardwareView', { }, balloon: { visible: false + }, + hotplug: { + visible: false } }; @@ -216,6 +219,7 @@ Ext.define('PVE.qemu.HardwareView', { var win = Ext.create(editor, { pveSelNode: me.pveSelNode, confid: rec.data.key, + hotplug: me.getObjectValue('hotplug'), url: '/api2/extjs/' + baseurl }); diff --git a/www/manager/qemu/MemoryEdit.js b/www/manager/qemu/MemoryEdit.js index 2ed6bb0..3bad580 100644 --- a/www/manager/qemu/MemoryEdit.js +++ b/www/manager/qemu/MemoryEdit.js @@ -29,9 +29,10 @@ Ext.define('PVE.qemu.MemoryInputPanel', { initComponent : function() { var me = this; - var labelWidth = 160; + var hotplug = me.hotplug; + var items = [ { xtype: 'radiofield', @@ -55,13 +56,94 @@ Ext.define('PVE.qemu.MemoryInputPanel', { xtype: 'numberfield', name: 'memory', minValue: 32, - maxValue: 512*1024, + maxValue: 4096*1024, value: '512', step: 32, fieldLabel: gettext('Memory') + ' (MB)', labelAlign: 'right', labelWidth: labelWidth, - allowBlank: false + allowBlank: false, + listeners: { +change: function(f, value, oldvalue) { + var me = this; + + if(!hotplug) { + return; + } + + //fill an array with dimms size + var dimmarray = new Array (255); + var dimm_size = 512; + var current_size = 1024; + var i; + var j; + var dimm_id = 0; + for (j = 0; j 8; j++) { + for (i = 0; i 32; i++) { + dimmarray[dimm_id] = current_size; + current_size += dimm_size; + dimm_id++; + } + dimm_size *= 2; + } + //find nearest value in array + var 
k = 0, closest, closestDiff, currentDiff + closest = dimmarray[0]; + for(k; k dimmarray.length;k++) { + closestDiff = Math.abs(value - closest); + currentDiff = Math.abs(value - dimmarray[k]); + if(currentDiff closestDiff) { + closest = dimmarray[k]; + } + closestDiff = null; + currentDiff = null; + } + if(value != closest){ + value = closest; + } + f.setValue(value); + + //dynamic step + if(value oldvalue) { + if(value 16384) { + me.step = 512; + } else if(value = 16384 value 49152) { + me.step = 1024; + } else if (value = 49152 value 114688) { + me.step = 2048; + } else if (value = 114688 value 245760) { + me.step = 4096; + } else if (value = 245760 value 507904) { + me.step = 8192; + } else if (value = 507904 value 1032192) { + me.step = 16384; + } else if (value = 1032192 value 2080768) { + me.step = 32768; + } else if (value = 2080768 value 4177920) { + me.step = 65536; + } + } else if (value oldvalue) { + if(value = 16384) { + me.step = 512; +
Re: [pve-devel] Debian Appliance Builder exec: cd: not found
Instead, you should use make with the -C option On February 9, 2015 at 9:32 PM Wendel Toews wen...@dynamo-electric.com wrote: I am trying to build a appliance that requires some packages to be built from source. When I try to change the working directory with : dab exec cd /usr/local/src I get : /sbin/defenv: 10: exec: cd: not found If I use: dab exec /bin/sh -c cd /usr/local/src; pwd it returns: / I am trying to change the working directory so I can run make make install. What is the correct way to do this? ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel ___ pve-devel mailing list pve-devel@pve.proxmox.com http://pve.proxmox.com/cgi-bin/mailman/listinfo/pve-devel