[pve-devel] [PATCH kernel] add pve-kernel-X.Y-libc-dev package

2020-05-27 Thread Thomas Lamprecht
This was long overdue; it allows tools that use the Linux API directly to
access the full feature set of our kernel.

Packaging mostly taken from Debian[0]

[0]: 
https://salsa.debian.org/kernel-team/linux/-/blob/debian/4.19.118-2/debian/rules.real#L367

Signed-off-by: Thomas Lamprecht 
---

The package name could probably be better; I just took the first thing that came
to mind. Also, the approach of putting the kernel MAJ.MIN version in there,
using the kernel package version, or using no version at all should be evaluated.

I'd guess no version could be preferred, as it should be backwards compatible
anyway (never break userspace™), so the newest one is always wanted.

Note: This was working really quickly, almost suspiciously so... Tested by
building QEMU (which inspired me to do this now in the first place, due to the
sizeof bug we hit with Debian's plain linux-libc-dev package on build).
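
As a rough illustration (not part of this patch) of the kind of consumer such
a package serves: tools that use the Linux API directly compile against the
kernel's exported UAPI headers, so the ioctls and struct layouts they see come
from whichever linux-libc-dev variant is installed. A minimal, hypothetical C
example (the program is mine, not from the patch):

/* Illustration only: a userspace tool built against the kernel UAPI headers
 * (here <linux/kvm.h>), as shipped by linux-libc-dev or this new package. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    if (kvm < 0) {
        perror("open /dev/kvm");
        return 1;
    }
    /* KVM_GET_API_VERSION is defined in the installed UAPI headers */
    printf("KVM API version: %d\n", ioctl(kvm, KVM_GET_API_VERSION, 0));
    close(kvm);
    return 0;
}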

 debian/control.in | 12 
 debian/rules  | 22 --
 2 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/debian/control.in b/debian/control.in
index 9b807c1d40c5..c457564eafe9 100644
--- a/debian/control.in
+++ b/debian/control.in
@@ -69,3 +69,15 @@ Depends: busybox,
 Recommends: grub-pc | grub-efi-amd64 | grub-efi-ia32 | grub-efi-arm64,
 Description: The Proxmox PVE Kernel Image
  This package contains the linux kernel and initial ramdisk used for booting
+
+Package: pve-kernel-@KVMAJMIN@-libc-dev
+Section: devel
+Priority: optional
+Architecture: any
+Provides: linux-libc-dev,
+Conflicts: linux-libc-dev,
+Replaces: linux-libc-dev,
+Depends: ${misc:Depends}
+Description: Linux support headers for userspace development
+ This package provides userspaces headers from the Linux kernel.  These headers
+ are used by the installed headers for GNU libc and other system libraries.
diff --git a/debian/rules b/debian/rules
index e530eb548707..dc839b127507 100755
--- a/debian/rules
+++ b/debian/rules
@@ -15,6 +15,7 @@ CHANGELOG_DATE:=$(shell dpkg-parsechangelog -SDate)
 
 PVE_KERNEL_PKG=pve-kernel-${KVNAME}
 PVE_HEADER_PKG=pve-headers-${KVNAME}
+PVE_USR_HEADER_PKG=pve-kernel-${KERNEL_MAJMIN}-libc-dev
 LINUX_TOOLS_PKG=linux-tools-${KERNEL_MAJMIN}
 KERNEL_SRC_COPY=${KERNEL_SRC}_tmp
 
@@ -87,7 +88,7 @@ debian/control: $(wildcard debian/*.in)
 
 build: .compile_mark .tools_compile_mark .modules_compile_mark
 
-install: .install_mark .tools_install_mark .headers_install_mark
+install: .install_mark .tools_install_mark .headers_install_mark .usr_headers_install_mark
dh_installdocs -A debian/copyright debian/SOURCE
dh_installchangelogs
dh_installman
@@ -97,7 +98,7 @@ install: .install_mark .tools_install_mark .headers_install_mark
 
 binary: install
debian/rules fwcheck abicheck
-   dh_strip -N${PVE_HEADER_PKG}
+   dh_strip -N${PVE_HEADER_PKG} -N${PVE_USR_HEADER_PKG}
dh_makeshlibs
dh_shlibdeps
dh_installdeb
@@ -207,6 +208,23 @@ binary: install
ln -sf /usr/src/linux-headers-${KVNAME} debian/${PVE_HEADER_PKG}/lib/modules/${KVNAME}/build
touch $@
 
+.usr_headers_install_mark: PKG_DIR = debian/${PVE_USR_HEADER_PKG}
+.usr_headers_install_mark: OUT_DIR = ${PKG_DIR}/usr
+.usr_headers_install_mark: .config_mark
+   rm -rf '${PKG_DIR}'
+   mkdir -p  '${PKG_DIR}'
+   $(MAKE) -C ${KERNEL_SRC} headers_check ARCH=$(KERNEL_HEADER_ARCH)
+   $(MAKE) -C ${KERNEL_SRC} headers_install ARCH=$(KERNEL_HEADER_ARCH) INSTALL_HDR_PATH='$(CURDIR)'/$(OUT_DIR)
+   rm -rf $(OUT_DIR)/include/drm $(OUT_DIR)/include/scsi
+   find $(OUT_DIR)/include \( -name .install -o -name ..install.cmd \) -execdir rm {} +
+
+# Move include/asm to arch-specific directory
+   mkdir -p $(OUT_DIR)/include/$(DEB_HOST_MULTIARCH)
+   mv $(OUT_DIR)/include/asm $(OUT_DIR)/include/$(DEB_HOST_MULTIARCH)/
+   test ! -d $(OUT_DIR)/include/arch || \
+   mv $(OUT_DIR)/include/arch $(OUT_DIR)/include/$(DEB_HOST_MULTIARCH)/
+   touch $@
+
 .modules_compile_mark: ${MODULES}/zfs.ko
touch $@
 
-- 
2.20.1




[pve-devel] [pve-network] vlan: ovs: use dot1q-tunnel when vlanaware is enabled

2020-05-27 Thread Alexandre Derumier
Signed-off-by: Alexandre Derumier 
---
 PVE/Network/SDN/Zones/VlanPlugin.pm | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/PVE/Network/SDN/Zones/VlanPlugin.pm b/PVE/Network/SDN/Zones/VlanPlugin.pm
index 8364451..987c553 100644
--- a/PVE/Network/SDN/Zones/VlanPlugin.pm
+++ b/PVE/Network/SDN/Zones/VlanPlugin.pm
@@ -61,7 +61,12 @@ sub generate_sdn_config {
@iface_config = ();
push @iface_config, "ovs_type OVSIntPort";
push @iface_config, "ovs_bridge $bridge";
-   push @iface_config, "ovs_options tag=$tag";
+   if($vnet->{vlanaware}) {
+   push @iface_config, "ovs_options vlan_mode=dot1q-tunnel tag=$tag";
+   } else {
+   push @iface_config, "ovs_options tag=$tag";
+   }
+
push(@{$config->{$vnet_uplink}}, @iface_config) if !$config->{$vnet_uplink};
 
@iface_config = ();
-- 
2.20.1



Re: [pve-devel] [PATCH v2 pve-manager] api, ui: allow to remove subscription

2020-05-27 Thread Thomas Lamprecht
On 5/26/20 9:39 PM, Moayad Almalat wrote:
> Signed-off-by: Moayad Almalat 
> ---
>  PVE/API2/Subscription.pm  | 24 
>  PVE/CLI/pvesubscription.pm|  1 +
>  www/manager6/node/Subscription.js |  9 +
>  3 files changed, 34 insertions(+)
> 

Applied, with some comments inline. Please now also fix the corresponding patch
for PMG, thanks!

> diff --git a/PVE/API2/Subscription.pm b/PVE/API2/Subscription.pm
> index 6657c00d..e1a9c31c 100644
> --- a/PVE/API2/Subscription.pm
> +++ b/PVE/API2/Subscription.pm
> @@ -245,4 +245,28 @@ __PACKAGE__->register_method ({
>   return undef;
>  }});
>  
> +__PACKAGE__->register_method ({
> +name => 'delete',
> +path => '',
> +method => 'DELETE',
> +permissions => {
> + check => ['perm', '/nodes/{node}', [ 'Sys.Modify' ]],
> +},
> +description => "Set subscription key.",

Not "Set" but "Delete" ;-) I made a followup fix for this.

> +proxyto => 'node',
> +protected => 1,
> +parameters => {
> + additionalProperties => 0,

Wrong indentation; you probably got that one, and the wrong "description"
above, from basing this all on the 'set' API endpoint definition.

> + properties => {
> + node => get_standard_option('pve-node'),
> + },
> +},
> +returns => { type => 'null'},
> +code => sub {
> + my $subscription_file = '/etc/subscription';
> + return if ! -e $subscription_file; 

Trailing whitespace at the end of the above line.

> + unlink($subscription_file) or die "cannot delete subscription key: $!";
> + return undef;
> +}});
> +
>  1;
> diff --git a/PVE/CLI/pvesubscription.pm b/PVE/CLI/pvesubscription.pm
> index cd81c415..751dde58 100755
> --- a/PVE/CLI/pvesubscription.pm
> +++ b/PVE/CLI/pvesubscription.pm
> @@ -28,6 +28,7 @@ our $cmddef = {
>}
>}],
>  set => [ 'PVE::API2::Subscription', 'set', ['key'], { node => $nodename } ],
> +delete => [ 'PVE::API2::Subscription', 'delete', undef, { node => $nodename } ],
>  };
>  
>  1;
> diff --git a/www/manager6/node/Subscription.js b/www/manager6/node/Subscription.js
> index e4a35874..15319429 100644
> --- a/www/manager6/node/Subscription.js
> +++ b/www/manager6/node/Subscription.js
> @@ -163,6 +163,15 @@ Ext.define('PVE.node.Subscription', {
>   win.on('destroy', reload);
>   }
>   },
> + {
> + text: gettext('Remove Subscription'),
> + xtype: 'proxmoxStdRemoveButton',
> + confirmMsg: gettext('Are you sure to remove the subscription key?'),
> + baseurl: baseurl,
> + dangerous: true,
> + selModel: false,
> + callback: reload,
> + },
>   {
>   text: gettext('Check'),
>   handler: function() {
> 




[pve-devel] [PATCH qemu 4/4] savevm-async: add debug timing prints

2020-05-27 Thread Stefan Reiter
Signed-off-by: Stefan Reiter 
---

Doesn't have to be applied, but I thought I'd send it along anyway, since it
helped me test patch 3 greatly.

 savevm-async.c | 16 
 1 file changed, 16 insertions(+)

diff --git a/savevm-async.c b/savevm-async.c
index 4ce83a0691..8848884593 100644
--- a/savevm-async.c
+++ b/savevm-async.c
@@ -202,6 +202,8 @@ static void process_savevm_finalize(void *opaque)
 AioContext *iohandler_ctx = iohandler_get_aio_context();
 MigrationState *ms = migrate_get_current();
 
+int64_t start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+
 qemu_bh_delete(snap_state.finalize_bh);
 snap_state.finalize_bh = NULL;
 snap_state.co = NULL;
@@ -226,6 +228,8 @@ static void process_savevm_finalize(void *opaque)
 }
 
 DPRINTF("state saving complete\n");
+DPRINTF("timing: process_savevm_finalize (state saving) took %ld ms\n",
+qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
 
 /* clear migration state */
migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP,
@@ -247,6 +251,9 @@ static void process_savevm_finalize(void *opaque)
 vm_start();
 snap_state.saved_vm_running = false;
 }
+
+DPRINTF("timing: process_savevm_finalize (full) took %ld ms\n",
+qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
 }
 
 static void coroutine_fn process_savevm_co(void *opaque)
@@ -256,6 +263,8 @@ static void coroutine_fn process_savevm_co(void *opaque)
 BdrvNextIterator it;
 BlockDriverState *bs = NULL;
 
+int64_t start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+
 ret = qemu_file_get_error(snap_state.file);
 if (ret < 0) {
 save_snapshot_error("qemu_savevm_state_setup failed");
@@ -290,11 +299,15 @@ static void coroutine_fn process_savevm_co(void *opaque)
 }
 }
 
+DPRINTF("timing: process_savevm_co took %ld ms\n",
+qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
+
 /* If a drive runs in an IOThread we can flush it async, and only
  * need to sync-flush whatever IO happens between now and
  * vm_stop_force_state. bdrv_next can only be called from main AioContext,
  * so move there now and after every flush.
  */
+int64_t start_time_flush = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 aio_co_reschedule_self(qemu_get_aio_context());
 for (bs = bdrv_first(); bs; bs = bdrv_next()) {
 /* target has BDRV_O_NO_FLUSH, no sense calling bdrv_flush on it */
@@ -311,6 +324,9 @@ static void coroutine_fn process_savevm_co(void *opaque)
 }
 }
 
+DPRINTF("timing: async flushing took %ld ms\n",
+qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time_flush);
+
 qemu_bh_schedule(snap_state.finalize_bh);
 }
 
-- 
2.20.1




[pve-devel] [PATCH qemu 2/4] util/async: Add aio_co_reschedule_self()

2020-05-27 Thread Stefan Reiter
From: Kevin Wolf 

Add a function that can be used to move the currently running coroutine
to a different AioContext (and therefore potentially a different
thread).

Signed-off-by: Kevin Wolf 
---

Required for patch 3. See this discussion on the QEMU mailing list:
https://lists.gnu.org/archive/html/qemu-devel/2020-05/msg07421.html
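
As a minimal usage sketch (my illustration, not part of this patch), mirroring
how patch 3 uses the new helper: a coroutine hops into the AioContext that owns
a drive, does its work there, and hops back to the main loop:

#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/block.h"

/* Illustration only: flush a drive from within its own AioContext, then
 * return to the main loop. */
static void coroutine_fn flush_in_own_context(BlockDriverState *bs)
{
    AioContext *main_ctx = qemu_get_aio_context();
    AioContext *bs_ctx = bdrv_get_aio_context(bs);

    if (bs_ctx != main_ctx) {
        aio_co_reschedule_self(bs_ctx);   /* move to the drive's iothread */
    }
    bdrv_flush(bs);                       /* now runs in the owning context */
    aio_co_reschedule_self(main_ctx);     /* no-op if we never moved */
}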

 include/block/aio.h | 10 ++
 util/async.c| 30 ++
 2 files changed, 40 insertions(+)

diff --git a/include/block/aio.h b/include/block/aio.h
index 62ed954344..d5399c67d6 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -17,6 +17,7 @@
 #ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
 #endif
+#include "qemu/coroutine.h"
 #include "qemu/queue.h"
 #include "qemu/event_notifier.h"
 #include "qemu/thread.h"
@@ -654,6 +655,15 @@ static inline bool aio_node_check(AioContext *ctx, bool is_external)
  */
 void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
 
+/**
+ * aio_co_reschedule_self:
+ * @new_ctx: the new context
+ *
+ * Move the currently running coroutine to new_ctx. If the coroutine is already
+ * running in new_ctx, do nothing.
+ */
+void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);
+
 /**
  * aio_co_wake:
  * @co: the coroutine
diff --git a/util/async.c b/util/async.c
index 3165a28f2f..4eba1e6f1b 100644
--- a/util/async.c
+++ b/util/async.c
@@ -558,6 +558,36 @@ void aio_co_schedule(AioContext *ctx, Coroutine *co)
 aio_context_unref(ctx);
 }
 
+typedef struct AioCoRescheduleSelf {
+Coroutine *co;
+AioContext *new_ctx;
+} AioCoRescheduleSelf;
+
+static void aio_co_reschedule_self_bh(void *opaque)
+{
+AioCoRescheduleSelf *data = opaque;
+aio_co_schedule(data->new_ctx, data->co);
+}
+
+void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
+{
+AioContext *old_ctx = qemu_get_current_aio_context();
+
+if (old_ctx != new_ctx) {
+AioCoRescheduleSelf data = {
+.co = qemu_coroutine_self(),
+.new_ctx = new_ctx,
+};
+/*
+ * We can't directly schedule the coroutine in the target context
+ * because this would be racy: The other thread could try to enter the
+ * coroutine before it has yielded in this one.
+ */
+aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
+qemu_coroutine_yield();
+}
+}
+
 void aio_co_wake(struct Coroutine *co)
 {
 AioContext *ctx;
-- 
2.20.1




[pve-devel] [PATCH qemu 3/4] savevm-async: flush IOThread-drives async before entering blocking part

2020-05-27 Thread Stefan Reiter
By flushing all drives where it's possible to do so before entering the
blocking part (where the VM is stopped), we can reduce the time spent in
said part for every disk that has an IOThread (other drives cannot be
flushed async anyway).

Signed-off-by: Stefan Reiter 
---

It's a bit hard to get benchmark numbers here, since snapshot timings tend to
vary greatly for me. But with some very unscientific testing using patch 4 and
some gut feeling, on a VM running 'stress-ng -d 4' I'd say it shaves off about
2-3 seconds of VM downtime on average. I've never seen it produce worse results
than without it though, even for idle VMs.

 savevm-async.c | 23 +++
 1 file changed, 23 insertions(+)

diff --git a/savevm-async.c b/savevm-async.c
index 2894c94233..4ce83a0691 100644
--- a/savevm-async.c
+++ b/savevm-async.c
@@ -253,6 +253,8 @@ static void coroutine_fn process_savevm_co(void *opaque)
 {
 int ret;
 int64_t maxlen;
+BdrvNextIterator it;
+BlockDriverState *bs = NULL;
 
 ret = qemu_file_get_error(snap_state.file);
 if (ret < 0) {
@@ -288,6 +290,27 @@ static void coroutine_fn process_savevm_co(void *opaque)
 }
 }
 
+/* If a drive runs in an IOThread we can flush it async, and only
+ * need to sync-flush whatever IO happens between now and
+ * vm_stop_force_state. bdrv_next can only be called from main AioContext,
+ * so move there now and after every flush.
+ */
+aio_co_reschedule_self(qemu_get_aio_context());
+for (bs = bdrv_first(); bs; bs = bdrv_next()) {
+/* target has BDRV_O_NO_FLUSH, no sense calling bdrv_flush on it */
+if (bs == blk_bs(snap_state.target)) {
+continue;
+}
+
+AioContext *bs_ctx = bdrv_get_aio_context(bs);
+if (bs_ctx != qemu_get_aio_context()) {
+DPRINTF("savevm: async flushing drive %s\n", bs->filename);
+aio_co_reschedule_self(bs_ctx);
+bdrv_flush(bs);
+aio_co_reschedule_self(qemu_get_aio_context());
+}
+}
+
 qemu_bh_schedule(snap_state.finalize_bh);
 }
 
-- 
2.20.1




[pve-devel] [PATCH qemu 1/4] savevm-async: move more code to cleanup and rename to finalize

2020-05-27 Thread Stefan Reiter
process_savevm_cleanup is renamed to process_savevm_finalize to
accommodate more code that is not all cleanup related.

The benefit of this is that it allows us to directly call functions which
need to run in the main AioContext. It doesn't majorly affect snapshot
performance, since the first instruction that is moved stops the VM,
so the downtime stays about the same.

The target bdrv is additionally moved to the IOHandler context before
process_savevm_co to make sure the coroutine can call functions that
require it to own the bdrv's context. process_savevm_finalize then moves
it back to the main context to run its part.

Signed-off-by: Stefan Reiter 
---

Can be applied standalone.
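
The hunk performing the forward move (handing the target over to the iohandler
context before process_savevm_co runs) is not visible in this excerpt; as a
rough sketch of what the description above amounts to (my reconstruction, the
helper name is made up, and I assume the same acquire/set/release pattern used
in process_savevm_finalize):

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "block/aio.h"
#include "sysemu/block-backend.h"

/* Sketch only, not the actual hunk: move the snapshot target into the
 * iohandler AioContext before the snapshot coroutine starts, so that the
 * coroutine may own the bdrv's context. */
static void move_target_to_iohandler_ctx(BlockBackend *target)
{
    AioContext *iohandler_ctx = iohandler_get_aio_context();

    aio_context_acquire(qemu_get_aio_context()); /* context currently owning the target */
    blk_set_aio_context(target, iohandler_ctx, NULL);
    aio_context_release(qemu_get_aio_context());
}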

 savevm-async.c | 87 +-
 1 file changed, 51 insertions(+), 36 deletions(-)

diff --git a/savevm-async.c b/savevm-async.c
index c3fe741c38..2894c94233 100644
--- a/savevm-async.c
+++ b/savevm-async.c
@@ -50,7 +50,7 @@ static struct SnapshotState {
 int saved_vm_running;
 QEMUFile *file;
 int64_t total_time;
-QEMUBH *cleanup_bh;
+QEMUBH *finalize_bh;
 Coroutine *co;
 } snap_state;
 
@@ -196,12 +196,42 @@ static const QEMUFileOps block_file_ops = {
 .close =  block_state_close,
 };
 
-static void process_savevm_cleanup(void *opaque)
+static void process_savevm_finalize(void *opaque)
 {
 int ret;
-qemu_bh_delete(snap_state.cleanup_bh);
-snap_state.cleanup_bh = NULL;
+AioContext *iohandler_ctx = iohandler_get_aio_context();
+MigrationState *ms = migrate_get_current();
+
+qemu_bh_delete(snap_state.finalize_bh);
+snap_state.finalize_bh = NULL;
 snap_state.co = NULL;
+
+/* We need to own the target bdrv's context for the following functions,
+ * so move it back. It can stay in the main context and live out its live
+ * there, since we're done with it after this method ends anyway.
+ */
+aio_context_acquire(iohandler_ctx);
+blk_set_aio_context(snap_state.target, qemu_get_aio_context(), NULL);
+aio_context_release(iohandler_ctx);
+
+ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+if (ret < 0) {
+save_snapshot_error("vm_stop_force_state error %d", ret);
+}
+
+(void)qemu_savevm_state_complete_precopy(snap_state.file, false, false);
+ret = qemu_file_get_error(snap_state.file);
+if (ret < 0) {
+save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
+}
+
+DPRINTF("state saving complete\n");
+
+/* clear migration state */
+migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP,
+  ret ? MIGRATION_STATUS_FAILED : MIGRATION_STATUS_COMPLETED);
+ms->to_dst_file = NULL;
+
 qemu_savevm_state_cleanup();
 
 ret = save_snapshot_cleanup();
@@ -219,16 +249,15 @@ static void process_savevm_cleanup(void *opaque)
 }
 }
 
-static void process_savevm_coro(void *opaque)
+static void coroutine_fn process_savevm_co(void *opaque)
 {
 int ret;
 int64_t maxlen;
-MigrationState *ms = migrate_get_current();
 
 ret = qemu_file_get_error(snap_state.file);
 if (ret < 0) {
 save_snapshot_error("qemu_savevm_state_setup failed");
-goto out;
+return;
 }
 
 while (snap_state.state == SAVE_STATE_ACTIVE) {
@@ -245,7 +274,7 @@ static void process_savevm_coro(void *opaque)
 save_snapshot_error("qemu_savevm_state_iterate error %d", ret);
 break;
 }
-DPRINTF("savevm inerate pending size %lu ret %d\n", pending_size, 
ret);
+DPRINTF("savevm iterate pending size %lu ret %d\n", pending_size, 
ret);
 } else {
 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
 ret = global_state_store();
@@ -253,40 +282,20 @@ static void process_savevm_coro(void *opaque)
 save_snapshot_error("global_state_store error %d", ret);
 break;
 }
-ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
-if (ret < 0) {
-save_snapshot_error("vm_stop_force_state error %d", ret);
-break;
-}
-DPRINTF("savevm inerate finished\n");
-/* upstream made the return value here inconsistent
- * (-1 instead of 'ret' in one case and 0 after flush which can
- * still set a file error...)
- */
-(void)qemu_savevm_state_complete_precopy(snap_state.file, false, false);
-ret = qemu_file_get_error(snap_state.file);
-if (ret < 0) {
-save_snapshot_error("qemu_savevm_state_iterate error %d", 
ret);
-break;
-}
-DPRINTF("save complete\n");
+
+DPRINTF("savevm iterate complete\n");
 break;
 }
 }
 
-qemu_bh_schedule(snap_state.cleanup_bh);
-
-out:
-/* set migration state accordingly and clear soon-to-be stale file */
-

[pve-devel] [PATCH qemu 0/4] Fix vmstate-snapshots w/ iothread=1

2020-05-27 Thread Stefan Reiter
Once again, iothreads are making trouble. When enabled, snapshots that include
RAM deadlock QEMU, because our async-snapshot implementation (which recently
moved back to using coroutines) tries to access and modify the state of disks
running in iothreads separate from the main one.

Patch 1/4 fixes the issue and can be applied standalone; patches 2 and 3 improve
snapshot performance for iothread disks, and patch 4 adds some useful debug
prints for testing the aforementioned performance patches. See the individual
patch notes for more.

For easier reviewing I sent the patches against the QEMU source itself; if
necessary, I can also apply them and then send pve-qemu patches that include
them as .patch files.

Kevin Wolf (1):
  util/async: Add aio_co_reschedule_self()

Stefan Reiter (3):
  savevm-async: move more code to cleanup and rename to finalize
  savevm-async: flush IOThread-drives async before entering blocking
part
  savevm-async: add debug timing prints

 include/block/aio.h |  10 
 savevm-async.c  | 124 +++-
 util/async.c|  30 +++
 3 files changed, 129 insertions(+), 35 deletions(-)

-- 
2.20.1




Re: [pve-devel] [RFC manager 2/2] hardware view: Add disk import button

2020-05-27 Thread Dominik Csapak

Sorry for the delay with the review.

A few comments inline.

On 5/22/20 12:08 PM, Dominic Jäger wrote:

Is it a bad idea to move column2 as I did here? Seems strange to have column1
and 2 so different but I haven't found an easier way to make it available to
the subclass yet.

@Thomas Is this sort of what you had in mind for this feature?


I do not know what you talked about, but seeing this, I would rather put
the 'importing' code directly into the HDEdit panel and make the 'mode'
configurable.

Since most of the logic is in the window anyway, that would avoid the
whole issue of accessing the columns of the superclass etc.



Signed-off-by: Dominic Jäger 
---
  www/manager6/Makefile|   1 +
  www/manager6/form/DiskStorageSelector.js |   5 +
  www/manager6/qemu/HDEdit.js  |  50 +
  www/manager6/qemu/HDImport.js| 128 +++
  www/manager6/qemu/HardwareView.js|  13 +++
  5 files changed, 176 insertions(+), 21 deletions(-)
  create mode 100644 www/manager6/qemu/HDImport.js

diff --git a/www/manager6/Makefile b/www/manager6/Makefile
index a29e280d..c1645748 100644
--- a/www/manager6/Makefile
+++ b/www/manager6/Makefile
@@ -143,6 +143,7 @@ JSSRC=  \
qemu/Smbios1Edit.js \
qemu/CDEdit.js  \
qemu/HDEdit.js  \
+   qemu/HDImport.js\
qemu/HDResize.js\
qemu/HDMove.js  \
qemu/HDEfi.js   \
diff --git a/www/manager6/form/DiskStorageSelector.js b/www/manager6/form/DiskStorageSelector.js
index 445e3ac0..2c1555e1 100644
--- a/www/manager6/form/DiskStorageSelector.js
+++ b/www/manager6/form/DiskStorageSelector.js
@@ -32,6 +32,11 @@ Ext.define('PVE.form.DiskStorageSelector', {
  // string because else we get a type confusion
  defaultSize: '32',
  
+setDiskSize: function(newSize) {

+   let field = this.getComponent('disksize');
+   field.setValue(newSize);
+},
+
  changeStorage: function(f, value) {
var me = this;
var formatsel = me.getComponent('diskformat');
diff --git a/www/manager6/qemu/HDEdit.js b/www/manager6/qemu/HDEdit.js
index fd890600..5d6c12e9 100644
--- a/www/manager6/qemu/HDEdit.js
+++ b/www/manager6/qemu/HDEdit.js
@@ -13,6 +13,28 @@ Ext.define('PVE.qemu.HDInputPanel', {
  
  viewModel: {},
  
+diskStorageSelector: {

+   xtype: 'pveDiskStorageSelector',
+   storageContent: 'images',
+   name: 'disk',
+   reference: 'storageSelector',
+},
+
+column2: [
+   {
+   xtype: 'CacheTypeSelector',
+   name: 'cache',
+   value: '__default__',
+   fieldLabel: gettext('Cache')
+   },
+   {
+   xtype: 'proxmoxcheckbox',
+   fieldLabel: gettext('Discard'),
+   reference: 'discard',
+   name: 'discard'
+   },
+],
+
  controller: {
  
  	xclass: 'Ext.app.ViewController',

@@ -164,7 +186,6 @@ Ext.define('PVE.qemu.HDInputPanel', {
me.drive = {};
  
  	me.column1 = [];

-   me.column2 = [];
  
  	me.advancedColumn1 = [];

me.advancedColumn2 = [];
@@ -188,6 +209,8 @@ Ext.define('PVE.qemu.HDInputPanel', {
me.column1.push(me.scsiController);
}
  
+	me.diskStorageSelector.nodename = me.nodename;

+   me.diskStorageSelector.autoSelect = me.insideWizard;
if (me.unused) {
me.unusedDisks = Ext.create('Proxmox.form.KVComboBox', {
name: 'unusedId',
@@ -201,13 +224,7 @@ Ext.define('PVE.qemu.HDInputPanel', {
});
me.column1.push(me.unusedDisks);
} else if (me.isCreate) {
-   me.column1.push({
-   xtype: 'pveDiskStorageSelector',
-   storageContent: 'images',
-   name: 'disk',
-   nodename: me.nodename,
-   autoSelect: me.insideWizard
-   });
+   me.column1.push(me.diskStorageSelector);
} else {
me.column1.push({
xtype: 'textfield',
@@ -219,18 +236,6 @@ Ext.define('PVE.qemu.HDInputPanel', {
}
  
  	me.column2.push(

-   {
-   xtype: 'CacheTypeSelector',
-   name: 'cache',
-   value: '__default__',
-   fieldLabel: gettext('Cache')
-   },
-   {
-   xtype: 'proxmoxcheckbox',
-   fieldLabel: gettext('Discard'),
-   reference: 'discard',
-   name: 'discard'
-   }
);
  
  	me.advancedColumn1.push(

@@ -358,7 +363,9 @@ Ext.define('PVE.qemu.HDEdit', {
  
  backgroundDelay: 5,
  
-initComponent : function() {

+isImport: false,
+
+initComponent: