Re: [libvirt] [PATCH v3 28/30] qemu: Allow forcing VFIO when computing memlock limit

2019-12-10 Thread Cole Robinson
On 12/2/19 9:26 AM, Michal Privoznik wrote:
> With NVMe disks, one can start a blockjob with a NVMe disk
> that is not visible in domain XML (at least right away). Usually,
> it's fairly easy to override this limitation of
> qemuDomainGetMemLockLimitBytes() - for instance for hostdevs we
> temporarily add the device to domain def, let the function
> calculate the limit and then remove the device. But it's not so
> easy with virStorageSourcePtr - in some cases they aren't
> necessarily attached to a disk. And even if they are it's
> done later in the process and frankly, I find it too complicated
> to be able to use the simple trick we use with hostdevs.
> 
> Signed-off-by: Michal Privoznik 

Reviewed-by: Cole Robinson 

- Cole

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list



[libvirt] [PATCH v3 28/30] qemu: Allow forcing VFIO when computing memlock limit

2019-12-02 Thread Michal Privoznik
With NVMe disks, one can start a blockjob with a NVMe disk
that is not visible in domain XML (at least right away). Usually,
it's fairly easy to override this limitation of
qemuDomainGetMemLockLimitBytes() - for instance for hostdevs we
temporarily add the device to domain def, let the function
calculate the limit and then remove the device. But it's not so
easy with virStorageSourcePtr - in some cases they aren't
necessarily attached to a disk. And even if they are it's
done later in the process and frankly, I find it too complicated
to be able to use the simple trick we use with hostdevs.

Signed-off-by: Michal Privoznik 
---
 src/qemu/qemu_command.c |  2 +-
 src/qemu/qemu_domain.c  | 46 ++---
 src/qemu/qemu_domain.h  |  6 --
 src/qemu/qemu_hotplug.c | 12 +--
 tests/qemumemlocktest.c |  2 +-
 5 files changed, 41 insertions(+), 27 deletions(-)

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 97d11c1d5f..7cc6a55c75 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -10463,7 +10463,7 @@ qemuBuildCommandLine(virQEMUDriverPtr driver,
 
 /* In some situations, eg. VFIO passthrough, QEMU might need to lock a
  * significant amount of memory, so we need to set the limit accordingly */
-virCommandSetMaxMemLock(cmd, qemuDomainGetMemLockLimitBytes(def));
+virCommandSetMaxMemLock(cmd, qemuDomainGetMemLockLimitBytes(def, false));
 
 if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_MSG_TIMESTAMP) &&
 cfg->logTimestamp)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index e81d986e9e..be4aef049d 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -11875,12 +11875,14 @@ ppc64VFIODeviceIsNV2Bridge(const char *device)
 /**
  * getPPC64MemLockLimitBytes:
  * @def: domain definition
+ * @forceVFIO: force VFIO usage
  *
  * A PPC64 helper that calculates the memory locking limit in order for
  * the guest to operate properly.
  */
 static unsigned long long
-getPPC64MemLockLimitBytes(virDomainDefPtr def)
+getPPC64MemLockLimitBytes(virDomainDefPtr def,
+  bool forceVFIO)
 {
 unsigned long long memKB = 0;
 unsigned long long baseLimit = 0;
@@ -11971,7 +11973,7 @@ getPPC64MemLockLimitBytes(virDomainDefPtr def)
 passthroughLimit = maxMemory +
128 * (1ULL<<30) / 512 * nPCIHostBridges +
8192;
-} else if (usesVFIO) {
+} else if (usesVFIO || forceVFIO) {
 /* For regular (non-NVLink2 present) VFIO passthrough, the value
  * of passthroughLimit is:
  *
@@ -12009,16 +12011,20 @@ getPPC64MemLockLimitBytes(virDomainDefPtr def)
 /**
  * qemuDomainGetMemLockLimitBytes:
  * @def: domain definition
+ * @forceVFIO: force VFIO calculation
  *
  * Calculate the memory locking limit that needs to be set in order for
  * the guest to operate properly. The limit depends on a number of factors,
  * including certain configuration options and less immediately apparent ones
  * such as the guest architecture or the use of certain devices.
+ * The @forceVFIO argument can be used to tell this function to assume VFIO
+ * usage even though @def doesn't indicate so right now.
  *
  * Returns: the memory locking limit, or 0 if setting the limit is not needed
  */
 unsigned long long
-qemuDomainGetMemLockLimitBytes(virDomainDefPtr def)
+qemuDomainGetMemLockLimitBytes(virDomainDefPtr def,
+   bool forceVFIO)
 {
 unsigned long long memKB = 0;
 bool usesVFIO = false;
@@ -12039,7 +12045,7 @@ qemuDomainGetMemLockLimitBytes(virDomainDefPtr def)
 return VIR_DOMAIN_MEMORY_PARAM_UNLIMITED;
 
 if (ARCH_IS_PPC64(def->os.arch) && def->virtType == VIR_DOMAIN_VIRT_KVM)
-return getPPC64MemLockLimitBytes(def);
+return getPPC64MemLockLimitBytes(def, forceVFIO);
 
 /* For device passthrough using VFIO the guest memory and MMIO memory
  * regions need to be locked persistent in order to allow DMA.
@@ -12059,18 +12065,20 @@ qemuDomainGetMemLockLimitBytes(virDomainDefPtr def)
  *
  * Note that this may not be valid for all platforms.
  */
-for (i = 0; i < def->nhostdevs; i++) {
-if (virHostdevIsVFIODevice(def->hostdevs[i]) ||
-virHostdevIsMdevDevice(def->hostdevs[i])) {
-usesVFIO = true;
-break;
+if (!forceVFIO) {
+for (i = 0; i < def->nhostdevs; i++) {
+if (virHostdevIsVFIODevice(def->hostdevs[i]) ||
+virHostdevIsMdevDevice(def->hostdevs[i])) {
+usesVFIO = true;
+break;
+}
 }
-}
 
-if (virDomainDefHasNVMeDisk(def))
-usesVFIO = true;
+if (virDomainDefHasNVMeDisk(def))
+usesVFIO = true;
+}
 
-if (usesVFIO)
+if (usesVFIO || forceVFIO)
 memKB = virDomainDefGetMemoryTotal(def) + 1024 * 1024;
 
  done:
@@ -12081,9 +12089,12 @@