[PATCH 2/5] cpu_arm.c: modernize virCPUarmUpdate

2020-05-22 Thread Daniel Henrique Barboza
Use automatic cleanup of variables.

Signed-off-by: Daniel Henrique Barboza 
---
 src/cpu/cpu_arm.c | 14 +-
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/src/cpu/cpu_arm.c b/src/cpu/cpu_arm.c
index 6f6c6a1479..cd4f720c95 100644
--- a/src/cpu/cpu_arm.c
+++ b/src/cpu/cpu_arm.c
@@ -415,8 +415,7 @@ static int
 virCPUarmUpdate(virCPUDefPtr guest,
 const virCPUDef *host)
 {
-int ret = -1;
-virCPUDefPtr updated = NULL;
+g_autoptr(virCPUDef) updated = NULL;
 
 if (guest->mode != VIR_CPU_MODE_HOST_MODEL)
 return 0;
@@ -424,24 +423,21 @@ virCPUarmUpdate(virCPUDefPtr guest,
 if (!host) {
 virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("unknown host CPU model"));
-goto cleanup;
+return -1;
 }
 
 if (!(updated = virCPUDefCopyWithoutModel(guest)))
-goto cleanup;
+return -1;
 
 updated->mode = VIR_CPU_MODE_CUSTOM;
 if (virCPUDefCopyModel(updated, host, true) < 0)
-goto cleanup;
+return -1;
 
 virCPUDefStealModel(guest, updated, false);
 guest->mode = VIR_CPU_MODE_CUSTOM;
 guest->match = VIR_CPU_MATCH_EXACT;
-ret = 0;
 
- cleanup:
-virCPUDefFree(updated);
-return ret;
+return 0;
 }
 
 
-- 
2.26.2



[PATCH 4/5] qemu_process.c: modernize qemuProcessUpdateCPU code path

2020-05-22 Thread Daniel Henrique Barboza
Use automatic cleanup on qemuProcessUpdateCPU and the functions called
by it.

Signed-off-by: Daniel Henrique Barboza 
---
 src/qemu/qemu_process.c | 50 ++---
 1 file changed, 17 insertions(+), 33 deletions(-)

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 45af8f810c..a1ef1d42b0 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -4187,8 +4187,8 @@ qemuProcessFetchGuestCPU(virQEMUDriverPtr driver,
  virCPUDataPtr *disabled)
 {
 qemuDomainObjPrivatePtr priv = vm->privateData;
-virCPUDataPtr dataEnabled = NULL;
-virCPUDataPtr dataDisabled = NULL;
+g_autoptr(virCPUData) dataEnabled = NULL;
+g_autoptr(virCPUData) dataDisabled = NULL;
 bool generic;
 int rc;
 
@@ -4201,7 +4201,7 @@ qemuProcessFetchGuestCPU(virQEMUDriverPtr driver,
 return 0;
 
 if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
-goto error;
+return -1;
 
 if (generic) {
 rc = qemuMonitorGetGuestCPU(priv->mon,
@@ -4213,19 +4213,14 @@ qemuProcessFetchGuestCPU(virQEMUDriverPtr driver,
 }
 
 if (qemuDomainObjExitMonitor(driver, vm) < 0)
-goto error;
+return -1;
 
 if (rc == -1)
-goto error;
+return -1;
 
-*enabled = dataEnabled;
-*disabled = dataDisabled;
+*enabled = g_steal_pointer(&dataEnabled);
+*disabled = g_steal_pointer(&dataDisabled);
 return 0;
-
- error:
-virCPUDataFree(dataEnabled);
-virCPUDataFree(dataDisabled);
-return -1;
 }
 
 
@@ -4261,9 +4256,8 @@ qemuProcessUpdateLiveGuestCPU(virDomainObjPtr vm,
 {
 virDomainDefPtr def = vm->def;
 qemuDomainObjPrivatePtr priv = vm->privateData;
-virCPUDefPtr orig = NULL;
+g_autoptr(virCPUDef) orig = NULL;
 int rc;
-int ret = -1;
 
 if (!enabled)
 return 0;
@@ -4274,10 +4268,10 @@ qemuProcessUpdateLiveGuestCPU(virDomainObjPtr vm,
 return 0;
 
 if (!(orig = virCPUDefCopy(def->cpu)))
-goto cleanup;
+return -1;
 
 if ((rc = virCPUUpdateLive(def->os.arch, def->cpu, enabled, disabled)) < 
0) {
-goto cleanup;
+return -1;
 } else if (rc == 0) {
 /* Store the original CPU in priv if QEMU changed it and we didn't
  * get the original CPU via migration, restore, or snapshot revert.
@@ -4288,11 +4282,7 @@ qemuProcessUpdateLiveGuestCPU(virDomainObjPtr vm,
 def->cpu->check = VIR_CPU_CHECK_FULL;
 }
 
-ret = 0;
-
- cleanup:
-virCPUDefFree(orig);
-return ret;
+return 0;
 }
 
 
@@ -4351,10 +4341,9 @@ qemuProcessUpdateCPU(virQEMUDriverPtr driver,
  virDomainObjPtr vm,
  qemuDomainAsyncJob asyncJob)
 {
-virCPUDataPtr cpu = NULL;
-virCPUDataPtr disabled = NULL;
+g_autoptr(virCPUData) cpu = NULL;
+g_autoptr(virCPUData) disabled = NULL;
 g_autoptr(virDomainCapsCPUModels) models = NULL;
-int ret = -1;
 
 /* The host CPU model comes from host caps rather than QEMU caps so
  * fallback must be allowed no matter what the user specified in the XML.
@@ -4362,21 +4351,16 @@ qemuProcessUpdateCPU(virQEMUDriverPtr driver,
 vm->def->cpu->fallback = VIR_CPU_FALLBACK_ALLOW;
 
 if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0)
-goto cleanup;
+return -1;
 
 if (qemuProcessUpdateLiveGuestCPU(vm, cpu, disabled) < 0)
-goto cleanup;
+return -1;
 
 if (qemuProcessFetchCPUDefinitions(driver, vm, asyncJob, &models) < 0 ||
 virCPUTranslate(vm->def->os.arch, vm->def->cpu, models) < 0)
-goto cleanup;
-
-ret = 0;
+return -1;
 
- cleanup:
-virCPUDataFree(cpu);
-virCPUDataFree(disabled);
-return ret;
+return 0;
 }
 
 
-- 
2.26.2



[PATCH 0/5] qemuProcessUpdateCPU: do not change 'fallback' for pSeries guests

2020-05-22 Thread Daniel Henrique Barboza
Hi,

Patch 5/5 contains a fix for [1]. The first 4 patches are
g_auto() cleanups I made along the way while investigating
the bug. 


[1] https://bugzilla.redhat.com/show_bug.cgi?id=1660711



Daniel Henrique Barboza (5):
  cpu_conf.c: modernize virCPUDefCopyWithoutModel and virCPUDefCopy
  cpu_arm.c: modernize virCPUarmUpdate
  cpu_s390.c: modernize virCPUs390Update
  qemu_process.c: modernize qemuProcessUpdateCPU code path
  qemuProcessUpdateCPU: do not change 'fallback' to ALLOW for pSeries
guests

 src/conf/cpu_conf.c | 22 +---
 src/cpu/cpu_arm.c   | 14 --
 src/cpu/cpu_s390.c  | 18 +
 src/qemu/qemu_process.c | 58 +
 4 files changed, 43 insertions(+), 69 deletions(-)

-- 
2.26.2



[PATCH 5/5] qemuProcessUpdateCPU: do not change 'fallback' to ALLOW for pSeries guests

2020-05-22 Thread Daniel Henrique Barboza
Commit v3.10.0-182-g237f045d9a ("qemu: Ignore fallback CPU attribute
on reconnect") forced CPU 'fallback' to ALLOW, regardless of user
choice. This fixed a situation in which guests created with older
Libvirt versions, which used CPU mode 'host-model' in runtime, would
fail to launch in a newer Libvirt if the fallback was set to FORBID.
This would lead to a scenario where the CPU was translated from 'host-model'
to 'custom', but then the FORBID setting would make the translation
process fail.

This fix has a side effect for PSeries guests. PSeries can operate
with 'host-model' in runtime due to specific PPC64 mechanics regarding
compatibility mode. In fact, the update() implementation of the
cpuDriverPPC64 driver is a NO-OP if CPU mode is 'host-model', and
the driver does not implement translate(). The result is that PSeries
guests aren't affected by the problem, but they are being affected by
the fix - users are seeing 'fallback' mode being changed without
necessity during daemon restart.

All other cpuArchDrivers implement update() and change guest mode
to VIR_CPU_MODE_CUSTOM, meaning that PSeries is currently the only
exception to this logic. Let's make it official.

https://bugzilla.redhat.com/show_bug.cgi?id=1660711

CC: Jiri Denemark 
Signed-off-by: Daniel Henrique Barboza 
---
 src/qemu/qemu_process.c | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index a1ef1d42b0..fec1720f33 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -4347,8 +4347,14 @@ qemuProcessUpdateCPU(virQEMUDriverPtr driver,
 
 /* The host CPU model comes from host caps rather than QEMU caps so
  * fallback must be allowed no matter what the user specified in the XML.
+ *
+ * Note: PSeries domains are able to run with host-model CPU by design,
+ * even on Libvirt newer than 2.3, never replacing host-model with
+ * custom in the virCPUUpdate() call prior to this function. It is not
+ * needed to change the user defined 'fallback' attribute in this case.
  */
-vm->def->cpu->fallback = VIR_CPU_FALLBACK_ALLOW;
+if (!qemuDomainIsPSeries(vm->def))
+vm->def->cpu->fallback = VIR_CPU_FALLBACK_ALLOW;
 
 if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0)
 return -1;
-- 
2.26.2



[PATCH 1/5] cpu_conf.c: modernize virCPUDefCopyWithoutModel and virCPUDefCopy

2020-05-22 Thread Daniel Henrique Barboza
Use automatic cleanup of variables.

Signed-off-by: Daniel Henrique Barboza 
---
 src/conf/cpu_conf.c | 22 +++---
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/src/conf/cpu_conf.c b/src/conf/cpu_conf.c
index 07404c6fb0..c6d36e0cb5 100644
--- a/src/conf/cpu_conf.c
+++ b/src/conf/cpu_conf.c
@@ -227,7 +227,7 @@ virCPUDefStealModel(virCPUDefPtr dst,
 virCPUDefPtr
 virCPUDefCopyWithoutModel(const virCPUDef *cpu)
 {
-virCPUDefPtr copy;
+g_autoptr(virCPUDef) copy = NULL;
 
 if (!cpu)
 return NULL;
@@ -246,42 +246,34 @@ virCPUDefCopyWithoutModel(const virCPUDef *cpu)
 
 if (cpu->cache) {
 if (VIR_ALLOC(copy->cache) < 0)
-goto error;
+return NULL;
 
 *copy->cache = *cpu->cache;
 }
 
 if (cpu->tsc) {
 if (VIR_ALLOC(copy->tsc) < 0)
-goto error;
+return NULL;
 
 *copy->tsc = *cpu->tsc;
 }
 
-return copy;
-
- error:
-virCPUDefFree(copy);
-return NULL;
+return g_steal_pointer(&copy);
 }
 
 
 virCPUDefPtr
 virCPUDefCopy(const virCPUDef *cpu)
 {
-virCPUDefPtr copy;
+g_autoptr(virCPUDef) copy = NULL;
 
 if (!(copy = virCPUDefCopyWithoutModel(cpu)))
 return NULL;
 
 if (virCPUDefCopyModel(copy, cpu, false) < 0)
-goto error;
-
-return copy;
+return NULL;
 
- error:
-virCPUDefFree(copy);
-return NULL;
+return g_steal_pointer(&copy);
 }
 
 
-- 
2.26.2



[PATCH 3/5] cpu_s390.c: modernize virCPUs390Update

2020-05-22 Thread Daniel Henrique Barboza
Use automatic cleanup of variables.

Signed-off-by: Daniel Henrique Barboza 
---
 src/cpu/cpu_s390.c | 18 +++---
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/src/cpu/cpu_s390.c b/src/cpu/cpu_s390.c
index dd030c5a11..c1c5686244 100644
--- a/src/cpu/cpu_s390.c
+++ b/src/cpu/cpu_s390.c
@@ -45,8 +45,7 @@ static int
 virCPUs390Update(virCPUDefPtr guest,
  const virCPUDef *host)
 {
-virCPUDefPtr updated = NULL;
-int ret = -1;
+g_autoptr(virCPUDef) updated = NULL;
 size_t i;
 
 if (guest->mode == VIR_CPU_MODE_CUSTOM) {
@@ -58,37 +57,34 @@ virCPUs390Update(virCPUDefPtr guest,
 virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("optional CPU features are not supported"));
 }
-goto cleanup;
+return -1;
 }
 
 if (!host) {
 virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("unknown host CPU model"));
-goto cleanup;
+return -1;
 }
 
 if (!(updated = virCPUDefCopyWithoutModel(guest)))
-goto cleanup;
+return -1;
 
 updated->mode = VIR_CPU_MODE_CUSTOM;
 if (virCPUDefCopyModel(updated, host, true) < 0)
-goto cleanup;
+return -1;
 
 for (i = 0; i < guest->nfeatures; i++) {
if (virCPUDefUpdateFeature(updated,
   guest->features[i].name,
   guest->features[i].policy) < 0)
-   goto cleanup;
+   return -1;
 }
 
 virCPUDefStealModel(guest, updated, false);
 guest->mode = VIR_CPU_MODE_CUSTOM;
 guest->match = VIR_CPU_MATCH_EXACT;
-ret = 0;
 
- cleanup:
-virCPUDefFree(updated);
-return ret;
+return 0;
 }
 
 
-- 
2.26.2



Re: [PATCH v2 1/3] libxl: Add 'permissive' option for PCI devices

2020-05-22 Thread Jim Fehlig

On 5/21/20 11:01 AM, Laine Stump wrote:

On 5/8/20 4:54 PM, Jim Fehlig wrote:

On 4/24/20 9:07 PM, Marek Marczykowski-Górecki wrote:

From: Simon Gaiser 

By setting the permissive flag the Xen guest access to the PCI config space
is not filtered. This might be a security risk, but it's required for
some devices and the IOMMU and interrupt remapping should (mostly?)
contain it. Because of it (and that the attribute is Xen only), in an
example include "permissive='no'" - to not expose users copying from
documentation to extra risk.

Example usage:

 
   ...
   


As you know I always struggle with XML modeling and naming, and IIRC the 
'managed' attribute has been a source of pain in the past, so it would be nice 
to have someone else opine on the introduction of 'permissive' attribute. I 
see Laine reviewed V1 and as I recall has commented on the managed attribute 
in the past, so let's see if he has any advice :-).



Sorry, this message got lost in the firehose, and Jim just now reminded me of it 
on IRC.


Hi Laine,

Thanks for your comments!

I seriously don't remember what I may have said in the past about the 
managed='yes' attribute. Could have been something about the default being 'yes' 
instead of 'no', or could have been about its placement as an attribute of the 
toplevel  element rather than being made a part of the  
subelement (since it is really associated with what is done on the host side to 
get the device setup and hooked into the qemu process).



For this "permissive" attribute, I don't really know enough about it to know 
where it should go. Does it affect how the device appears in the guest? (in 
which case maybe it should be a part of ) or only the trappings around 
it in the host? (so maybe it should be a part of ). If not totally one 
or the other, could the setting of this option be changed and guest ABI would 
remain unchanged (but just some operations would now fail / succeed when they 
had done the opposite in the past)? In that case, again maybe .


I don't follow xen development enough these days to confidently answer your 
questions. I'm quite sure the setting doesn't affect how the device appears in 
the guest, only some filtering of PCI config spaces access. Marek, can you 
verify that? If so, I lean towards your suggestion of adding the attribute to 
.


I guess the answers to that would help guide me if I was making the decision 
about where to put the new setting.



As for the *name* of it? That's even more difficult for me to have any opinion 
on since, again, I really have no understanding of what it's doing or why it's 
necessary.


From the xl.cfg(5) man page:

By default pciback only allows PV guests to write "known safe" values into PCI 
configuration space, likewise QEMU (both qemu-xen and qemu-xen-traditional) 
imposes the same constraint on HVM guests.  However, many devices require writes 
to other areas of the configuration space in order to operate properly.  This 
option tells the backend (pciback or QEMU) to allow all writes to the PCI 
configuration space of this device by this domain.


It sounds more and more like a config option for the . Do any of these 
options sound plausible?


  

  

  

  

  


  

Regards,
Jim


One thing I would ask (maybe I did before) - wherever you put it, we (people who 
use qemu) would *really* appreciate if you added a clause to the post-parse 
validation in the qemu driver to cause an error if this option is found in a 
qemu domain config :-)


I think you mentioned it in V1. Marek, can you add such a check in the qemu 
driver for V3?


Regards,
Jim





One option I considered was 'filtered' and 'unfiltered' values for a "config 
space" attribute, similar to sgio for scsi hostdevs. But I can't think of a 
good name for an attribute that "filters writes to the PCI hostdev config space".



 
   
 
   
 

Signed-off-by: Simon Gaiser 
Signed-off-by: Marek Marczykowski-Górecki 
---
Changes in v2:
  - improve documentation (version info, example)
  - update schema
  - use virTristateBool
  - add a test
  - improve commit message
  - news entry
---
  docs/formatdomain.html.in  |  7 +--
  docs/news.xml  | 11 +++-


Additions to news is usually done in a sparate patch.


docs/schemas/domaincommon.rng  | 10 ++-
  src/conf/domain_conf.c | 19 +++-
  src/conf/domain_conf.h |  1 +-
  src/libxl/libxl_conf.c |  1 +-
  tests/libxlxml2domconfigdata/moredevs-hvm.json |  6 ++-
  tests/libxlxml2domconfigdata/moredevs-hvm.xml  |  5 +-
  8 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/docs/formatdomain.html.in b/docs/formatdomain.html.in
index c530573..0d1146e 100644
--- a/docs/formatdomain.html.in
+++ b/docs/formatdomain.html.in
@@ -4987,7 +4987,7 @@
  
  ...
  devices
-  hostdev 

Re: [PATCH 4/5] qemu: Prefer -numa cpu over -numa node,cpus=

2020-05-22 Thread Igor Mammedov
On Fri, 22 May 2020 18:28:31 +0200
Michal Privoznik  wrote:

> On 5/22/20 6:07 PM, Igor Mammedov wrote:
> > On Fri, 22 May 2020 16:14:14 +0200
> > Michal Privoznik  wrote:
> >   
> >> QEMU is trying to obsolete -numa node,cpus= because that uses
> >> ambiguous vCPU id to [socket, die, core, thread] mapping. The new
> >> form is:
> >>
> >>-numa cpu,node-id=N,socket-id=S,die-id=D,core-id=C,thread-id=T
> >>
> >> which is repeated for every vCPU and places it at [S, D, C, T]
> >> into guest NUMA node N.
> >>
> >> While in general this is magic mapping, we can deal with it.
> >> Firstly, with QEMU 2.7 or newer, libvirt ensures that if topology
> >> is given then maxvcpus must be sockets * dies * cores * threads
> >> (i.e. there are no 'holes').
> >> Secondly, if no topology is given then libvirt itself places each
> >> vCPU into a different socket (basically, it fakes topology of:
> >> [maxvcpus, 1, 1, 1])
> >> Thirdly, we can copy whatever QEMU is doing when mapping vCPUs
> >> onto topology, to make sure vCPUs don't start to move around.
> >>
> >> Note, migration from old to new cmd line works and therefore
> >> doesn't need any special handling.
> >>
> >> Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1678085
> >>
> >> Signed-off-by: Michal Privoznik 
> >> ---
> >>   src/qemu/qemu_command.c   | 108 +-
> >>   .../hugepages-nvdimm.x86_64-latest.args   |   4 +-
> >>   ...memory-default-hugepage.x86_64-latest.args |  10 +-
> >>   .../memfd-memory-numa.x86_64-latest.args  |  10 +-
> >>   ...y-hotplug-nvdimm-access.x86_64-latest.args |   4 +-
> >>   ...ry-hotplug-nvdimm-align.x86_64-latest.args |   4 +-
> >>   ...ry-hotplug-nvdimm-label.x86_64-latest.args |   4 +-
> >>   ...ory-hotplug-nvdimm-pmem.x86_64-latest.args |   4 +-
> >>   ...ory-hotplug-nvdimm-ppc64.ppc64-latest.args |   4 +-
> >>   ...hotplug-nvdimm-readonly.x86_64-latest.args |   4 +-
> >>   .../memory-hotplug-nvdimm.x86_64-latest.args  |   4 +-
> >>   ...vhost-user-fs-fd-memory.x86_64-latest.args |   4 +-
> >>   ...vhost-user-fs-hugepages.x86_64-latest.args |   4 +-
> >>   ...host-user-gpu-secondary.x86_64-latest.args |   3 +-
> >>   .../vhost-user-vga.x86_64-latest.args |   3 +-
> >>   15 files changed, 158 insertions(+), 16 deletions(-)
> >>
> >> diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
> >> index 7d84fd8b5e..0de4fe4905 100644
> >> --- a/src/qemu/qemu_command.c
> >> +++ b/src/qemu/qemu_command.c
> >> @@ -7079,6 +7079,91 @@ qemuBuildNumaOldCPUs(virBufferPtr buf,
> >>   }
> >>   
> >>   
> >> +/**
> >> + * qemuTranlsatevCPUID:
> >> + *
> >> + * For given vCPU @id and vCPU topology (@cpu) compute corresponding
> >> + * @socket, @die, @core and @thread). This assumes linear topology,
> >> + * that is every [socket, die, core, thread] combination is valid vCPU
> >> + * ID and there are no 'holes'. This is ensured by
> >> + * qemuValidateDomainDef() if QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS is
> >> + * set.  
> > I wouldn't make this assumption, each machine can have (and has) it's own 
> > layout,
> > and now it's not hard to change that per machine version if necessary.
> > 
> > I'd suppose one could pull the list of possible CPUs from QEMU started
> > in preconfig mode with desired -smp x,y,z using QUERY_HOTPLUGGABLE_CPUS
> > and then continue to configure numa with QMP commands using provided
> > CPUs layout.  
> 
> Continue where? At the 'preconfig mode' the guest is already started, 
> isn't it? Are you suggesting that libvirt starts a dummy QEMU process, 
> fetches the CPU topology from it an then starts if for real? Libvirt 
QEMU started but it's very far from starting guest, at that time it's possible
configure numa mapping at runtime and continue to -S or running state
without restarting QEMU. For the follow up starts, used topology and numa 
options
can be cached and reused at CLI time as long as machine/-smp combination stays
the same.

> tries to avoid that as much as it can.
> 
> > 
> > How to present it to libvirt user I'm not sure (give them that list perhaps
> > and let select from it???)  
> 
> This is what I am trying to figure out in the cover letter. Maybe we 
> need to let users configure the topology (well, vCPU id to [socket, die, 
> core, thread] mapping), but then again, in my testing the guest ignored 
> that and displayed different topology (true, I was testing with -cpu 
> host, so maybe that's why).
there is an ongoing issue with EPYC vCPUs topology, but otherwise it should work.
Just report bug to qemu-devel, if it's broken.
 
> 
> > But it's irrelevant, to the patch, magical IDs for socket/core/...whatever
> > should not be generated by libvirt anymore, but rather taken from QEMU for 
> > given
> > machine + -smp combination.  
> 
> Taken when? We can do this for running machines, but not for freshly 
> started ones, can we?

it can be used for freshly started as well,
QEMU -S -preconfig -M pc -smp ...
(QMP) query-hotpluggable-cpus
(QMP) 

Re:Re: [PATCH 0/3] Support network interface downscript

2020-05-22 Thread Chen Hanxiao



At 2020-05-22 23:09:21, "Michal Privoznik"  wrote:
>On 5/21/20 3:59 PM, Chen Hanxiao wrote:
>> QEMU has the ability to run a script when a NIC is brought up and down.
>> Libvirt only enables use of the up script at this time.
>> This series add support for postscript when NIC is down/detached.
>> 
>> Chen Hanxiao (3):
>>downscript: Support network interface downscript
>>downscript: add test case
>>doc: downscript: updating the documentation
>> 
>>   docs/formatdomain.html.in   |  6 ++-
>>   docs/schemas/domaincommon.rng   |  8 
>>   src/conf/domain_conf.c  |  9 
>>   src/conf/domain_conf.h  |  1 +
>>   src/qemu/qemu_extdevice.c   |  4 ++
>>   src/qemu/qemu_hotplug.c |  6 +++
>>   tests/qemuxml2argvdata/downscript.xml   | 60 +
>>   tests/qemuxml2xmloutdata/downscript.xml | 60 +
>>   tests/qemuxml2xmltest.c |  1 +
>>   9 files changed, 154 insertions(+), 1 deletion(-)
>>   create mode 100644 tests/qemuxml2argvdata/downscript.xml
>>   create mode 100644 tests/qemuxml2xmloutdata/downscript.xml
>> 
>
>I don't mean to be picky, especially with new contributors (well, you 
>have one contribution already, exactly one month ago). Anyway, we 
>usually structure patches differently. In the first patch we add XML 
>parsing, formatting, RNG change, documentation and xml2xml test case. In 
>the second the feature is implemented in the driver and xml2argv test 
>case is introduced. In the third patch the news.xml is adjusted.
>
>The reason is that this way it is easier for downstream maintainers to 
>backport patches.
>
>Mind posting a v2 which follows that? Apart from a small nit in 1/3 the 
>rest looks good.
>

I've contributed to libvirt for years(from another email), but did not send
patched for years either :).


I'll post a v2 to address all the comments aroud the weekends.


Regards,
- Hanxiao

Re: [PATCH 1/3] downscript: Support network interface downscript

2020-05-22 Thread Michal Privoznik

On 5/21/20 3:59 PM, Chen Hanxiao wrote:

https://gitlab.com/libvirt/libvirt/-/issues/13

Add support for downscript:


 
 
 


Signed-off-by: Chen Hanxiao 
---
  docs/schemas/domaincommon.rng | 8 
  src/conf/domain_conf.c| 9 +
  src/conf/domain_conf.h| 1 +
  src/qemu/qemu_extdevice.c | 4 
  src/qemu/qemu_hotplug.c   | 6 ++
  5 files changed, 28 insertions(+)

diff --git a/docs/schemas/domaincommon.rng b/docs/schemas/domaincommon.rng
index 9d60b090f3..6727cd743b 100644
--- a/docs/schemas/domaincommon.rng
+++ b/docs/schemas/domaincommon.rng
@@ -3191,6 +3191,14 @@

  

+  
+
+  
+
+  
+  
+
+  

  

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index c201fc901d..1406cf079e 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -2520,6 +2520,7 @@ virDomainNetDefClear(virDomainNetDefPtr def)
  VIR_FREE(def->teaming.persistent);
  VIR_FREE(def->virtPortProfile);
  VIR_FREE(def->script);
+VIR_FREE(def->downscript);
  VIR_FREE(def->domain_name);
  VIR_FREE(def->ifname);
  VIR_FREE(def->ifname_guest);
@@ -11977,6 +11978,7 @@ virDomainNetDefParseXML(virDomainXMLOptionPtr xmlopt,
  g_autofree char *ifname_guest = NULL;
  g_autofree char *ifname_guest_actual = NULL;
  g_autofree char *script = NULL;
+g_autofree char *downscript = NULL;
  g_autofree char *address = NULL;
  g_autofree char *port = NULL;
  g_autofree char *localaddr = NULL;
@@ -12149,6 +12151,9 @@ virDomainNetDefParseXML(virDomainXMLOptionPtr xmlopt,
  } else if (!script &&
 virXMLNodeNameEqual(cur, "script")) {
  script = virXMLPropString(cur, "path");
+} else if (!downscript &&
+   virXMLNodeNameEqual(cur, "downscript")) {
+downscript = virXMLPropString(cur, "path");
  } else if (!domain_name &&
 virXMLNodeNameEqual(cur, "backenddomain")) {
  domain_name = virXMLPropString(cur, "name");
@@ -12482,6 +12487,8 @@ virDomainNetDefParseXML(virDomainXMLOptionPtr xmlopt,
  
  if (script != NULL)

  def->script = g_steal_pointer(&script);
+if (downscript != NULL)
+def->downscript = g_steal_pointer(&downscript);
   if (domain_name != NULL)
   def->domain_name = g_steal_pointer(&domain_name);
  if (ifname != NULL)
@@ -26567,6 +26574,8 @@ virDomainNetDefFormat(virBufferPtr buf,
  
  virBufferEscapeString(buf, "\n",

def->script);
+virBufferEscapeString(buf, "\n",
+  def->downscript);
  virBufferEscapeString(buf, "\n", 
def->domain_name);
  
  if (def->ifname &&

diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index ddc75d8de2..e152c599ca 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -1055,6 +1055,7 @@ struct _virDomainNetDef {
  unsigned long sndbuf;
  } tune;
  char *script;
+char *downscript;
  char *domain_name; /* backend domain name */
  char *ifname; /* interface name on the host () */
  int managed_tap; /* enum virTristateBool - ABSENT == YES */
diff --git a/src/qemu/qemu_extdevice.c b/src/qemu/qemu_extdevice.c
index 2096272761..4962521de4 100644
--- a/src/qemu/qemu_extdevice.c
+++ b/src/qemu/qemu_extdevice.c
@@ -213,6 +213,7 @@ qemuExtDevicesStop(virQEMUDriverPtr driver,
 virDomainObjPtr vm)
  {
  virDomainDefPtr def = vm->def;
+virDomainNetType actualType;
  size_t i;
  
  if (qemuExtDevicesInitPaths(driver, def) < 0)

@@ -230,10 +231,13 @@ qemuExtDevicesStop(virQEMUDriverPtr driver,
  
  for (i = 0; i < def->nnets; i++) {

  virDomainNetDefPtr net = def->nets[i];
+actualType = virDomainNetGetActualType(net);
  qemuSlirpPtr slirp = QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp;
  
  if (slirp)

  qemuSlirpStop(slirp, vm, driver, net);
+if (actualType == VIR_DOMAIN_NET_TYPE_ETHERNET && net->downscript)
+virNetDevRunEthernetScript(net->ifname, net->downscript);
  }
  
  for (i = 0; i < def->nfss; i++) {

diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 5608566d69..97c6508a55 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -5942,6 +5942,12 @@ qemuDomainDetachDeviceLive(virDomainObjPtr vm,
  ret = qemuDomainRemoveDevice(driver, vm, &detach);
  }
  
+if ((virDomainDeviceType)match->type == VIR_DOMAIN_DEVICE_NET) {

+virDomainNetDefPtr net = detach.data.net;
+if (net->script)
+virNetDevRunEthernetScript(net->ifname, net->downscript);
+}


The fact that you need to typecast the comparison should have raised a 
red flag. But this is not placed correctly anyway. The way the device 
detach works is:


1) we send the detach 

Re: [PATCH 4/5] qemu: Prefer -numa cpu over -numa node,cpus=

2020-05-22 Thread Michal Privoznik

On 5/22/20 6:07 PM, Igor Mammedov wrote:

On Fri, 22 May 2020 16:14:14 +0200
Michal Privoznik  wrote:


QEMU is trying to obsolete -numa node,cpus= because that uses
ambiguous vCPU id to [socket, die, core, thread] mapping. The new
form is:

   -numa cpu,node-id=N,socket-id=S,die-id=D,core-id=C,thread-id=T

which is repeated for every vCPU and places it at [S, D, C, T]
into guest NUMA node N.

While in general this is magic mapping, we can deal with it.
Firstly, with QEMU 2.7 or newer, libvirt ensures that if topology
is given then maxvcpus must be sockets * dies * cores * threads
(i.e. there are no 'holes').
Secondly, if no topology is given then libvirt itself places each
vCPU into a different socket (basically, it fakes topology of:
[maxvcpus, 1, 1, 1])
Thirdly, we can copy whatever QEMU is doing when mapping vCPUs
onto topology, to make sure vCPUs don't start to move around.

Note, migration from old to new cmd line works and therefore
doesn't need any special handling.

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1678085

Signed-off-by: Michal Privoznik 
---
  src/qemu/qemu_command.c   | 108 +-
  .../hugepages-nvdimm.x86_64-latest.args   |   4 +-
  ...memory-default-hugepage.x86_64-latest.args |  10 +-
  .../memfd-memory-numa.x86_64-latest.args  |  10 +-
  ...y-hotplug-nvdimm-access.x86_64-latest.args |   4 +-
  ...ry-hotplug-nvdimm-align.x86_64-latest.args |   4 +-
  ...ry-hotplug-nvdimm-label.x86_64-latest.args |   4 +-
  ...ory-hotplug-nvdimm-pmem.x86_64-latest.args |   4 +-
  ...ory-hotplug-nvdimm-ppc64.ppc64-latest.args |   4 +-
  ...hotplug-nvdimm-readonly.x86_64-latest.args |   4 +-
  .../memory-hotplug-nvdimm.x86_64-latest.args  |   4 +-
  ...vhost-user-fs-fd-memory.x86_64-latest.args |   4 +-
  ...vhost-user-fs-hugepages.x86_64-latest.args |   4 +-
  ...host-user-gpu-secondary.x86_64-latest.args |   3 +-
  .../vhost-user-vga.x86_64-latest.args |   3 +-
  15 files changed, 158 insertions(+), 16 deletions(-)

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 7d84fd8b5e..0de4fe4905 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7079,6 +7079,91 @@ qemuBuildNumaOldCPUs(virBufferPtr buf,
  }
  
  
+/**

+ * qemuTranlsatevCPUID:
+ *
+ * For given vCPU @id and vCPU topology (@cpu) compute corresponding
+ * @socket, @die, @core and @thread). This assumes linear topology,
+ * that is every [socket, die, core, thread] combination is valid vCPU
+ * ID and there are no 'holes'. This is ensured by
+ * qemuValidateDomainDef() if QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS is
+ * set.

I wouldn't make this assumption, each machine can have (and has) its own 
layout,
and now it's not hard to change that per machine version if necessary.

I'd suppose one could pull the list of possible CPUs from QEMU started
in preconfig mode with desired -smp x,y,z using QUERY_HOTPLUGGABLE_CPUS
and then continue to configure numa with QMP commands using provided
CPUs layout.


Continue where? At the 'preconfig mode' the guest is already started, 
isn't it? Are you suggesting that libvirt starts a dummy QEMU process, 
fetches the CPU topology from it an then starts if for real? Libvirt 
tries to avoid that as much as it can.




How to present it to libvirt user I'm not sure (give them that list perhaps
and let select from it???)


This is what I am trying to figure out in the cover letter. Maybe we 
need to let users configure the topology (well, vCPU id to [socket, die, 
core, thread] mapping), but then again, in my testing the guest ignored 
that and displayed different topology (true, I was testing with -cpu 
host, so maybe that's why).



But it's irrelevant, to the patch, magical IDs for socket/core/...whatever
should not be generated by libvirt anymore, but rather taken from QEMU for given
machine + -smp combination.


Taken when? We can do this for running machines, but not for freshly 
started ones, can we?


Michal



Re: [PATCH 4/5] qemu: Prefer -numa cpu over -numa node,cpus=

2020-05-22 Thread Igor Mammedov
On Fri, 22 May 2020 16:14:14 +0200
Michal Privoznik  wrote:

> QEMU is trying to obsolete -numa node,cpus= because that uses
> ambiguous vCPU id to [socket, die, core, thread] mapping. The new
> form is:
> 
>   -numa cpu,node-id=N,socket-id=S,die-id=D,core-id=C,thread-id=T
> 
> which is repeated for every vCPU and places it at [S, D, C, T]
> into guest NUMA node N.
> 
> While in general this is magic mapping, we can deal with it.
> Firstly, with QEMU 2.7 or newer, libvirt ensures that if topology
> is given then maxvcpus must be sockets * dies * cores * threads
> (i.e. there are no 'holes').
> Secondly, if no topology is given then libvirt itself places each
> vCPU into a different socket (basically, it fakes topology of:
> [maxvcpus, 1, 1, 1])
> Thirdly, we can copy whatever QEMU is doing when mapping vCPUs
> onto topology, to make sure vCPUs don't start to move around.
> 
> Note, migration from old to new cmd line works and therefore
> doesn't need any special handling.
> 
> Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1678085
> 
> Signed-off-by: Michal Privoznik 
> ---
>  src/qemu/qemu_command.c   | 108 +-
>  .../hugepages-nvdimm.x86_64-latest.args   |   4 +-
>  ...memory-default-hugepage.x86_64-latest.args |  10 +-
>  .../memfd-memory-numa.x86_64-latest.args  |  10 +-
>  ...y-hotplug-nvdimm-access.x86_64-latest.args |   4 +-
>  ...ry-hotplug-nvdimm-align.x86_64-latest.args |   4 +-
>  ...ry-hotplug-nvdimm-label.x86_64-latest.args |   4 +-
>  ...ory-hotplug-nvdimm-pmem.x86_64-latest.args |   4 +-
>  ...ory-hotplug-nvdimm-ppc64.ppc64-latest.args |   4 +-
>  ...hotplug-nvdimm-readonly.x86_64-latest.args |   4 +-
>  .../memory-hotplug-nvdimm.x86_64-latest.args  |   4 +-
>  ...vhost-user-fs-fd-memory.x86_64-latest.args |   4 +-
>  ...vhost-user-fs-hugepages.x86_64-latest.args |   4 +-
>  ...host-user-gpu-secondary.x86_64-latest.args |   3 +-
>  .../vhost-user-vga.x86_64-latest.args |   3 +-
>  15 files changed, 158 insertions(+), 16 deletions(-)
> 
> diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
> index 7d84fd8b5e..0de4fe4905 100644
> --- a/src/qemu/qemu_command.c
> +++ b/src/qemu/qemu_command.c
> @@ -7079,6 +7079,91 @@ qemuBuildNumaOldCPUs(virBufferPtr buf,
>  }
>  
>  
> +/**
> + * qemuTranlsatevCPUID:
> + *
> + * For given vCPU @id and vCPU topology (@cpu) compute corresponding
> + * @socket, @die, @core and @thread). This assumes linear topology,
> + * that is every [socket, die, core, thread] combination is valid vCPU
> + * ID and there are no 'holes'. This is ensured by
> + * qemuValidateDomainDef() if QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS is
> + * set.
I wouldn't make this assumption, each machine can have (and has) its own 
layout,
and now it's not hard to change that per machine version if necessary.

I'd suppose one could pull the list of possible CPUs from QEMU started
in preconfig mode with desired -smp x,y,z using QUERY_HOTPLUGGABLE_CPUS
and then continue to configure numa with QMP commands using provided
CPUs layout.

How to present it to libvirt user I'm not sure (give them that list perhaps
and let select from it???)
But it's irrelevant, to the patch, magical IDs for socket/core/...whatever
should not be generated by libvirt anymore, but rather taken from QEMU for given
machine + -smp combination.

CCing Peter,
 as I vaguely recall him working on this issue  (preconfig + numa over QMP)


> + * Moreover, if @diesSupported is false (QEMU lacks
> + * QEMU_CAPS_SMP_DIES) then @die is set to zero and @socket is
> + * computed without taking numbed of dies into account.
> + *
> + * The algorithm is shamelessly copied over from QEMU's
> + * x86_topo_ids_from_idx() and its history (before introducing dies).
> + */
> +static void
> +qemuTranlsatevCPUID(unsigned int id,
> +bool diesSupported,
> +virCPUDefPtr cpu,
> +unsigned int *socket,
> +unsigned int *die,
> +unsigned int *core,
> +unsigned int *thread)
> +{
> +if (cpu && cpu->sockets) {
> +*thread = id % cpu->threads;
> +*core = id / cpu->threads % cpu->cores;
> +if (diesSupported) {
> +*die = id / (cpu->cores * cpu->threads) % cpu->dies;
> +*socket = id / (cpu->dies * cpu->cores * cpu->threads);
> +} else {
> +*die = 0;
> +*socket = id / (cpu->cores * cpu->threads) % cpu->sockets;
> +}
> +} else {
> +/* If no topology was provided, then qemuBuildSmpCommandLine()
> + * puts all vCPUs into a separate socket. */
> +*thread = 0;
> +*core = 0;
> +*die = 0;
> +*socket = id;
> +}
> +}
> +
> +
> +static void
> +qemuBuildNumaNewCPUs(virCommandPtr cmd,
> + virCPUDefPtr cpu,
> + virBitmapPtr cpumask,
> + size_t nodeid,
> + virQEMUCapsPtr 

Re: [PATCH 0/3] Support network interface downscript

2020-05-22 Thread Michal Privoznik

On 5/21/20 3:59 PM, Chen Hanxiao wrote:

QEMU has the ability to run a script when a NIC is brought up and down.
Libvirt only enables use of the up script at this time.
This series add support for postscript when NIC is down/detached.

Chen Hanxiao (3):
   downscript: Support network interface downscript
   downscript: add test case
   doc: downscript: updating the documentation

  docs/formatdomain.html.in   |  6 ++-
  docs/schemas/domaincommon.rng   |  8 
  src/conf/domain_conf.c  |  9 
  src/conf/domain_conf.h  |  1 +
  src/qemu/qemu_extdevice.c   |  4 ++
  src/qemu/qemu_hotplug.c |  6 +++
  tests/qemuxml2argvdata/downscript.xml   | 60 +
  tests/qemuxml2xmloutdata/downscript.xml | 60 +
  tests/qemuxml2xmltest.c |  1 +
  9 files changed, 154 insertions(+), 1 deletion(-)
  create mode 100644 tests/qemuxml2argvdata/downscript.xml
  create mode 100644 tests/qemuxml2xmloutdata/downscript.xml



I don't mean to be picky, especially with new contributors (well, you 
have one contribution already, exactly one month ago). Anyway, we 
usually structure patches differently. In the first patch we add XML 
parsing, formatting, RNG change, documentation and xml2xml test case. In 
the second the feature is implemented in the driver and xml2argv test 
case is introduced. In the third patch the news.xml is adjusted.


The reason is that this way it is easier for downstream maintainers to 
backport patches.


Mind posting a v2 which follows that? Apart from a small nit in 1/3 the 
rest looks good.


Michal



Re: [PATCH] docs: Fix a typo in formatdomain.html

2020-05-22 Thread Michal Privoznik

On 5/22/20 12:25 PM, Han Han wrote:

Signed-off-by: Han Han 
---
  docs/formatdomain.html.in | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)


Reviewed-by: Michal Privoznik 

And pushed.

Michal



[PATCH 4/5] qemu: Prefer -numa cpu over -numa node,cpus=

2020-05-22 Thread Michal Privoznik
QEMU is trying to obsolete -numa node,cpus= because that uses
ambiguous vCPU id to [socket, die, core, thread] mapping. The new
form is:

  -numa cpu,node-id=N,socket-id=S,die-id=D,core-id=C,thread-id=T

which is repeated for every vCPU and places it at [S, D, C, T]
into guest NUMA node N.

While in general this is magic mapping, we can deal with it.
Firstly, with QEMU 2.7 or newer, libvirt ensures that if topology
is given then maxvcpus must be sockets * dies * cores * threads
(i.e. there are no 'holes').
Secondly, if no topology is given then libvirt itself places each
vCPU into a different socket (basically, it fakes topology of:
[maxvcpus, 1, 1, 1])
Thirdly, we can copy whatever QEMU is doing when mapping vCPUs
onto topology, to make sure vCPUs don't start to move around.

Note, migration from old to new cmd line works and therefore
doesn't need any special handling.

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1678085

Signed-off-by: Michal Privoznik 
---
 src/qemu/qemu_command.c   | 108 +-
 .../hugepages-nvdimm.x86_64-latest.args   |   4 +-
 ...memory-default-hugepage.x86_64-latest.args |  10 +-
 .../memfd-memory-numa.x86_64-latest.args  |  10 +-
 ...y-hotplug-nvdimm-access.x86_64-latest.args |   4 +-
 ...ry-hotplug-nvdimm-align.x86_64-latest.args |   4 +-
 ...ry-hotplug-nvdimm-label.x86_64-latest.args |   4 +-
 ...ory-hotplug-nvdimm-pmem.x86_64-latest.args |   4 +-
 ...ory-hotplug-nvdimm-ppc64.ppc64-latest.args |   4 +-
 ...hotplug-nvdimm-readonly.x86_64-latest.args |   4 +-
 .../memory-hotplug-nvdimm.x86_64-latest.args  |   4 +-
 ...vhost-user-fs-fd-memory.x86_64-latest.args |   4 +-
 ...vhost-user-fs-hugepages.x86_64-latest.args |   4 +-
 ...host-user-gpu-secondary.x86_64-latest.args |   3 +-
 .../vhost-user-vga.x86_64-latest.args |   3 +-
 15 files changed, 158 insertions(+), 16 deletions(-)

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 7d84fd8b5e..0de4fe4905 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7079,6 +7079,91 @@ qemuBuildNumaOldCPUs(virBufferPtr buf,
 }
 
 
+/**
+ * qemuTranlsatevCPUID:
+ *
+ * For given vCPU @id and vCPU topology (@cpu) compute corresponding
+ * @socket, @die, @core and @thread). This assumes linear topology,
+ * that is every [socket, die, core, thread] combination is valid vCPU
+ * ID and there are no 'holes'. This is ensured by
+ * qemuValidateDomainDef() if QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS is
+ * set.
+ *
+ * Moreover, if @diesSupported is false (QEMU lacks
+ * QEMU_CAPS_SMP_DIES) then @die is set to zero and @socket is
+ * computed without taking numbed of dies into account.
+ *
+ * The algorithm is shamelessly copied over from QEMU's
+ * x86_topo_ids_from_idx() and its history (before introducing dies).
+ */
+static void
+qemuTranlsatevCPUID(unsigned int id,
+bool diesSupported,
+virCPUDefPtr cpu,
+unsigned int *socket,
+unsigned int *die,
+unsigned int *core,
+unsigned int *thread)
+{
+if (cpu && cpu->sockets) {
+*thread = id % cpu->threads;
+*core = id / cpu->threads % cpu->cores;
+if (diesSupported) {
+*die = id / (cpu->cores * cpu->threads) % cpu->dies;
+*socket = id / (cpu->dies * cpu->cores * cpu->threads);
+} else {
+*die = 0;
+*socket = id / (cpu->cores * cpu->threads) % cpu->sockets;
+}
+} else {
+/* If no topology was provided, then qemuBuildSmpCommandLine()
+ * puts all vCPUs into a separate socket. */
+*thread = 0;
+*core = 0;
+*die = 0;
+*socket = id;
+}
+}
+
+
+static void
+qemuBuildNumaNewCPUs(virCommandPtr cmd,
+ virCPUDefPtr cpu,
+ virBitmapPtr cpumask,
+ size_t nodeid,
+ virQEMUCapsPtr qemuCaps)
+{
+const bool diesSupported = virQEMUCapsGet(qemuCaps, QEMU_CAPS_SMP_DIES);
+ssize_t vcpuid = -1;
+
+while ((vcpuid = virBitmapNextSetBit(cpumask, vcpuid)) >= 0) {
+unsigned int socket;
+unsigned int die;
+unsigned int core;
+unsigned int thread;
+
+qemuTranlsatevCPUID(vcpuid, diesSupported, cpu,
+, , , );
+
+virCommandAddArg(cmd, "-numa");
+
+/* The simple fact that dies are supported by QEMU doesn't mean we can
+ * put it onto command line. QEMU will accept die-id only if -smp dies
+ * was set to a value greater than 1. On the other hand, this allows us
+ * to generate shorter command line. */
+if (diesSupported && cpu && cpu->dies > 1) {
+virCommandAddArgFormat(cmd,
+   
"cpu,node-id=%zu,socket-id=%u,die-id=%u,core-id=%u,thread-id=%u",
+   nodeid, socket, die, core, thread);
+} else {
+   

[PATCH 3/5] qemuBuildNumaArgStr: Separate out old style of building CPU list

2020-05-22 Thread Michal Privoznik
Signed-off-by: Michal Privoznik 
---
 src/qemu/qemu_command.c | 38 +-
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 5a438d07c3..7d84fd8b5e 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7057,6 +7057,28 @@ qemuBuildIOThreadCommandLine(virCommandPtr cmd,
 }
 
 
+static int
+qemuBuildNumaOldCPUs(virBufferPtr buf,
+ virBitmapPtr cpu)
+{
+g_autofree char *cpumask = NULL;
+char *tmpmask = NULL;
+char *next = NULL;
+
+if (!(cpumask = virBitmapFormat(cpu)))
+return -1;
+
+for (tmpmask = cpumask; tmpmask; tmpmask = next) {
+if ((next = strchr(tmpmask, ',')))
+*(next++) = '\0';
+virBufferAddLit(buf, ",cpus=");
+virBufferAdd(buf, tmpmask, -1);
+}
+
+return 0;
+}
+
+
 static int
 qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 virDomainDefPtr def,
@@ -7109,13 +7131,6 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 goto cleanup;
 
 for (i = 0; i < ncells; i++) {
-g_autofree char *cpumask = NULL;
-char *tmpmask = NULL;
-char *next = NULL;
-
-if (!(cpumask = virBitmapFormat(virDomainNumaGetNodeCpumask(def->numa, 
i
-goto cleanup;
-
 if (needBackend) {
 virCommandAddArg(cmd, "-object");
 virCommandAddArgBuffer(cmd, [i]);
@@ -7124,12 +7139,9 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 virCommandAddArg(cmd, "-numa");
 virBufferAsprintf(, "node,nodeid=%zu", i);
 
-for (tmpmask = cpumask; tmpmask; tmpmask = next) {
-if ((next = strchr(tmpmask, ',')))
-*(next++) = '\0';
-virBufferAddLit(, ",cpus=");
-virBufferAdd(, tmpmask, -1);
-}
+if (qemuBuildNumaOldCPUs(,
+ virDomainNumaGetNodeCpumask(def->numa, i)) < 
0)
+goto cleanup;
 
 if (needBackend)
 virBufferAsprintf(, ",memdev=ram-node%zu", i);
-- 
2.26.2



[PATCH 1/5] qemuBuildNumaArgStr: Move vars into loops

2020-05-22 Thread Michal Privoznik
There are couple of variables that are used only in a single
loop. Move their definitions into their respective loops and use
g_autofree on @cpumask.

Signed-off-by: Michal Privoznik 
---
 src/qemu/qemu_command.c | 12 +---
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 419eca5675..5af22e9359 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7066,12 +7066,8 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 size_t i, j;
 virQEMUCapsPtr qemuCaps = priv->qemuCaps;
 g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
-char *cpumask = NULL;
-char *tmpmask = NULL;
-char *next = NULL;
 virBufferPtr nodeBackends = NULL;
 bool needBackend = false;
-int rc;
 int ret = -1;
 size_t ncells = virDomainNumaGetNodeCount(def->numa);
 
@@ -7091,6 +7087,7 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_MEMORY_RAM) ||
 virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_MEMORY_FILE) ||
 virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_MEMORY_MEMFD)) {
+int rc;
 
 for (i = 0; i < ncells; i++) {
 if ((rc = qemuBuildMemoryCellBackendStr(def, cfg, i, priv,
@@ -7112,7 +7109,10 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 goto cleanup;
 
 for (i = 0; i < ncells; i++) {
-VIR_FREE(cpumask);
+g_autofree char *cpumask = NULL;
+char *tmpmask = NULL;
+char *next = NULL;
+
 if (!(cpumask = virBitmapFormat(virDomainNumaGetNodeCpumask(def->numa, 
i
 goto cleanup;
 
@@ -7159,8 +7159,6 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 ret = 0;
 
  cleanup:
-VIR_FREE(cpumask);
-
 if (nodeBackends) {
 for (i = 0; i < ncells; i++)
 virBufferFreeAndReset([i]);
-- 
2.26.2



[PATCH 2/5] qemuBuildNumaArgStr: Use g_autofree on @nodeBackends

2020-05-22 Thread Michal Privoznik
Signed-off-by: Michal Privoznik 
---
 src/qemu/qemu_command.c | 14 +-
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 5af22e9359..5a438d07c3 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7066,13 +7066,13 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 size_t i, j;
 virQEMUCapsPtr qemuCaps = priv->qemuCaps;
 g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
-virBufferPtr nodeBackends = NULL;
+g_autofree virBufferPtr nodeBackends = NULL;
 bool needBackend = false;
 int ret = -1;
 size_t ncells = virDomainNumaGetNodeCount(def->numa);
 
 if (!virDomainNumatuneNodesetIsAvailable(def->numa, priv->autoNodeset))
-goto cleanup;
+return -1;
 
 if (!virQEMUCapsGetMachineNumaMemSupported(qemuCaps,
def->virtType,
@@ -7080,7 +7080,7 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 needBackend = true;
 
 if (VIR_ALLOC_N(nodeBackends, ncells) < 0)
-goto cleanup;
+return -1;
 
 /* using of -numa memdev= cannot be combined with -numa mem=, thus we
  * need to check which approach to use */
@@ -7159,12 +7159,8 @@ qemuBuildNumaArgStr(virQEMUDriverConfigPtr cfg,
 ret = 0;
 
  cleanup:
-if (nodeBackends) {
-for (i = 0; i < ncells; i++)
-virBufferFreeAndReset([i]);
-
-VIR_FREE(nodeBackends);
-}
+for (i = 0; i < ncells; i++)
+virBufferFreeAndReset([i]);
 
 return ret;
 }
-- 
2.26.2



[PATCH 5/5] virCPUDefParseXML: Parse uint using virXPathUInt

2020-05-22 Thread Michal Privoznik
Signed-off-by: Michal Privoznik 
---
 src/conf/cpu_conf.c | 14 --
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/src/conf/cpu_conf.c b/src/conf/cpu_conf.c
index 07404c6fb0..2991ab8204 100644
--- a/src/conf/cpu_conf.c
+++ b/src/conf/cpu_conf.c
@@ -527,39 +527,33 @@ virCPUDefParseXML(xmlXPathContextPtr ctxt,
 }
 
 if (virXPathNode("./topology[1]", ctxt)) {
-unsigned long ul;
-
-if (virXPathULong("string(./topology[1]/@sockets)", ctxt, ) < 0) {
+if (virXPathUInt("string(./topology[1]/@sockets)", ctxt, 
>sockets) < 0) {
 virReportError(VIR_ERR_XML_ERROR, "%s",
_("Missing 'sockets' attribute in CPU topology"));
 goto cleanup;
 }
-def->sockets = (unsigned int) ul;
 
 if (virXPathNode("./topology[1]/@dies", ctxt)) {
-if (virXPathULong("string(./topology[1]/@dies)", ctxt, ) < 0) {
+if (virXPathUInt("string(./topology[1]/@dies)", ctxt, >dies) 
< 0) {
 virReportError(VIR_ERR_XML_ERROR, "%s",
_("Malformed 'dies' attribute in CPU 
topology"));
 goto cleanup;
 }
-def->dies = (unsigned int) ul;
 } else {
 def->dies = 1;
 }
 
-if (virXPathULong("string(./topology[1]/@cores)", ctxt, ) < 0) {
+if (virXPathUInt("string(./topology[1]/@cores)", ctxt, >cores) < 
0) {
 virReportError(VIR_ERR_XML_ERROR, "%s",
_("Missing 'cores' attribute in CPU topology"));
 goto cleanup;
 }
-def->cores = (unsigned int) ul;
 
-if (virXPathULong("string(./topology[1]/@threads)", ctxt, ) < 0) {
+if (virXPathUInt("string(./topology[1]/@threads)", ctxt, 
>threads) < 0) {
 virReportError(VIR_ERR_XML_ERROR, "%s",
_("Missing 'threads' attribute in CPU topology"));
 goto cleanup;
 }
-def->threads = (unsigned int) ul;
 
 if (!def->sockets || !def->cores || !def->threads || !def->dies) {
 virReportError(VIR_ERR_XML_ERROR, "%s",
-- 
2.26.2



[PATCH 0/5] qemu: Prefer -numa cpu over -numa node,cpus=

2020-05-22 Thread Michal Privoznik
Another round of patches to catch up with the latest command line
building recommendations. This time, the winner is: -numa cpu

Thing is, the way vCPUs are assigned to NUMA nodes is not very
deterministic right now. QEMU relies on this weird mapping where firstly
threads are iterated over, then cores, then (possibly) dies and then
sockets. So they came up with a better schema:

  -numa cpu,node-id=N,socket-id=S,die-id=D,core-id=C,thread-id=T

where everything is specified on the command line. Long story short,
some QEMU code is copied over to keep status quo (or, whatever you
want). It was buried deep down (include/hw/i386/topology.h). I wanted to
allow users to configure the vCPU topology too, but then decided not to
because neither Libvirt nor QEMU can really guarantee the topology will
look the same in the guest (at least in my testing with cpu
mode='host-passthrough' it didn't). Or, what you're proposing? I can
post them as a follow up if needed.

Michal Prívozník (5):
  qemuBuildNumaArgStr: Move vars into loops
  qemuBuildNumaArgStr: Use g_autofree on @nodeBackends
  qemuBuildNumaArgStr: Separate out old style of building CPU list
  qemu: Prefer -numa cpu over -numa node,cpus=
  virCPUDefParseXML: Parse uint using virXPathUInt

 src/conf/cpu_conf.c   |  14 +-
 src/qemu/qemu_command.c   | 168 +++---
 .../hugepages-nvdimm.x86_64-latest.args   |   4 +-
 ...memory-default-hugepage.x86_64-latest.args |  10 +-
 .../memfd-memory-numa.x86_64-latest.args  |  10 +-
 ...y-hotplug-nvdimm-access.x86_64-latest.args |   4 +-
 ...ry-hotplug-nvdimm-align.x86_64-latest.args |   4 +-
 ...ry-hotplug-nvdimm-label.x86_64-latest.args |   4 +-
 ...ory-hotplug-nvdimm-pmem.x86_64-latest.args |   4 +-
 ...ory-hotplug-nvdimm-ppc64.ppc64-latest.args |   4 +-
 ...hotplug-nvdimm-readonly.x86_64-latest.args |   4 +-
 .../memory-hotplug-nvdimm.x86_64-latest.args  |   4 +-
 ...vhost-user-fs-fd-memory.x86_64-latest.args |   4 +-
 ...vhost-user-fs-hugepages.x86_64-latest.args |   4 +-
 ...host-user-gpu-secondary.x86_64-latest.args |   3 +-
 .../vhost-user-vga.x86_64-latest.args |   3 +-
 16 files changed, 195 insertions(+), 53 deletions(-)

-- 
2.26.2



[libvirt] Request for a new release of libvirt-java in Maven

2020-05-22 Thread Slavka Peleva
Hello,

The Apache CloudStack project is using Maven libvirt java bindings in KVM
code. In libvirt-java project were added qemu methods that will prevent the
usage of bash scripts or Process class methods. It will be great to use
those features in CloudStack.

From what I see, the previous release was requested in this list, and there
doesn’t seem to be a formal procedure for this, so how should I proceed?

Best regards and thank you in advance,

Slavka


Re: [libvirt PATCH 0/2] Fix some nodedev documentation

2020-05-22 Thread Erik Skultety
On Thu, May 21, 2020 at 03:09:33PM -0500, Jonathon Jongsma wrote:
> In response to a comment by Daniel Berrange about documentation on my previous
> patch series, I noticed that the node device xml format documentation appears
> to be split between two different pages. It seems better to document the 
> schema
> fully on the formatdomain page and refer to it from the other page. Another
> small patch fixes a documentation error.
>
> Jonathon Jongsma (2):
>   docs: Fix names for PF and VF PCI device capabilities
>   docs: Document full node device xml in formatnode.html.in
>
>  docs/drvnodedev.html.in | 127 
>  docs/formatnode.html.in |  74 +--
>  2 files changed, 83 insertions(+), 118 deletions(-)

I pushed it with the following squashed to the second patch:

diff --git a/docs/drvnodedev.html.in b/docs/drvnodedev.html.in
index 293450ebb4..e28af4e737 100644
--- a/docs/drvnodedev.html.in
+++ b/docs/drvnodedev.html.in
@@ -143,11 +143,12 @@
   A PCI device capable of creating mediated devices will include a nested
   capability mdev_types which enumerates all supported mdev
   types on the physical device, along with the type attributes available
-  through sysfs. A detailed description of the XML format for the mdev
-  capability can be found here.
+  through sysfs. A detailed description of the XML format for the
+  mdev_types capability can be found
+  here.
 
 
-  The following example shows how we might represent an Nvidia GPU device
+  The following example shows how we might represent an NVIDIA GPU device
   that supports mediated devices. See below for more
   information about mediated devices.
 
@@ -183,8 +184,8 @@
   only appear on the mdev virtual bus. Therefore, no detach/reattach
   procedure from/to the host driver procedure is involved even though
   mediated devices are used in a direct device assignment manner.  A
-  detailed description of the XML format for the mdev capability can be
-  found here.
+  detailed description of the XML format for the mdev
+  capability can be found here.
 

 Example of a mediated device

Erik



[PATCH] docs: Fix a typo in formatdomain.html

2020-05-22 Thread Han Han
Signed-off-by: Han Han 
---
 docs/formatdomain.html.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/formatdomain.html.in b/docs/formatdomain.html.in
index 23eb0292..05a1ed45 100644
--- a/docs/formatdomain.html.in
+++ b/docs/formatdomain.html.in
@@ -7039,7 +7039,7 @@ qemu-kvm -net nic,model=? /dev/null
   input type='keyboard' bus='virtio'/
   input type='tablet' bus='virtio'/
   input type='passthrough' bus='virtio'
-source evdev='/dev/input/event1/
+source evdev='/dev/input/event1'/
   /input
 /devices
 ...
-- 
2.25.0



Re: [libvirt PATCH 2/2] docs: Document full node device xml in formatnode.html.in

2020-05-22 Thread Erik Skultety
On Thu, May 21, 2020 at 03:09:35PM -0500, Jonathon Jongsma wrote:
> Some of the node device xml schema was documented in drvnodedev.html.in
> rather than in formatnode.html.in. Move all of the schema documentation
> to formatnode.html.in and provide reference links from the
> drvnodedev.html.in page.
>
> Signed-off-by: Jonathon Jongsma 
> ---
...

> -  For a more info about mediated devices, refer to the
> -  paragraph below.
> +  The following example shows how we might represent an Nvidia GPU device

The official spelling is NVIDIA (although Nvidia seems to be widely accepted as
well)

...

> +  that supports mediated devices. See below for more
> +  information about mediated devices.
>  
>
>  
> @@ -262,32 +182,11 @@
>as multiple separate PCIe devices on the host's PCI bus, mediated 
> devices
>only appear on the mdev virtual bus. Therefore, no detach/reattach
>procedure from/to the host driver procedure is involved even though
> -  mediated devices are used in a direct device assignment manner.
> +  mediated devices are used in a direct device assignment manner.  A
> +  detailed description of the XML format for the mdev capability can be

s/mdev/mdev/

Reviewed-by: Erik Skultety 



[libvirt PATCH] docs: drvnodedev: Fix a few closing XML elements in the examples

2020-05-22 Thread Erik Skultety
Signed-off-by: Erik Skultety 
---

Pushed as trivial.

 docs/drvnodedev.html.in | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/drvnodedev.html.in b/docs/drvnodedev.html.in
index 71a8a57b0c..a9c86f9fc7 100644
--- a/docs/drvnodedev.html.in
+++ b/docs/drvnodedev.html.in
@@ -184,7 +184,7 @@
 address domain='0x' bus='0x04' slot='0x00' function='0x0'/
   /capability
 ...
-device
+/device

 MDEV capability
 
@@ -300,8 +300,8 @@
   capability type='mdev'
 type id='nvidia-11'/
 iommuGroup number='12'/
-  capability/
-device/
+  /capability
+/device

 
   The support of mediated device's framework in libvirt's node device 
driver
--
2.25.4



Re: [PATCH v5 00/10] Introducing TPM Proxy device support for PPC64

2020-05-22 Thread Satheesh Rajendran
On Thu, May 21, 2020 at 10:07:20AM -0300, Daniel Henrique Barboza wrote:
> changes in v5:
> - rebased and fixed commits to master at d265171b5784
> - moved two TPMs validation from domain_conf.c (patch 05) to
>   qemu_domain.c (patch 06)
> 
> Gitlab tree: https://gitlab.com/danielhb/libvirt/tree/spapr_tpm_proxy_v5
> 

Tested-by: Satheesh Rajendran 


libvirt xml:
...

  

  
  

...





...

$ cat /home/sath/tpm_events_log/tpm_events
spapr_tpm_execute
spapr_h_tpm_comm

qemu log, trace outputs:
...
4359@1590140133.539001:spapr_tpm_execute data_in=0x2ff4, data_in_sz=63, 
data_out=0x2ff4, data_out_sz=4096
4359@1590140133.556488:spapr_h_tpm_comm tpm_device_path=/dev/tpm0 operation=0x1
..
..
4359@1590140133.576494:spapr_tpm_execute data_in=0x2ff4, data_in_sz=14, 
data_out=0x2ff4, data_out_sz=4096
4359@1590140133.585730:spapr_h_tpm_comm tpm_device_path=/dev/tpm0 operation=0x1
...
4359@1590140133.585740:spapr_tpm_execute data_in=0x2ff4, data_in_sz=86, 
data_out=0x2ff4, data_out_sz=4096
4359@1590140133.602373:spapr_h_tpm_comm tpm_device_path=/dev/tpm0 operation=0x1
...
---

Regards,
-Satheesh.

> v4 link: https://www.redhat.com/archives/libvir-list/2020-May/msg00814.html
> v3 link: https://www.redhat.com/archives/libvir-list/2020-May/msg00642.html
> v2 link: https://www.redhat.com/archives/libvir-list/2020-May/msg00604.html
> v1 link: https://www.redhat.com/archives/libvir-list/2020-May/msg00604.html
> 
> Daniel Henrique Barboza (10):
>   docs: documentation and schema for the new TPM Proxy model
>   qemu: Extend QEMU capabilities with 'spapr-tpm-proxy'
>   qemu_extdevice.c: remove unneeded 'ret' variable
>   qemu_tpm, security, tests: change 'switch' clauses for 'if'
>   conf, qemu, security, tests: introducing 'def->tpms' array
>   qemu: add validations after TPM Proxy model introduction
>   tests: add XML schema tests for the TPM Proxy device
>   qemu: build command line for the TPM Proxy device
>   tests/qemuxml2argvtest.c: add TPM Proxy command line tests
>   docs/news.xml: update for the new TPM Proxy device
> 
>  docs/formatdomain.html.in | 19 -
>  docs/news.xml | 17 +
>  docs/schemas/domaincommon.rng |  1 +
>  src/conf/domain_audit.c   |  4 +-
>  src/conf/domain_conf.c| 50 +++-
>  src/conf/domain_conf.h|  6 +-
>  src/qemu/qemu_alias.c |  9 ++-
>  src/qemu/qemu_capabilities.c  |  4 +
>  src/qemu/qemu_capabilities.h  |  3 +
>  src/qemu/qemu_cgroup.c| 10 ++-
>  src/qemu/qemu_command.c   | 59 +++---
>  src/qemu/qemu_domain.c| 68 ++---
>  src/qemu/qemu_domain_address.c| 11 ++-
>  src/qemu/qemu_extdevice.c | 24 +++---
>  src/qemu/qemu_tpm.c   | 76 +--
>  src/qemu/qemu_validate.c  | 19 +
>  src/security/security_dac.c   |  8 +-
>  src/security/security_selinux.c   | 44 +--
>  src/security/virt-aa-helper.c | 14 ++--
>  .../qemucapabilitiesdata/caps_4.2.0.ppc64.xml |  1 +
>  .../qemucapabilitiesdata/caps_5.0.0.ppc64.xml |  1 +
>  tests/qemuxml2argvdata/ppc64-tpm-double.xml   | 34 +
>  .../ppc64-tpmproxy-double.xml | 38 ++
>  .../ppc64-tpmproxy-single.ppc64-latest.args   | 34 +
>  .../ppc64-tpmproxy-single.xml | 33 
>  .../ppc64-tpmproxy-with-tpm.ppc64-latest.args | 37 +
>  .../ppc64-tpmproxy-with-tpm.xml   | 36 +
>  tests/qemuxml2argvtest.c  | 33 +---
>  .../ppc64-tpmproxy-single.ppc64-latest.xml| 42 ++
>  .../ppc64-tpmproxy-with-tpm.ppc64-latest.xml  | 46 +++
>  tests/qemuxml2xmltest.c   |  2 +
>  31 files changed, 631 insertions(+), 152 deletions(-)
>  create mode 100644 tests/qemuxml2argvdata/ppc64-tpm-double.xml
>  create mode 100644 tests/qemuxml2argvdata/ppc64-tpmproxy-double.xml
>  create mode 100644 
> tests/qemuxml2argvdata/ppc64-tpmproxy-single.ppc64-latest.args
>  create mode 100644 tests/qemuxml2argvdata/ppc64-tpmproxy-single.xml
>  create mode 100644 
> tests/qemuxml2argvdata/ppc64-tpmproxy-with-tpm.ppc64-latest.args
>  create mode 100644 tests/qemuxml2argvdata/ppc64-tpmproxy-with-tpm.xml
>  create mode 100644 
> tests/qemuxml2xmloutdata/ppc64-tpmproxy-single.ppc64-latest.xml
>  create mode 100644 
> tests/qemuxml2xmloutdata/ppc64-tpmproxy-with-tpm.ppc64-latest.xml
> 
> -- 
> 2.26.2
> 



Re: [libvirt PATCH 4/4] scripts: emit raw enum value in API build description

2020-05-22 Thread Michal Privoznik

On 5/22/20 11:07 AM, Daniel P. Berrangé wrote:

output.write(" raw='%s'" % escape(rawValue))


For the whole series:

Reviewed-by: Michal Privoznik 

Michal



[libvirt PATCH v2 5/6] hostcpu: Implement virHostCPUGetSignature for s390

2020-05-22 Thread Jiri Denemark
Signed-off-by: Jiri Denemark 
Reviewed-by: Ján Tomko 
---
 src/util/virhostcpu.c | 21 ++-
 .../linux-s390x-with-frequency.signature  |  1 +
 2 files changed, 21 insertions(+), 1 deletion(-)
 create mode 100644 tests/virhostcpudata/linux-s390x-with-frequency.signature

diff --git a/src/util/virhostcpu.c b/src/util/virhostcpu.c
index a25a4fa333..39bbcf8ca8 100644
--- a/src/util/virhostcpu.c
+++ b/src/util/virhostcpu.c
@@ -1430,8 +1430,10 @@ virHostCPUReadSignature(virArch arch,
 g_autofree char *model = NULL;
 g_autofree char *stepping = NULL;
 g_autofree char *revision = NULL;
+g_autofree char *proc = NULL;
+g_autofree char *facilities = NULL;
 
-if (!ARCH_IS_X86(arch) && !ARCH_IS_PPC64(arch))
+if (!ARCH_IS_X86(arch) && !ARCH_IS_PPC64(arch) && !ARCH_IS_S390(arch))
 return 0;
 
 while (fgets(line, lineLen, cpuinfo)) {
@@ -1479,6 +1481,23 @@ virHostCPUReadSignature(virArch arch,
 *signature = g_strdup_printf("%s, rev %s", name, revision);
 return 0;
 }
+} else if (ARCH_IS_S390(arch)) {
+if (STREQ(parts[0], "vendor_id")) {
+if (!vendor)
+vendor = g_steal_pointer(&parts[1]);
+} else if (STREQ(parts[0], "processor 0")) {
+if (!proc)
+proc = g_steal_pointer(&parts[1]);
+} else if (STREQ(parts[0], "facilities")) {
+if (!facilities)
+facilities = g_steal_pointer(&parts[1]);
+}
+
+if (vendor && proc && facilities) {
+*signature = g_strdup_printf("%s, %s, facilities: %s",
+ vendor, proc, facilities);
+return 0;
+}
 }
 }
 
diff --git a/tests/virhostcpudata/linux-s390x-with-frequency.signature 
b/tests/virhostcpudata/linux-s390x-with-frequency.signature
new file mode 100644
index 00..70bb28a154
--- /dev/null
+++ b/tests/virhostcpudata/linux-s390x-with-frequency.signature
@@ -0,0 +1 @@
+IBM/S390, version = 00,  identification = 145F07,  machine = 2964, facilities: 
0 1 2 3 4 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 
31 32 33 34 35 36 37 40 41 42 43 44 45 46 47 48 49 50 51 52 53 55 57 64 65 66 
67 68 69 70 71 72 73 75 76 77 78 80 128 129 131 132 142 143
\ No newline at end of file
-- 
2.26.2



Re: [libvirt PATCH 4/4] scripts: emit raw enum value in API build description

2020-05-22 Thread Daniel P . Berrangé
On Tue, May 19, 2020 at 02:01:19PM +0100, Daniel P. Berrangé wrote:
> Currently the value for an enum is only emitted if it is a plain
> string. If the enum is an integer or hex value, or a complex code block,
> it is omitted from the API build. This fixes that by emitting the raw
> value if no string value is present.
> 
> With this change:
> 
> file='libvirt-common'
>  params='major,minor,micro'>
> file='libvirt-common'>
> file='libvirt-domain'
>  params='cpumaps,maplen,vcpu,cpumap'>
>   ...snip...
> 
> file='libvirt-common'
>  params='major,minor,micro'
>  raw='((major) * 100 + (minor) * 1000 + (micro) <= 
> LIBVIR_VERSION_NUMBER)'>
> file='libvirt-common'
>  raw='6004000'>
> file='libvirt-domain'
>  params='cpumaps,maplen,vcpu,cpumap'
>  raw='memcpy(cpumap, VIR_GET_CPUMAP(cpumaps, maplen, vcpu), maplen)'>
>   ...snip...
> 
> Signed-off-by: Daniel P. Berrangé 
> ---
>  scripts/apibuild.py | 10 --
>  1 file changed, 8 insertions(+), 2 deletions(-)
> 
> diff --git a/scripts/apibuild.py b/scripts/apibuild.py
> index 9faf15a75e..d63489ba62 100755
> --- a/scripts/apibuild.py
> +++ b/scripts/apibuild.py
> @@ -1027,11 +1027,14 @@ class CParser:
>  return token
>  
>  strValue = None
> +rawValue = None
>  if len(lst) == 1 and lst[0][0] == '"' and lst[0][-1] == '"':
>  strValue = lst[0][1:-1]
> +else:
> +rawValue = " ".join(lst)
>  (args, desc) = self.parseMacroComment(name, not 
> self.is_header)
>  self.index_add(name, self.filename, not self.is_header,
> -   "macro", (args, desc, params, strValue))
> +   "macro", (args, desc, params, strValue, 
> rawValue))
>  return token
>  
>  #
> @@ -2178,13 +2181,16 @@ class docBuilder:
>  desc = None
>  params = None
>  strValue = None
> +rawValue = None
>  else:
> -(args, desc, params, strValue) = id.info
> +(args, desc, params, strValue, rawValue) = id.info
>  
>  if params is not None:
>  output.write(" params='%s'" % params)
>  if strValue is not None:
>  output.write(" string='%s'" % strValue)
> +else:
> +output.write(" raw='%s'" % rawValue)
>  output.write(">\n")
>  
>  if desc is not None and desc != "":

This last chunk needs to be

 if strValue is not None:
 output.write(" string='%s'" % strValue)
+else:
+output.write(" raw='%s'" % escape(rawValue))
 output.write(">\n")
 
 if desc is not None and desc != "":


Regards,
Daniel
-- 
|: https://berrange.com  -o-https://www.flickr.com/photos/dberrange :|
|: https://libvirt.org -o-https://fstop138.berrange.com :|
|: https://entangle-photo.org-o-https://www.instagram.com/dberrange :|



Re: [libvirt PATCH 1/2] docs: Fix names for PF and VF PCI device capabilities

2020-05-22 Thread Erik Skultety
On Thu, May 21, 2020 at 03:09:34PM -0500, Jonathon Jongsma wrote:
> The proper name for physical function capability is 'phys_function', not
> 'physical_function'. Likewise, a virtual function capability is
> 'virt_functions' rather than 'virtual_function'.
>
> Signed-off-by: Jonathon Jongsma 
> ---
Reviewed-by: Erik Skultety 



Re: [PATCH] testCompareXMLToArgvValidateSchema: Construct @vm from scratch

2020-05-22 Thread Peter Krempa
On Thu, May 21, 2020 at 20:58:23 +0200, Michal Privoznik wrote:
> Currently, the @vm is passed in as an argument and
> testCompareXMLToArgvCreateArgs() is called over it which means
> under the hood qemuProcessPrepareDomain() is called. But at the
> point where ValidateSchema() is called, the domain object is
> already 'prepared', i.e. all device aliases are assigned and so

Not completely though. First pass consumes some things.

> on. But our code is not prepared to 'prepare' a domain twice - it
> simply overwrites all the pointers leading to a memory leak.
> 
> Fortunately, this is only the problem of this test.

I wanted to prevent parsing it twice, but that shouldn't be a problem
for now. Definitely it's lesser problem until we stop loading the
schema for every single test run.

> Resolve this by constructing the domain object from scratch.
> 
> Signed-off-by: Michal Privoznik 
> ---
>  tests/qemuxml2argvtest.c | 14 --
>  1 file changed, 12 insertions(+), 2 deletions(-)

Reviewed-by: Peter Krempa