Re: [Qemu-devel] [Qemu-arm] [PATCH] hw/intc/arm_gic: Drop GIC_BASE_IRQ macro

2018-08-25 Thread Philippe Mathieu-Daudé
Le ven. 24 août 2018 13:18, Peter Maydell  a
écrit :

> The GIC_BASE_IRQ macro is a leftover from when we shared code
> between the GICv2 and the v7M NVIC. Since the NVIC is now
> split off, GIC_BASE_IRQ is always 0, and we can just delete it.
>
> Signed-off-by: Peter Maydell 
>

Reviewed-by: Philippe Mathieu-Daudé 

---
>  hw/intc/gic_internal.h   |  2 --
>  hw/intc/arm_gic.c| 31 ++-
>  hw/intc/arm_gic_common.c |  1 -
>  3 files changed, 14 insertions(+), 20 deletions(-)
>
> diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
> index 45c2af0bf59..8d29b40ca10 100644
> --- a/hw/intc/gic_internal.h
> +++ b/hw/intc/gic_internal.h
> @@ -26,8 +26,6 @@
>
>  #define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1)))
>
> -#define GIC_BASE_IRQ 0
> -
>  #define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm))
>  #define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &=
> ~(cm))
>  #define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled &
> (cm)) != 0)
> diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
> index 542b4b93eab..b3ac2d11fc5 100644
> --- a/hw/intc/arm_gic.c
> +++ b/hw/intc/arm_gic.c
> @@ -955,7 +955,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr
> offset, MemTxAttrs attrs)
>  res = 0;
>  if (!(s->security_extn && !attrs.secure) &&
> gic_has_groups(s)) {
>  /* Every byte offset holds 8 group status bits */
> -irq = (offset - 0x080) * 8 + GIC_BASE_IRQ;
> +irq = (offset - 0x080) * 8;
>  if (irq >= s->num_irq) {
>  goto bad_reg;
>  }
> @@ -974,7 +974,6 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr
> offset, MemTxAttrs attrs)
>  irq = (offset - 0x100) * 8;
>  else
>  irq = (offset - 0x180) * 8;
> -irq += GIC_BASE_IRQ;
>  if (irq >= s->num_irq)
>  goto bad_reg;
>  res = 0;
> @@ -994,7 +993,6 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr
> offset, MemTxAttrs attrs)
>  irq = (offset - 0x200) * 8;
>  else
>  irq = (offset - 0x280) * 8;
> -irq += GIC_BASE_IRQ;
>  if (irq >= s->num_irq)
>  goto bad_reg;
>  res = 0;
> @@ -1019,7 +1017,6 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr
> offset, MemTxAttrs attrs)
>  goto bad_reg;
>  }
>
> -irq += GIC_BASE_IRQ;
>  if (irq >= s->num_irq)
>  goto bad_reg;
>  res = 0;
> @@ -1036,7 +1033,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr
> offset, MemTxAttrs attrs)
>  }
>  } else if (offset < 0x800) {
>  /* Interrupt Priority.  */
> -irq = (offset - 0x400) + GIC_BASE_IRQ;
> +irq = (offset - 0x400);
>  if (irq >= s->num_irq)
>  goto bad_reg;
>  res = gic_dist_get_priority(s, cpu, irq, attrs);
> @@ -1046,7 +1043,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr
> offset, MemTxAttrs attrs)
>  /* For uniprocessor GICs these RAZ/WI */
>  res = 0;
>  } else {
> -irq = (offset - 0x800) + GIC_BASE_IRQ;
> +irq = (offset - 0x800);
>  if (irq >= s->num_irq) {
>  goto bad_reg;
>  }
> @@ -1060,7 +1057,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr
> offset, MemTxAttrs attrs)
>  }
>  } else if (offset < 0xf00) {
>  /* Interrupt Configuration.  */
> -irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ;
> +irq = (offset - 0xc00) * 4;
>  if (irq >= s->num_irq)
>  goto bad_reg;
>  res = 0;
> @@ -1183,7 +1180,7 @@ static void gic_dist_writeb(void *opaque, hwaddr
> offset,
>   */
>  if (!(s->security_extn && !attrs.secure) &&
> gic_has_groups(s)) {
>  /* Every byte offset holds 8 group status bits */
> -irq = (offset - 0x80) * 8 + GIC_BASE_IRQ;
> +irq = (offset - 0x80) * 8;
>  if (irq >= s->num_irq) {
>  goto bad_reg;
>  }
> @@ -1204,7 +1201,7 @@ static void gic_dist_writeb(void *opaque, hwaddr
> offset,
>  }
>  } else if (offset < 0x180) {
>  /* Interrupt Set Enable.  */
> -irq = (offset - 0x100) * 8 + GIC_BASE_IRQ;
> +irq = (offset - 0x100) * 8;
>  if (irq >= s->num_irq)
>  goto bad_reg;
>  if (irq < GIC_NR_SGIS) {
> @@ -1239,7 +1236,7 @@ static void gic_dist_writeb(void *opaque, hwaddr
> offset,
>  }
>  } else if (offset < 0x200) {
>  /* Interrupt Clear Enable.  */
> -irq = (offset - 0x180) * 8 + GIC_BASE_IRQ;
> +irq = (offset - 0x180) * 8;
>  if (irq >= s->num_irq)
>  goto bad_reg;
>  if (irq < GIC_NR_SGIS) {
> @@ -1264,7 +1261,7 @@ static void gic_dist_writeb(void *opaque, hwaddr
> 

Re: [Qemu-devel] [PATCH v4 3/3] arm: Add BBC micro:bit machine

2018-08-25 Thread Joel Stanley
On Thu, 16 Aug 2018 at 07:12, Peter Maydell  wrote:

> > +static void microbit_machine_init(MachineClass *mc)
> > +{
> > +mc->desc = "BBC micro:bit";
> > +mc->init = microbit_init;
> > +mc->max_cpus = 1;
> > +}
> > +DEFINE_MACHINE("microbit", microbit_machine_init);
>
> Your subclass of TYPE_MACHINE has extra state, so it can't
> use DEFINE_MACHINE (which creates a subclass whose instance_size
> is the same as the parent TYPE_MACHINE). You need to do this
> longhand:
>
> static void machine_class_init(ObjectClass *oc, void *data)
> {
> MachineClass *mc = MACHINE_CLASS(oc);
>
> mc->desc = "BBC micro:bit";
> mc->init = microbit_init;
> mc->max_cpus = 1;
> }
>
> static const TypeInfo microbit_info = {
> .name = TYPE_MICROBIT_MACHINE,
> .parent = TYPE_MACHINE,
> .instance_size = sizeof(MICROBITMachineState),
> .class_init = microbit_class_init,
> };
>
> static void microbit_machine_init(void)
> {
> type_register_static(&microbit_info);
> }
>
> type_init(microbit_machine_init);
>
>
> (code untested but should be correct; compare against other boards.)

Thanks for spelling it out. I spent a decent chunk of time looking at
other boards, and this aspect of Qemu still baffles me.



Re: [Qemu-devel] [PATCH v5 2/3] arm: Add Nordic Semiconductor nRF51 SoC

2018-08-25 Thread Joel Stanley
Hi Peter,

On Thu, 16 Aug 2018 at 07:24, Peter Maydell  wrote:

> > +static Property nrf51_soc_properties[] = {
> > +DEFINE_PROP_LINK("memory", NRF51State, board_memory, 
> > TYPE_MEMORY_REGION,
> > + MemoryRegion *),
> > +DEFINE_PROP_UINT32("sram-size", NRF51State, sram_size, 
> > NRF51822_SRAM_SIZE),
> > +DEFINE_PROP_UINT32("flash-size", NRF51State, flash_size, 
> > NRF51822_FLASH_SIZE),
> > +DEFINE_PROP_END_OF_LIST(),
> > +};
>
> Ah, I was wondering how flash_size was handled when I looked at patch 3.
>
> Instead of your patch 3 board model code reaching into the internals
> of the NRF51State struct to pull out the flash_size field, the board
> code should set this property on the object it creates, and then it
> knows how big the flash it's asked for is and can pass that value also
> to the armv7m_load_kernel() function.

Thanks for the review. I've sorted out the other comments relating to
this patch, but I wanted to discuss this one.

I agree that it would be neater to do this. I didn't as the flash is
part of the NRF51822 SoC, opposed to some external flash that is on
the microbit board and connected to the SoC. This is mentioned in the
comment at the start of the file:

 /*
  * The size and base is for the NRF51822 part. If other parts
  * are supported in the future, add a sub-class of NRF51SoC for
  * the specific variants
  */

What would you prefer we do here?

Cheers,

Joel



Re: [Qemu-devel] [PATCH V4 4/4] target-i386: add i440fx 0xcf8 port as coalesced_pio

2018-08-25 Thread Liran Alon



> On 25 Aug 2018, at 15:19, Peng Hao  wrote:
> 
> diff --git a/hw/pci-host/piix.c b/hw/pci-host/piix.c
> index 0e60834..da73743 100644
> --- a/hw/pci-host/piix.c
> +++ b/hw/pci-host/piix.c
> @@ -327,6 +327,10 @@ static void i440fx_pcihost_realize(DeviceState *dev, 
> Error **errp)
> 
> sysbus_add_io(sbd, 0xcfc, &s->data_mem);
> sysbus_init_ioports(sbd, 0xcfc, 4);
> +
> +/* register i440fx 0xcf8 port as coalesced pio */
> +memory_region_set_flush_coalesced(&s->data_mem);
> +memory_region_add_coalescing(&s->conf_mem, 0, 4);
> }
> 

Is there a reason to not register this port as coalesced PIO also for Q35?
In q35_host_realize()?
If not, I would do that as an extra patch as part of this series.

-Liran




Re: [Qemu-devel] [RFC v4 1/6] pci_expander_bridge: add type TYPE_PXB_PCIE_HOST

2018-08-25 Thread Marcel Apfelbaum

Hi Zihan,

On 08/19/2018 04:51 AM, Zihan Yang wrote:

Hi Marcel,

Marcel Apfelbaum  于2018年8月18日周六 上午1:14写道:

Hi Zihan,

On 08/09/2018 09:33 AM, Zihan Yang wrote:

The inner host bridge created by pxb-pcie is TYPE_PXB_PCI_HOST by default,
change it to a new type TYPE_PXB_PCIE_HOST to better utilize ECAM of PCIe

Signed-off-by: Zihan Yang 
---
   hw/pci-bridge/pci_expander_bridge.c | 127 
++--
   1 file changed, 122 insertions(+), 5 deletions(-)

diff --git a/hw/pci-bridge/pci_expander_bridge.c 
b/hw/pci-bridge/pci_expander_bridge.c
index e62de42..6dd38de 100644
--- a/hw/pci-bridge/pci_expander_bridge.c
+++ b/hw/pci-bridge/pci_expander_bridge.c
@@ -15,10 +15,12 @@
   #include "hw/pci/pci.h"
   #include "hw/pci/pci_bus.h"
   #include "hw/pci/pci_host.h"
+#include "hw/pci/pcie_host.h"
   #include "hw/pci/pci_bridge.h"
   #include "qemu/range.h"
   #include "qemu/error-report.h"
   #include "sysemu/numa.h"
+#include "qapi/visitor.h"

   #define TYPE_PXB_BUS "pxb-bus"
   #define PXB_BUS(obj) OBJECT_CHECK(PXBBus, (obj), TYPE_PXB_BUS)
@@ -40,11 +42,20 @@ typedef struct PXBBus {
   #define TYPE_PXB_PCIE_DEVICE "pxb-pcie"
   #define PXB_PCIE_DEV(obj) OBJECT_CHECK(PXBDev, (obj), TYPE_PXB_PCIE_DEVICE)

+#define PROP_PXB_PCIE_DEV "pxbdev"
+
+#define PROP_PXB_PCIE_DOMAIN_NR "domain_nr"
+#define PROP_PXB_PCIE_MAX_BUS "max_bus"
+#define PROP_PXB_BUS_NR "bus_nr"
+#define PROP_PXB_NUMA_NODE "numa_node"
+
   typedef struct PXBDev {
   /*< private >*/
   PCIDevice parent_obj;
   /*< public >*/

+uint32_t domain_nr; /* PCI domain number, non-zero means separate domain */

The commit message suggests this patch is only about
re-factoring of the pxb host type, but you add here more fields.
Please use two separate patches.


+uint8_t max_bus;/* max bus number to use(including this one) */

That's a great idea! Limiting the max_bus will save us a lot
of resource space,  we will not need 256 buses on pxbs probably.

My concern is what happens with the current mode.
Currently bus_nr is used to divide PCI domain 0 buses between pxbs.
So if you have a pxb with bus_nr 100, and another with bus_nr 200,
we divide them like this:
  main host bridge 0...99
  pxb1 100 -199
  pxb2 200-255

What will be the meaning of max_bus if we don't use the domain_nr parameter?
Maybe it will mean that some bus numbers are not assigned at all, for
example:
pxb1: bus_nr 100, max_bus 150
pxb2: bus_nr 200, max_bus 210

It may work.

Yes, it should mean so. Actually max_bus does not have to be specified
if domain_nr
is not used, but if users decide to use domain_nr and want to save
space, max_bus
could be used.


   uint8_t bus_nr;
   uint16_t numa_node;
   } PXBDev;
@@ -58,6 +69,16 @@ static PXBDev *convert_to_pxb(PCIDevice *dev)
   static GList *pxb_dev_list;

   #define TYPE_PXB_HOST "pxb-host"
+#define TYPE_PXB_PCIE_HOST "pxb-pcie-host"
+#define PXB_PCIE_HOST_DEVICE(obj) \
+ OBJECT_CHECK(PXBPCIEHost, (obj), TYPE_PXB_PCIE_HOST)
+
+typedef struct PXBPCIEHost {
+PCIExpressHost parent_obj;
+
+/* pointers to PXBDev */
+PXBDev *pxbdev;
+} PXBPCIEHost;

   static int pxb_bus_num(PCIBus *bus)
   {
@@ -111,6 +132,35 @@ static const char *pxb_host_root_bus_path(PCIHostState 
*host_bridge,
   return bus->bus_path;
   }

+/* Use a dedicated function for PCIe since pxb-host does
+ * not have a domain_nr field */
+static const char *pxb_pcie_host_root_bus_path(PCIHostState *host_bridge,
+  PCIBus *rootbus)
+{
+if (!pci_bus_is_express(rootbus)) {
+/* pxb-pcie-host cannot reside on a PCI bus */
+return NULL;
+}
+PXBBus *bus = PXB_PCIE_BUS(rootbus);
+
+/* get the pointer to PXBDev */
+Object *obj = object_property_get_link(OBJECT(host_bridge),
+   PROP_PXB_PCIE_DEV, NULL);

I don't think you need a link here.
I think rootbus->parent_dev returns the pxb device.
(See the implementation of pxb_bus_num() )

OK, I'll change it in next version.


+
+snprintf(bus->bus_path, 8, "%04lx:%02x",
+ object_property_get_uint(obj, PROP_PXB_PCIE_DOMAIN_NR, NULL),
+ pxb_bus_num(rootbus));
+return bus->bus_path;
+}
+
+static void pxb_pcie_host_get_mmcfg_size(Object *obj, Visitor *v, const char 
*name,
+void *opaque, Error **errp)
+{
+PCIExpressHost *e = PCIE_HOST_BRIDGE(obj);
+
+visit_type_uint64(v, name, &e->size, errp);
+}
+
   static char *pxb_host_ofw_unit_address(const SysBusDevice *dev)
   {
   const PCIHostState *pxb_host;
@@ -142,6 +192,31 @@ static char *pxb_host_ofw_unit_address(const SysBusDevice 
*dev)
   return NULL;
   }

+static void pxb_pcie_host_initfn(Object *obj)
+{
+PXBPCIEHost *s = PXB_PCIE_HOST_DEVICE(obj);
+PCIHostState *phb = PCI_HOST_BRIDGE(obj);
+
+memory_region_init_io(&s->conf_mem, obj, &pci_host_conf_le_ops, phb,
+  "pci-conf-idx", 4);

Re: [Qemu-devel] [RFC v4 3/6] i386/acpi-build: describe new pci domain in AML

2018-08-25 Thread Marcel Apfelbaum




On 08/19/2018 05:00 AM, Zihan Yang wrote:

Marcel Apfelbaum  于2018年8月18日周六 上午1:49写道:



On 08/09/2018 09:34 AM, Zihan Yang wrote:

Describe new pci segments of host bridges in AML as new pci devices,
with _SEG and _BBN to let them be in DSDT.

Besides, bus_nr indicates the bus number of pxb-pcie under pcie.0 bus,
but since we put it into separate domain, it should be zero,

Why should it be 0? Isn't possible we start another domain from bus_nr> 0?

In the last version, I got a misunderstanding about bus_nr, and you pointed me
out that we should start from bus 0 when in new domain. I can support this
start_bus in later patch, but I wonder when would we want to start a domain
from bus_nr > 0? I'm not quite familiar with the use case.


That is a good point. I can't think of a reason to start a new PCI
domain with a bus number > 0, but it is possible.
So if it doesn't complicate the implementation we don't
want this kind of limitation.
On the other hand, if it makes the code too complicated,
we can argue for requiring it.


Thanks,
Marcel

[...]



Re: [Qemu-devel] [PATCH 09/20] target/arm: Handle SVE vector length changes in system mode

2018-08-25 Thread Richard Henderson
On 08/17/2018 09:22 AM, Peter Maydell wrote:
>> +/*
>> + * When FP is enabled, but SVE is disabled, the effective len is 0.
>> + * ??? How should sve_exception_el interact with AArch32 state?
>> + * That isn't included in the CheckSVEEnabled pseudocode, so is the
>> + * host kernel required to explicitly disable SVE for an EL using aa32?
>> + */
> I'm not clear what you're asking here. If the EL is AArch32
> then why does it make a difference if SVE is enabled or disabled?
> You can't get at it...
> 
>> +old_len = (sve_exception_el(env, old_el)
>> +   ? 0 : sve_zcr_len_for_el(env, old_el));
>> +new_len = (sve_exception_el(env, new_el)
>> +   ? 0 : sve_zcr_len_for_el(env, new_el));

Yes the registers are inaccessible.  But...

It may be that we must produce old_len/new_len == 0 if old_el/new_el is in
32-bit mode, so that the high part of the SVE registers are zeroed.

However, it may be UNDEFINED what happens if the OS switches to an el in 32-bit
mode while CPACR.ZEN == 3.  And if so, then there may be no point in adding an
additional test here.

So far I have re-worded the comment as:

 * ??? Do we need a conditional for old_el/new_el in aa32 state?
 * That isn't included in the CheckSVEEnabled pseudocode, so is the
 * host kernel required to explicitly disable SVE for an EL using aa32?

Thoughts on the underlying issue?


r~



Re: [Qemu-devel] [PATCH v3 2/3] kvm: Add support to KVM_GET_MSR_FEATURE_INDEX_LIST and KVM_GET_MSRS system ioctl

2018-08-25 Thread Eduardo Habkost
On Thu, Aug 23, 2018 at 10:23:53PM +0200, Paolo Bonzini wrote:
> On 23/08/2018 19:36, Eduardo Habkost wrote:
> >> Right, but if KVM_CAP_GET_MSR_FEATURES is not available I guess you can
> >> assume the MSRs to be zero for everything except "-cpu host".
> > Yes, that would be simplest way to implement the above.  Then
> > QEMU would refuse to run if the feature was part of the requested
> > configuration (2), or not care at all because the feature was not
> > set in the configuration (1).
> > 
> > But I'm not sure I follow the suggestion to not consider the MSR
> > to be zero on "-cpu host".  If we don't need and KVM-side code to
> > support a given MSR feature, we can enable it on all models.
> 
> The case I was thinking about, is where there is no KVM-side code to
> support the MSR feature, but you still need the host CPU to have it.
> Without KVM's feature MSR support, you cannot read the host value of the
> MSR.

Don't we always need some KVM code to support the feature, even
if it's just to make KVM enable the feature by default?

> 
> My idea was that feature MSRs will default to the host value returned by
> KVM_GET_MSR on the /dev/kvm file descriptor, so "-cpu host" could just
> ignore KVM_CAP_GET_MSR_FEATURES and use KVM's default value of the MSRs.
> 
> However, I guess we can just use the default value for _all_ CPU models,
> because in practice the only effect would be on nested VMX MSRs, and
> nested VMX can only be migrated on kernels that have
> KVM_CAP_GET_MSR_FEATURES.  So in practice people using nested VMX are
> using "-cpu host" anyway, not "-cpu SandyBridge,+vmx".

If the default is to use the host value, I see two problems:
1) we can't use the default on any migration-safe CPU model
(i.e.  all except "host"); 2) it will be unsafe for older QEMU
versions (that don't know yet how to call KVM_SET_MSR for that
MSR).

If this is just about VMX caps, this shouldn't be a problem
because we already treat VMX as not migration-safe.  We will need
to keep that in mind if we want to make VMX migration-safe in the
future, though.

> 
> My proposal is: basically, if KVM_CAP_GET_MSR_FEATURES is unavailable:
> 
> - MSR-based features would cause an error if specified on the command line;
> 
> - kvm_arch_init_vcpu would skip the KVM_SET_MSR for feature MSRs completely.

If we already had some MSR features being enabled by default
before KVM_CAP_GET_MSR_FEATURES was introduced (I didn't know we
had such cases), this makes sense.  But I don't think we should
do it for new MSRs.

-- 
Eduardo



Re: [Qemu-devel] [PATCH 4/7] target/mips: Add MXU instruction D16MUL

2018-08-25 Thread Richard Henderson
On 08/24/2018 12:44 PM, Craig Janeczek via Qemu-devel wrote:
> +gen_load_mxu_gpr(t1, opcode->D16MUL.xrb);
> +tcg_gen_ext16s_tl(t0, t1);
> +tcg_gen_shri_tl(t1, t1, 16);
> +tcg_gen_ext16s_tl(t1, t1);

  tcg_gen_sextract_tl(t0, t1, 0, 16);
  tcg_gen_sextract_tl(t1, t1, 16, 16);


r~



Re: [Qemu-devel] [PATCH 3/7] target/mips: Add MXU instruction S8LDD

2018-08-25 Thread Richard Henderson
On 08/24/2018 12:44 PM, Craig Janeczek via Qemu-devel wrote:
> +case OPC_MXU_S8LDD:
> +gen_load_gpr(t0, opcode->S8LDD.rb);
> +tcg_gen_movi_tl(t1, opcode->S8LDD.s8);
> +tcg_gen_ext8s_tl(t1, t1);
> +tcg_gen_add_tl(t0, t0, t1);

This is

  gen_load_gpr(t0, rb);
  tcg_gen_addi_tl(t0, t0, (int8_t)s8);

> +tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_SB);

You might want MO_UB so that you don't need

  tcg_gen_andi_tl(t1, t1, 0xff)

in several places.

Hmm.  Of course there's the two sign-extend cases that do want this.  So maybe
some more logic above the load is warranted.

> +switch (opcode->S8LDD.optn3) {
> +case 0: /*XRa[7:0] = tmp8 */
> +tcg_gen_andi_tl(t1, t1, 0xFF);
> +gen_load_mxu_gpr(t0, opcode->S8LDD.xra);
> +tcg_gen_andi_tl(t0, t0, 0xFF00);
> +tcg_gen_or_tl(t0, t0, t1);

  gen_load_mxu_gpr(t0, xra);
  tcg_gen_deposit_tl(t0, t0, t1, 0, 8);

> +break;
> +case 1: /* XRa[15:8] = tmp8 */
> +tcg_gen_andi_tl(t1, t1, 0xFF);
> +gen_load_mxu_gpr(t0, opcode->S8LDD.xra);
> +tcg_gen_andi_tl(t0, t0, 0x00FF);
> +tcg_gen_shli_tl(t1, t1, 8);
> +tcg_gen_or_tl(t0, t0, t1);

  tcg_gen_deposit_tl(t0, t0, t1, 8, 8);

> +case 4: /* XRa = {8'b0, tmp8, 8'b0, tmp8} */
> +tcg_gen_andi_tl(t1, t1, 0xFF);
> +tcg_gen_mov_tl(t0, t1);
> +tcg_gen_shli_tl(t1, t1, 16);
> +tcg_gen_or_tl(t0, t0, t1);
> +break;

  tcg_gen_deposit_tl(t0, t1, t1, 16, 16);

> +case 5: /* XRa = {tmp8, 8'b0, tmp8, 8'b0} */
> +tcg_gen_andi_tl(t1, t1, 0xFF);
> +tcg_gen_shli_tl(t1, t1, 8);
> +tcg_gen_mov_tl(t0, t1);
> +tcg_gen_shli_tl(t1, t1, 16);
> +tcg_gen_or_tl(t0, t0, t1);

  tcg_gen_shli_tl(t1, t1, 8);
  tcg_gen_deposit_tl(t0, t1, t1, 16, 16);

> +case 7: /* XRa = {tmp8, tmp8, tmp8, tmp8} */
> +tcg_gen_andi_tl(t1, t1, 0xFF);
> +tcg_gen_mov_tl(t0, t1);
> +tcg_gen_shli_tl(t1, t1, 8);
> +tcg_gen_or_tl(t0, t0, t1);
> +tcg_gen_shli_tl(t1, t1, 8);
> +tcg_gen_or_tl(t0, t0, t1);
> +tcg_gen_shli_tl(t1, t1, 8);
> +tcg_gen_or_tl(t0, t0, t1);

  tcg_gen_muli_tl(t0, t1, 0x01010101);


r~



Re: [Qemu-devel] [PATCH 2/7] target/mips: Add MXU instructions S32I2M and S32M2I

2018-08-25 Thread Richard Henderson
On 08/24/2018 12:44 PM, Craig Janeczek via Qemu-devel wrote:
> Adds support for emulating the S32I2M and S32M2I MXU instructions.
> 
> Signed-off-by: Craig Janeczek 
> ---
>  target/mips/translate.c | 55 +
>  1 file changed, 55 insertions(+)
> 
> diff --git a/target/mips/translate.c b/target/mips/translate.c
> index 50f0cb558f..381dfad36e 100644
> --- a/target/mips/translate.c
> +++ b/target/mips/translate.c
> @@ -364,6 +364,9 @@ enum {
>  OPC_CLO  = 0x21 | OPC_SPECIAL2,
>  OPC_DCLZ = 0x24 | OPC_SPECIAL2,
>  OPC_DCLO = 0x25 | OPC_SPECIAL2,
> +/* MXU */
> +OPC_MXU_S32I2M = 0x2F | OPC_SPECIAL2,
> +OPC_MXU_S32M2I = 0x2E | OPC_SPECIAL2,

I haven't been able to find any documentation of the bit
layout of these instructions.  Any pointers?

> +typedef union {
> +struct {
> +uint32_t op:6;
> +uint32_t xra:5;
> +uint32_t:5;
> +uint32_t rb:5;
> +uint32_t:5;
> +uint32_t special2:6;
> +} S32I2M;
> +
> +struct {
> +uint32_t op:6;
> +uint32_t xra:5;
> +uint32_t:5;
> +uint32_t rb:5;
> +uint32_t:5;
> +uint32_t special2:6;
> +} S32M2I;
> +} MXU_OPCODE;

Do not use bitfields.  The layout differs by host compiler.
Use extract32(input, pos, len).


> +
> +/* MXU Instructions */
> +static void gen_mxu(DisasContext *ctx, uint32_t opc)
> +{
> +#ifndef TARGET_MIPS64 /* Only works in 32 bit mode */
> +TCGv t0;
> +t0 = tcg_temp_new();
> +MXU_OPCODE *opcode = (MXU_OPCODE *)>opcode;
> +
> +switch (opc) {
> +case OPC_MXU_S32I2M:
> +gen_load_gpr(t0, opcode->S32I2M.rb);
> +gen_store_mxu_gpr(t0, opcode->S32I2M.xra);
> +break;
> +
> +case OPC_MXU_S32M2I:
> +gen_load_mxu_gpr(t0, opcode->S32M2I.xra);
> +gen_store_gpr(t0, opcode->S32M2I.rb);
> +break;
> +}
> +
> +tcg_temp_free(t0);
> +#else
> +generate_exception_end(ctx, EXCP_RI);
> +#endif
> +}

There's nothing here (yet, I suppose) that won't compile for MIPS64.
I'd suggest avoiding ifdefs as much as possible.


r~



Re: [Qemu-devel] [PATCH] slirp: Implement RFC2132 TFTP server name

2018-08-25 Thread Samuel Thibault
Hello,

Fam Zheng, le ven. 24 août 2018 21:53:12 +0800, a ecrit:
>const char *vnameserver, const char *vnameserver6,
>const char *smb_export, const char *vsmbserver,
>const char **dnssearch, const char *vdomainname,
> +  const char *tftp_server_name,

I'd say rather put it between the vhostname and tftp_export parameters.

> @@ -321,6 +322,9 @@ Slirp *slirp_init(int restricted, bool in_enabled, struct 
> in_addr vnetwork,
>  slirp->vdhcp_startaddr = vdhcp_start;
>  slirp->vnameserver_addr = vnameserver;
>  slirp->vnameserver_addr6 = vnameserver6;
> +if (tftp_server_name) {
> +slirp->tftp_server_name = g_strdup(tftp_server_name);
> +}

I'd say do not bother testing for tftp_server_name != NULL, just always
use g_strdup, as is done for other values.

Samuel



Re: [Qemu-devel] [PATCH 1/7] target/mips: Add MXU register support

2018-08-25 Thread Richard Henderson
On 08/24/2018 12:44 PM, Craig Janeczek via Qemu-devel wrote:
> +/* MXU General purpose registers moves. */
> +static inline void gen_load_mxu_gpr (TCGv t, int reg)
> +{
> +if (reg == 0)
> +tcg_gen_movi_tl(t, 0);
> +else
> +tcg_gen_mov_tl(t, mxu_gpr[reg-1]);
> +}
> +
> +static inline void gen_store_mxu_gpr (TCGv t, int reg)
> +{
> +if (reg != 0)
> +tcg_gen_mov_tl(mxu_gpr[reg-1], t);
> +}
> +
>  /* Moves to/from shadow registers. */
>  static inline void gen_load_srsgpr (int from, int to)
>  {
> @@ -20742,6 +20767,11 @@ void mips_tcg_init(void)
>  fpu_fcr31 = tcg_global_mem_new_i32(cpu_env,
> offsetof(CPUMIPSState, 
> active_fpu.fcr31),
> "fcr31");
> +
> +for (i = 0; i < 16; i++)
> +mxu_gpr[i] = tcg_global_mem_new(cpu_env,
> +offsetof(CPUMIPSState, 
> active_tc.mxu_gpr[i]),
> +mxuregnames[i]);
>  }

You need to fix the ./scripts/checkpatch.pl errors.
But otherwise the logic is ok.


r~



Re: [Qemu-devel] [PATCH] spapr: fix leak of rev array

2018-08-25 Thread Richard Henderson
On 08/24/2018 01:31 PM, Emilio G. Cota wrote:
> Introduced in 04d595b300 ("spapr: do not use CPU_FOREACH_REVERSE",
> 2018-08-23)
> 
> Fixes: CID1395181
> Reported-by: Peter Maydell 
> Signed-off-by: Emilio G. Cota 
> ---
>  hw/ppc/spapr.c | 1 +
>  1 file changed, 1 insertion(+)

Reviewed-by: Richard Henderson 


r~




Re: [Qemu-devel] [PATCH 2/3] hw/nvram/fw_cfg: Use memberwise copy of MemoryRegionOps struct

2018-08-25 Thread Richard Henderson
On 08/24/2018 10:04 AM, Peter Maydell wrote:
> We've now removed the 'old_mmio' member from MemoryRegionOps,
> so we can perform the copy as a simple struct copy rather
> than having to do it via a memberwise copy.
> 
> Signed-off-by: Peter Maydell 
> ---
>  hw/nvram/fw_cfg.c | 7 +--
>  1 file changed, 1 insertion(+), 6 deletions(-)

Reviewed-by: Richard Henderson 


r~




Re: [Qemu-devel] [PATCH 1/3] memory: Remove old_mmio accessors

2018-08-25 Thread Richard Henderson
On 08/24/2018 10:04 AM, Peter Maydell wrote:
> Now that all the users of old_mmio MemoryRegion accessors
> have been converted, we can remove the core code support.
> 
> Signed-off-by: Peter Maydell 
> ---
>  docs/devel/memory.txt |  2 --
>  include/exec/memory.h |  5 
>  memory.c  | 64 ++-
>  3 files changed, 2 insertions(+), 69 deletions(-)

Reviewed-by: Richard Henderson 


r~



[Qemu-devel] [Bug 1788665] Re: Low 2D graphics performance with Windows 10 (1803) VGA passthrough VM using "Spectre" protection

2018-08-25 Thread Heiko Sieger
For comparison, here the Linux/host cat /proc/cpuinfo (partial):

processor   : 11
vendor_id   : GenuineIntel
cpu family  : 6
model   : 45
model name  : Intel(R) Core(TM) i7-3930K CPU @ 3.20GHz
stepping: 7
microcode   : 0x714
cpu MHz : 4200.015
cache size  : 12288 KB
physical id : 0
siblings: 12
core id : 5
cpu cores   : 6
apicid  : 11
initial apicid  : 11
fpu : yes
fpu_exception   : yes
cpuid level : 13
wp  : yes
flags   : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov 
pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb 
rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology 
nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 
ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer 
aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority 
ept vpid xsaveopt dtherm ida arat pln pts flush_l1d
bugs: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf
bogomips: 6399.97
clflush size: 64
cache_alignment : 64
address sizes   : 46 bits physical, 48 bits virtual
power management:

-- 
You received this bug notification because you are a member of qemu-
devel-ml, which is subscribed to QEMU.
https://bugs.launchpad.net/bugs/1788665

Title:
  Low 2D graphics performance with Windows 10 (1803) VGA passthrough VM
  using "Spectre" protection

Status in QEMU:
  New

Bug description:
  Windows 10 (1803) VM using VGA passthrough via qemu script.

  After upgrading Windows 10 Pro VM to version 1803, or possibly after
  applying the March/April security updates from Microsoft, the VM would
  show low 2D graphics performance (sluggishness in 2D applications and
  low Passmark results).

  Turning off Spectre vulnerability protection in Windows remedies the
  issue.

  Expected behavior:
  qemu/kvm hypervisor to expose firmware capabilities of host to guest OS - see 
https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/CVE-2017-5715-and-hyper-v-vms

  Background:

  Starting in March or April Microsoft began to push driver updates in
  their updates / security updates. See https://support.microsoft.com
  /en-us/help/4073757/protect-your-windows-devices-against-spectre-
  meltdown

  One update concerns the Intel microcode - see
  https://support.microsoft.com/en-us/help/4100347. It is activated by
  default within Windows.

  Once the updates are applied within the Windows guest, 2D graphics
  performance drops significantly. Other performance benchmarks are not
  affected.

  A bare metal Windows installation does not display a performance loss
  after the update. See https://heiko-sieger.info/low-2d-graphics-
  benchmark-with-windows-10-1803-kvm-vm/

  Similar reports can be found here:
  
https://www.reddit.com/r/VFIO/comments/97unx4/passmark_lousy_2d_graphics_performance_on_windows/

  Hardware:

  6 core Intel Core i7-3930K (-MT-MCP-)

  Host OS:
  Linux Mint 19/Ubuntu 18.04
  Kernel: 4.15.0-32-generic x86_64
  Qemu: QEMU emulator version 2.11.1
  Intel microcode (host): 0x714
  dmesg | grep microcode
  [0.00] microcode: microcode updated early to revision 0x714, date = 
2018-05-08
  [2.810683] microcode: sig=0x206d7, pf=0x4, revision=0x714
  [2.813340] microcode: Microcode Update Driver: v2.2.

  Note: I manually updated the Intel microcode on the host from 0x713 to
  0x714. However, both microcode versions produce the same result in the
  Windows guest.

  Guest OS:
  Windows 10 Pro 64 bit, release 1803

To manage notifications about this bug go to:
https://bugs.launchpad.net/qemu/+bug/1788665/+subscriptions



[Qemu-devel] [Bug 1788665] Re: Low 2D graphics performance with Windows 10 (1803) VGA passthrough VM using "Spectre" protection

2018-08-25 Thread Heiko Sieger
Daniel Berrange: ...Essentially if your guest kernel shows "pti ssbd
ibrs ibpb stibp" features as present, then thanks to your "-cpu host"
usage, the guest should see them too.

1. I changed my qemu start script and added +vmx:
-cpu 
host,kvm=off,hv_vendor_id=1234567890ab,hv_vapic,hv_time,hv_relaxed,hv_spinlocks=0x1fff,+vmx
 \

2. I installed cygwin on the Windows guest and ran cat /proc/cpuinfo.
Here the output:

processor   : 11
vendor_id   : GenuineIntel
cpu family  : 6
model   : 45
model name  : Intel(R) Core(TM) i7-3930K CPU @ 3.20GHz
stepping: 7
cpu MHz : 3200.000
cache size  : 4096 KB
physical id : 0
siblings: 12
core id : 5
cpu cores   : 6
apicid  : 11
initial apicid  : 11
fpu : yes
fpu_exception   : yes
cpuid level : 13
wp  : yes
flags   : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov 
pat pse36 clflush mmx fxsr sse sse2 ss ht pni vmx ssse3 cx16 sse4_1 sse4_2 
x2apic popcnt aes xsave osxsave avx hypervisor lahf_lm arat xsaveopt tsc_adjust
clflush size: 64
cache_alignment : 64
address sizes   : 40 bits physical, 48 bits virtual
power management:

The flags  "pti ssbd ibrs ibpb stibp" are not listed! How do I pass
these flags/features to the Windows guest?

-- 
You received this bug notification because you are a member of qemu-
devel-ml, which is subscribed to QEMU.
https://bugs.launchpad.net/bugs/1788665

Title:
  Low 2D graphics performance with Windows 10 (1803) VGA passthrough VM
  using "Spectre" protection

Status in QEMU:
  New

Bug description:
  Windows 10 (1803) VM using VGA passthrough via qemu script.

  After upgrading Windows 10 Pro VM to version 1803, or possibly after
  applying the March/April security updates from Microsoft, the VM would
  show low 2D graphics performance (sluggishness in 2D applications and
  low Passmark results).

  Turning off Spectre vulnerability protection in Windows remedies the
  issue.

  Expected behavior:
  qemu/kvm hypervisor to expose firmware capabilities of host to guest OS - see 
https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/CVE-2017-5715-and-hyper-v-vms

  Background:

  Starting in March or April Microsoft began to push driver updates in
  their updates / security updates. See https://support.microsoft.com
  /en-us/help/4073757/protect-your-windows-devices-against-spectre-
  meltdown

  One update concerns the Intel microcode - see
  https://support.microsoft.com/en-us/help/4100347. It is activated by
  default within Windows.

  Once the updates are applied within the Windows guest, 2D graphics
  performance drops significantly. Other performance benchmarks are not
  affected.

  A bare metal Windows installation does not display a performance loss
  after the update. See https://heiko-sieger.info/low-2d-graphics-
  benchmark-with-windows-10-1803-kvm-vm/

  Similar reports can be found here:
  
https://www.reddit.com/r/VFIO/comments/97unx4/passmark_lousy_2d_graphics_performance_on_windows/

  Hardware:

  6 core Intel Core i7-3930K (-MT-MCP-)

  Host OS:
  Linux Mint 19/Ubuntu 18.04
  Kernel: 4.15.0-32-generic x86_64
  Qemu: QEMU emulator version 2.11.1
  Intel microcode (host): 0x714
  dmesg | grep microcode
  [0.00] microcode: microcode updated early to revision 0x714, date = 
2018-05-08
  [2.810683] microcode: sig=0x206d7, pf=0x4, revision=0x714
  [2.813340] microcode: Microcode Update Driver: v2.2.

  Note: I manually updated the Intel microcode on the host from 0x713 to
  0x714. However, both microcode versions produce the same result in the
  Windows guest.

  Guest OS:
  Windows 10 Pro 64 bit, release 1803

To manage notifications about this bug go to:
https://bugs.launchpad.net/qemu/+bug/1788665/+subscriptions



Re: [Qemu-devel] [PATCH 5/7] block/mirror: utilize job_exit shim

2018-08-25 Thread Max Reitz
On 2018-08-25 17:02, Max Reitz wrote:
> On 2018-08-23 00:05, John Snow wrote:
>>
>>
>> On 08/22/2018 08:15 AM, Max Reitz wrote:
>>> On 2018-08-17 21:04, John Snow wrote:
 Change the manual deferment to mirror_exit into the implicit
 callback to job_exit and the mirror_exit callback.

 This does change the order of some bdrv_unref calls and job_completed,
 but thanks to the new context in which we call .job_exit, this is safe
 to defer the possible flushing of any nodes to the job_finalize_single
 cleanup stage.
>>>
>>> Ah, right, I forgot this.  Hm, what exactly do you mean?  This function
>>> is executed in the main loop, so it can make 'src' go away.  I don't see
>>> any difference to before.
>>>
>>
>> This changes the order in which we unreference these objects; if you
>> look at this patch the job_completed call I delete is in the middle of
>> what becomes the .exit() callback, which means there is a subtle change
>> in the ordering of how references are put down.
>>
>> Take a look at the weird ordering of mirror_exit as it exists right now;
>> we call job_completed first and *then* put down the last references. If
>> you re-order this upstream right now, you'll deadlock QEMU because this
>> means job_completed is responsible for putting down the last reference
>> to some of these block/bds objects.
>>
>> However, job_completed takes an additional AIO context lock and calls
>> job_finalize_single under *two* locks, which will hang QEMU if we
>> attempt to flush any of these nodes when we put down the last reference.
> 
> If you say so...  I have to admit I don't really understand.  The
> comment doesn't explain why it's so important to keep src around until
> job_completed(), so I don't know.  I thought AioContexts are recursive
> so it doesn't matter whether you take them recursively or not.
> 
> Anyway.  So the difference now is that job_defer_to_main_loop() took the
> lock around the whole exit function, whereas the new exit shim only
> takes it around the .exit() method, but calls job_complete() without a
> lock -- and then job_finalize_single() gets its lock again, so the job
> methods are again called with locks.  That sounds OK to me.
> 
>> Performing the reordering here is *safe* because by removing the call to
>> job_completed and utilizing the exit shim, the .exit() callback executes
>> only under one lock, and when the finalize code runs later it is also
>> executed under only one lock, making this re-ordering safe.
>>
>> Clear as mud?
> 
> Well, I trust you that the drain issue was the reason that src had to
> stay around until after job_completed().  It seems a bit
> counter-intuitive, because the comment explaining that src needs to stay
> around until job_completed() doesn't say much -- but it does imply that
> without that bdrv_ref(), the BDS might be destroyed before
> job_completed().  Which is different from simply having only one
> reference left and then being deleted in job_completed().
> 
> Looking at 3f09bfbc7be, I'm inclined to believe the original reason may
> be that src->job points to the job and that we shouldn't delete it as
> long as it does (bdrv_delete() asserts that bs->job is NULL).  Oh no, a
> tangent appears.
> 
> ...I would assume that when bdrv_replace_node() is called, BlockJob.blk
> is updated to point to the new BDS.  But nobody seems to update the
> BDS.job field.  Investigation is in order.

Aha!  Mirror is run from the mirror filter (of course).  So the node
where BDS.job is set is actually not replaced.

Well, it is, but then we specifically switch the job BB back to point to
the mirror filter.  As long as it does not go away until then, all is
safe.  (And that bdrv_ref() guard doesn't change with this patch.)

Max



signature.asc
Description: OpenPGP digital signature


Re: [Qemu-devel] [PATCH 5/7] block/mirror: utilize job_exit shim

2018-08-25 Thread Max Reitz
On 2018-08-23 00:05, John Snow wrote:
> 
> 
> On 08/22/2018 08:15 AM, Max Reitz wrote:
>> On 2018-08-17 21:04, John Snow wrote:
>>> Change the manual deferment to mirror_exit into the implicit
>>> callback to job_exit and the mirror_exit callback.
>>>
>>> This does change the order of some bdrv_unref calls and job_completed,
>>> but thanks to the new context in which we call .job_exit, this is safe
>>> to defer the possible flushing of any nodes to the job_finalize_single
>>> cleanup stage.
>>
>> Ah, right, I forgot this.  Hm, what exactly do you mean?  This function
>> is executed in the main loop, so it can make 'src' go away.  I don't see
>> any difference to before.
>>
> 
> This changes the order in which we unreference these objects; if you
> look at this patch the job_completed call I delete is in the middle of
> what becomes the .exit() callback, which means there is a subtle change
> in the ordering of how references are put down.
> 
> Take a look at the weird ordering of mirror_exit as it exists right now;
> we call job_completed first and *then* put down the last references. If
> you re-order this upstream right now, you'll deadlock QEMU because this
> means job_completed is responsible for putting down the last reference
> to some of these block/bds objects.
> 
> However, job_completed takes an additional AIO context lock and calls
> job_finalize_single under *two* locks, which will hang QEMU if we
> attempt to flush any of these nodes when we put down the last reference.

If you say so...  I have to admit I don't really understand.  The
comment doesn't explain why it's so important to keep src around until
job_completed(), so I don't know.  I thought AioContexts are recursive
so it doesn't matter whether you take them recursively or not.

Anyway.  So the difference now is that job_defer_to_main_loop() took the
lock around the whole exit function, whereas the new exit shim only
takes it around the .exit() method, but calls job_complete() without a
lock -- and then job_finalize_single() gets its lock again, so the job
methods are again called with locks.  That sounds OK to me.

> Performing the reordering here is *safe* because by removing the call to
> job_completed and utilizing the exit shim, the .exit() callback executes
> only under one lock, and when the finalize code runs later it is also
> executed under only one lock, making this re-ordering safe.
> 
> Clear as mud?

Well, I trust you that the drain issue was the reason that src had to
stay around until after job_completed().  It seems a bit
counter-intuitive, because the comment explaining that src needs to stay
around until job_completed() doesn't say much -- but it does imply that
without that bdrv_ref(), the BDS might be destroyed before
job_completed().  Which is different from simply having only one
reference left and then being deleted in job_completed().

Looking at 3f09bfbc7be, I'm inclined to believe the original reason may
be that src->job points to the job and that we shouldn't delete it as
long as it does (bdrv_delete() asserts that bs->job is NULL).  Oh no, a
tangent appears.

...I would assume that when bdrv_replace_node() is called, BlockJob.blk
is updated to point to the new BDS.  But nobody seems to update the
BDS.job field.  Investigation is in order.

Max



signature.asc
Description: OpenPGP digital signature


Re: [Qemu-devel] [PATCH 1/7] jobs: change start callback to run callback

2018-08-25 Thread Max Reitz
On 2018-08-25 15:33, Max Reitz wrote:

[...]

> Having a central Error object doesn't really make sense.  Whenever the
> block job wants to return an error, it should probably do so as "return"
> values of methods (like .run()).  Same for ret, of course.
> 
> I understand that this is probably really only possible after v2 when
> you've made more use of abort/commit.  But still, I don't think this
> patch improves anything, so I would leave this clean-up until later when
> you can really do something.
> 
> I suppose the idea here is that you want to drop the errp parameter from
> job_completed(), because it is not going to be called by .exit().  But
> the obvious way around this would be to pass an errp to .exit() and then
> pass the result on to job_completed().

You know what, I wrote a really long reply and in the end I realized you
basically did exactly what I wanted.  (Or, rather, I gave two
alternatives, and you did one of them.)

I noticed that having a central error object may still make sense;
.create() shows why, jobs usually fail in .run() and then you need to
keep the error around for a bit.  You can either pass it around, or you
just keep it in Job (those are the two alternatives I gave).

(So I said, either rip out Job.error/Job.err and Job.ret completely, or
keep the iff relationship between Job.err and Job.ret, so together they
give you a meaningful state.)

Now by setting Job.ret and Job.err basically at the same time (both are
results of .run(), and whenever Job.ret is set somewhere else, it is
immediately followed by job_update_rc()), the iff relationship between
Job.err and Job.ret persists.  So that's good!

(In that case I don't know why you removed the "iff" part from the
comment, though.)


The only remaining question I have is why you still pass job->ret to
job_completed().  That seems superfluous to me.

Max


(No, I don't know what's up with me and completely misunderstanding this
series.)



signature.asc
Description: OpenPGP digital signature


[Qemu-devel] [PATCH v3 6/9] tests: add a few qmp tests

2018-08-25 Thread Marc-André Lureau
test_object_add_without_props() tests a bug in qmp_object_add() we
fixed in commit e64c75a975.  Sadly, we don't have systematic
object-add tests.  This lone test can go into qmp-cmd-test for want of
a better home.

test_qom_set_without_value() is about a bug in infrastructure used by
the QMP core, fixed in commit c489780203.  We covered the bug in
infrastructure unit tests (commit bce3035a44).  I wrote that test
earlier, to cover QMP level as well, the test could go into qmp-test.

Signed-off-by: Marc-André Lureau 
---
 tests/qmp-cmd-test.c | 31 +++
 tests/qmp-test.c | 18 ++
 2 files changed, 49 insertions(+)

diff --git a/tests/qmp-cmd-test.c b/tests/qmp-cmd-test.c
index c5b70df974..3ba8f68476 100644
--- a/tests/qmp-cmd-test.c
+++ b/tests/qmp-cmd-test.c
@@ -19,6 +19,15 @@
 
 const char common_args[] = "-nodefaults -machine none";
 
+static const char *get_error_class(QDict *resp)
+{
+QDict *error = qdict_get_qdict(resp, "error");
+const char *desc = qdict_get_try_str(error, "desc");
+
+g_assert(desc);
+return error ? qdict_get_try_str(error, "class") : NULL;
+}
+
 /* Query smoke tests */
 
 static int query_error_class(const char *cmd)
@@ -197,6 +206,24 @@ static void add_query_tests(QmpSchema *schema)
 }
 }
 
+static void test_object_add_without_props(void)
+{
+QTestState *qts;
+QDict *ret;
+
+qts = qtest_init(common_args);
+
+ret = qtest_qmp(qts,
+"{'execute': 'object-add', 'arguments':"
+" {'qom-type': 'memory-backend-ram', 'id': 'ram1' } }");
+g_assert_nonnull(ret);
+
+g_assert_cmpstr(get_error_class(ret), ==, "GenericError");
+
+qobject_unref(ret);
+qtest_quit(qts);
+}
+
 int main(int argc, char *argv[])
 {
 QmpSchema schema;
@@ -206,6 +233,10 @@ int main(int argc, char *argv[])
 
 qmp_schema_init();
 add_query_tests();
+
+qtest_add_func("qmp/object-add-without-props",
+   test_object_add_without_props);
+
 ret = g_test_run();
 
 qmp_schema_cleanup();
diff --git a/tests/qmp-test.c b/tests/qmp-test.c
index 4ae2245484..fdfe73b6d2 100644
--- a/tests/qmp-test.c
+++ b/tests/qmp-test.c
@@ -348,6 +348,23 @@ static void test_qmp_preconfig(void)
 qtest_quit(qs);
 }
 
+static void test_qom_set_without_value(void)
+{
+QTestState *qts;
+QDict *ret;
+
+qts = qtest_init(common_args);
+
+ret = qtest_qmp(qts, "{'execute': 'qom-set', 'arguments':"
+" { 'path': '/machine', 'property': 'rtc-time' } }");
+g_assert_nonnull(ret);
+
+g_assert_cmpstr(get_error_class(ret), ==, "GenericError");
+
+qobject_unref(ret);
+qtest_quit(qts);
+}
+
 int main(int argc, char *argv[])
 {
 g_test_init(&argc, &argv, NULL);
@@ -355,6 +372,7 @@ int main(int argc, char *argv[])
 qtest_add_func("qmp/protocol", test_qmp_protocol);
 qtest_add_func("qmp/oob", test_qmp_oob);
 qtest_add_func("qmp/preconfig", test_qmp_preconfig);
+qtest_add_func("qmp/qom-set-without-value", test_qom_set_without_value);
 
 return g_test_run();
 }
-- 
2.18.0.547.g1d89318c48




[Qemu-devel] [PATCH v3 8/9] qga: process_event() simplification

2018-08-25 Thread Marc-André Lureau
Simplify the code around qmp_dispatch():
- rely on qmp_dispatch/check_obj() for message checking
- have a single send_response() point
- constify send_response() argument

It changes a couple of error messages:

* When @req isn't a dictionary, from
Invalid JSON syntax
  to
QMP input must be a JSON object

* When @req lacks member "execute", from
this feature or command is not currently supported
  to
QMP input lacks member 'execute'

CC: Michael Roth 
Signed-off-by: Marc-André Lureau 
---
 qga/main.c | 47 +--
 1 file changed, 9 insertions(+), 38 deletions(-)

diff --git a/qga/main.c b/qga/main.c
index 6d70242d05..f0ec035996 100644
--- a/qga/main.c
+++ b/qga/main.c
@@ -544,15 +544,15 @@ fail:
 #endif
 }
 
-static int send_response(GAState *s, QDict *payload)
+static int send_response(GAState *s, const QDict *rsp)
 {
 const char *buf;
 QString *payload_qstr, *response_qstr;
 GIOStatus status;
 
-g_assert(payload && s->channel);
+g_assert(rsp && s->channel);
 
-payload_qstr = qobject_to_json(QOBJECT(payload));
+payload_qstr = qobject_to_json(QOBJECT(rsp));
 if (!payload_qstr) {
 return -EINVAL;
 }
@@ -578,53 +578,24 @@ static int send_response(GAState *s, QDict *payload)
 return 0;
 }
 
-static void process_command(GAState *s, QDict *req)
-{
-QDict *rsp;
-int ret;
-
-g_assert(req);
-g_debug("processing command");
-rsp = qmp_dispatch(&ga_commands, QOBJECT(req), false);
-if (rsp) {
-ret = send_response(s, rsp);
-if (ret < 0) {
-g_warning("error sending response: %s", strerror(-ret));
-}
-qobject_unref(rsp);
-}
-}
-
 /* handle requests/control events coming in over the channel */
 static void process_event(void *opaque, QObject *obj, Error *err)
 {
 GAState *s = opaque;
-QDict *req, *rsp;
+QDict *rsp;
 int ret;
 
 g_debug("process_event: called");
 assert(!obj != !err);
 if (err) {
-goto err;
-}
-req = qobject_to(QDict, obj);
-if (!req) {
-error_setg(, "Input must be a JSON object");
-goto err;
-}
-if (!qdict_haskey(req, "execute")) {
-g_warning("unrecognized payload format");
-error_setg(, QERR_UNSUPPORTED);
-goto err;
+rsp = qmp_error_response(err);
+goto end;
 }
 
-process_command(s, req);
-qobject_unref(obj);
-return;
+g_debug("processing command");
+rsp = qmp_dispatch(&ga_commands, obj, false);
 
-err:
-g_warning("failed to parse event: %s", error_get_pretty(err));
-rsp = qmp_error_response(err);
+end:
 ret = send_response(s, rsp);
 if (ret < 0) {
 g_warning("error sending error response: %s", strerror(-ret));
-- 
2.18.0.547.g1d89318c48




[Qemu-devel] [PATCH v3 3/9] Revert "qmp: isolate responses into io thread"

2018-08-25 Thread Marc-André Lureau
This reverts commit abe3cd0ff7f774966da6842620806ab7576fe4f3.

There is no need to add an additional queue to send the reply to the
IOThread, because QMP response is thread safe, and chardev write path
is thread safe. It will schedule the watcher in the associated
IOThread.

Signed-off-by: Marc-André Lureau 
Reviewed-by: Markus Armbruster 
---
 monitor.c | 120 ++
 1 file changed, 3 insertions(+), 117 deletions(-)

diff --git a/monitor.c b/monitor.c
index a1db4db487..084900c602 100644
--- a/monitor.c
+++ b/monitor.c
@@ -182,8 +182,6 @@ typedef struct {
 QemuMutex qmp_queue_lock;
 /* Input queue that holds all the parsed QMP requests */
 GQueue *qmp_requests;
-/* Output queue contains all the QMP responses in order */
-GQueue *qmp_responses;
 } MonitorQMP;
 
 /*
@@ -247,9 +245,6 @@ IOThread *mon_iothread;
 /* Bottom half to dispatch the requests received from I/O thread */
 QEMUBH *qmp_dispatcher_bh;
 
-/* Bottom half to deliver the responses back to clients */
-QEMUBH *qmp_respond_bh;
-
 struct QMPRequest {
 /* Owner of the request */
 Monitor *mon;
@@ -375,19 +370,10 @@ static void monitor_qmp_cleanup_req_queue_locked(Monitor 
*mon)
 }
 }
 
-/* Caller must hold the mon->qmp.qmp_queue_lock */
-static void monitor_qmp_cleanup_resp_queue_locked(Monitor *mon)
-{
-while (!g_queue_is_empty(mon->qmp.qmp_responses)) {
-qobject_unref((QDict *)g_queue_pop_head(mon->qmp.qmp_responses));
-}
-}
-
 static void monitor_qmp_cleanup_queues(Monitor *mon)
 {
 qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
 monitor_qmp_cleanup_req_queue_locked(mon);
-monitor_qmp_cleanup_resp_queue_locked(mon);
 qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
 }
 
@@ -518,85 +504,6 @@ static void qmp_send_response(Monitor *mon, const QDict 
*rsp)
 qobject_unref(json);
 }
 
-static void qmp_queue_response(Monitor *mon, QDict *rsp)
-{
-if (mon->use_io_thread) {
-/*
- * Push a reference to the response queue.  The I/O thread
- * drains that queue and emits.
- */
-qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
-g_queue_push_tail(mon->qmp.qmp_responses, qobject_ref(rsp));
-qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
-qemu_bh_schedule(qmp_respond_bh);
-} else {
-/*
- * Not using monitor I/O thread, i.e. we are in the main thread.
- * Emit right away.
- */
-qmp_send_response(mon, rsp);
-}
-}
-
-struct QMPResponse {
-Monitor *mon;
-QDict *data;
-};
-typedef struct QMPResponse QMPResponse;
-
-static QDict *monitor_qmp_response_pop_one(Monitor *mon)
-{
-QDict *data;
-
-qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
-data = g_queue_pop_head(mon->qmp.qmp_responses);
-qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
-
-return data;
-}
-
-static void monitor_qmp_response_flush(Monitor *mon)
-{
-QDict *data;
-
-while ((data = monitor_qmp_response_pop_one(mon))) {
-qmp_send_response(mon, data);
-qobject_unref(data);
-}
-}
-
-/*
- * Pop a QMPResponse from any monitor's response queue into @response.
- * Return false if all the queues are empty; else true.
- */
-static bool monitor_qmp_response_pop_any(QMPResponse *response)
-{
-Monitor *mon;
-QDict *data = NULL;
-
-qemu_mutex_lock(&monitor_lock);
-QTAILQ_FOREACH(mon, &mon_list, entry) {
-data = monitor_qmp_response_pop_one(mon);
-if (data) {
-response->mon = mon;
-response->data = data;
-break;
-}
-}
-qemu_mutex_unlock(&monitor_lock);
-return data != NULL;
-}
-
-static void monitor_qmp_bh_responder(void *opaque)
-{
-QMPResponse response;
-
-while (monitor_qmp_response_pop_any(&response)) {
-qmp_send_response(response.mon, response.data);
-qobject_unref(response.data);
-}
-}
-
 static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
 /* Limit guest-triggerable events to 1 per second */
 [QAPI_EVENT_RTC_CHANGE]= { 1000 * SCALE_MS },
@@ -620,7 +527,7 @@ static void monitor_qapi_event_emit(QAPIEvent event, QDict 
*qdict)
 QTAILQ_FOREACH(mon, _list, entry) {
 if (monitor_is_qmp(mon)
 && mon->qmp.commands != _cap_negotiation_commands) {
-qmp_queue_response(mon, qdict);
+qmp_send_response(mon, qdict);
 }
 }
 }
@@ -818,7 +725,6 @@ static void monitor_data_init(Monitor *mon, bool skip_flush,
 mon->skip_flush = skip_flush;
 mon->use_io_thread = use_io_thread;
 mon->qmp.qmp_requests = g_queue_new();
-mon->qmp.qmp_responses = g_queue_new();
 }
 
 static void monitor_data_destroy(Monitor *mon)
@@ -833,9 +739,7 @@ static void monitor_data_destroy(Monitor *mon)
 qemu_mutex_destroy(&mon->mon_lock);
 qemu_mutex_destroy(&mon->qmp.qmp_queue_lock);
 monitor_qmp_cleanup_req_queue_locked(mon);
-monitor_qmp_cleanup_resp_queue_locked(mon);
 g_queue_free(mon->qmp.qmp_requests);
-

[Qemu-devel] [PATCH v3 9/9] qmp: common 'id' handling & make QGA conform to QMP spec

2018-08-25 Thread Marc-André Lureau
Let qmp_dispatch() copy the 'id' field. That way any qmp client will
conform to the specification, including QGA. Furthermore, it
simplifies the work for qemu monitor.

CC: Michael Roth 
Signed-off-by: Marc-André Lureau 
Reviewed-by: Markus Armbruster 
---
 monitor.c   | 33 -
 qapi/qmp-dispatch.c | 10 --
 tests/test-qga.c| 13 +
 3 files changed, 25 insertions(+), 31 deletions(-)

diff --git a/monitor.c b/monitor.c
index 295866d3ec..7126e403b0 100644
--- a/monitor.c
+++ b/monitor.c
@@ -248,8 +248,6 @@ QEMUBH *qmp_dispatcher_bh;
 struct QMPRequest {
 /* Owner of the request */
 Monitor *mon;
-/* "id" field of the request */
-QObject *id;
 /*
  * Request object to be handled or Error to be reported
  * (exactly one of them is non-null)
@@ -350,7 +348,6 @@ int monitor_read_password(Monitor *mon, ReadLineFunc 
*readline_func,
 
 static void qmp_request_free(QMPRequest *req)
 {
-qobject_unref(req->id);
 qobject_unref(req->req);
 error_free(req->err);
 g_free(req);
@@ -4043,18 +4040,14 @@ static int monitor_can_read(void *opaque)
  * Null @rsp can only happen for commands with QCO_NO_SUCCESS_RESP.
  * Nothing is emitted then.
  */
-static void monitor_qmp_respond(Monitor *mon, QDict *rsp, QObject *id)
+static void monitor_qmp_respond(Monitor *mon, QDict *rsp)
 {
 if (rsp) {
-if (id) {
-qdict_put_obj(rsp, "id", qobject_ref(id));
-}
-
 qmp_send_response(mon, rsp);
 }
 }
 
-static void monitor_qmp_dispatch(Monitor *mon, QObject *req, QObject *id)
+static void monitor_qmp_dispatch(Monitor *mon, QObject *req)
 {
 Monitor *old_mon;
 QDict *rsp;
@@ -4079,7 +4072,7 @@ static void monitor_qmp_dispatch(Monitor *mon, QObject 
*req, QObject *id)
 }
 }
 
-monitor_qmp_respond(mon, rsp, id);
+monitor_qmp_respond(mon, rsp);
 qobject_unref(rsp);
 }
 
@@ -4134,13 +4127,15 @@ static void monitor_qmp_bh_dispatcher(void *data)
 /*  qmp_oob_enabled() might change after "qmp_capabilities" */
 need_resume = !qmp_oob_enabled(req_obj->mon);
 if (req_obj->req) {
-trace_monitor_qmp_cmd_in_band(qobject_get_try_str(req_obj->id) ?: "");
-monitor_qmp_dispatch(req_obj->mon, req_obj->req, req_obj->id);
+QDict *qdict = qobject_to(QDict, req_obj->req);
+QObject *id = qdict ? qdict_get(qdict, "id") : NULL;
+trace_monitor_qmp_cmd_in_band(qobject_get_try_str(id) ?: "");
+monitor_qmp_dispatch(req_obj->mon, req_obj->req);
 } else {
 assert(req_obj->err);
 rsp = qmp_error_response(req_obj->err);
 req_obj->err = NULL;
-monitor_qmp_respond(req_obj->mon, rsp, NULL);
+monitor_qmp_respond(req_obj->mon, rsp);
 qobject_unref(rsp);
 }
 
@@ -4167,8 +4162,7 @@ static void handle_qmp_command(void *opaque, QObject 
*req, Error *err)
 
 qdict = qobject_to(QDict, req);
 if (qdict) {
-id = qobject_ref(qdict_get(qdict, "id"));
-qdict_del(qdict, "id");
+id = qdict_get(qdict, "id");
 } /* else will fail qmp_dispatch() */
 
 if (req && trace_event_get_state_backends(TRACE_HANDLE_QMP_COMMAND)) {
@@ -4179,17 +4173,14 @@ static void handle_qmp_command(void *opaque, QObject 
*req, Error *err)
 
 if (qdict && qmp_is_oob(qdict)) {
 /* OOB commands are executed immediately */
-trace_monitor_qmp_cmd_out_of_band(qobject_get_try_str(id)
-  ?: "");
-monitor_qmp_dispatch(mon, req, id);
+trace_monitor_qmp_cmd_out_of_band(qobject_get_try_str(id) ?: "");
+monitor_qmp_dispatch(mon, req);
 qobject_unref(req);
-qobject_unref(id);
 return;
 }
 
 req_obj = g_new0(QMPRequest, 1);
 req_obj->mon = mon;
-req_obj->id = id;
 req_obj->req = req;
 req_obj->err = err;
 
@@ -4224,7 +4215,7 @@ static void handle_qmp_command(void *opaque, QObject 
*req, Error *err)
 
 /*
  * Put the request to the end of queue so that requests will be
- * handled in time order.  Ownership for req_obj, req, id,
+ * handled in time order.  Ownership for req_obj, req,
  * etc. will be delivered to the handler side.
  */
 g_queue_push_tail(mon->qmp.qmp_requests, req_obj);
diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c
index 1d922e04f7..5f812bb9f2 100644
--- a/qapi/qmp-dispatch.c
+++ b/qapi/qmp-dispatch.c
@@ -58,6 +58,8 @@ static QDict *qmp_dispatch_check_obj(const QObject *request, 
bool allow_oob,
"QMP input member 'arguments' must be an object");
 return NULL;
 }
+} else if (!strcmp(arg_name, "id")) {
+continue;
 } else {
 error_setg(errp, "QMP input member '%s' is unexpected",
arg_name);
@@ -165,11 +167,11 @@ QDict *qmp_dispatch(QmpCommandList *cmds, QObject 
*request,
 bool allow_oob)

[Qemu-devel] [PATCH v3 5/9] json-lexer: make it safe to call destroy multiple times

2018-08-25 Thread Marc-André Lureau
We can easily avoid the burden of checking if the lexer was
initialized prior to calling destroy by the caller, let's do it.

This allows simplification in state tracking in the qmp-async RFC
series, the patch "qmp: add QmpSession" can call qmp_session_destroy()
multiple times, which in turn calls json_lexer_destroy().

Signed-off-by: Marc-André Lureau 
---
 qobject/json-lexer.c | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/qobject/json-lexer.c b/qobject/json-lexer.c
index e1745a3d95..39969047f4 100644
--- a/qobject/json-lexer.c
+++ b/qobject/json-lexer.c
@@ -351,5 +351,8 @@ void json_lexer_flush(JSONLexer *lexer)
 
 void json_lexer_destroy(JSONLexer *lexer)
 {
-g_string_free(lexer->token, true);
+if (lexer->token) {
+g_string_free(lexer->token, true);
+lexer->token = NULL;
+}
 }
-- 
2.18.0.547.g1d89318c48




[Qemu-devel] [PATCH v3 7/9] tests: add a qmp success-response test

2018-08-25 Thread Marc-André Lureau
Verify the usage of this schema feature and the API behaviour.  This
should be the only case where qmp_dispatch() returns NULL without
error.

Signed-off-by: Marc-André Lureau 
---
 tests/test-qmp-cmds.c   | 17 +
 tests/qapi-schema/qapi-schema-test.json |  2 ++
 tests/qapi-schema/qapi-schema-test.out  |  2 ++
 3 files changed, 21 insertions(+)

diff --git a/tests/test-qmp-cmds.c b/tests/test-qmp-cmds.c
index ab414fa0c9..8d5100a324 100644
--- a/tests/test-qmp-cmds.c
+++ b/tests/test-qmp-cmds.c
@@ -32,6 +32,10 @@ void qmp_test_flags_command(Error **errp)
 {
 }
 
+void qmp_cmd_success_response(Error **errp)
+{
+}
+
 Empty2 *qmp_user_def_cmd0(Error **errp)
 {
 return g_new0(Empty2, 1);
@@ -153,6 +157,17 @@ static void test_dispatch_cmd_failure(void)
 qobject_unref(req);
 }
 
+static void test_dispatch_cmd_success_response(void)
+{
+QDict *req = qdict_new();
+QDict *resp;
+
+qdict_put_str(req, "execute", "cmd-success-response");
+resp = qmp_dispatch(_commands, QOBJECT(req), false);
+assert(resp == NULL);
+qobject_unref(req);
+}
+
 static QObject *test_qmp_dispatch(QDict *req)
 {
 QDict *resp;
@@ -289,6 +304,8 @@ int main(int argc, char **argv)
 g_test_add_func("/qmp/dispatch_cmd", test_dispatch_cmd);
 g_test_add_func("/qmp/dispatch_cmd_failure", test_dispatch_cmd_failure);
 g_test_add_func("/qmp/dispatch_cmd_io", test_dispatch_cmd_io);
+g_test_add_func("/qmp/dispatch_cmd_success_response",
+test_dispatch_cmd_success_response);
 g_test_add_func("/qmp/dealloc_types", test_dealloc_types);
 g_test_add_func("/qmp/dealloc_partial", test_dealloc_partial);
 
diff --git a/tests/qapi-schema/qapi-schema-test.json 
b/tests/qapi-schema/qapi-schema-test.json
index 11aa4c8f8d..fb03163430 100644
--- a/tests/qapi-schema/qapi-schema-test.json
+++ b/tests/qapi-schema/qapi-schema-test.json
@@ -137,6 +137,8 @@
   'data': {'ud1a': 'UserDefOne', '*ud1b': 'UserDefOne'},
   'returns': 'UserDefTwo' }
 
+{ 'command': 'cmd-success-response', 'data': {}, 'success-response': false }
+
 # Returning a non-dictionary requires a name from the whitelist
 { 'command': 'guest-get-time', 'data': {'a': 'int', '*b': 'int' },
   'returns': 'int' }
diff --git a/tests/qapi-schema/qapi-schema-test.out 
b/tests/qapi-schema/qapi-schema-test.out
index 0da92455da..218ac7d556 100644
--- a/tests/qapi-schema/qapi-schema-test.out
+++ b/tests/qapi-schema/qapi-schema-test.out
@@ -156,6 +156,8 @@ object q_obj_user_def_cmd2-arg
 member ud1b: UserDefOne optional=True
 command user_def_cmd2 q_obj_user_def_cmd2-arg -> UserDefTwo
gen=True success_response=True boxed=False oob=False preconfig=False
+command cmd-success-response None -> None
+   gen=True success_response=False boxed=False oob=False preconfig=False
 object q_obj_guest-get-time-arg
 member a: int optional=False
 member b: int optional=True
-- 
2.18.0.547.g1d89318c48




[Qemu-devel] [PATCH v3 4/9] monitor: no need to save need_resume

2018-08-25 Thread Marc-André Lureau
There is no need for per-command need_resume granularity; it should
resume after running a non-oob command on an oob-disabled monitor.

Signed-off-by: Marc-André Lureau 
Reviewed-by: Markus Armbruster 
---
 monitor.c | 13 -
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/monitor.c b/monitor.c
index 084900c602..295866d3ec 100644
--- a/monitor.c
+++ b/monitor.c
@@ -256,12 +256,6 @@ struct QMPRequest {
  */
 QObject *req;
 Error *err;
-/*
- * Whether we need to resume the monitor afterward.  This flag is
- * used to emulate the old QMP server behavior that the current
- * command must be completed before execution of the next one.
- */
-bool need_resume;
 };
 typedef struct QMPRequest QMPRequest;
 
@@ -4131,11 +4125,14 @@ static void monitor_qmp_bh_dispatcher(void *data)
 {
 QMPRequest *req_obj = monitor_qmp_requests_pop_any();
 QDict *rsp;
+bool need_resume;
 
 if (!req_obj) {
 return;
 }
 
+/*  qmp_oob_enabled() might change after "qmp_capabilities" */
+need_resume = !qmp_oob_enabled(req_obj->mon);
 if (req_obj->req) {
 trace_monitor_qmp_cmd_in_band(qobject_get_try_str(req_obj->id) ?: "");
 monitor_qmp_dispatch(req_obj->mon, req_obj->req, req_obj->id);
@@ -4147,7 +4144,7 @@ static void monitor_qmp_bh_dispatcher(void *data)
 qobject_unref(rsp);
 }
 
-if (req_obj->need_resume) {
+if (need_resume) {
 /* Pairs with the monitor_suspend() in handle_qmp_command() */
 monitor_resume(req_obj->mon);
 }
@@ -4195,7 +4192,6 @@ static void handle_qmp_command(void *opaque, QObject 
*req, Error *err)
 req_obj->id = id;
 req_obj->req = req;
 req_obj->err = err;
-req_obj->need_resume = false;
 
 /* Protect qmp_requests and fetching its length. */
 qemu_mutex_lock(>qmp.qmp_queue_lock);
@@ -4208,7 +4204,6 @@ static void handle_qmp_command(void *opaque, QObject 
*req, Error *err)
  */
 if (!qmp_oob_enabled(mon)) {
 monitor_suspend(mon);
-req_obj->need_resume = true;
 } else {
 /* Drop the request if queue is full. */
 if (mon->qmp.qmp_requests->length >= QMP_REQ_QUEUE_LEN_MAX) {
-- 
2.18.0.547.g1d89318c48




[Qemu-devel] [PATCH v3 1/9] monitor: consitify qmp_send_response() QDict argument

2018-08-25 Thread Marc-André Lureau
Signed-off-by: Marc-André Lureau 
Reviewed-by: Markus Armbruster 
---
 monitor.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/monitor.c b/monitor.c
index 021c11b1bf..a1db4db487 100644
--- a/monitor.c
+++ b/monitor.c
@@ -503,9 +503,9 @@ int monitor_fprintf(FILE *stream, const char *fmt, ...)
 return 0;
 }
 
-static void qmp_send_response(Monitor *mon, QDict *rsp)
+static void qmp_send_response(Monitor *mon, const QDict *rsp)
 {
-QObject *data = QOBJECT(rsp);
+const QObject *data = QOBJECT(rsp);
 QString *json;
 
 json = mon->flags & MONITOR_USE_PRETTY ? qobject_to_json_pretty(data) :
-- 
2.18.0.547.g1d89318c48




[Qemu-devel] [PATCH v3 2/9] qmp: constify qmp_is_oob()

2018-08-25 Thread Marc-André Lureau
Signed-off-by: Marc-André Lureau 
Reviewed-by: Markus Armbruster 
---
 include/qapi/qmp/dispatch.h | 2 +-
 qapi/qmp-dispatch.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h
index 4e2e749faf..68a528a9aa 100644
--- a/include/qapi/qmp/dispatch.h
+++ b/include/qapi/qmp/dispatch.h
@@ -50,7 +50,7 @@ bool qmp_has_success_response(const QmpCommand *cmd);
 QDict *qmp_error_response(Error *err);
 QDict *qmp_dispatch(QmpCommandList *cmds, QObject *request,
 bool allow_oob);
-bool qmp_is_oob(QDict *dict);
+bool qmp_is_oob(const QDict *dict);
 
 typedef void (*qmp_cmd_callback_fn)(QmpCommand *cmd, void *opaque);
 
diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c
index d8da1a62de..1d922e04f7 100644
--- a/qapi/qmp-dispatch.c
+++ b/qapi/qmp-dispatch.c
@@ -155,7 +155,7 @@ QDict *qmp_error_response(Error *err)
 /*
  * Does @qdict look like a command to be run out-of-band?
  */
-bool qmp_is_oob(QDict *dict)
+bool qmp_is_oob(const QDict *dict)
 {
 return qdict_haskey(dict, "exec-oob")
 && !qdict_haskey(dict, "execute");
-- 
2.18.0.547.g1d89318c48




[Qemu-devel] [PATCH v3 0/9] monitor: various code simplification and fixes

2018-08-25 Thread Marc-André Lureau
Hi,

This series is a rebased subset of "[PATCH v3 00/38] RFC: monitor: add
asynchronous command type".

v3:
- updated "tests: add a few qmp tests" to move tests to new files
- comment update in "qmp: common 'id' handling & make QGA conform to QMP spec"
- commit message updates, add r-b tags

Marc-André Lureau (9):
  monitor: constify qmp_send_response() QDict argument
  qmp: constify qmp_is_oob()
  Revert "qmp: isolate responses into io thread"
  monitor: no need to save need_resume
  json-lexer: make it safe to call destroy multiple times
  tests: add a few qmp tests
  tests: add a qmp success-response test
  qga: process_event() simplification
  qmp: common 'id' handling & make QGA conform to QMP spec

 include/qapi/qmp/dispatch.h |   2 +-
 monitor.c   | 170 +++-
 qapi/qmp-dispatch.c |  12 +-
 qga/main.c  |  47 ++-
 qobject/json-lexer.c|   5 +-
 tests/qmp-cmd-test.c|  31 +
 tests/qmp-test.c|  18 +++
 tests/test-qga.c|  13 +-
 tests/test-qmp-cmds.c   |  17 +++
 tests/qapi-schema/qapi-schema-test.json |   2 +
 tests/qapi-schema/qapi-schema-test.out  |   2 +
 11 files changed, 119 insertions(+), 200 deletions(-)

-- 
2.18.0.547.g1d89318c48




[Qemu-devel] [PATCH] tcg: check for undefined labels

2018-08-25 Thread Roman Kapl
Currently, if a jump to a label that is not defined anywhere in the code
is generated, QEMU will happily emit the code, but with effectively
random jump target (no relocation done). At least check that there are
no unprocessed relocations remaining when running a debug build and
print a warning message.

This could help debug or detect earlier errors like
c2d9644e6d ("target/arm: Fix crash on conditional instruction in an IT block")

Signed-off-by: Roman Kapl 
---
 tcg/tcg.c | 29 +
 tcg/tcg.h |  3 ++-
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index f27b22bd3c..3412502069 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -256,6 +256,21 @@ static __attribute__((unused)) inline void 
tcg_patch64(tcg_insn_unit *p,
 }
 #endif
 
+static void tcg_pending_relocs_inc(TCGContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+s->pending_relocs++;
+#endif
+}
+
+static void tcg_pending_relocs_dec(TCGContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+tcg_debug_assert(s->pending_relocs > 0);
+s->pending_relocs--;
+#endif
+}
+
 /* label relocation processing */
 
 static void tcg_out_reloc(TCGContext *s, tcg_insn_unit *code_ptr, int type,
@@ -276,6 +291,7 @@ static void tcg_out_reloc(TCGContext *s, tcg_insn_unit 
*code_ptr, int type,
 r->addend = addend;
 r->next = l->u.first_reloc;
 l->u.first_reloc = r;
+tcg_pending_relocs_inc(s);
 }
 }
 
@@ -287,6 +303,7 @@ static void tcg_out_label(TCGContext *s, TCGLabel *l, 
tcg_insn_unit *ptr)
 tcg_debug_assert(!l->has_value);
 
 for (r = l->u.first_reloc; r != NULL; r = r->next) {
+tcg_pending_relocs_dec(s);
 patch_reloc(r->ptr, r->type, value, r->addend);
 }
 
@@ -3518,6 +3535,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
 #ifdef TCG_TARGET_NEED_POOL_LABELS
 s->pool_labels = NULL;
 #endif
+#ifdef CONFIG_DEBUG_TCG
+s->pending_relocs = 0;
+#endif
 
 num_insns = -1;
 QTAILQ_FOREACH(op, &s->ops, link) {
@@ -3587,6 +3607,15 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
 }
 }
 tcg_debug_assert(num_insns >= 0);
+
+#ifdef CONFIG_DEBUG_TCG
+if (s->pending_relocs) {
+qemu_log("warning: block at " TARGET_FMT_lx  " has "
+ "%d unresolved references to jump labels\n",
+ tb->pc, s->pending_relocs);
+}
+#endif
+
 s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
 
 /* Generate TB finalization at the end of block */
diff --git a/tcg/tcg.h b/tcg/tcg.h
index f9f12378e9..e80c511f7c 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -241,7 +241,7 @@ typedef struct TCGRelocation {
 int type;
 tcg_insn_unit *ptr;
 intptr_t addend;
-} TCGRelocation; 
+} TCGRelocation;
 
 typedef struct TCGLabel {
 unsigned has_value : 1;
@@ -679,6 +679,7 @@ struct TCGContext {
 #ifdef CONFIG_DEBUG_TCG
 int temps_in_use;
 int goto_tb_issue_mask;
+int pending_relocs;
 #endif
 
 /* Code generation.  Note that we specifically do not use tcg_insn_unit
-- 
2.18.0




Re: [Qemu-devel] [PATCH 1/7] jobs: change start callback to run callback

2018-08-25 Thread Max Reitz
On 2018-08-23 01:01, John Snow wrote:
> 
> 
> On 08/22/2018 06:51 AM, Max Reitz wrote:
>> On 2018-08-17 21:04, John Snow wrote:
>>> Presently we codify the entry point for a job as the "start" callback,
>>> but a more apt name would be "run" to clarify the idea that when this
>>> function returns we consider the job to have "finished," except for
>>> any cleanup which occurs in separate callbacks later.
>>>
>>> As part of this clarification, change the signature to include an error
>>> object and a return code. The error ptr is not yet used, and the return
>>> code while captured, will be overwritten by actions in the job_completed
>>> function.
>>>
>>> Signed-off-by: John Snow 
>>> ---
>>>  block/backup.c|  7 ---
>>>  block/commit.c|  7 ---
>>>  block/create.c|  8 +---
>>>  block/mirror.c| 10 ++
>>>  block/stream.c|  7 ---
>>>  include/qemu/job.h|  2 +-
>>>  job.c |  6 +++---
>>>  tests/test-bdrv-drain.c   |  7 ---
>>>  tests/test-blockjob-txn.c | 16 
>>>  tests/test-blockjob.c |  7 ---
>>>  10 files changed, 43 insertions(+), 34 deletions(-)
>>
>> [...]
>>
>>> diff --git a/job.c b/job.c
>>> index fa671b431a..898260b2b3 100644
>>> --- a/job.c
>>> +++ b/job.c
>>> @@ -544,16 +544,16 @@ static void coroutine_fn job_co_entry(void *opaque)
>>>  {
>>>  Job *job = opaque;
>>>  
>>> -assert(job && job->driver && job->driver->start);
>>> +assert(job && job->driver && job->driver->run);
>>>  job_pause_point(job);
>>> -job->driver->start(job);
>>> +job->ret = job->driver->run(job, NULL);
>>>  }
>>
>> Hmmm, this breaks the iff relationship with job->error.  We might call
>> job_update_rc() afterwards, but then job_completed() would need to free
>> it if it overwrites it with the error description from a potential error
>> object.
>>
>> Also, I suspect the following patches might fix the relationship anyway?
>>  (But then an "XXX: This does not hold right now, but will be fixed in a
>> future patch" in the documentation of Job.error might help.)
>>
>> Max
>>
> 
> Hmm... does it? ... I guess you mean that we are setting job->ret
> earlier than we used to, which gives us a window where you can have ret
> set, but error unset.
> 
> This will get settled out by the end of the series anyway:

Oh no, it appears I accidentally removed yet another chunk from my reply
to patch 2...

> - char *error gets replaced with Error *err,

Which is basically the same.  I noted in the deleted chunk that patch 2
just removes the iff relationship from the describing comment, but, well...

> - I remove the error object from job_completed
> - v2 will remove the ret argument, too.

The most important bit of the chunk I removed was that I was complaining
about Job.ret still being there.  I don't really see the point of this
patch here at this point.

Unfortunately I can't quite recall...

Having a central Error object doesn't really make sense.  Whenever the
block job wants to return an error, it should probably do so as "return"
values of methods (like .run()).  Same for ret, of course.

I understand that this is probably really only possible after v2 when
you've made more use of abort/commit.  But still, I don't think this
patch improves anything, so I would leave this clean-up until later when
you can really do something.

I suppose the idea here is that you want to drop the errp parameter from
job_completed(), because it is not going to be called by .exit().  But
the obvious way around this would be to pass an errp to .exit() and then
pass the result on to job_completed().

Max



signature.asc
Description: OpenPGP digital signature


Re: [Qemu-devel] [PATCH 2/7] jobs: canonize Error object

2018-08-25 Thread Max Reitz
On 2018-08-23 00:50, John Snow wrote:
> 
> 
> On 08/22/2018 06:59 AM, Max Reitz wrote:
>> On 2018-08-21 02:10, John Snow wrote:
>>>
>>>
>>> On 08/17/2018 03:04 PM, John Snow wrote:
 +error_setg_errno(&job->err, -job->ret, "job failed");
>>>
>>> Kevin specifically asked for me to change this, and I lost it in the
>>> shuffle. I'll send a v3 now, since there are enough nits to warrant it,
>>> and I think I want to adjust a few things to set up the "part II"
>>> portion of this changeset a little more nicely.
>>
>> But error_setg_errno() uses either strerror() or
>> g_win32_error_message(), depending on the host OS.  I prefer that over a
>> blind strerror(), to be honest.
>>
> 
> uhh, does it...?
> 
> it looks like error_setg_errno is always error_setg_errno_internal,
> which always uses strerror... am I misreading?

Ah, right, I was...  I thought the #ifdef below somehow was referring to
the same function, so it resolved to something else.  Yeah, sure,
usually the errno values comes from something we set ourselves and not
even the real OS API, so we need to always use strerror().

Sorry.

Max



signature.asc
Description: OpenPGP digital signature


Re: [Qemu-devel] [PATCH 4/7] block/commit: utilize job_exit shim

2018-08-25 Thread Max Reitz
On 2018-08-22 23:55, John Snow wrote:
> 
> 
> On 08/22/2018 07:58 AM, Max Reitz wrote:
>> On 2018-08-17 21:18, John Snow wrote:
>>>
>>>
>>> On 08/17/2018 03:04 PM, John Snow wrote:
 Change the manual deferment to commit_complete into the implicit
 callback to job_exit, renaming commit_complete to commit_exit.

 This conversion does change the timing of when job_completed is
 called to after the bdrv_replace_node and bdrv_unref calls, which
 could have implications for bjob->blk which will now be put down
 after this cleanup.

 Kevin highlights that we did not take any permissions for that backend
 at job creation time, so it is safe to reorder these operations.

 Signed-off-by: John Snow 
 ---
  block/commit.c | 20 
  1 file changed, 4 insertions(+), 16 deletions(-)

 diff --git a/block/commit.c b/block/commit.c
 index 4a17bb73ec..93c3b6a39e 100644
 --- a/block/commit.c
 +++ b/block/commit.c
 @@ -68,19 +68,13 @@ static int coroutine_fn commit_populate(BlockBackend 
 *bs, BlockBackend *base,
  return 0;
  }
  
 -typedef struct {
 -int ret;
 -} CommitCompleteData;
 -
 -static void commit_complete(Job *job, void *opaque)
 +static void commit_exit(Job *job)
  {
  CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
  BlockJob *bjob = &s->common;
 -CommitCompleteData *data = opaque;
  BlockDriverState *top = blk_bs(s->top);
  BlockDriverState *base = blk_bs(s->base);
  BlockDriverState *commit_top_bs = s->commit_top_bs;
 -int ret = data->ret;
  bool remove_commit_top_bs = false;
  
  /* Make sure commit_top_bs and top stay around until 
 bdrv_replace_node() */
 @@ -93,8 +87,8 @@ static void commit_complete(Job *job, void *opaque)
  
  if (!job_is_cancelled(job) && ret == 0) {
>>>
>>> forgot to actually squash the change in here that replaces `ret` with
>>> `job->ret`.
>>
>> A better interface would be one function that is called when .run() was
>> successful, and one that is called when it was not.  (They can still
>> resolve into a single function in the job that is just called with a
>> boolean that's either true or false, but accessing *job to find out
>> whether .run() was successful or not seems kind of convoluted to me.)
>>
>> Max
>>
> 
> Sorry, I need a better cover letter.

My mistake, I need to read more than the first few lines of a cover letter.

> .exit() is going away, and either .prepare() or .abort() will be called
> after .run(), so what you're asking for will be true, just not all at
> once in this series.

Yay!

Max



signature.asc
Description: OpenPGP digital signature


Re: [Qemu-devel] [PATCH 3/7] jobs: add exit shim

2018-08-25 Thread Max Reitz
On 2018-08-22 23:52, John Snow wrote:
> 
> 
> On 08/22/2018 07:43 AM, Max Reitz wrote:
>> On 2018-08-17 21:04, John Snow wrote:
>>> All jobs do the same thing when they leave their running loop:
>>> - Store the return code in a structure
>>> - wait to receive this structure in the main thread
>>> - signal job completion via job_completed
>>>
>>> Few jobs do anything beyond exactly this. Consolidate this exit
>>> logic for a net reduction in SLOC.
>>>
>>> More seriously, when we utilize job_defer_to_main_loop_bh to call
>>> a function that calls job_completed, job_finalize_single will run
>>> in a context where it has recursively taken the aio_context lock,
>>> which can cause hangs if it puts down a reference that causes a flush.
>>>
>>> You can observe this in practice by looking at mirror_exit's careful
>>> placement of job_completed and bdrv_unref calls.
>>>
>>> If we centralize job exiting, we can signal job completion from outside
>>> of the aio_context, which should allow for job cleanup code to run with
>>> only one lock, which makes cleanup callbacks less tricky to write.
>>>
>>> Signed-off-by: John Snow 
>>> ---
>>>  include/qemu/job.h |  7 +++
>>>  job.c  | 19 +++
>>>  2 files changed, 26 insertions(+)
>>
>> Currently all jobs do this, the question of course is why.  The answer
>> is because they are block jobs that need to do some graph manipulation
>> in the main thread, right?
>>
> 
> Yep.
> 
>> OK, that's reasonable enough, that sounds like even non-block jobs may
>> need this (i.e. modify some global qemu state that you can only do in
>> the main loop).  Interestingly, the create job only calls
>> job_completed() of which it says nowhere that it needs to be executed in
>> the main loop.
>>
> 
> Yeah, not all jobs will have anything meaningful to do in the main loop
> context. This is one of them.
> 
>> ...on second thought, do we really want to execute job_complete() in the
>> main loop?  First of all, all of the transactional functions will run in
>> the main loop.  Which makes sense, but it isn't noted anywhere.
>> Secondly, we may end up calling JobDriver.user_resume(), which is
>> probably not something we want to call in the main loop.
>>
> 
> I think we need to execute job_complete in the main loop, or otherwise
> restructure the code that can run between job_completed and
> job_finalize_single so that .prepare/.commit/.abort/.clean run in the
> main thread, which is something we want to preserve.

Sure.

> It's simpler just to say that complete will run from the main thread,
> like it does presently.

Yes, but we don't say that.

> Why would we not want to call user_resume from the main loop? That's
> directly where it's called from, since it gets invoked directly from the
> qmp thread.

Hmm!  True indeed.

The reason why we might not want to do it is because the job may not run
in the main loop, so modifying the job (especially invoking a job
method) may be dangerous without taking precautions.

>> OTOH, job_finish_sync() is something that has to be run in the main loop
>> because it polls the main loop (and as far as my FUSE experiments have
>> told me, polling a foreign AioContext doesn't work).
>>
>> So...  I suppose it would be nice if we had a real distinction which
>> functions are run in which AioContext.  It seems like we indeed want to
>> run job_completed() in the main loop, but what to do about the
>> user_resume() call in job_cancel_async()?
>>
> 
> I don't think we need to do anything -- at least, these functions
> *already* run from the main loop.

Yeah, but we don't mark that anywhere.  I really don't like that.  Jobs
need to know which of their functions are run in which AioContext.

> mirror_exit et al get scheduled from job_defer_to_main_loop and call
> job_completed there, so it's already always done from the main loop; I'm
> just cutting out the part where the jobs have to manually schedule this.

I'm not saying what you're doing is wrong, I'm just saying tracking
which things are running in which context is not easy because there are
no comments on how it's supposed to be run.  (Apart from your new
.exit() method which does say that it's run in the main loop.)

No, I don't find it obvious which functions are run in which context
when first I have to think about in which context those functions are
used (e.g. user_resume is usually the result of a QMP command, so it's
run in the main loop; the transactional methods are part of completion,
which is done in the main loop, so they are also called in the main
loop; and so on).

But that's not part of this series.  It just occurred to me when
tracking down which function belongs to which context when reviewing
this patch.

Max



signature.asc
Description: OpenPGP digital signature


Re: [Qemu-devel] [PULL 0/4] seccomp branch queue

2018-08-25 Thread Peter Maydell
On 23 August 2018 at 15:52, Eduardo Otubo  wrote:
> The following changes since commit 3392fbee4e435658733bbe9aab23392660558b59:
>
>   Merge remote-tracking branch 
> 'remotes/vivier2/tags/linux-user-for-3.1-pull-request' into staging 
> (2018-08-23 12:28:17 +0100)
>
> are available in the Git repository at:
>
>   https://github.com/otubo/qemu.git tags/pull-seccomp-20180823
>
> for you to fetch changes up to 70dfabeaa79ba4d7a3b699abe1a047c8012db114:
>
>   seccomp: set the seccomp filter to all threads (2018-08-23 16:45:44 +0200)
>
> 
> pull-seccomp-20180823
>
> 
> Marc-André Lureau (4):
>   seccomp: use SIGSYS signal instead of killing the thread
>   seccomp: prefer SCMP_ACT_KILL_PROCESS if available
>   configure: require libseccomp 2.2.0
>   seccomp: set the seccomp filter to all threads
>
>  configure  |  7 ++-
>  qemu-seccomp.c | 36 +++-
>  2 files changed, 37 insertions(+), 6 deletions(-)


Applied, thanks.

-- PMM



Re: [Qemu-devel] [PATCH 3/7] jobs: add exit shim

2018-08-25 Thread Max Reitz
On 2018-08-22 23:45, John Snow wrote:
> 
> 
> On 08/22/2018 07:52 AM, Max Reitz wrote:
>> On 2018-08-22 13:43, Max Reitz wrote:
>>
>> [...]
>>
>>> I'd like .main_loop_settle().  Or .main_loop_post_run().  I think it's
>>> OK to have names that aren't as cool and tense as possible, when in
>>> return they actually tell you what they're doing.  (Sure,
>>> .main_loop_post_run() sounds really stupid, but it tells you exactly
>>> when the function is called and what it's for.)
>>
>> Looking at the next patch, I realized that .main_loop_complete() or
>> .complete_in_main_loop() would work just as well.  (No, I don't see any
>> confusion with whether you need to call job_completed(), especially
>> since after this series you can probably make that function static to
>> job.c.)
>>
>> Max
>>
> 
> I'm sorry to announce that after the part two of this series, the
> callback will be erased completely, so the name is maybe less... important.

That's what I get for not reading the cover letter.

Max



signature.asc
Description: OpenPGP digital signature


Re: [Qemu-devel] [PULL 0/3] vfio: Fix coverity, postcopy, and error path issues

2018-08-25 Thread Peter Maydell
On 23 August 2018 at 19:27, Alex Williamson  wrote:
> The following changes since commit 3392fbee4e435658733bbe9aab23392660558b59:
>
>   Merge remote-tracking branch 
> 'remotes/vivier2/tags/linux-user-for-3.1-pull-request' into staging 
> (2018-08-23 12:28:17 +0100)
>
> are available in the Git repository at:
>
>   git://github.com/awilliam/qemu-vfio.git tags/vfio-fixes-20180823.1
>
> for you to fetch changes up to 154304cd6e99e4222ed762976f9d9aca33c094d3:
>
>   postcopy: Synchronize usage of the balloon inhibitor (2018-08-23 10:45:58 
> -0600)
>
> 
> VFIO fixes 2018-08-23
>
>  - Fix coverity reported issue with use of realpath (Alex Williamson)
>
>  - Cleanup file descriptor in error path (Alex Williamson)
>
>  - Fix postcopy use of new balloon inhibitor (Alex Williamson)
>
> 
> Alex Williamson (3):
>   vfio/pci: Handle subsystem realpath() returning NULL
>   vfio/pci: Fix failure to close file descriptor on error
>   postcopy: Synchronize usage of the balloon inhibitor
>
>  hw/vfio/common.c |  1 +
>  hw/vfio/pci.c|  2 +-
>  migration/postcopy-ram.c | 18 --
>  3 files changed, 18 insertions(+), 3 deletions(-)

Applied, thanks.

-- PMM



Re: [Qemu-devel] [PULL 00/58] QObject patches for 2018-08-24

2018-08-25 Thread Peter Maydell
On 24 August 2018 at 20:31, Markus Armbruster  wrote:
> The following changes since commit 1dfb85a8755096beecf182a617493d539259cbea:
>
>   Merge remote-tracking branch 'remotes/juanquintela/tags/check/20180822' 
> into staging (2018-08-24 14:46:31 +0100)
>
> are available in the Git repository at:
>
>   git://repo.or.cz/qemu/armbru.git tags/pull-qobject-2018-08-24
>
> for you to fetch changes up to 37aded92c27d0e56cd27f1c29494fc9f8c873cdd:
>
>   json: Update references to RFC 7159 to RFC 8259 (2018-08-24 20:27:14 +0200)
>
> 
> QObject patches for 2018-08-24
>
> JSON is such a simple language, so writing a parser should be easy,
> shouldn't it?  Well, the evidence is in, and it's a lot of patches.
> Summary of fixes:
>
> * Reject ASCII control characters in strings as RFC 7159 specifies
>
> * Reject all invalid UTF-8 sequences, not just some
>
> * Reject invalid \u escapes
>
> * Implement \u surrogate pairs as specified by RFC 7159
>
> * Don't ignore \u silently, map it to \xC0\80 (modified UTF-8)
>
> * qobject_from_json() is ridicilously broken for input containing more
>   than one value, fix
>
> * Don't ignore trailing unterminated structures
>
> * Less cavalierly cruel error reporting
>
> Topped off with tests and cleanups.

Applied, thanks.

-- PMM



Re: [Qemu-devel] [PULL 00/45] MIPS queue August 2018 v6

2018-08-25 Thread Peter Maydell
On 24 August 2018 at 16:59, Aleksandar Markovic
 wrote:
> From: Aleksandar Markovic 
>
> The following changes since commit 1dfb85a8755096beecf182a617493d539259cbea:
>
>   Merge remote-tracking branch 'remotes/juanquintela/tags/check/20180822' 
> into staging (2018-08-24 14:46:31 +0100)
>
> are available in the git repository at:
>
>   https://github.com/AMarkovic/qemu tags/mips-queue-aug-2018
>
> for you to fetch changes up to d45942d908edee175a90f915ab92ac302eedf33a:
>
>   target/mips: Add definition of nanoMIPS I7200 CPU (2018-08-24 17:51:59 
> +0200)
>
> 
> MIPS queue August 2018 v6
>
> 
>

Applied, thanks.

-- PMM



Re: [Qemu-devel] [PULL 03/74] es1370: simplify MemoryRegionOps

2018-08-25 Thread Paolo Bonzini
On 24/08/2018 17:04, Peter Maydell wrote:
> because these values aren't contiguous:
> 
> #define ES1370_REG_DAC1_FRAMEADR0xc30
> #define ES1370_REG_DAC2_FRAMEADR0xc38
> #define ES1370_REG_ADC_FRAMEADR 0xd30
> 
> so you can't calculate the value of 'd' from the addr
> this way.
> 
> (Similarly for the SCOUNT registers.)

SCOUNT is actually consecutive.  I've sent a patch for the other two.

Paolo



[Qemu-devel] [PATCH] es1370: fix ADC_FRAMEADR and ADC_FRAMECNT

2018-08-25 Thread Paolo Bonzini
They are not consecutive with DAC1_FRAME* and DAC2_FRAME*.

Fixes: 154c1d1f960c5147a3f8ef00907504112f271cd8
Signed-off-by: Paolo Bonzini 
---
 hw/audio/es1370.c | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c
index dd75c9e8f5..4f980a598b 100644
--- a/hw/audio/es1370.c
+++ b/hw/audio/es1370.c
@@ -506,10 +506,13 @@ static void es1370_write(void *opaque, hwaddr addr, 
uint64_t val, unsigned size)
 d - &s->chan[0], val >> 16, (val & 0xffff));
 break;
 
+case ES1370_REG_ADC_FRAMEADR:
+d += 2;
+goto frameadr;
 case ES1370_REG_DAC1_FRAMEADR:
 case ES1370_REG_DAC2_FRAMEADR:
-case ES1370_REG_ADC_FRAMEADR:
 d += (addr - ES1370_REG_DAC1_FRAMEADR) >> 3;
+frameadr:
 d->frame_addr = val;
 ldebug ("chan %td frame address %#x\n", d - &s->chan[0], val);
 break;
@@ -521,10 +524,13 @@ static void es1370_write(void *opaque, hwaddr addr, 
uint64_t val, unsigned size)
 lwarn ("writing to phantom frame address %#x\n", val);
 break;
 
+case ES1370_REG_ADC_FRAMECNT:
+d += 2;
+goto framecnt;
 case ES1370_REG_DAC1_FRAMECNT:
 case ES1370_REG_DAC2_FRAMECNT:
-case ES1370_REG_ADC_FRAMECNT:
 d += (addr - ES1370_REG_DAC1_FRAMECNT) >> 3;
+framecnt:
 d->frame_cnt = val;
 d->leftover = 0;
 ldebug ("chan %td frame count %d, buffer size %d\n",
-- 
2.17.1