Re: [Qemu-devel] [PATCH v9 07/14] hw/arm/smmuv3: Implement MMIO write operations

2018-03-09 Thread Auger Eric
Hi Peter,

On 08/03/18 19:37, Peter Maydell wrote:
> On 17 February 2018 at 18:46, Eric Auger  wrote:
>> Now we have relevant helpers for queue and irq
>> management, let's implement MMIO write operations.
>>
>> Signed-off-by: Eric Auger 
>>
>> ---
>>
>> v7 -> v8:
>> - precise in the commit message invalidation commands
>>   are not yet treated.
>> - use new queue helpers
>> - do not decode unhandled commands at this stage
>> ---
>>  hw/arm/smmuv3-internal.h |  24 +++---
>>  hw/arm/smmuv3.c  | 111 
>> +--
>>  hw/arm/trace-events  |   6 +++
>>  3 files changed, 132 insertions(+), 9 deletions(-)
>>
>> diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
>> index c0771ce..5af97ae 100644
>> --- a/hw/arm/smmuv3-internal.h
>> +++ b/hw/arm/smmuv3-internal.h
>> @@ -152,6 +152,25 @@ static inline uint64_t smmu_read64(uint64_t r, unsigned 
>> offset,
>>  return extract64(r, offset << 3, 32);
>>  }
>>
>> +static inline void smmu_write64(uint64_t *r, unsigned offset,
>> +unsigned size, uint64_t value)
>> +{
>> +if (size == 8 && !offset) {
>> +*r  = value;
>> +}
>> +
>> +/* 32 bit access */
>> +
>> +if (offset && offset != 4)  {
>> +qemu_log_mask(LOG_GUEST_ERROR,
>> +  "SMMUv3 MMIO write: bad offset/size %u/%u\n",
>> +  offset, size);
>> +return ;
>> +}
>> +
>> +*r = deposit64(*r, offset << 3, 32, value);
> 
> Similar remarks apply to this helper as to smmu_read64 in the earlier patch.
removed
> 
>> +}
>> +
>>  /* Interrupts */
>>
>>  #define smmuv3_eventq_irq_enabled(s)   \
>> @@ -159,9 +178,6 @@ static inline uint64_t smmu_read64(uint64_t r, unsigned 
>> offset,
>>  #define smmuv3_gerror_irq_enabled(s)  \
>>  (FIELD_EX32(s->irq_ctrl, IRQ_CTRL, GERROR_IRQEN))
>>
>> -void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask);
>> -void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t gerrorn);
>> -
>>  /* Queue Handling */
>>
>>  #define LOG2SIZE(q)extract64((q)->base, 0, 5)
>> @@ -310,6 +326,4 @@ enum { /* Command completion notification */
>>  addr; \
>>  })
>>
>> -int smmuv3_cmdq_consume(SMMUv3State *s);
>> -
>>  #endif
>> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
>> index 0b57215..fcfdbb0 100644
>> --- a/hw/arm/smmuv3.c
>> +++ b/hw/arm/smmuv3.c
>> @@ -37,7 +37,8 @@
>>   * @irq: irq type
>>   * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
>>   */
>> -void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask)
>> +static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
>> +   uint32_t gerror_mask)
>>  {
>>
>>  bool pulse = false;
>> @@ -75,7 +76,7 @@ void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, 
>> uint32_t gerror_mask)
>>  }
>>  }
>>
>> -void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
>> +static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
>>  {
>>  uint32_t pending = s->gerror ^ s->gerrorn;
>>  uint32_t toggled = s->gerrorn ^ new_gerrorn;
>> @@ -199,7 +200,7 @@ static void smmuv3_init_regs(SMMUv3State *s)
>>  s->sid_split = 0;
>>  }
>>
>> -int smmuv3_cmdq_consume(SMMUv3State *s)
>> +static int smmuv3_cmdq_consume(SMMUv3State *s)
>>  {
>>  SMMUCmdError cmd_error = SMMU_CERROR_NONE;
>>  SMMUQueue *q = &s->cmdq;
>> @@ -298,7 +299,109 @@ int smmuv3_cmdq_consume(SMMUv3State *s)
>>  static void smmu_write_mmio(void *opaque, hwaddr addr,
>>  uint64_t val, unsigned size)
>>  {
>> -/* not yet implemented */
>> +SMMUState *sys = opaque;
>> +SMMUv3State *s = ARM_SMMUV3(sys);
>> +
>> +/* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
>> +addr &= ~0x1;
>> +
>> +if (size != 4 && size != 8) {
>> +qemu_log_mask(LOG_GUEST_ERROR,
>> +  "SMMUv3 MMIO write: bad size %u\n", size);
>> +}
> 
> As with read, this can never happen so you don't need to check for it.
> 
> As with read, probably better to explicitly whitelist the 64-bit
> accessible offsets, and LOG_GUEST_ERROR and write-ignore the others.
done
> 
>> +
>> +trace_smmuv3_write_mmio(addr, val, size);
>> +
>> +switch (addr) {
>> +case A_CR0:
>> +s->cr[0] = val;
>> +s->cr0ack = val;
> 
> Spec says "reserved fields in SMMU_CR0 are not reflected in SMMU_CR0ACK",
> so you probably need to mask those out.
OK
> 
>> +/* in case the command queue has been enabled */
>> +smmuv3_cmdq_consume(s);
>> +return;
>> +case A_CR1:
>> +s->cr[1] = val;
>> +return;
>> +case A_CR2:
>> +s->cr[2] = val;
>> +return;
>> +case A_IRQ_CTRL:
>> +s->irq_ctrl = val;
>> +return;
>> +case 

Re: [Qemu-devel] [PATCH v9 07/14] hw/arm/smmuv3: Implement MMIO write operations

2018-03-08 Thread Peter Maydell
On 17 February 2018 at 18:46, Eric Auger  wrote:
> Now we have relevant helpers for queue and irq
> management, let's implement MMIO write operations.
>
> Signed-off-by: Eric Auger 
>
> ---
>
> v7 -> v8:
> - precise in the commit message invalidation commands
>   are not yet treated.
> - use new queue helpers
> - do not decode unhandled commands at this stage
> ---
>  hw/arm/smmuv3-internal.h |  24 +++---
>  hw/arm/smmuv3.c  | 111 
> +--
>  hw/arm/trace-events  |   6 +++
>  3 files changed, 132 insertions(+), 9 deletions(-)
>
> diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
> index c0771ce..5af97ae 100644
> --- a/hw/arm/smmuv3-internal.h
> +++ b/hw/arm/smmuv3-internal.h
> @@ -152,6 +152,25 @@ static inline uint64_t smmu_read64(uint64_t r, unsigned 
> offset,
>  return extract64(r, offset << 3, 32);
>  }
>
> +static inline void smmu_write64(uint64_t *r, unsigned offset,
> +unsigned size, uint64_t value)
> +{
> +if (size == 8 && !offset) {
> +*r  = value;
> +}
> +
> +/* 32 bit access */
> +
> +if (offset && offset != 4)  {
> +qemu_log_mask(LOG_GUEST_ERROR,
> +  "SMMUv3 MMIO write: bad offset/size %u/%u\n",
> +  offset, size);
> +return ;
> +}
> +
> +*r = deposit64(*r, offset << 3, 32, value);

Similar remarks apply to this helper as to smmu_read64 in the earlier patch.

> +}
> +
>  /* Interrupts */
>
>  #define smmuv3_eventq_irq_enabled(s)   \
> @@ -159,9 +178,6 @@ static inline uint64_t smmu_read64(uint64_t r, unsigned 
> offset,
>  #define smmuv3_gerror_irq_enabled(s)  \
>  (FIELD_EX32(s->irq_ctrl, IRQ_CTRL, GERROR_IRQEN))
>
> -void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask);
> -void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t gerrorn);
> -
>  /* Queue Handling */
>
>  #define LOG2SIZE(q)extract64((q)->base, 0, 5)
> @@ -310,6 +326,4 @@ enum { /* Command completion notification */
>  addr; \
>  })
>
> -int smmuv3_cmdq_consume(SMMUv3State *s);
> -
>  #endif
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index 0b57215..fcfdbb0 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -37,7 +37,8 @@
>   * @irq: irq type
>   * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
>   */
> -void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask)
> +static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
> +   uint32_t gerror_mask)
>  {
>
>  bool pulse = false;
> @@ -75,7 +76,7 @@ void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, 
> uint32_t gerror_mask)
>  }
>  }
>
> -void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
> +static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
>  {
>  uint32_t pending = s->gerror ^ s->gerrorn;
>  uint32_t toggled = s->gerrorn ^ new_gerrorn;
> @@ -199,7 +200,7 @@ static void smmuv3_init_regs(SMMUv3State *s)
>  s->sid_split = 0;
>  }
>
> -int smmuv3_cmdq_consume(SMMUv3State *s)
> +static int smmuv3_cmdq_consume(SMMUv3State *s)
>  {
>  SMMUCmdError cmd_error = SMMU_CERROR_NONE;
>  SMMUQueue *q = &s->cmdq;
> @@ -298,7 +299,109 @@ int smmuv3_cmdq_consume(SMMUv3State *s)
>  static void smmu_write_mmio(void *opaque, hwaddr addr,
>  uint64_t val, unsigned size)
>  {
> -/* not yet implemented */
> +SMMUState *sys = opaque;
> +SMMUv3State *s = ARM_SMMUV3(sys);
> +
> +/* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
> +addr &= ~0x1;
> +
> +if (size != 4 && size != 8) {
> +qemu_log_mask(LOG_GUEST_ERROR,
> +  "SMMUv3 MMIO write: bad size %u\n", size);
> +}

As with read, this can never happen so you don't need to check for it.

As with read, probably better to explicitly whitelist the 64-bit
accessible offsets, and LOG_GUEST_ERROR and write-ignore the others.

> +
> +trace_smmuv3_write_mmio(addr, val, size);
> +
> +switch (addr) {
> +case A_CR0:
> +s->cr[0] = val;
> +s->cr0ack = val;

Spec says "reserved fields in SMMU_CR0 are not reflected in SMMU_CR0ACK",
so you probably need to mask those out.

> +/* in case the command queue has been enabled */
> +smmuv3_cmdq_consume(s);
> +return;
> +case A_CR1:
> +s->cr[1] = val;
> +return;
> +case A_CR2:
> +s->cr[2] = val;
> +return;
> +case A_IRQ_CTRL:
> +s->irq_ctrl = val;
> +return;
> +case A_GERRORN:
> +smmuv3_write_gerrorn(s, val);
> +/*
> + * By acknowledging the CMDQ_ERR, SW may notify cmds can
> + * be processed again
> + */
> +smmuv3_cmdq_consume(s);
> +return;

[Qemu-devel] [PATCH v9 07/14] hw/arm/smmuv3: Implement MMIO write operations

2018-02-17 Thread Eric Auger
Now we have relevant helpers for queue and irq
management, let's implement MMIO write operations.

Signed-off-by: Eric Auger 

---

v7 -> v8:
- precise in the commit message invalidation commands
  are not yet treated.
- use new queue helpers
- do not decode unhandled commands at this stage
---
 hw/arm/smmuv3-internal.h |  24 +++---
 hw/arm/smmuv3.c  | 111 +--
 hw/arm/trace-events  |   6 +++
 3 files changed, 132 insertions(+), 9 deletions(-)

diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index c0771ce..5af97ae 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -152,6 +152,25 @@ static inline uint64_t smmu_read64(uint64_t r, unsigned 
offset,
 return extract64(r, offset << 3, 32);
 }
 
+static inline void smmu_write64(uint64_t *r, unsigned offset,
+unsigned size, uint64_t value)
+{
+if (size == 8 && !offset) {
+*r  = value;
+}
+
+/* 32 bit access */
+
+if (offset && offset != 4)  {
+qemu_log_mask(LOG_GUEST_ERROR,
+  "SMMUv3 MMIO write: bad offset/size %u/%u\n",
+  offset, size);
+return ;
+}
+
+*r = deposit64(*r, offset << 3, 32, value);
+}
+
 /* Interrupts */
 
 #define smmuv3_eventq_irq_enabled(s)   \
@@ -159,9 +178,6 @@ static inline uint64_t smmu_read64(uint64_t r, unsigned 
offset,
 #define smmuv3_gerror_irq_enabled(s)  \
 (FIELD_EX32(s->irq_ctrl, IRQ_CTRL, GERROR_IRQEN))
 
-void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask);
-void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t gerrorn);
-
 /* Queue Handling */
 
 #define LOG2SIZE(q)extract64((q)->base, 0, 5)
@@ -310,6 +326,4 @@ enum { /* Command completion notification */
 addr; \
 })
 
-int smmuv3_cmdq_consume(SMMUv3State *s);
-
 #endif
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 0b57215..fcfdbb0 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -37,7 +37,8 @@
  * @irq: irq type
  * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
  */
-void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask)
+static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
+   uint32_t gerror_mask)
 {
 
 bool pulse = false;
@@ -75,7 +76,7 @@ void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t 
gerror_mask)
 }
 }
 
-void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
+static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
 {
 uint32_t pending = s->gerror ^ s->gerrorn;
 uint32_t toggled = s->gerrorn ^ new_gerrorn;
@@ -199,7 +200,7 @@ static void smmuv3_init_regs(SMMUv3State *s)
 s->sid_split = 0;
 }
 
-int smmuv3_cmdq_consume(SMMUv3State *s)
+static int smmuv3_cmdq_consume(SMMUv3State *s)
 {
 SMMUCmdError cmd_error = SMMU_CERROR_NONE;
 SMMUQueue *q = &s->cmdq;
@@ -298,7 +299,109 @@ int smmuv3_cmdq_consume(SMMUv3State *s)
 static void smmu_write_mmio(void *opaque, hwaddr addr,
 uint64_t val, unsigned size)
 {
-/* not yet implemented */
+SMMUState *sys = opaque;
+SMMUv3State *s = ARM_SMMUV3(sys);
+
+/* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
+addr &= ~0x1;
+
+if (size != 4 && size != 8) {
+qemu_log_mask(LOG_GUEST_ERROR,
+  "SMMUv3 MMIO write: bad size %u\n", size);
+}
+
+trace_smmuv3_write_mmio(addr, val, size);
+
+switch (addr) {
+case A_CR0:
+s->cr[0] = val;
+s->cr0ack = val;
+/* in case the command queue has been enabled */
+smmuv3_cmdq_consume(s);
+return;
+case A_CR1:
+s->cr[1] = val;
+return;
+case A_CR2:
+s->cr[2] = val;
+return;
+case A_IRQ_CTRL:
+s->irq_ctrl = val;
+return;
+case A_GERRORN:
+smmuv3_write_gerrorn(s, val);
+/*
+ * By acknowledging the CMDQ_ERR, SW may notify cmds can
+ * be processed again
+ */
+smmuv3_cmdq_consume(s);
+return;
+case A_GERROR_IRQ_CFG0: /* 64b */
+smmu_write64(>gerror_irq_cfg0, 0, size, val);
+return;
+case A_GERROR_IRQ_CFG0 + 4:
+smmu_write64(>gerror_irq_cfg0, 4, size, val);
+return;
+case A_GERROR_IRQ_CFG1:
+s->gerror_irq_cfg1 = val;
+return;
+case A_GERROR_IRQ_CFG2:
+s->gerror_irq_cfg2 = val;
+return;
+case A_STRTAB_BASE: /* 64b */
+smmu_write64(>strtab_base, 0, size, val);
+return;
+case A_STRTAB_BASE + 4:
+smmu_write64(>strtab_base, 4, size, val);
+return;
+case A_STRTAB_BASE_CFG:
+s->strtab_base_cfg = val;
+if (FIELD_EX32(val, STRTAB_BASE_CFG, FMT) == 1) {
+s->sid_split =