Re: [PATCH v12 4/6] hw/riscv: virt: Add PMU DT node to the device tree

2022-08-15 Thread Andrew Jones
On Tue, Aug 02, 2022 at 04:33:05PM -0700, Atish Patra wrote:
> The QEMU virt machine can support a few cache events and cycle/instret
> counters. It also supports counter overflow for these events.
> 
> Add a DT node so that OpenSBI and the Linux kernel are aware of the virt
> machine's capabilities. There are some dummy nodes added for testing as well.
> 
> Acked-by: Alistair Francis 
> Signed-off-by: Atish Patra 
> Signed-off-by: Atish Patra 
> ---
>  hw/riscv/virt.c| 16 +
>  target/riscv/pmu.c | 57 ++
>  target/riscv/pmu.h |  1 +
>  3 files changed, 74 insertions(+)
> 
> diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
> index f2ce5663a4c7..b58e6ed8b823 100644
> --- a/hw/riscv/virt.c
> +++ b/hw/riscv/virt.c
> @@ -30,6 +30,7 @@
>  #include "hw/char/serial.h"
>  #include "target/riscv/cpu.h"
>  #include "hw/core/sysbus-fdt.h"
> +#include "target/riscv/pmu.h"
>  #include "hw/riscv/riscv_hart.h"
>  #include "hw/riscv/virt.h"
>  #include "hw/riscv/boot.h"
> @@ -715,6 +716,20 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
>  aplic_phandles[socket] = aplic_s_phandle;
>  }
>  
> +static void create_fdt_pmu(RISCVVirtState *s)
> +{
> +char *pmu_name;
> +MachineState *mc = MACHINE(s);
> +RISCVCPU hart = s->soc[0].harts[0];
> +
> +pmu_name = g_strdup_printf("/soc/pmu");
> +qemu_fdt_add_subnode(mc->fdt, pmu_name);
> +qemu_fdt_setprop_string(mc->fdt, pmu_name, "compatible", "riscv,pmu");
> +riscv_pmu_generate_fdt_node(mc->fdt, hart.cfg.pmu_num, pmu_name);
> +
> +g_free(pmu_name);
> +}
> +
>  static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
> bool is_32_bit, uint32_t *phandle,
> uint32_t *irq_mmio_phandle,
> @@ -1043,6 +1058,7 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
>  
>  create_fdt_flash(s, memmap);
>  create_fdt_fw_cfg(s, memmap);
> +create_fdt_pmu(s);
>  
>  update_bootargs:
>  if (cmdline && *cmdline) {
> diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> index 34096941c0ce..59feb3c243dd 100644
> --- a/target/riscv/pmu.c
> +++ b/target/riscv/pmu.c
> @@ -20,11 +20,68 @@
>  #include "cpu.h"
>  #include "pmu.h"
>  #include "sysemu/cpu-timers.h"
> +#include "sysemu/device_tree.h"
>  
>  #define RISCV_TIMEBASE_FREQ 1000000000 /* 1Ghz */
>  #define MAKE_32BIT_MASK(shift, length) \
>  (((uint32_t)(~0UL) >> (32 - (length))) << (shift))
>  
> +/**
 ^ This doesn't appear to be a kernel-doc comment, so I think we
 should drop the extra '*'
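i.e. something along these lines, with just the opening '/**' changed
to '/*' (a sketch of the form only, body elided):

    /*
     * To keep it simple, any event can be mapped to any programmable
     * counter in QEMU.
     * ...
     */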

> + * To keep it simple, any event can be mapped to any programmable counter in
> + * QEMU. The generic cycle & instruction count events can also be monitored
> + * using programmable counters. In that case, mcycle & minstret must continue
> + * to provide the correct value as well. A heterogeneous PMU per hart is not
> + * supported yet. Thus, the number of counters is the same across all harts.
> + */
> +void riscv_pmu_generate_fdt_node(void *fdt, int num_ctrs, char *pmu_name)
> +{
> +uint32_t fdt_event_ctr_map[20] = {};
> +uint32_t cmask;
> +
> +/* All the programmable counters can map to any event */
> +cmask = MAKE_32BIT_MASK(3, num_ctrs);
> +
> +   /**
^ Same here and I think I saw at least one other comment block
like this in a previous patch of this series.

> +* The event encoding is specified in the SBI specification.
> +* The event index is a 20-bit wide number encoded as follows:
> +* event_idx[19:16] = type
> +* event_idx[15:0] = code
> +* The code field for cache events is encoded as follows:
> +* event_idx.code[15:3] = cache_id
> +* event_idx.code[2:1] = op_id
> +* event_idx.code[0:0] = result_id
> +*/
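For reference, the fields compose as below; the helper is purely
illustrative and hypothetical (not part of the patch or of any SBI/QEMU API):

    /* Hypothetical helper, only to illustrate the event_idx layout above. */
    static inline uint32_t sbi_pmu_cache_event_idx(uint32_t cache_id,
                                                   uint32_t op_id,
                                                   uint32_t result_id)
    {
        uint32_t type = 0x1;  /* hardware cache event */
        uint32_t code = (cache_id << 3) | (op_id << 1) | result_id;

        return (type << 16) | code;  /* event_idx[19:16] | event_idx[15:0] */
    }

    /* e.g. cache_id 0x3, op_id 0x0, result_id 0x1 => 0x00010019 */

Each event then takes one triplet in the array that follows: the first and
last event index of a range, followed by the bitmask of counters that can
monitor it (cmask, with bit 0 or bit 2 also set for the cycle/instret
entries).
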
> +
> +   /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
> +   fdt_event_ctr_map[0] = cpu_to_be32(0x0001);
> +   fdt_event_ctr_map[1] = cpu_to_be32(0x0001);
> +   fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);
> +
> +   /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
> +   fdt_event_ctr_map[3] = cpu_to_be32(0x0002);
> +   fdt_event_ctr_map[4] = cpu_to_be32(0x0002);
> +   fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);
> +
> +   /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x00 type(0x01) */
> +   fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
> +   fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
> +   fdt_event_ctr_map[8] = cpu_to_be32(cmask);
> +
> +   /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x00 type(0x01) */
> +   fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
> +   fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
> +   fdt_event_ctr_map[11] = cpu_to_be32(cmask);
> +
> +   /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x00 type(0x01) */
> +   fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
> +   fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
> +   fdt_event_ctr_map[14] = cpu_to_be32(cmask);
> +
> +   /* This is an OpenSBI-specific DT property documented in the OpenSBI docs */

[PATCH v12 4/6] hw/riscv: virt: Add PMU DT node to the device tree

2022-08-02 Thread Atish Patra
The QEMU virt machine can support a few cache events and cycle/instret
counters. It also supports counter overflow for these events.

Add a DT node so that OpenSBI and the Linux kernel are aware of the virt
machine's capabilities. There are some dummy nodes added for testing as well.

Acked-by: Alistair Francis 
Signed-off-by: Atish Patra 
Signed-off-by: Atish Patra 
---
 hw/riscv/virt.c| 16 +
 target/riscv/pmu.c | 57 ++
 target/riscv/pmu.h |  1 +
 3 files changed, 74 insertions(+)

diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index f2ce5663a4c7..b58e6ed8b823 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -30,6 +30,7 @@
 #include "hw/char/serial.h"
 #include "target/riscv/cpu.h"
 #include "hw/core/sysbus-fdt.h"
+#include "target/riscv/pmu.h"
 #include "hw/riscv/riscv_hart.h"
 #include "hw/riscv/virt.h"
 #include "hw/riscv/boot.h"
@@ -715,6 +716,20 @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
 aplic_phandles[socket] = aplic_s_phandle;
 }
 
+static void create_fdt_pmu(RISCVVirtState *s)
+{
+char *pmu_name;
+MachineState *mc = MACHINE(s);
+RISCVCPU hart = s->soc[0].harts[0];
+
+pmu_name = g_strdup_printf("/soc/pmu");
+qemu_fdt_add_subnode(mc->fdt, pmu_name);
+qemu_fdt_setprop_string(mc->fdt, pmu_name, "compatible", "riscv,pmu");
+riscv_pmu_generate_fdt_node(mc->fdt, hart.cfg.pmu_num, pmu_name);
+
+g_free(pmu_name);
+}
+
 static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
bool is_32_bit, uint32_t *phandle,
uint32_t *irq_mmio_phandle,
@@ -1043,6 +1058,7 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap,
 
 create_fdt_flash(s, memmap);
 create_fdt_fw_cfg(s, memmap);
+create_fdt_pmu(s);
 
 update_bootargs:
 if (cmdline && *cmdline) {
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 34096941c0ce..59feb3c243dd 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -20,11 +20,68 @@
 #include "cpu.h"
 #include "pmu.h"
 #include "sysemu/cpu-timers.h"
+#include "sysemu/device_tree.h"
 
 #define RISCV_TIMEBASE_FREQ 1000000000 /* 1Ghz */
 #define MAKE_32BIT_MASK(shift, length) \
 (((uint32_t)(~0UL) >> (32 - (length))) << (shift))
 
+/**
+ * To keep it simple, any event can be mapped to any programmable counter in
+ * QEMU. The generic cycle & instruction count events can also be monitored
+ * using programmable counters. In that case, mcycle & minstret must continue
+ * to provide the correct value as well. A heterogeneous PMU per hart is not
+ * supported yet. Thus, the number of counters is the same across all harts.
+ */
+void riscv_pmu_generate_fdt_node(void *fdt, int num_ctrs, char *pmu_name)
+{
+uint32_t fdt_event_ctr_map[20] = {};
+uint32_t cmask;
+
+/* All the programmable counters can map to any event */
+cmask = MAKE_32BIT_MASK(3, num_ctrs);
+
+   /**
+* The event encoding is specified in the SBI specification.
+* The event index is a 20-bit wide number encoded as follows:
+* event_idx[19:16] = type
+* event_idx[15:0] = code
+* The code field for cache events is encoded as follows:
+* event_idx.code[15:3] = cache_id
+* event_idx.code[2:1] = op_id
+* event_idx.code[0:0] = result_id
+*/
+
+   /* SBI_PMU_HW_CPU_CYCLES: 0x01 : type(0x00) */
+   fdt_event_ctr_map[0] = cpu_to_be32(0x0001);
+   fdt_event_ctr_map[1] = cpu_to_be32(0x0001);
+   fdt_event_ctr_map[2] = cpu_to_be32(cmask | 1 << 0);
+
+   /* SBI_PMU_HW_INSTRUCTIONS: 0x02 : type(0x00) */
+   fdt_event_ctr_map[3] = cpu_to_be32(0x0002);
+   fdt_event_ctr_map[4] = cpu_to_be32(0x0002);
+   fdt_event_ctr_map[5] = cpu_to_be32(cmask | 1 << 2);
+
+   /* SBI_PMU_HW_CACHE_DTLB : 0x03 READ : 0x00 MISS : 0x00 type(0x01) */
+   fdt_event_ctr_map[6] = cpu_to_be32(0x00010019);
+   fdt_event_ctr_map[7] = cpu_to_be32(0x00010019);
+   fdt_event_ctr_map[8] = cpu_to_be32(cmask);
+
+   /* SBI_PMU_HW_CACHE_DTLB : 0x03 WRITE : 0x01 MISS : 0x00 type(0x01) */
+   fdt_event_ctr_map[9] = cpu_to_be32(0x0001001B);
+   fdt_event_ctr_map[10] = cpu_to_be32(0x0001001B);
+   fdt_event_ctr_map[11] = cpu_to_be32(cmask);
+
+   /* SBI_PMU_HW_CACHE_ITLB : 0x04 READ : 0x00 MISS : 0x00 type(0x01) */
+   fdt_event_ctr_map[12] = cpu_to_be32(0x00010021);
+   fdt_event_ctr_map[13] = cpu_to_be32(0x00010021);
+   fdt_event_ctr_map[14] = cpu_to_be32(cmask);
+
+   /* This is an OpenSBI-specific DT property documented in the OpenSBI docs */
+   qemu_fdt_setprop(fdt, pmu_name, "riscv,event-to-mhpmcounters",
+fdt_event_ctr_map, sizeof(fdt_event_ctr_map));
+}
+
 static bool riscv_pmu_counter_valid(RISCVCPU *cpu, uint32_t ctr_idx)
 {
 if (ctr_idx < 3 || ctr_idx >= RV_MAX_MHPMCOUNTERS ||
diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
index 036653627f78..3004ce37b636 100644
--- a/target/riscv/pmu.h
+++ b/target/riscv/pmu.h
@@ -31,5 +31,6 @@ int riscv_pmu_init(RISCVCPU *cpu, int num_counters);
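
For reference, with this applied the virt machine's FDT gains a node under
/soc along these lines (a sketch only; MASK_CYC, MASK_INS and MASK stand for
the counter bitmasks derived from pmu_num and are not literal values):

    pmu {
        compatible = "riscv,pmu";
        riscv,event-to-mhpmcounters = <0x00000001 0x00000001 MASK_CYC>,
                                      <0x00000002 0x00000002 MASK_INS>,
                                      <0x00010019 0x00010019 MASK>,
                                      <0x0001001B 0x0001001B MASK>,
                                      <0x00010021 0x00010021 MASK>;
    };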