Re: [PATCH v5 0/1] target/riscv: Add Zihintpause support

2022-07-24 Thread Alistair Francis
On Mon, Jul 25, 2022 at 1:48 PM Dao Lu  wrote:
>
> This patch adds RISC-V Zihintpause support. The extension is enabled by
> default and the opcode has been added to insn32.decode.
>
> Added trans_pause to exit the TB and return to main loop.
>
> The change can also be found in:
> https://github.com/dlu42/qemu/tree/zihintpause_support_v1
>
> Tested along with PAUSE support added to the cpu_relax() function in Linux; the
> changes I made to Linux for testing can be found here:
> https://github.com/dlu42/linux/tree/pause_support_v1
>
> 
> Changelog:
>
> v1 -> v2
> 1. Pause now also exits the TB and returns to the main loop
> 2. Move the REQUIRE_ZIHINTPAUSE macro inside the trans_pause function
>
> v2 -> v3
> No changes, v2 was lost from the list
>
> v3 -> v4
> No longer break the reservation in trans_pause
>
> v4 -> v5
> Rebase on top of https://github.com/alistair23/qemu/tree/riscv-to-apply.next
>
> Dao Lu (1):
>   Add Zihintpause support

Thanks!

Applied to riscv-to-apply.next

Alistair

>
>  target/riscv/cpu.c  |  2 ++
>  target/riscv/cpu.h  |  1 +
>  target/riscv/insn32.decode  |  7 ++-
>  target/riscv/insn_trans/trans_rvi.c.inc | 16 
>  4 files changed, 25 insertions(+), 1 deletion(-)
>
> --
> 2.25.1
>
>



[PATCH v5 1/1] target/riscv: Add Zihintpause support

2022-07-24 Thread Dao Lu
Added support for the RISC-V PAUSE instruction from the Zihintpause extension,
enabled by default.

Tested-by: Heiko Stuebner 
Reviewed-by: Alistair Francis 
Signed-off-by: Dao Lu 
---
 target/riscv/cpu.c  |  2 ++
 target/riscv/cpu.h  |  1 +
 target/riscv/insn32.decode  |  7 ++-
 target/riscv/insn_trans/trans_rvi.c.inc | 16 
 4 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 966e5f2dd7..d4635c7df4 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -73,6 +73,7 @@ static const struct isa_ext_data isa_edata_arr[] = {
 ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_12_0, ext_v),
 ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
 ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
+ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),
 ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_12_0, ext_zfh),
 ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin),
 ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx),
@@ -987,6 +988,7 @@ static Property riscv_cpu_extensions[] = {
 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
+DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 561d7fa92c..4be4b82a83 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -426,6 +426,7 @@ struct RISCVCPUConfig {
 bool ext_zkt;
 bool ext_ifencei;
 bool ext_icsr;
+bool ext_zihintpause;
 bool ext_svinval;
 bool ext_svnapot;
 bool ext_svpbmt;
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 4033565393..595fdcdad8 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -149,7 +149,12 @@ srl      0000000 .....  ..... 101 ..... 0110011 @r
 sra      0100000 .....  ..... 101 ..... 0110011 @r
 or       0000000 .....  ..... 110 ..... 0110011 @r
 and      0000000 .....  ..... 111 ..... 0110011 @r
-fence    ---- pred:4 succ:4   ----- 000 ----- 0001111
+
+{
+  pause   0000 0001 0000  00000 000 00000 0001111
+  fence   ---- pred:4 succ:4   ----- 000 ----- 0001111
+}
+
 fence_i  ---- ----   ----- 001 ----- 0001111
 csrrw    ............     ..... 001 ..... 1110011 @csr
 csrrs    ............     ..... 010 ..... 1110011 @csr
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc 
b/target/riscv/insn_trans/trans_rvi.c.inc
index ca8e3d1ea1..c49dbec0eb 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -792,6 +792,22 @@ static bool trans_srad(DisasContext *ctx, arg_srad *a)
 return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
 }
 
+static bool trans_pause(DisasContext *ctx, arg_pause *a)
+{
+if (!ctx->cfg_ptr->ext_zihintpause) {
+return false;
+}
+
+/*
+ * PAUSE is a no-op in QEMU,
+ * end the TB and return to main loop
+ */
+gen_set_pc_imm(ctx, ctx->pc_succ_insn);
+tcg_gen_exit_tb(NULL, 0);
+ctx->base.is_jmp = DISAS_NORETURN;
+
+return true;
+}
 
 static bool trans_fence(DisasContext *ctx, arg_fence *a)
 {
-- 
2.25.1




[PATCH v5 0/1] target/riscv: Add Zihintpause support

2022-07-24 Thread Dao Lu
This patch adds RISC-V Zihintpause support. The extension is enabled by
default and the opcode has been added to insn32.decode.

Added trans_pause to exit the TB and return to main loop.

The change can also be found in:
https://github.com/dlu42/qemu/tree/zihintpause_support_v1

Tested along with PAUSE support added to the cpu_relax() function in Linux; the
changes I made to Linux for testing can be found here:
https://github.com/dlu42/linux/tree/pause_support_v1
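
For reference, a minimal sketch of the kind of cpu_relax() change being
exercised here (illustrative only; the actual Linux patch in the branch above
may differ). PAUSE is encoded as a FENCE with pred=W and succ=0, i.e. the
fixed word 0x0100000f, so it executes as an ordinary hint/no-op on cores and
emulators without Zihintpause:

    static inline void cpu_relax(void)
    {
        /* Emit the raw PAUSE encoding so older assemblers accept it too. */
        __asm__ __volatile__ (".4byte 0x0100000f");
    }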


Changelog:

v1 -> v2
1. Pause now also exits the TB and returns to the main loop
2. Move the REQUIRE_ZIHINTPAUSE macro inside the trans_pause function

v2 -> v3
No changes, v2 was lost from the list

v3 -> v4
No longer break the reservation in trans_pause

v4 -> v5
Rebase on top of https://github.com/alistair23/qemu/tree/riscv-to-apply.next

Dao Lu (1):
  Add Zihintpause support

 target/riscv/cpu.c  |  2 ++
 target/riscv/cpu.h  |  1 +
 target/riscv/insn32.decode  |  7 ++-
 target/riscv/insn_trans/trans_rvi.c.inc | 16 
 4 files changed, 25 insertions(+), 1 deletion(-)

-- 
2.25.1




Re: [PATCH 00/16] Support VIRTIO_F_RING_RESET for virtio-net and vhost-user in virtio pci

2022-07-24 Thread Jason Wang
On Mon, Jul 25, 2022 at 10:34 AM Kangjie Xu
 wrote:
>
> Do you have any comments or suggestions about improvements to this patch
> set?

Will have a look and give feedback no later than the end of this week.

Thanks

>
>
> Regards,
>
> Kangjie
>
> On 2022/7/18 19:16, Kangjie Xu wrote:
> > The virtio queue reset function has already been defined in the virtio spec 
> > 1.2.
> > The relevant virtio spec information is here:
> >
> >  https://github.com/oasis-tcs/virtio-spec/issues/124
> >  https://github.com/oasis-tcs/virtio-spec/issues/139
> >
> > This patch set is to support this function in QEMU. It consists of two 
> > parts: virtio-net
> > and vhost-user. The patches 1-7 are the implementation for virtio-net and 
> > the patches
> > 8-16 are for vhost-user.
> >
> > The process of virt queue reset can be concluded as:
> > 1. The virtqueue is disabled when VIRTIO_PCI_COMMON_Q_RESET is written.
> > 2. Then the virtqueue is restarted after the information of vrings is 
> > passed to QEMU and
> > VIRTIO_PCI_COMMON_Q_ENABLE is written.
> >
> > Test environment:
> >  Host: 5.4.189
> >  Qemu: QEMU emulator version 7.0.50
> >  Guest: 5.19.0-rc3 (With vq reset support)
> >  DPDK: 22.07-rc1 (With vq reset support)
> >  Test Cmd: ethtool -g eth1; ethtool -G eth1 rx $1 tx $2; ethtool -g 
> > eth1;
> >
> >  The frontend can resize the virtio queue, then virtio queue reset 
> > function should
> >  be triggered.
> >
> >  The default is split mode, modify Qemu virtio-net to add PACKED 
> > feature to
> >  test packed mode.
> >
> > Guest Kernel Patch:
> >  
> > https://lore.kernel.org/bpf/20220629065656.54420-1-xuanz...@linux.alibaba.com/
> >
> > DPDK Code:
> >  
> > https://github.com/middaywords/dpdk/commit/098c8e1dfae10b747da8dd8950a82890efca7bee
> >
> > Please review. Thanks.
> >
> > Kangjie Xu (9):
> >vhost: add op to enable or disable a single vring
> >vhost-user: enable/disable a single vring
> >vhost: extract the logic of unmapping the vrings and desc
> >vhost: introduce restart and release for vhost_dev's vqs
> >vhost-net: introduce restart and stop for vhost_net's vqs
> >virtio: introduce queue_enable in virtio
> >virtio-net: support queue_enable for vhost-user
> >virtio-net: support queue_reset for vhost-user
> >vhost-net: vq reset feature bit support
> >
> > Xuan Zhuo (7):
> >virtio-pci: virtio_pci_common_cfg add queue_notify_data
> >virtio: add VIRTIO_F_RING_RESET
> >virtio: pci: virtio_pci_common_cfg add queue_reset
> >virtio: introduce __virtio_queue_reset()
> >virtio: introduce virtio_queue_reset()
> >virtio-pci: support queue reset
> >virtio-net: support queue reset
> >
> >   hw/net/vhost_net.c| 56 ++
> >   hw/net/virtio-net.c   | 40 +
> >   hw/virtio/vhost-user.c| 55 +++---
> >   hw/virtio/vhost.c | 49 ++--
> >   hw/virtio/virtio-pci.c| 17 ++
> >   hw/virtio/virtio.c| 57 +--
> >   include/hw/virtio/vhost-backend.h |  4 ++
> >   include/hw/virtio/vhost.h |  6 ++
> >   include/hw/virtio/virtio-pci.h|  1 +
> >   include/hw/virtio/virtio.h|  4 ++
> >   include/net/vhost_net.h   |  5 ++
> >   .../standard-headers/linux/virtio_config.h|  5 ++
> >   include/standard-headers/linux/virtio_pci.h   |  4 ++
> >   13 files changed, 274 insertions(+), 29 deletions(-)
> >
>




Re: [RFC] hw/nvme: Use irqfd to send interrupts

2022-07-24 Thread Jinhao Fan
at 3:36 AM, Stefan Hajnoczi  wrote:

> 
> 
> On Sun, Jul 24, 2022, 11:21 Jinhao Fan  wrote:
> at 9:29 PM, Stefan Hajnoczi  wrote:
> 
> > 
> > Nice, perf(1) is good for that. You can enable trace events and add
> > kprobes/uprobes to record timestamps when specific functions are entered.
> > 
> 
> Thanks Stefan,
> 
> One last question: Currently we can achieve hundreds of KIOPS. That means
> perf can easily capture millions of trace events per second. I found perf
> has quite high overhead at such a rate of trace events. Do you have any
> advice on tracing high IOPS tasks?
> 
> I don't. BTW uprobes are expensive but kprobes are cheaper.
> 
> Stefan

Gotcha. Thanks!

Jinhao Fan




Re: [PATCH 00/16] Support VIRTIO_F_RING_RESET for virtio-net and vhost-user in virtio pci

2022-07-24 Thread Kangjie Xu
Do you have any comments or suggestions about improvements to this patch 
set?



Regards,

Kangjie

On 2022/7/18 19:16, Kangjie Xu wrote:

The virtio queue reset function has already been defined in the virtio spec 1.2.
The relevant virtio spec information is here:

 https://github.com/oasis-tcs/virtio-spec/issues/124
 https://github.com/oasis-tcs/virtio-spec/issues/139

This patch set is to support this function in QEMU. It consists of two parts: 
virtio-net
and vhost-user. The patches 1-7 are the implementation for virtio-net and the 
patches
8-16 are for vhost-user.

The process of virt queue reset can be concluded as:
1. The virtqueue is disabled when VIRTIO_PCI_COMMON_Q_RESET is written.
2. Then the virtqueue is restarted after the information of vrings is passed to 
QEMU and
VIRTIO_PCI_COMMON_Q_ENABLE is written.
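
For illustration, the driver-side sequence described above looks roughly like
this (a sketch only, not code from this series; read16/write16 and the exact
field handling are assumed, see the spec issues linked above):

    /* cfg points at the device's virtio_pci_common_cfg region. */
    static void reset_and_reenable_vq(struct virtio_pci_common_cfg *cfg,
                                      uint16_t idx)
    {
        write16(&cfg->queue_select, idx);
        write16(&cfg->queue_reset, 1);        /* 1. device disables the vq */
        while (read16(&cfg->queue_reset) != 1) {
            /* wait for the device to complete the reset */
        }
        /* ... re-program queue_size/desc/driver/device addresses here ... */
        write16(&cfg->queue_enable, 1);       /* 2. device restarts the vq */
    }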

Test environment:
 Host: 5.4.189
 Qemu: QEMU emulator version 7.0.50
 Guest: 5.19.0-rc3 (With vq reset support)
 DPDK: 22.07-rc1 (With vq reset support)
 Test Cmd: ethtool -g eth1; ethtool -G eth1 rx $1 tx $2; ethtool -g eth1;

 The frontend can resize the virtio queue, then virtio queue reset function 
should
 be triggered.

 The default is split mode, modify Qemu virtio-net to add PACKED feature to
 test packed mode.

Guest Kernel Patch:
 
https://lore.kernel.org/bpf/20220629065656.54420-1-xuanz...@linux.alibaba.com/

DPDK Code:
 
https://github.com/middaywords/dpdk/commit/098c8e1dfae10b747da8dd8950a82890efca7bee

Please review. Thanks.

Kangjie Xu (9):
   vhost: add op to enable or disable a single vring
   vhost-user: enable/disable a single vring
   vhost: extract the logic of unmapping the vrings and desc
   vhost: introduce restart and release for vhost_dev's vqs
   vhost-net: introduce restart and stop for vhost_net's vqs
   virtio: introduce queue_enable in virtio
   virtio-net: support queue_enable for vhost-user
   virtio-net: support queue_reset for vhost-user
   vhost-net: vq reset feature bit support

Xuan Zhuo (7):
   virtio-pci: virtio_pci_common_cfg add queue_notify_data
   virtio: add VIRTIO_F_RING_RESET
   virtio: pci: virtio_pci_common_cfg add queue_reset
   virtio: introduce __virtio_queue_reset()
   virtio: introduce virtio_queue_reset()
   virtio-pci: support queue reset
   virtio-net: support queue reset

  hw/net/vhost_net.c| 56 ++
  hw/net/virtio-net.c   | 40 +
  hw/virtio/vhost-user.c| 55 +++---
  hw/virtio/vhost.c | 49 ++--
  hw/virtio/virtio-pci.c| 17 ++
  hw/virtio/virtio.c| 57 +--
  include/hw/virtio/vhost-backend.h |  4 ++
  include/hw/virtio/vhost.h |  6 ++
  include/hw/virtio/virtio-pci.h|  1 +
  include/hw/virtio/virtio.h|  4 ++
  include/net/vhost_net.h   |  5 ++
  .../standard-headers/linux/virtio_config.h|  5 ++
  include/standard-headers/linux/virtio_pci.h   |  4 ++
  13 files changed, 274 insertions(+), 29 deletions(-)





Re: [PATCH v3 8/8] arm/aspeed: Replace mx25l25635e chip model

2022-07-24 Thread Andrew Jeffery



On Fri, 22 Jul 2022, at 16:06, Cédric Le Goater wrote:
> A mx25l25635f chip model is generally found on these machines. It's
> newer and uses 4B opcodes which is better to exercise the support in
> the Linux kernel.
>
> Signed-off-by: Cédric Le Goater 
> ---
>  hw/arm/aspeed.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
> index 1c611284819d..7e95abc55b09 100644
> --- a/hw/arm/aspeed.c
> +++ b/hw/arm/aspeed.c
> @@ -1157,7 +1157,7 @@ static void 
> aspeed_machine_palmetto_class_init(ObjectClass *oc, void *data)
>  amc->soc_name  = "ast2400-a1";
>  amc->hw_strap1 = PALMETTO_BMC_HW_STRAP1;
>  amc->fmc_model = "n25q256a";
> -amc->spi_model = "mx25l25635e";
> +amc->spi_model = "mx25l25635f";

Hmm, dmesg reported mx25l25635e on the palmetto I checked

>  amc->num_cs= 1;
>  amc->i2c_init  = palmetto_bmc_i2c_init;
>  mc->default_ram_size   = 256 * MiB;
> @@ -1208,7 +1208,7 @@ static void 
> aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, void *data)
>  amc->soc_name  = "ast2500-a1";
>  amc->hw_strap1 = AST2500_EVB_HW_STRAP1;
>  amc->fmc_model = "mx25l25635e";
> -amc->spi_model = "mx25l25635e";
> +amc->spi_model = "mx25l25635f";
>  amc->num_cs= 1;
>  amc->i2c_init  = ast2500_evb_i2c_init;
>  mc->default_ram_size   = 512 * MiB;
> @@ -1258,7 +1258,7 @@ static void 
> aspeed_machine_witherspoon_class_init(ObjectClass *oc, void *data)
>  mc->desc   = "OpenPOWER Witherspoon BMC (ARM1176)";
>  amc->soc_name  = "ast2500-a1";
>  amc->hw_strap1 = WITHERSPOON_BMC_HW_STRAP1;
> -amc->fmc_model = "mx25l25635e";
> +amc->fmc_model = "mx25l25635f";

The witherspoon I checked also reported mx25l25635e in dmesg for the 
FMC.

You do say "generally" in the commit message though.

Andrew



Re: [PATCH] .cirrus.yml: Change winsymlinks to 'native'

2022-07-24 Thread Yonggang Luo
On Mon, Jul 25, 2022 at 9:24 AM Bin Meng  wrote:
>
> On Wed, Jul 20, 2022 at 12:12 AM Bin Meng  wrote:
> >
> > From: Bin Meng 
> >
> > At present winsymlinks is set to 'nativestrict', and its behavior is:
> >
> >   a) if native symlinks are enabled and <target> exists, creates
> >  <destination> as a native Windows symlink;
> >   b) else if native symlinks are not enabled or if <target> does
> >  not exist, 'ln -s' fails.
> >
> > This causes the following error message to be seen during configure:
> >
> >   "ln: failed to create symbolic link
> >   'x86_64-softmmu/qemu-system-x86_64.exe': No such file or directory"
> >
> > Change winsymlinks to 'native' whose behavior is most similar to the
> > behavior of 'ln -s' on *nix, that is:
> >
> >   a) if native symlinks are enabled, and whether <target> exists
> >  or not, creates <destination> as a native Windows symlink;
> >   b) else if native symlinks are not enabled, and whether <target>
> >  exists or not, 'ln -s' creates <destination> as a Windows shortcut file.
> >
> > Signed-off-by: Bin Meng 
> > ---
> >
> >  .cirrus.yml | 2 +-
> >  1 file changed, 1 insertion(+), 1 deletion(-)
> >
>
> Ping?


Reviewed-by: Yonggang Luo 


--
此致敬礼 (Yours sincerely),
罗勇刚 (Yonggang Luo)


Re: [PATCH] .cirrus.yml: Change winsymlinks to 'native'

2022-07-24 Thread Bin Meng
On Wed, Jul 20, 2022 at 12:12 AM Bin Meng  wrote:
>
> From: Bin Meng 
>
> At present winsymlinks is set to 'nativestrict', and its behavior is:
>
>   a) if native symlinks are enabled and <target> exists, creates
>  <destination> as a native Windows symlink;
>   b) else if native symlinks are not enabled or if <target> does
>  not exist, 'ln -s' fails.
>
> This causes the following error message to be seen during configure:
>
>   "ln: failed to create symbolic link
>   'x86_64-softmmu/qemu-system-x86_64.exe': No such file or directory"
>
> Change winsymlinks to 'native' whose behavior is most similar to the
> behavior of 'ln -s' on *nix, that is:
>
>   a) if native symlinks are enabled, and whether <target> exists
>  or not, creates <destination> as a native Windows symlink;
>   b) else if native symlinks are not enabled, and whether <target>
>  exists or not, 'ln -s' creates <destination> as a Windows shortcut file.
>
> Signed-off-by: Bin Meng 
> ---
>
>  .cirrus.yml | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>

Ping?



Re: [PATCH] hw/intc: sifive_plic: Fix multi-socket plic configuration

2022-07-24 Thread Alistair Francis
On Sat, Jul 23, 2022 at 7:22 PM Atish Patra  wrote:
>
> Since commit 40244040a7ac, multi-socket configuration with the PLIC is
> broken as the hartid for the second socket is calculated incorrectly.
> The hartid stored in addr_config already includes the offset
> for the base hartid for that socket. Adding it again would lead
> to a segfault while creating the PLIC device for the virt machine.
> qdev_connect_gpio_out was also invoked with an incorrect number of gpio
> lines.
>
> Fixes: 40244040a7ac (hw/intc: sifive_plic: Avoid overflowing the addr_config 
> buffer)
>
> Signed-off-by: Atish Patra 

Can you share the -cpu options that cause the segfault? I'll add it
to my test case.

Reviewed-by: Alistair Francis 

Alistair

> ---
>  hw/intc/sifive_plic.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c
> index 56d60e9ac935..fdac028a521f 100644
> --- a/hw/intc/sifive_plic.c
> +++ b/hw/intc/sifive_plic.c
> @@ -454,10 +454,10 @@ DeviceState *sifive_plic_create(hwaddr addr, char 
> *hart_config,
>
>  for (i = 0; i < plic->num_addrs; i++) {
>  int cpu_num = plic->addr_config[i].hartid;
> -CPUState *cpu = qemu_get_cpu(hartid_base + cpu_num);
> +CPUState *cpu = qemu_get_cpu(cpu_num);
>
>  if (plic->addr_config[i].mode == PLICMode_M) {
> -qdev_connect_gpio_out(dev, num_harts + cpu_num,
> +qdev_connect_gpio_out(dev, cpu_num,
>qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT));
>  }
>  if (plic->addr_config[i].mode == PLICMode_S) {
> --
> 2.25.1
>
>



Re: [PATCH v2 2/2] tests/tcg/s390x: Test unaligned accesses to lowcore

2022-07-24 Thread Richard Henderson

On 7/23/22 05:06, Ilya Leoshkevich wrote:

Add a small test to avoid regressions.

Signed-off-by: Ilya Leoshkevich 


Acked-by: Richard Henderson 


r~



---
  tests/tcg/s390x/Makefile.softmmu-target |  9 +
  tests/tcg/s390x/unaligned-lowcore.S | 19 +++
  2 files changed, 28 insertions(+)
  create mode 100644 tests/tcg/s390x/Makefile.softmmu-target
  create mode 100644 tests/tcg/s390x/unaligned-lowcore.S

diff --git a/tests/tcg/s390x/Makefile.softmmu-target 
b/tests/tcg/s390x/Makefile.softmmu-target
new file mode 100644
index 00..a34fa68473
--- /dev/null
+++ b/tests/tcg/s390x/Makefile.softmmu-target
@@ -0,0 +1,9 @@
+S390X_SRC=$(SRC_PATH)/tests/tcg/s390x
+VPATH+=$(S390X_SRC)
+QEMU_OPTS=-action panic=exit-failure -kernel
+
+%: %.S
+   $(CC) -march=z13 -m64 -nostartfiles -static -Wl,-Ttext=0 \
+   -Wl,--build-id=none $< -o $@
+
+TESTS += unaligned-lowcore
diff --git a/tests/tcg/s390x/unaligned-lowcore.S 
b/tests/tcg/s390x/unaligned-lowcore.S
new file mode 100644
index 00..246b517f11
--- /dev/null
+++ b/tests/tcg/s390x/unaligned-lowcore.S
@@ -0,0 +1,19 @@
+.org 0x1D0 /* program new PSW */
+.quad 0x2, 0   /* disabled wait */
+.org 0x200 /* lowcore padding */
+
+.globl _start
+_start:
+lctlg %c0,%c0,_c0
+vst %v0,_unaligned
+lpswe quiesce_psw
+
+.align 8
+quiesce_psw:
+.quad 0x2,0xfff/* see is_special_wait_psw() */
+_c0:
+.quad 0x1006   /* lowcore protection, AFP, VX */
+
+.byte 0
+_unaligned:
+.octa 0





Re: [PATCH v2 1/2] qapi: Add exit-failure PanicAction

2022-07-24 Thread Richard Henderson

On 7/23/22 05:06, Ilya Leoshkevich wrote:

Currently QEMU exits with code 0 on both panic and shutdown. For tests
it is useful to return 1 on panic, so that it counts as a test
failure.

Introduce a new exit-failure PanicAction that makes main() return
EXIT_FAILURE. Tests can use the -action panic=exit-failure option to
activate this behavior.

Signed-off-by: Ilya Leoshkevich 


I like it.

Reviewed-by: Richard Henderson 


r~


---
  include/sysemu/sysemu.h |  2 +-
  qapi/run-state.json |  4 +++-
  qemu-options.hx |  2 +-
  softmmu/main.c  |  6 --
  softmmu/runstate.c  | 17 +
  5 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 812f66a31a..31aa45160b 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -103,7 +103,7 @@ void qemu_boot_set(const char *boot_order, Error **errp);
  bool defaults_enabled(void);
  
  void qemu_init(int argc, char **argv, char **envp);

-void qemu_main_loop(void);
+int qemu_main_loop(void);
  void qemu_cleanup(void);
  
  extern QemuOptsList qemu_legacy_drive_opts;

diff --git a/qapi/run-state.json b/qapi/run-state.json
index 6e2162d7b3..d42c370c4f 100644
--- a/qapi/run-state.json
+++ b/qapi/run-state.json
@@ -364,10 +364,12 @@
  #
  # @shutdown: Shutdown the VM and exit, according to the shutdown action
  #
+# @exit-failure: Shutdown the VM and exit with nonzero status
+#
  # Since: 6.0
  ##
  { 'enum': 'PanicAction',
-  'data': [ 'pause', 'shutdown', 'none' ] }
+  'data': [ 'pause', 'shutdown', 'exit-failure', 'none' ] }
  
  ##

  # @watchdog-set-action:
diff --git a/qemu-options.hx b/qemu-options.hx
index 79e00916a1..8e17c5064a 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -4239,7 +4239,7 @@ DEF("action", HAS_ARG, QEMU_OPTION_action,
  "   action when guest reboots [default=reset]\n"
  "-action shutdown=poweroff|pause\n"
  "   action when guest shuts down [default=poweroff]\n"
-"-action panic=pause|shutdown|none\n"
+"-action panic=pause|shutdown|exit-failure|none\n"
  "   action when guest panics [default=shutdown]\n"
  "-action watchdog=reset|shutdown|poweroff|inject-nmi|pause|debug|none\n"
  "   action when watchdog fires [default=reset]\n",
diff --git a/softmmu/main.c b/softmmu/main.c
index c00432ff09..1b675a8c03 100644
--- a/softmmu/main.c
+++ b/softmmu/main.c
@@ -32,11 +32,13 @@
  
  int qemu_main(int argc, char **argv, char **envp)

  {
+int status;
+
  qemu_init(argc, argv, envp);
-qemu_main_loop();
+status = qemu_main_loop();
  qemu_cleanup();
  
-return 0;

+return status;
  }
  
  #ifndef CONFIG_COCOA

diff --git a/softmmu/runstate.c b/softmmu/runstate.c
index 168e1b78a0..1e68680b9d 100644
--- a/softmmu/runstate.c
+++ b/softmmu/runstate.c
@@ -482,7 +482,8 @@ void qemu_system_guest_panicked(GuestPanicInformation *info)
  qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
  !!info, info);
  vm_stop(RUN_STATE_GUEST_PANICKED);
-} else if (panic_action == PANIC_ACTION_SHUTDOWN) {
+} else if (panic_action == PANIC_ACTION_SHUTDOWN ||
+   panic_action == PANIC_ACTION_EXIT_FAILURE) {
  qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_POWEROFF,
 !!info, info);
  vm_stop(RUN_STATE_GUEST_PANICKED);
@@ -662,7 +663,7 @@ void qemu_system_debug_request(void)
  qemu_notify_event();
  }
  
-static bool main_loop_should_exit(void)

+static bool main_loop_should_exit(int *status)
  {
  RunState r;
  ShutdownCause request;
@@ -680,6 +681,10 @@ static bool main_loop_should_exit(void)
  if (shutdown_action == SHUTDOWN_ACTION_PAUSE) {
  vm_stop(RUN_STATE_SHUTDOWN);
  } else {
+if (request == SHUTDOWN_CAUSE_GUEST_PANIC &&
+panic_action == PANIC_ACTION_EXIT_FAILURE) {
+*status = EXIT_FAILURE;
+}
  return true;
  }
  }
@@ -715,12 +720,14 @@ static bool main_loop_should_exit(void)
  return false;
  }
  
-void qemu_main_loop(void)

+int qemu_main_loop(void)
  {
+int status = EXIT_SUCCESS;
  #ifdef CONFIG_PROFILER
  int64_t ti;
  #endif
-while (!main_loop_should_exit()) {
+
+while (!main_loop_should_exit()) {
  #ifdef CONFIG_PROFILER
  ti = profile_getclock();
  #endif
@@ -729,6 +736,8 @@ void qemu_main_loop(void)
  dev_time += profile_getclock() - ti;
  #endif
  }
+
+return status;
  }
  
  void qemu_add_exit_notifier(Notifier *notify)





Re: [PATCH] target/ppc: fix unreachable code in do_ldst_quad()

2022-07-24 Thread Richard Henderson

On 7/20/22 19:27, Daniel Henrique Barboza wrote:

Coverity reports that commit fc34e81acd51 ("target/ppc: add macros to
check privilege level") turned the following code unreachable:

if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) {
 /* lq and stq were privileged prior to V. 2.07 */
 REQUIRE_SV(ctx);


 CID 1490757:  Control flow issues  (UNREACHABLE)
 This code cannot be reached: "if (ctx->le_mode) {

 if (ctx->le_mode) {
 gen_align_no_le(ctx);
 return true;
 }
}

This happens because the macro REQUIRE_SV(), in CONFIG_USER_MODE, will
always result in a 'return true' statement.


I think adding ifdefs isn't fantastic.  This doesn't actually fix a bug, so we *could* just
mark it as ignored in Coverity.


If you wanted to clean this up, remove the implicit control flow from REQUIRE_* and turn 
the macros into pure predicates, so that you get


if (REQUIRE_SV(ctx)) {
return true;
}
if (ctx->le_mode) {
...
}
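
One possible shape for such a predicate, purely as an illustration (the
helper name gen_priv_opc and the exact condition are assumed here, not
taken from the tree):

#if defined(CONFIG_USER_ONLY)
#define REQUIRE_SV(CTX)  (gen_priv_opc(CTX), true)  /* user mode: always fault */
#else
#define REQUIRE_SV(CTX)  ((CTX)->pr ? (gen_priv_opc(CTX), true) : false)
#endif

so the caller decides what to do with the result instead of the macro
returning behind its back.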


r~



Re: [PATCH 4/4] semihosting: Fix handling of buffer in TARGET_SYS_TMPNAM

2022-07-24 Thread Richard Henderson

On 7/19/22 17:41, Peter Maydell wrote:

The TARGET_SYS_TMPNAM implementation has two bugs spotted by
Coverity:
  * confusion about whether 'len' has the length of the string
including or excluding the terminating NUL means we
lock_user() len bytes of memory but memcpy() len + 1 bytes
  * In the error-exit cases we forget to free() the buffer
that asprintf() returned to us

Resolves: Coverity CID 1490285, 1490289
Signed-off-by: Peter Maydell
---
  semihosting/arm-compat-semi.c | 13 +++--
  1 file changed, 11 insertions(+), 2 deletions(-)


Reviewed-by: Richard Henderson 

r~



Deadlock between bdrv_drain_all_begin and prepare_mmio_access

2022-07-24 Thread Liang Yan

Hello All,

I am facing a lock situation between main-loop thread 1 and vcpu thread
4 when doing a QMP snapshot. QEMU is running on 6.0.x; I checked the
upstream code and did not see any big change since then. The guest is a
Windows 10 VM. Unfortunately, I could not get into the Windows VM or
reproduce the issue myself. No iothread is used here, native AIO only.


From the code,

-> AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

--> aio_poll(qemu_get_aio_context(), true);

The main-loop mutex is taken by thread 1 when the snapshot starts; the
vcpu released the iothread lock around address_space_rw and tries to take
it again in prepare_mmio_access.


It seems the main loop thread is stuck in a blocking aio_poll, but I
cannot figure out what addr=4275044592 in the MMIO read belongs to.


I do not quite understand what really happens here: either the block jobs
never drain out, or maybe a block I/O read from the vcpu causes a
deadlock? I hope domain experts here could help figure out the root
cause. Thanks in advance, and let me know if you need any further
information.



Regards,

Liang


(gdb) thread 1
[Switching to thread 1 (Thread 0x7f9ebcf96040 (LWP 358660))]
#0  0x7f9ec6eb4ac6 in __ppoll (fds=0x562dda80bc90, nfds=2, 
timeout=, timeout@entry=0x0, sigmask=sigmask@entry=0x0) 
at ../sysdeps/unix/sysv/linux/ppoll.c:44

44    ../sysdeps/unix/sysv/linux/ppoll.c: No such file or directory.
(gdb) bt
#0  0x7f9ec6eb4ac6 in __ppoll (fds=0x562dda80bc90, nfds=2, 
timeout=, timeout@entry=0x0, sigmask=sigmask@entry=0x0) 
at ../sysdeps/unix/sysv/linux/ppoll.c:44
#1  0x562dd7f5a409 in ppoll (__ss=0x0, __timeout=0x0, 
__nfds=, __fds=) at 
/usr/include/x86_64-linux-gnu/bits/poll2.h:77
#2  qemu_poll_ns (fds=, nfds=, 
timeout=timeout@entry=-1) at ../../util/qemu-timer.c:336
#3  0x562dd7f93de9 in fdmon_poll_wait (ctx=0x562dda193860, 
ready_list=0x7ffedaeb3f48, timeout=-1) at ../../util/fdmon-poll.c:80
#4  0x562dd7f6d05b in aio_poll (ctx=, 
blocking=blocking@entry=true) at ../../util/aio-posix.c:607

#5  0x562dd7e67e54 in bdrv_drain_all_begin () at ../../block/io.c:642
#6  bdrv_drain_all_begin () at ../../block/io.c:607
#7  0x562dd7e68a6d in bdrv_drain_all () at ../../block/io.c:693
#8  0x562dd7e54963 in qmp_transaction 
(dev_list=dev_list@entry=0x7ffedaeb4070, 
has_props=has_props@entry=false, props=0x562dda803910, props@entry=0x0, 
errp=errp@entry=0x7ffedaeb4128)

    at ../../blockdev.c:2348
#9  0x562dd7e54d5b in blockdev_do_action (errp=0x7ffedaeb4128, 
action=0x7ffedaeb4060) at ../../blockdev.c:1055
#10 qmp_blockdev_snapshot_sync (has_device=, 
device=, has_node_name=, 
node_name=, snapshot_file=,
    has_snapshot_node_name=, 
snapshot_node_name=0x562dda83c970 "hvd-snapshot", has_format=false, 
format=0x0, has_mode=false, mode=NEW_IMAGE_MODE_EXISTING, 
errp=0x7ffedaeb4128)

    at ../../blockdev.c:1083
#11 0x562dd7f0e5aa in qmp_marshal_blockdev_snapshot_sync 
(args=, ret=, errp=0x7f9ebc61ae90) at 
qapi/qapi-commands-block-core.c:221
#12 0x562dd7f5c5db in do_qmp_dispatch_bh (opaque=0x7f9ebc61aea0) at 
../../qapi/qmp-dispatch.c:131
#13 0x562dd7f5dc27 in aio_bh_call (bh=0x7f9e3000b760) at 
../../util/async.c:164

#14 aio_bh_poll (ctx=ctx@entry=0x562dda193860) at ../../util/async.c:164
#15 0x562dd7f6ca82 in aio_dispatch (ctx=0x562dda193860) at 
../../util/aio-posix.c:381
#16 0x562dd7f5da42 in aio_ctx_dispatch (source=, 
callback=, user_data=) at 
../../util/async.c:306
#17 0x7f9ec7ade17d in g_main_context_dispatch () from 
/lib/x86_64-linux-gnu/libglib-2.0.so.0

#18 0x562dd7f4f320 in glib_pollfds_poll () at ../../util/main-loop.c:231
#19 os_host_main_loop_wait (timeout=) at 
../../util/main-loop.c:254
#20 main_loop_wait (nonblocking=nonblocking@entry=0) at 
../../util/main-loop.c:530

#21 0x562dd7d3cfd9 in qemu_main_loop () at ../../softmmu/runstate.c:725
#22 0x562dd7b7aa82 in main (argc=, argv=out>, envp=) at ../../softmmu/main.c:50



(gdb) thread 4
[Switching to thread 4 (Thread 0x7f9e377fd700 (LWP 358668))]
#0  __lll_lock_wait (futex=futex@entry=0x562dd8337a60 
, private=0) at lowlevellock.c:52

52    lowlevellock.c: No such file or directory.
(gdb) bt
#0  __lll_lock_wait (futex=futex@entry=0x562dd8337a60 
, private=0) at lowlevellock.c:52
#1  0x7f9ec6f9f0a3 in __GI___pthread_mutex_lock 
(mutex=mutex@entry=0x562dd8337a60 ) at 
../nptl/pthread_mutex_lock.c:80
#2  0x562dd7f667c8 in qemu_mutex_lock_impl (mutex=0x562dd8337a60 
, file=0x562dd804c76c "../../softmmu/physmem.c", 
line=2742) at ../../util/qemu-thread-posix.c:79
#3  0x562dd7dca8ce in qemu_mutex_lock_iothread_impl 
(file=file@entry=0x562dd804c76c "../../softmmu/physmem.c", 
line=line@entry=2742) at ../../softmmu/cpus.c:491
#4  0x562dd7da2e91 in prepare_mmio_access (mr=) at 
../../softmmu/physmem.c:2742
#5  0x562dd7da8bbb in flatview_read_continue 
(fv=fv@entry=0x7f9e2827a4c0, addr=addr@entry=4275044592, attrs=..., 
ptr=ptr@entry=0x7f9ebcef7028, len=len@entry=4, addr1=, 
l=,

    

Re: [RFC] hw/nvme: Use irqfd to send interrupts

2022-07-24 Thread Stefan Hajnoczi
On Sun, Jul 24, 2022, 11:21 Jinhao Fan  wrote:

> at 9:29 PM, Stefan Hajnoczi  wrote:
>
> >
> > Nice, perf(1) is good for that. You can enable trace events and add
> > kprobes/uprobes to record timestamps when specific functions are entered.
> >
>
> Thanks Stefan,
>
> One last question: Currently we can achieve hundreds of KIOPS. That means
> perf can easily capture millions of trace events per second. I found perf
> has quite high overhead at such a rate of trace events. Do you have any
> advice on tracing high IOPS tasks?


I don't. BTW uprobes are expensive but kprobes are cheaper.

Stefan

>


Re: [PATCH 3/4] semihosting: Check for errors on SET_ARG()

2022-07-24 Thread Richard Henderson

On 7/19/22 17:41, Peter Maydell wrote:

The SET_ARG() macro returns an error indication; we check this in the
TARGET_SYS_GET_CMDLINE case but not when we use it in implementing
TARGET_SYS_ELAPSED.  Check for and handle the errors via the do_fault
codepath, and update the comment documenting the SET_ARG() and
GET_ARG() macros to note how they handle memory access errors.

Resolves: Coverity CID 1490287
Signed-off-by: Peter Maydell
---


Reviewed-by: Richard Henderson 

r~



Re: [PATCH 2/4] semihosting: Don't copy buffer after console_write()

2022-07-24 Thread Richard Henderson

On 7/19/22 17:41, Peter Maydell wrote:

The console_write() semihosting function outputs guest data from a
buffer; it doesn't update that buffer.  It therefore doesn't need to
pass a length value to unlock_user(), but can pass 0, meaning "do not
copy any data back to the guest memory".

Signed-off-by: Peter Maydell 
---
  semihosting/syscalls.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)
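
For illustration, the pattern being described is roughly the following
(variable names are made up, not the actual hunk); unlock_user()'s last
argument is how many bytes get copied back to guest memory:

    char *ptr = lock_user(VERIFY_READ, addr, len, 1);  /* copy in from guest */
    if (!ptr) {
        /* ...report the fault and bail out... */
    }
    qemu_semihosting_console_write(ptr, len);
    unlock_user(ptr, addr, 0);   /* 0: nothing is copied back to the guest */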


Reviewed-by: Richard Henderson 


r~



Re: [PATCH 1/4] semihosting: Don't return negative values on qemu_semihosting_console_write() failure

2022-07-24 Thread Richard Henderson

On 7/19/22 17:41, Peter Maydell wrote:

The documentation comment for qemu_semihosting_console_write() says
  * Returns: number of bytes written -- this should only ever be short
  * on some sort of i/o error.

and the callsites rely on this.  However, the implementation code
path which sends console output to a chardev doesn't honour this,
and will return negative values on error.  Bring it into line with
the other implementation codepaths and the documentation, so that
it returns 0 on error.

Spotted by Coverity, because console_write() passes the return value
to unlock_user(), which doesn't accept a negative length.

Resolves: Coverity CID 1490288
Signed-off-by: Peter Maydell 
---
console_write() doesn't need to pass the length to unlock_user()
at all, as it happens -- see the next patch.


Reviewed-by: Richard Henderson 

r~



Re: [PATCH v6 3/5] target/riscv: smstateen check for fcsr

2022-07-24 Thread Mayuresh Chitale
On Fri, 2022-07-22 at 09:42 +0800, Weiwei Li wrote:
> On 2022/7/21 11:31 PM, Mayuresh Chitale wrote:
> > If smstateen is implemented and sstateen0.fcsr is clear then the
> > floating point operations must return illegal instruction
> > exception.
> > 
> > Signed-off-by: Mayuresh Chitale 
> > ---
> >   target/riscv/csr.c| 23 ++
> >   target/riscv/insn_trans/trans_rvf.c.inc   | 38
> > +--
> >   target/riscv/insn_trans/trans_rvzfh.c.inc |  4 +++
> >   3 files changed, 63 insertions(+), 2 deletions(-)
> > 
> > diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> > index ab06b117f9..a597b6cbc7 100644
> > --- a/target/riscv/csr.c
> > +++ b/target/riscv/csr.c
> > @@ -96,6 +96,10 @@ static RISCVException fs(CPURISCVState *env, int
> > csrno)
> >   !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
> >   return RISCV_EXCP_ILLEGAL_INST;
> >   }
> > +
> > +if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
> > +return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
> > +}
> >   #endif
> >   return RISCV_EXCP_NONE;
> >   }
> > @@ -1876,6 +1880,9 @@ static RISCVException
> > write_mstateen0(CPURISCVState *env, int csrno,
> > target_ulong new_val)
> >   {
> >   uint64_t wr_mask = SMSTATEEN_STATEN | SMSTATEEN0_HSENVCFG;
> > +if (!riscv_has_ext(env, RVF)) {
> > +wr_mask |= SMSTATEEN0_FCSR;
> > +}
> >   
> >   return write_mstateen(env, csrno, wr_mask, new_val);
> >   }
> > @@ -1924,6 +1931,10 @@ static RISCVException
> > write_mstateen0h(CPURISCVState *env, int csrno,
> >   {
> >   uint64_t wr_mask = SMSTATEEN_STATEN | SMSTATEEN0_HSENVCFG;
> >   
> > +if (!riscv_has_ext(env, RVF)) {
> > +wr_mask |= SMSTATEEN0_FCSR;
> > +}
> > +
> >   return write_mstateenh(env, csrno, wr_mask, new_val);
> >   }
> >   
> > @@ -1973,6 +1984,10 @@ static RISCVException
> > write_hstateen0(CPURISCVState *env, int csrno,
> >   {
> >   uint64_t wr_mask = SMSTATEEN_STATEN | SMSTATEEN0_HSENVCFG;
> >   
> > +if (!riscv_has_ext(env, RVF)) {
> > +wr_mask |= SMSTATEEN0_FCSR;
> > +}
> > +
> >   return write_hstateen(env, csrno, wr_mask, new_val);
> >   }
> >   
> > @@ -2024,6 +2039,10 @@ static RISCVException
> > write_hstateen0h(CPURISCVState *env, int csrno,
> >   {
> >   uint64_t wr_mask = SMSTATEEN_STATEN | SMSTATEEN0_HSENVCFG;
> >   
> > +if (!riscv_has_ext(env, RVF)) {
> > +wr_mask |= SMSTATEEN0_FCSR;
> > +}
> > +
> >   return write_hstateenh(env, csrno, wr_mask, new_val);
> >   }
> >   
> > @@ -2083,6 +2102,10 @@ static RISCVException
> > write_sstateen0(CPURISCVState *env, int csrno,
> >   {
> >   uint64_t wr_mask = SMSTATEEN_STATEN | SMSTATEEN0_HSENVCFG;
> >   
> > +if (!riscv_has_ext(env, RVF)) {
> > +wr_mask |= SMSTATEEN0_FCSR;
> > +}
> > +
> >   return write_sstateen(env, csrno, wr_mask, new_val);
> >   }
> >   
> > diff --git a/target/riscv/insn_trans/trans_rvf.c.inc
> > b/target/riscv/insn_trans/trans_rvf.c.inc
> > index a1d3eb52ad..c43c48336b 100644
> > --- a/target/riscv/insn_trans/trans_rvf.c.inc
> > +++ b/target/riscv/insn_trans/trans_rvf.c.inc
> > @@ -24,9 +24,43 @@
> >   return false; \
> >   } while (0)
> >   
> > +#ifndef CONFIG_USER_ONLY
> > +#define SMSTATEEN_CHECK(ctx) do {\
> > +CPUState *cpu = ctx->cs; \
> > +CPURISCVState *env = cpu->env_ptr; \
> > +if (ctx->cfg_ptr->ext_smstateen && \
> > +(env->priv < PRV_M)) { \
> > +uint64_t stateen = env->mstateen[0]; \
> > +uint64_t hstateen = env->hstateen[0]; \
> > +uint64_t sstateen = env->sstateen[0]; \
> > +if (!(stateen & SMSTATEEN_STATEN)) {\
> > +hstateen = 0; \
> > +sstateen = 0; \
> > +} \
> > +if (ctx->virt_enabled) { \
> > +stateen &= hstateen; \
> > +if (!(hstateen & SMSTATEEN_STATEN)) {\
> > +sstateen = 0; \
> > +} \
> > +} \
> > +if (env->priv == PRV_U && has_ext(ctx, RVS)) {\
> > +stateen &= sstateen; \
> > +} \
> > +if (!(stateen & SMSTATEEN0_FCSR)) { \
> > +return false; \
> > +} \
> > +} \
> > +} while (0)
> 
> It's better to add a space before '\'.
ok. will modify in the next version.
> 
> > +#else
> > +#define SMSTATEEN_CHECK(ctx)
> > +#endif
> > +
> >   #define REQUIRE_ZFINX_OR_F(ctx) do {\
> > -if (!ctx->cfg_ptr->ext_zfinx) { \
> > -REQUIRE_EXT(ctx, RVF); \
> > +if (!has_ext(ctx, RVF)) { \
> > +SMSTATEEN_CHECK(ctx); \
> > +if (!ctx->cfg_ptr->ext_zfinx) { \
> > +return false; \
> > +} \
> >   } \
> >   } while (0)
> 
> SMSTATEEN_CHECK is for the CSR, and REQUIRE_ZFINX_OR_F is for the extension.
> I think it's better to separate them. By the way, if we want the
> smallest modification
> to the current code, adding it to REQUIRE_FPU seems

Re: [PATCH v6 1/5] target/riscv: Add smstateen support

2022-07-24 Thread Mayuresh Chitale
On Fri, 2022-07-22 at 08:31 +0800, Weiwei Li wrote:
> On 2022/7/21 11:31 PM, Mayuresh Chitale wrote:
> > Smstateen extension specifies a mechanism to close
> > the potential covert channels that could cause security issues.
> > 
> > This patch adds the CSRs defined in the specification and
> > the corresponding predicates and read/write functions.
> > 
> > Signed-off-by: Mayuresh Chitale 
> > ---
> >   target/riscv/cpu.h  |   4 +
> >   target/riscv/cpu_bits.h |  37 
> >   target/riscv/csr.c  | 370
> > 
> >   target/riscv/machine.c  |  21 +++
> >   4 files changed, 432 insertions(+)
> > 
> > diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> > index ffb1a18873..7f8e5b0014 100644
> > --- a/target/riscv/cpu.h
> > +++ b/target/riscv/cpu.h
> > @@ -354,6 +354,9 @@ struct CPUArchState {
> >   
> >   /* CSRs for execution enviornment configuration */
> >   uint64_t menvcfg;
> > +uint64_t mstateen[SMSTATEEN_MAX_COUNT];
> > +uint64_t hstateen[SMSTATEEN_MAX_COUNT];
> > +uint64_t sstateen[SMSTATEEN_MAX_COUNT];
> >   target_ulong senvcfg;
> >   uint64_t henvcfg;
> >   #endif
> > @@ -426,6 +429,7 @@ struct RISCVCPUConfig {
> >   bool ext_zkt;
> >   bool ext_ifencei;
> >   bool ext_icsr;
> > +bool ext_smstateen;
> >   bool ext_svinval;
> >   bool ext_svnapot;
> >   bool ext_svpbmt;
> > diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
> > index 6be5a9e9f0..56b7c5bed6 100644
> > --- a/target/riscv/cpu_bits.h
> > +++ b/target/riscv/cpu_bits.h
> > @@ -199,6 +199,12 @@
> >   /* Supervisor Configuration CSRs */
> >   #define CSR_SENVCFG 0x10A
> >   
> > +/* Supervisor state CSRs */
> > +#define CSR_SSTATEEN0   0x10C
> > +#define CSR_SSTATEEN1   0x10D
> > +#define CSR_SSTATEEN2   0x10E
> > +#define CSR_SSTATEEN3   0x10F
> > +
> >   /* Supervisor Trap Handling */
> >   #define CSR_SSCRATCH0x140
> >   #define CSR_SEPC0x141
> > @@ -242,6 +248,16 @@
> >   #define CSR_HENVCFG 0x60A
> >   #define CSR_HENVCFGH0x61A
> >   
> > +/* Hypervisor state CSRs */
> > +#define CSR_HSTATEEN0   0x60C
> > +#define CSR_HSTATEEN0H  0x61C
> > +#define CSR_HSTATEEN1   0x60D
> > +#define CSR_HSTATEEN1H  0x61D
> > +#define CSR_HSTATEEN2   0x60E
> > +#define CSR_HSTATEEN2H  0x61E
> > +#define CSR_HSTATEEN3   0x60F
> > +#define CSR_HSTATEEN3H  0x61F
> > +
> >   /* Virtual CSRs */
> >   #define CSR_VSSTATUS0x200
> >   #define CSR_VSIE0x204
> > @@ -283,6 +299,27 @@
> >   #define CSR_MENVCFG 0x30A
> >   #define CSR_MENVCFGH0x31A
> >   
> > +/* Machine state CSRs */
> > +#define CSR_MSTATEEN0   0x30C
> > +#define CSR_MSTATEEN0H  0x31C
> > +#define CSR_MSTATEEN1   0x30D
> > +#define CSR_MSTATEEN1H  0x31D
> > +#define CSR_MSTATEEN2   0x30E
> > +#define CSR_MSTATEEN2H  0x31E
> > +#define CSR_MSTATEEN3   0x30F
> > +#define CSR_MSTATEEN3H  0x31F
> > +
> > +/* Common defines for all smstateen */
> > +#define SMSTATEEN_MAX_COUNT 4
> > +#define SMSTATEEN0_CS   (1ULL << 0)
> > +#define SMSTATEEN0_FCSR (1ULL << 1)
> > +#define SMSTATEEN0_HSCONTXT (1ULL << 57)
> > +#define SMSTATEEN0_IMSIC(1ULL << 58)
> > +#define SMSTATEEN0_AIA  (1ULL << 59)
> > +#define SMSTATEEN0_SVSLCT   (1ULL << 60)
> > +#define SMSTATEEN0_HSENVCFG (1ULL << 62)
> > +#define SMSTATEEN_STATEN(1ULL << 63)
> Maybe SMSTATEEN_STATEEN is better.
ok. Will update in the next version.
> > +
> >   /* Enhanced Physical Memory Protection (ePMP) */
> >   #define CSR_MSECCFG 0x747
> >   #define CSR_MSECCFGH0x757
> > diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> > index 235f2a011e..27032a416c 100644
> > --- a/target/riscv/csr.c
> > +++ b/target/riscv/csr.c
> > @@ -339,6 +339,68 @@ static RISCVException hmode32(CPURISCVState
> > *env, int csrno)
> >   
> >   }
> >   
> > +static RISCVException mstateen(CPURISCVState *env, int csrno)
> > +{
> > +CPUState *cs = env_cpu(env);
> > +RISCVCPU *cpu = RISCV_CPU(cs);
> > +
> > +if (!cpu->cfg.ext_smstateen) {
> > +return RISCV_EXCP_ILLEGAL_INST;
> > +}
> > +
> > +return any(env, csrno);
> > +}
> > +
> > +static RISCVException hstateen_pred(CPURISCVState *env, int csrno,
> > int base)
> > +{
> > +CPUState *cs = env_cpu(env);
> > +RISCVCPU *cpu = RISCV_CPU(cs);
> > +
> > +if (!cpu->cfg.ext_smstateen) {
> > +return RISCV_EXCP_ILLEGAL_INST;
> > +}
> > +
> > +if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEN)) {
> > +return RISCV_EXCP_ILLEGAL_INST;
> > +}
> > +
> > +return hmode(env, csrno);
> > +}
> > +
> > +static RISCVException hstateen(CPURISCVState *env, int csrno)
> > +{
> > +return hstateen_pred(env, csrno, CSR_HSTATEEN0);
> > +}
> > +
> > +static RISCVException hstateenh(CPURISCVState *env, int csrno)
> > +{
> > +return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
> > +}
> 

Re: [RFC] hw/nvme: Use irqfd to send interrupts

2022-07-24 Thread Jinhao Fan
at 9:29 PM, Stefan Hajnoczi  wrote:

> 
> Nice, perf(1) is good for that. You can enable trace events and add
> kprobes/uprobes to record timestamps when specific functions are entered.
> 

Thanks Stefan,

One last question: Currently we can achieve hundreds of KIOPS. That means
perf can easily capture millions of trace events per second. I found perf
has quite high overhead at such a rate of trace events. Do you have any
advice on tracing high IOPS tasks?




[PATCH v15 04/10] vhost: introduce new VhostOps vhost_set_config_call

2022-07-24 Thread Cindy Lu
This patch introduces a new VhostOps callback, vhost_set_config_call.
This function allows QEMU to pass the config
event fd to the kernel driver.

Signed-off-by: Cindy Lu 
---
 include/hw/virtio/vhost-backend.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/include/hw/virtio/vhost-backend.h 
b/include/hw/virtio/vhost-backend.h
index eab46d7f0b..c5ab49051e 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -128,6 +128,8 @@ typedef int (*vhost_get_device_id_op)(struct vhost_dev 
*dev, uint32_t *dev_id);
 
 typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev);
 
+typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
+   int fd);
 typedef struct VhostOps {
 VhostBackendType backend_type;
 vhost_backend_init vhost_backend_init;
@@ -174,6 +176,7 @@ typedef struct VhostOps {
 vhost_vq_get_addr_op  vhost_vq_get_addr;
 vhost_get_device_id_op vhost_get_device_id;
 vhost_force_iommu_op vhost_force_iommu;
+vhost_set_config_call_op vhost_set_config_call;
 } VhostOps;
 
 int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
-- 
2.34.3
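
For context, a backend is expected to wire this op up roughly as follows
(a sketch modelled on the vhost-vdpa support elsewhere in this series;
the vhost_vdpa_call() helper and the VHOST_SET_CONFIG_CALL ioctl are
assumed here, they are not part of this patch):

static int vhost_vdpa_set_config_call(struct vhost_dev *dev, int fd)
{
    /* Hand the config-interrupt eventfd to the kernel driver. */
    return vhost_vdpa_call(dev, VHOST_SET_CONFIG_CALL, &fd);
}

and then set .vhost_set_config_call = vhost_vdpa_set_config_call in the
backend's VhostOps table.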




[PATCH v15 10/10] virtio-pci: add support for configure interrupt

2022-07-24 Thread Cindy Lu
Add a process to handle the configure interrupt. The function's
logic is the same as for the vq interrupt. Add an extra check for
the configure interrupt.

Signed-off-by: Cindy Lu 
---
 hw/virtio/virtio-pci.c | 110 -
 include/hw/virtio/virtio-pci.h |   4 +-
 2 files changed, 96 insertions(+), 18 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 4362dab725..17e87ebccc 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -729,7 +729,8 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, 
int queue_no,
 VirtQueue *vq;
 
 if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
-return -1;
+*n = virtio_config_get_guest_notifier(vdev);
+*vector = vdev->config_vector;
 } else {
 if (!virtio_queue_get_num(vdev, queue_no)) {
 return -1;
@@ -789,7 +790,7 @@ undo:
 }
 return ret;
 }
-static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
 {
 int queue_no;
 int ret = 0;
@@ -804,6 +805,10 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy 
*proxy, int nvqs)
 return ret;
 }
 
+static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
+{
+return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
+}
 
 static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
   int queue_no)
@@ -828,7 +833,7 @@ static void 
kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
 kvm_virtio_pci_vq_vector_release(proxy, vector);
 }
 
-static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+static void kvm_virtio_pci_vector_vq_release(VirtIOPCIProxy *proxy, int nvqs)
 {
 int queue_no;
 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
@@ -841,6 +846,11 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy 
*proxy, int nvqs)
 }
 }
 
+static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
+{
+kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
+}
+
 static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy,
unsigned int queue_no,
unsigned int vector,
@@ -922,9 +932,19 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, 
unsigned vector,
 }
 vq = virtio_vector_next_queue(vq);
 }
-
+/* unmask config intr */
+if (vector == vdev->config_vector) {
+n = virtio_config_get_guest_notifier(vdev);
+ret = virtio_pci_one_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX, 
vector,
+   msg, n);
+if (ret < 0) {
+goto undo_config;
+}
+}
 return 0;
-
+undo_config:
+n = virtio_config_get_guest_notifier(vdev);
+virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
 undo:
 vq = virtio_vector_first_queue(vdev, vector);
 while (vq && unmasked >= 0) {
@@ -958,6 +978,11 @@ static void virtio_pci_vector_mask(PCIDevice *dev, 
unsigned vector)
 }
 vq = virtio_vector_next_queue(vq);
 }
+
+if (vector == vdev->config_vector) {
+n = virtio_config_get_guest_notifier(vdev);
+virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n);
+}
 }
 
 static void virtio_pci_vector_poll(PCIDevice *dev,
@@ -989,6 +1014,34 @@ static void virtio_pci_vector_poll(PCIDevice *dev,
 msix_set_pending(dev, vector);
 }
 }
+/* poll the config intr */
+ret = virtio_pci_get_notifier(proxy, VIRTIO_CONFIG_IRQ_IDX, &notifier,
+  &vector);
+if (ret < 0) {
+return;
+}
+if (vector < vector_start || vector >= vector_end ||
+!msix_is_masked(dev, vector)) {
+return;
+}
+if (k->guest_notifier_pending) {
+if (k->guest_notifier_pending(vdev, VIRTIO_CONFIG_IRQ_IDX)) {
+msix_set_pending(dev, vector);
+}
+} else if (event_notifier_test_and_clear(notifier)) {
+msix_set_pending(dev, vector);
+}
+}
+
+void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue 
*vq,
+  int n, bool assign,
+  bool with_irqfd)
+{
+if (n == VIRTIO_CONFIG_IRQ_IDX) {
+virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
+} else {
+virtio_queue_set_guest_notifier_fd_handler(vq, assign, with_irqfd);
+}
 }
 
 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
@@ -997,17 +1050,25 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, 
int n, bool assign,
 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
-VirtQueue *vq = 

[PATCH v15 07/10] vhost: add support for configure interrupt

2022-07-24 Thread Cindy Lu
Add functions to support the configure interrupt.
The configure interrupt process will start in vhost_dev_start
and stop in vhost_dev_stop.

Also add the functions to support vhost_config_pending and
vhost_config_mask.
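
As a point of reference, a device's guest-notifier hooks are expected to
route the config index to these helpers roughly like this (a sketch based
on the virtio-net wiring later in this series; the function names below
are made up):

static bool example_guest_notifier_pending(struct vhost_dev *hdev,
                                           VirtIODevice *vdev, int idx)
{
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return vhost_config_pending(hdev);
    }
    return vhost_virtqueue_pending(hdev, idx);
}

static void example_guest_notifier_mask(struct vhost_dev *hdev,
                                        VirtIODevice *vdev, int idx, bool mask)
{
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        vhost_config_mask(hdev, vdev, mask);
        return;
    }
    vhost_virtqueue_mask(hdev, vdev, idx, mask);
}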

Signed-off-by: Cindy Lu 
---
 hw/virtio/vhost.c | 78 ++-
 include/hw/virtio/vhost.h |  4 ++
 2 files changed, 81 insertions(+), 1 deletion(-)

diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 0827d631c0..4479b4c94a 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1590,7 +1590,68 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, 
VirtIODevice *vdev, int n,
 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
 if (r < 0) {
-VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
+error_report("vhost_set_vring_call failed %d", -r);
+}
+}
+
+bool vhost_config_pending(struct vhost_dev *hdev)
+{
+assert(hdev->vhost_ops);
+if ((hdev->started == false) ||
+(hdev->vhost_ops->vhost_set_config_call == NULL)) {
+return false;
+}
+
+EventNotifier *notifier =
+&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
+return event_notifier_test_and_clear(notifier);
+}
+
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask)
+{
+int fd;
+int r;
+EventNotifier *notifier =
+&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
+EventNotifier *config_notifier = &vdev->config_notifier;
+assert(hdev->vhost_ops);
+
+if ((hdev->started == false) ||
+(hdev->vhost_ops->vhost_set_config_call == NULL)) {
+return;
+}
+if (mask) {
+assert(vdev->use_guest_notifier_mask);
+fd = event_notifier_get_fd(notifier);
+} else {
+fd = event_notifier_get_fd(config_notifier);
+}
+r = hdev->vhost_ops->vhost_set_config_call(hdev, fd);
+if (r < 0) {
+error_report("vhost_set_config_call failed %d", -r);
+}
+}
+
+static void vhost_stop_config_intr(struct vhost_dev *dev)
+{
+int fd = -1;
+assert(dev->vhost_ops);
+if (dev->vhost_ops->vhost_set_config_call) {
+dev->vhost_ops->vhost_set_config_call(dev, fd);
+}
+}
+
+static void vhost_start_config_intr(struct vhost_dev *dev)
+{
+int r;
+
+assert(dev->vhost_ops);
+int fd = event_notifier_get_fd(&dev->vdev->config_notifier);
+if (dev->vhost_ops->vhost_set_config_call) {
+r = dev->vhost_ops->vhost_set_config_call(dev, fd);
+if (!r) {
+event_notifier_set(>vdev->config_notifier);
+}
 }
 }
 
@@ -1807,6 +1868,16 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice 
*vdev)
 }
 }
 
+r = event_notifier_init(
+&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0);
+if (r < 0) {
+return r;
+}
+event_notifier_test_and_clear(
+&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
+if (!vdev->use_guest_notifier_mask) {
+vhost_config_mask(hdev, vdev, true);
+}
 if (hdev->log_enabled) {
 uint64_t log_base;
 
@@ -1839,6 +1910,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice 
*vdev)
 vhost_device_iotlb_miss(hdev, vq->used_phys, true);
 }
 }
+vhost_start_config_intr(hdev);
 return 0;
 fail_log:
 vhost_log_put(hdev, false);
@@ -1864,6 +1936,9 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice 
*vdev)
 
 /* should only be called after backend is connected */
 assert(hdev->vhost_ops);
+event_notifier_test_and_clear(
+&hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
+event_notifier_test_and_clear(&vdev->config_notifier);
 
 if (hdev->vhost_ops->vhost_dev_start) {
 hdev->vhost_ops->vhost_dev_start(hdev, false);
@@ -1881,6 +1956,7 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice 
*vdev)
 }
 memory_listener_unregister(>iommu_listener);
 }
+vhost_stop_config_intr(hdev);
 vhost_log_put(hdev, true);
 hdev->started = false;
 vdev->vhost_started = false;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index a346f23d13..3b336a15e3 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -30,6 +30,7 @@ struct vhost_virtqueue {
 unsigned used_size;
 EventNotifier masked_notifier;
 EventNotifier error_notifier;
+EventNotifier masked_config_notifier;
 struct vhost_dev *dev;
 };
 
@@ -38,6 +39,7 @@ typedef unsigned long vhost_log_chunk_t;
 #define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
 #define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
 #define VHOST_INVALID_FEATURE_BIT   (0xff)
+#define VHOST_QUEUE_NUM_CONFIG_INR 0
 
 struct vhost_log {
 unsigned long long size;
@@ -161,6 +163,8 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, 
VirtIODevice *vdev);
  * Disable direct notifications to vhost device.
  */
 void 

[PATCH v15 03/10] virtio-pci: decouple the single vector from the interrupt process

2022-07-24 Thread Cindy Lu
To reuse the interrupt process for the configure interrupt, we
need to decouple the single vector from the interrupt process.
We add the new functions kvm_virtio_pci_vector_use_one and _release_one.
These functions are used for a single vector; the whole process
finishes in the loop over the vq numbers.

Signed-off-by: Cindy Lu 
---
 hw/virtio/virtio-pci.c | 131 +++--
 1 file changed, 73 insertions(+), 58 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index ea56677f7a..4362dab725 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -677,7 +677,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
 }
 
 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
-unsigned int queue_no,
 unsigned int vector)
 {
 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
@@ -742,87 +741,103 @@ static int virtio_pci_get_notifier(VirtIOPCIProxy 
*proxy, int queue_no,
 return 0;
 }
 
-static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no)
 {
+unsigned int vector;
+int ret;
+EventNotifier *n;
 PCIDevice *dev = &proxy->pci_dev;
 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-unsigned int vector;
-int ret, queue_no;
-EventNotifier *n;
-for (queue_no = 0; queue_no < nvqs; queue_no++) {
-if (!virtio_queue_get_num(vdev, queue_no)) {
-break;
-}
-ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
-if (ret < 0) {
-break;
-}
-if (vector >= msix_nr_vectors_allocated(dev)) {
-continue;
-}
-ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
+
+ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+if (ret < 0) {
+return ret;
+}
+if (vector >= msix_nr_vectors_allocated(dev)) {
+return 0;
+}
+ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
+if (ret < 0) {
+goto undo;
+}
+/*
+ * If guest supports masking, set up irqfd now.
+ * Otherwise, delay until unmasked in the frontend.
+ */
+if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
 if (ret < 0) {
+kvm_virtio_pci_vq_vector_release(proxy, vector);
 goto undo;
 }
-/* If guest supports masking, set up irqfd now.
- * Otherwise, delay until unmasked in the frontend.
- */
-if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
-if (ret < 0) {
-kvm_virtio_pci_vq_vector_release(proxy, vector);
-goto undo;
-}
-}
 }
-return 0;
 
+return 0;
 undo:
-while (--queue_no >= 0) {
-vector = virtio_queue_vector(vdev, queue_no);
-if (vector >= msix_nr_vectors_allocated(dev)) {
-continue;
+
+vector = virtio_queue_vector(vdev, queue_no);
+if (vector >= msix_nr_vectors_allocated(dev)) {
+return ret;
+}
+if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
+ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+if (ret < 0) {
+return ret;
 }
-if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
-if (ret < 0) {
-break;
-}
-kvm_virtio_pci_irqfd_release(proxy, n, vector);
+kvm_virtio_pci_irqfd_release(proxy, n, vector);
+}
+return ret;
+}
+static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
+{
+int queue_no;
+int ret = 0;
+VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+for (queue_no = 0; queue_no < nvqs; queue_no++) {
+if (!virtio_queue_get_num(vdev, queue_no)) {
+return -1;
 }
-kvm_virtio_pci_vq_vector_release(proxy, vector);
+ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
 }
 return ret;
 }
 
-static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
+
+static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy *proxy,
+  int queue_no)
 {
-PCIDevice *dev = &proxy->pci_dev;
 VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
 unsigned int vector;
-int queue_no;
-VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
 EventNotifier *n;
-int ret ;
+int ret;
+VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+PCIDevice *dev = &proxy->pci_dev;
+
+ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+if (ret < 0) {
+return;
+}
+if (vector >= 
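
The point of the split is that a single vector can now be set up or torn
down without walking the whole nvqs loop. Presumably the final patch of the
series (not shown here) teaches virtio_pci_get_notifier() about
VIRTIO_CONFIG_IRQ_IDX, after which the configure interrupt could be wired
up with one extra call along these lines (an illustrative sketch, not part
of this patch):

    /* Sketch: reuse the per-queue helper once more for the config vector. */
    ret = kvm_virtio_pci_vector_use(proxy, nvqs);       /* data queues */
    if (ret == 0 && msix_enabled(&proxy->pci_dev)) {
        ret = kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX);
    }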

[PATCH v15 09/10] virtio-mmio: add support for configure interrupt

2022-07-24 Thread Cindy Lu
Add configure interrupt support to the virtio-mmio bus and add a
function that sets up the configure guest notifier.

Signed-off-by: Cindy Lu 
---
 hw/virtio/virtio-mmio.c | 27 +++
 1 file changed, 27 insertions(+)

diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index d240efef97..103260ec15 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -670,7 +670,30 @@ static int virtio_mmio_set_guest_notifier(DeviceState *d, 
int n, bool assign,
 
 return 0;
 }
+static int virtio_mmio_set_config_guest_notifier(DeviceState *d, bool assign,
+ bool with_irqfd)
+{
+VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
+VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+EventNotifier *notifier = virtio_config_get_guest_notifier(vdev);
+int r = 0;
 
+if (assign) {
+r = event_notifier_init(notifier, 0);
+if (r < 0) {
+return r;
+}
+virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
+} else {
+virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd);
+event_notifier_cleanup(notifier);
+}
+if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
+vdc->guest_notifier_mask(vdev, VIRTIO_CONFIG_IRQ_IDX, !assign);
+}
+return r;
+}
 static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
bool assign)
 {
@@ -692,6 +715,10 @@ static int virtio_mmio_set_guest_notifiers(DeviceState *d, 
int nvqs,
 goto assign_error;
 }
 }
+r = virtio_mmio_set_config_guest_notifier(d, assign, with_irqfd);
+if (r < 0) {
+goto assign_error;
+}
 
 return 0;
 
-- 
2.34.3
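
For context, the helper above follows the same lifecycle as the per-queue
notifiers: initialise the EventNotifier, install the fd handler, and tear
both down again on deassign. A compressed sketch of that lifecycle, using
only functions introduced elsewhere in this series (error handling elided,
irqfd path ignored):

    EventNotifier *notifier = virtio_config_get_guest_notifier(vdev);

    /* assign */
    event_notifier_init(notifier, 0);
    virtio_config_set_guest_notifier_fd_handler(vdev, true, false);

    /* ... config-change events now reach the guest through the notifier ... */

    /* deassign */
    virtio_config_set_guest_notifier_fd_handler(vdev, false, false);
    event_notifier_cleanup(notifier);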




[PATCH v15 06/10] virtio: add support for configure interrupt

2022-07-24 Thread Cindy Lu
Add the functions to support the configure interrupt in virtio.
The function virtio_config_guest_notifier_read() notifies the
guest when a configure interrupt is pending.
The function virtio_config_set_guest_notifier_fd_handler() sets
the fd handler for the notifier.

Signed-off-by: Cindy Lu 
---
 hw/virtio/virtio.c | 29 +
 include/hw/virtio/virtio.h |  4 
 2 files changed, 33 insertions(+)

diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 5d607aeaa0..ab1b46ae28 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3521,7 +3521,14 @@ static void 
virtio_queue_guest_notifier_read(EventNotifier *n)
 virtio_irq(vq);
 }
 }
+static void virtio_config_guest_notifier_read(EventNotifier *n)
+{
+VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
 
+if (event_notifier_test_and_clear(n)) {
+virtio_notify_config(vdev);
+}
+}
 void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
 bool with_irqfd)
 {
@@ -3538,6 +3545,23 @@ void 
virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
 }
 }
 
+void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
+ bool assign, bool with_irqfd)
+{
+EventNotifier *n;
+n = &vdev->config_notifier;
+if (assign && !with_irqfd) {
+event_notifier_set_handler(n, virtio_config_guest_notifier_read);
+} else {
+event_notifier_set_handler(n, NULL);
+}
+if (!assign) {
+/* Test and clear notifier before closing it,*/
+/* in case poll callback didn't have time to run. */
+virtio_config_guest_notifier_read(n);
+}
+}
+
 EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
 {
 return &vq->guest_notifier;
@@ -3618,6 +3642,11 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue 
*vq)
 return &vq->host_notifier;
 }
 
+EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev)
+{
+return &vdev->config_notifier;
+}
+
 void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
 {
 vq->host_notifier_enabled = enabled;
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index e985ef133f..a7b8cebb60 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -113,6 +113,7 @@ struct VirtIODevice
 bool use_guest_notifier_mask;
 AddressSpace *dma_as;
 QLIST_HEAD(, VirtQueue) *vector_queues;
+EventNotifier config_notifier;
 };
 
 struct VirtioDeviceClass {
@@ -327,6 +328,9 @@ void 
virtio_queue_aio_attach_host_notifier_no_poll(VirtQueue *vq, AioContext *ct
 void virtio_queue_aio_detach_host_notifier(VirtQueue *vq, AioContext *ctx);
 VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector);
 VirtQueue *virtio_vector_next_queue(VirtQueue *vq);
+EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev);
+void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev,
+ bool assign, bool with_irqfd);
 
 static inline void virtio_add_feature(uint64_t *features, unsigned int fbit)
 {
-- 
2.34.3
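
To make the data flow concrete: whoever holds the other end of the notifier
(an irqfd, or a vhost backend's config-call fd) only has to signal it; when
no irqfd is in use, the handler installed above turns that signal into a
config-change notification for the guest. A minimal sketch of the QEMU-side
trigger (assumed caller, not part of the patch):

    /* With the fd handler installed (assign && !with_irqfd), this ends up in
     * virtio_config_guest_notifier_read() and thus virtio_notify_config(). */
    event_notifier_set(virtio_config_get_guest_notifier(vdev));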




[PATCH v15 01/10] virtio: introduce macro VIRTIO_CONFIG_IRQ_IDX

2022-07-24 Thread Cindy Lu
To support the configure interrupt for vhost-vdpa, introduce
VIRTIO_CONFIG_IRQ_IDX (-1) as the configure interrupt's queue index,
so that the functions guest_notifier_mask and guest_notifier_pending
can be reused. Add the queue-index check to these drivers: if a driver
does not support the configure interrupt, the function simply returns.

Signed-off-by: Cindy Lu 
---
 hw/display/vhost-user-gpu.c| 15 +++
 hw/net/virtio-net.c| 20 ++--
 hw/virtio/vhost-user-fs.c  | 16 
 hw/virtio/vhost-vsock-common.c | 16 
 hw/virtio/virtio-crypto.c  | 16 
 include/hw/virtio/virtio.h |  3 +++
 6 files changed, 84 insertions(+), 2 deletions(-)

diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
index 3340ef9e5f..8454514da5 100644
--- a/hw/display/vhost-user-gpu.c
+++ b/hw/display/vhost-user-gpu.c
@@ -485,6 +485,13 @@ vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, 
int idx)
 {
 VhostUserGPU *g = VHOST_USER_GPU(vdev);
 
+/* Add the check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1)
+ * is used as the configure interrupt's index; if this driver does not
+ * support it, the function just returns false.
+ */
+if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+return false;
+}
 return vhost_virtqueue_pending(&g->vhost->dev, idx);
 }
 
@@ -493,6 +500,14 @@ vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int 
idx, bool mask)
 {
 VhostUserGPU *g = VHOST_USER_GPU(vdev);
 
+/* Add the check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1)
+ * is used as the configure interrupt's index; if this driver does not
+ * support it, the function just returns.
+ */
+
+if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+return;
+}
 vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
 }
 
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index dd0d056fde..b6d36b034c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3215,6 +3215,14 @@ static bool 
virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
 } else {
 nc = qemu_get_subqueue(n->nic, vq2q(idx));
 }
+/* Add the check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1)
+ * is used as the configure interrupt's index; if this driver does not
+ * support it, the function just returns false.
+ */
+
+if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+return false;
+}
 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
 }
 
@@ -3238,8 +3246,16 @@ static void virtio_net_guest_notifier_mask(VirtIODevice 
*vdev, int idx,
 } else {
 nc = qemu_get_subqueue(n->nic, vq2q(idx));
 }
-vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
- vdev, idx, mask);
+/* Add the check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1)
+ * is used as the configure interrupt's index; if this driver does not
+ * support it, the function just returns.
+ */
+
+if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+return;
+}
+
+vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
 }
 
 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index e513e4fdda..806c7680f0 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -161,6 +161,14 @@ static void vuf_guest_notifier_mask(VirtIODevice *vdev, 
int idx,
 {
 VHostUserFS *fs = VHOST_USER_FS(vdev);
 
+/* Add the check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1)
+ * is used as the configure interrupt's index; if this driver does not
+ * support it, the function just returns.
+ */
+
+if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+return;
+}
 vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
 }
 
@@ -168,6 +176,14 @@ static bool vuf_guest_notifier_pending(VirtIODevice *vdev, 
int idx)
 {
 VHostUserFS *fs = VHOST_USER_FS(vdev);
 
+/* Add the check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1)
+ * is used as the configure interrupt's index; if this driver does not
+ * support it, the function just returns false.
+ */
+
+if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+return false;
+}
 return vhost_virtqueue_pending(&fs->vhost_dev, idx);
 }
 
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index 7394818e00..37a20ffe8a 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -125,6 +125,14 @@ static void 
vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx,
 {
 VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
 
+/* Add the check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1)
+ * is used as the configure interrupt's index; if this driver does not
+ * support it, the function just returns.
+ */
+
+if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+return;
+}
 vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, 
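
The include/hw/virtio/virtio.h hunk is cut off above; judging from the
commit message it presumably boils down to something like the following
(a reconstruction, not the literal hunk):

    /* Presumed definition: a reserved "queue index" for the configure
     * interrupt, so the existing mask/pending callbacks can be reused. */
    #define VIRTIO_CONFIG_IRQ_IDX -1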

[PATCH v15 08/10] virtio-net: add support for configure interrupt

2022-07-24 Thread Cindy Lu
Add functions to support the configure interrupt in virtio-net:
add vhost_net_config_pending and vhost_net_config_mask.

Signed-off-by: Cindy Lu 
---
 hw/net/vhost_net.c  | 9 +
 hw/net/virtio-net.c | 4 ++--
 include/net/vhost_net.h | 2 ++
 3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index ccac5b7a64..273a8d8ab7 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -459,6 +459,15 @@ void vhost_net_virtqueue_mask(VHostNetState *net, 
VirtIODevice *dev,
 vhost_virtqueue_mask(&net->dev, dev, idx, mask);
 }
 
+bool vhost_net_config_pending(VHostNetState *net)
+{
+return vhost_config_pending(&net->dev);
+}
+
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask)
+{
+vhost_config_mask(&net->dev, dev, mask);
+}
 VHostNetState *get_vhost_net(NetClientState *nc)
 {
 VHostNetState *vhost_net = 0;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index b6d36b034c..f4a2c6b981 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3221,7 +3221,7 @@ static bool 
virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
  */
 
 if (idx == VIRTIO_CONFIG_IRQ_IDX) {
-return false;
+return vhost_net_config_pending(get_vhost_net(nc->peer));
 }
 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
 }
@@ -3252,9 +3252,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice 
*vdev, int idx,
  */
 
 if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
 return;
 }
-
 vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
 }
 
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 387e913e4e..fc37498550 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -39,6 +39,8 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t 
*data,
 bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
 void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
   int idx, bool mask);
+bool vhost_net_config_pending(VHostNetState *net);
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask);
 int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
 VHostNetState *get_vhost_net(NetClientState *nc);
 
-- 
2.34.3
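
Taken together with patch 01, the resulting control flow in
virtio_net_guest_notifier_mask() is a plain dispatch on the index; roughly
(paraphrased from the two hunks, with nc already resolved as above):

    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
        return;
    }
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);

virtio_net_guest_notifier_pending() follows the same shape with the
_pending variants.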




[PATCH v15 05/10] vhost-vdpa: add support for config interrupt

2022-07-24 Thread Cindy Lu
Add a new callback function to vhost-vdpa. The function
vhost_set_config_call passes the config event fd to the kernel.
It will be called from vhost_dev_start and vhost_dev_stop.

Signed-off-by: Cindy Lu 
---
 hw/virtio/trace-events | 1 +
 hw/virtio/vhost-vdpa.c | 8 
 2 files changed, 9 insertions(+)

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 20af2e7ebd..1f510ba86e 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -58,6 +58,7 @@ vhost_vdpa_get_features(void *dev, uint64_t features) "dev: 
%p features: 0x%"PRI
 vhost_vdpa_set_owner(void *dev) "dev: %p"
 vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t 
avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 
0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64
 vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p 
first: 0x%"PRIx64" last: 0x%"PRIx64
+vhost_vdpa_set_config_call(void *dev, int fd)"dev: %p fd: %d"
 
 # virtio.c
 virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned 
out_num) "elem %p size %zd in_num %u out_num %u"
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 291cd19054..da8413c9fd 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -744,6 +744,13 @@ static int vhost_vdpa_set_vring_ready(struct vhost_dev 
*dev)
 return 0;
 }
 
+static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
+   int fd)
+{
+trace_vhost_vdpa_set_config_call(dev, fd);
+return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
+}
+
 static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t 
*config,
uint32_t config_len)
 {
@@ -1322,4 +1329,5 @@ const VhostOps vdpa_ops = {
 .vhost_get_device_id = vhost_vdpa_get_device_id,
 .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
 .vhost_force_iommu = vhost_vdpa_force_iommu,
+.vhost_set_config_call = vhost_vdpa_set_config_call,
 };
-- 
2.34.3
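
The callback itself is a thin wrapper; the interesting part is the caller.
A plausible sketch of how the vhost core (patch 07 of this series, not shown
in full here) would hand the config notifier's fd to the backend (the hdev
and vdev variables and the exact call site are assumptions):

    /* Sketch: pass the config notifier's fd down to the vdpa backend. */
    EventNotifier *n = virtio_config_get_guest_notifier(vdev);
    int fd = event_notifier_get_fd(n);

    if (hdev->vhost_ops->vhost_set_config_call) {
        hdev->vhost_ops->vhost_set_config_call(hdev, fd);
    }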




[PATCH v15 02/10] virtio-pci: decouple notifier from interrupt process

2022-07-24 Thread Cindy Lu
To reuse the notifier handling, add virtio_pci_get_notifier() to look
up the notifier and vector. The input of this function is the queue
index (IDX); the outputs are the notifier and the vector.

Signed-off-by: Cindy Lu 
---
 hw/virtio/virtio-pci.c | 88 +++---
 1 file changed, 57 insertions(+), 31 deletions(-)

diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 45327f0b31..ea56677f7a 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -706,29 +706,41 @@ static void 
kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
 }
 
 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
- unsigned int queue_no,
+ EventNotifier *n,
  unsigned int vector)
 {
 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
-VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-VirtQueue *vq = virtio_get_queue(vdev, queue_no);
-EventNotifier *n = virtio_queue_get_guest_notifier(vq);
 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
 }
 
 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
-  unsigned int queue_no,
+  EventNotifier *n ,
   unsigned int vector)
 {
-VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
-VirtQueue *vq = virtio_get_queue(vdev, queue_no);
-EventNotifier *n = virtio_queue_get_guest_notifier(vq);
 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
 int ret;
 
 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
 assert(ret == 0);
 }
+static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no,
+  EventNotifier **n, unsigned int *vector)
+{
+VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+VirtQueue *vq;
+
+if (queue_no == VIRTIO_CONFIG_IRQ_IDX) {
+return -1;
+} else {
+if (!virtio_queue_get_num(vdev, queue_no)) {
+return -1;
+}
+*vector = virtio_queue_vector(vdev, queue_no);
+vq = virtio_get_queue(vdev, queue_no);
+*n = virtio_queue_get_guest_notifier(vq);
+}
+return 0;
+}
 
 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
 {
@@ -737,12 +749,15 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy 
*proxy, int nvqs)
 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
 unsigned int vector;
 int ret, queue_no;
-
+EventNotifier *n;
 for (queue_no = 0; queue_no < nvqs; queue_no++) {
 if (!virtio_queue_get_num(vdev, queue_no)) {
 break;
 }
-vector = virtio_queue_vector(vdev, queue_no);
+ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+if (ret < 0) {
+break;
+}
 if (vector >= msix_nr_vectors_allocated(dev)) {
 continue;
 }
@@ -754,7 +769,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, 
int nvqs)
  * Otherwise, delay until unmasked in the frontend.
  */
 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
 if (ret < 0) {
 kvm_virtio_pci_vq_vector_release(proxy, vector);
 goto undo;
@@ -770,7 +785,11 @@ undo:
 continue;
 }
 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+if (ret < 0) {
+break;
+}
+kvm_virtio_pci_irqfd_release(proxy, n, vector);
 }
 kvm_virtio_pci_vq_vector_release(proxy, vector);
 }
@@ -784,12 +803,16 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy 
*proxy, int nvqs)
 unsigned int vector;
 int queue_no;
 VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-
+EventNotifier *n;
+int ret ;
 for (queue_no = 0; queue_no < nvqs; queue_no++) {
 if (!virtio_queue_get_num(vdev, queue_no)) {
 break;
 }
-vector = virtio_queue_vector(vdev, queue_no);
+ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector);
+if (ret < 0) {
+break;
+}
 if (vector >= msix_nr_vectors_allocated(dev)) {
 continue;
 }
@@ -797,21 +820,20 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy 
*proxy, int nvqs)
  * Otherwise, it was cleaned when masked in the frontend.
  */
 if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
-kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+kvm_virtio_pci_irqfd_release(proxy, n, vector);
 }
 
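
With the lookup factored out, every caller follows the same pattern: resolve
(notifier, vector) for an index, skip vectors that are out of range, then act
on the pair. A condensed sketch of that pattern (error paths trimmed; teardown
would call kvm_virtio_pci_irqfd_release() instead):

    EventNotifier *n;
    unsigned int vector;

    if (virtio_pci_get_notifier(proxy, queue_no, &n, &vector) < 0) {
        return;                                 /* no such queue */
    }
    if (vector >= msix_nr_vectors_allocated(&proxy->pci_dev)) {
        return;                                 /* vector not allocated */
    }
    kvm_virtio_pci_irqfd_use(proxy, n, vector);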

[PATCH v15 00/10] vhost-vdpa: add support for configure interrupt

2022-07-24 Thread Cindy Lu
These patches introduce support for the configure interrupt.
The code has been tested on x86_64 and aarch64 platforms.
The test scenario is based on vp-vdpa/vdpa_sim/virtio-net devices,
both with and without irqfd.
Tested on the virtio-pci bus and the virtio-mmio bus.

Change in v2:
Add support for virtio-mmio bus
activate the notifier when the backend supports the configure interrupt
misc fixes from v1

Change in v3
fix the coding style problems

Change in v4
misc fixes from v3
merge the set_config_notifier to set_guest_notifier
when vdpa starts, check the feature via VIRTIO_NET_F_STATUS

Change in v5
misc fixes from v4
split the code to introduce the configure interrupt type and the callback function
init the configure interrupt in both the virtio-pci and virtio-mmio buses, but
only activate it while using the vhost-vdpa driver

Change in v6
misc fixes from v5
decouple vq from interrupt setting and misc process
fix the bug in virtio_net_handle_rx

Change in v7
misc fixes from v6
decouple vq from interrupt setting and misc process
decouple vq from vector use/release process
decouple vq from set notifier fd handler process
move config_notifier and masked_config_notifier to VirtIODevice
fix the bug in virtio_net_handle_rx, add more information
add VIRTIO_CONFIG_IRQ_IDX as the queue index for the configure interrupt

Change in v8
misc fixes from v7
decouple vq from interrupt setting and misc process
decouple vq from vector use/release process
decouple vq from set notifier fd handler process
move the vhost configure interrupt to vhost_net

Change in v9
misc fixes from v8
address the comments from v8

Change in v10
fix the hang issue in qtest
address the comments from v9

Change in v11
fix the crash on the aarch64 platform
fix the crash reported upstream

Change in v12
fix the typo and the comments

changes in v13
re-send the patches by git-publish

changes in v14
rebased the code based on upstream

Cindy Lu (10):
  virtio: introduce macro VIRTIO_CONFIG_IRQ_IDX
  virtio-pci: decouple notifier from interrupt process
  virtio-pci: decouple the single vector from the interrupt process
  vhost: introduce new VhostOps vhost_set_config_call
  vhost-vdpa: add support for config interrupt
  virtio: add support for configure interrupt
  vhost: add support for configure interrupt
  virtio-net: add support for configure interrupt
  virtio-mmio: add support for configure interrupt
  virtio-pci: add support for configure interrupt

 hw/display/vhost-user-gpu.c   |  15 ++
 hw/net/vhost_net.c|   9 +
 hw/net/virtio-net.c   |  20 ++-
 hw/virtio/trace-events|   1 +
 hw/virtio/vhost-user-fs.c |  16 ++
 hw/virtio/vhost-vdpa.c|   8 +
 hw/virtio/vhost-vsock-common.c|  16 ++
 hw/virtio/vhost.c |  78 +++-
 hw/virtio/virtio-crypto.c |  16 ++
 hw/virtio/virtio-mmio.c   |  27 +++
 hw/virtio/virtio-pci.c| 283 +-
 hw/virtio/virtio.c|  29 +++
 include/hw/virtio/vhost-backend.h |   3 +
 include/hw/virtio/vhost.h |   4 +
 include/hw/virtio/virtio-pci.h|   4 +-
 include/hw/virtio/virtio.h|   7 +
 include/net/vhost_net.h   |   2 +
 17 files changed, 451 insertions(+), 87 deletions(-)

-- 
2.34.3