On 2021/1/26 3:42 PM, Cindy Lu wrote:
Add support for the configure interrupt: use kvm_irqfd_assign() to set up the
gsi in the kernel. When the host kernel signals the config notifier's eventfd,
this ultimately injects an MSI-X interrupt into the guest.
Signed-off-by: Cindy Lu <l...@redhat.com>
---
hw/virtio/virtio-pci.c | 92 ++++++++++++++++++++++++++++++++++--------
1 file changed, 75 insertions(+), 17 deletions(-)
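My understanding of the intended flow, as a minimal sketch rather than the
patch itself (vector and proxy come from the surrounding virtio-pci context,
kvm_state is the usual global, and route bookkeeping is elided):

    EventNotifier cfg;
    event_notifier_init(&cfg, 0);   /* eventfd backing the config notifier */
    /* allocate a KVM irq route (gsi) for this MSI-X vector... */
    int virq = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
    /* ...and tie the eventfd to it; kvm_irqfd_assign() runs in the kernel
     * as part of the KVM_IRQFD ioctl issued here */
    kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &cfg, NULL, virq);
    /* from here on, an eventfd_signal() in the host kernel makes KVM inject
     * the MSI-X interrupt for this vector directly, without exiting to QEMU */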
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 36524a5728..8e192600b8 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -664,7 +664,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev,
}
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
- unsigned int queue_no,
unsigned int vector)
{
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
@@ -691,23 +690,17 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
}
static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
- unsigned int queue_no,
+ EventNotifier *n,
unsigned int vector)
{
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- VirtQueue *vq = virtio_get_queue(vdev, queue_no);
- EventNotifier *n = virtio_queue_get_guest_notifier(vq);
return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
irqfd->virq);
}
static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
- unsigned int queue_no,
+ EventNotifier *n,
unsigned int vector)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- VirtQueue *vq = virtio_get_queue(vdev, queue_no);
- EventNotifier *n = virtio_queue_get_guest_notifier(vq);
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
int ret;
@@ -722,7 +715,8 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
unsigned int vector;
int ret, queue_no;
-
+ VirtQueue *vq;
+ EventNotifier *n;
A blank line is needed here.
for (queue_no = 0; queue_no < nvqs; queue_no++) {
if (!virtio_queue_get_num(vdev, queue_no)) {
break;
@@ -731,7 +725,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
if (vector >= msix_nr_vectors_allocated(dev)) {
continue;
}
- ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
+ ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
if (ret < 0) {
goto undo;
}
@@ -739,7 +733,9 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
* Otherwise, delay until unmasked in the frontend.
*/
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+ vq = virtio_get_queue(vdev, queue_no);
+ n = virtio_queue_get_guest_notifier(vq);
+ ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
if (ret < 0) {
kvm_virtio_pci_vq_vector_release(proxy, vector);
goto undo;
@@ -755,13 +751,69 @@ undo:
continue;
}
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ vq = virtio_get_queue(vdev, queue_no);
+ n = virtio_queue_get_guest_notifier(vq);
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
}
return ret;
}
+static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
+{
+
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ unsigned int vector;
+ int ret;
+ EventNotifier *n = virtio_get_config_notifier(vdev);
+
+ vector = vdev->config_vector;
+ ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
+ if (ret < 0) {
+ goto undo;
+ }
+ ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
+ if (ret < 0) {
+ goto undo;
+ }
+ return 0;
+undo:
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
+ return ret;
+}
A blank line is needed here.
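Also, the undo path in kvm_virtio_pci_vector_config_use() above looks
suspicious: when kvm_virtio_pci_vq_vector_use() fails we jump to undo and
release an irqfd that was never assigned, and when kvm_virtio_pci_irqfd_use()
fails we again release that unassigned irqfd while leaking the MSI route. A
tighter error path could look like this (untested sketch, reusing the same
helpers as the patch):

    static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy)
    {
        VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
        EventNotifier *n = virtio_get_config_notifier(vdev);
        unsigned int vector = vdev->config_vector;
        int ret;

        ret = kvm_virtio_pci_vq_vector_use(proxy, vector);
        if (ret < 0) {
            return ret;                    /* nothing assigned yet */
        }
        ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
        if (ret < 0) {
            /* the irqfd was never assigned, so only release the MSI route */
            kvm_virtio_pci_vq_vector_release(proxy, vector);
        }
        return ret;
    }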
+static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
+{
+ PCIDevice *dev = &proxy->pci_dev;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ unsigned int vector;
+ EventNotifier *n = virtio_get_config_notifier(vdev);
+ vector = vdev->config_vector;
+ if (vector >= msix_nr_vectors_allocated(dev)) {
+ return;
+ }
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
+ kvm_virtio_pci_vq_vector_release(proxy, vector);
+}
+
+static int virtio_pci_set_config_notifier(DeviceState *d, bool assign)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ EventNotifier *notifier = virtio_get_config_notifier(vdev);
+ int r = 0;
+ if (assign) {
+ r = event_notifier_init(notifier, 0);
+ virtio_set_config_notifier_fd_handler(vdev, true, true);
+ kvm_virtio_pci_vector_config_use(proxy);
+ } else {
+ virtio_set_config_notifier_fd_handler(vdev, false, true);
+ kvm_virtio_pci_vector_config_release(proxy);
+ event_notifier_cleanup(notifier);
+ }
+ return r;
+}
Two questions. First, don't we need to check whether MSI-X is enabled in
this case? Second, instead of introducing new helpers that are easy to get
wrong, I still prefer to reuse virtio_pci_set_guest_notifier(): we can
switch it to accept an EventNotifier instead of a virtqueue index. We would
also need to convert virtqueue_set_guest_notifier_fd_handler() to accept an
EventNotifier; see the sketches below.
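For the MSI-X check, something along these lines near the top of
virtio_pci_set_config_notifier() might do (untested; msix_enabled() is the
existing helper from hw/pci/msix.h, and the error value is only a
suggestion):

    if (!msix_enabled(&proxy->pci_dev)) {
        return -EINVAL;
    }

And a rough, untested sketch of an EventNotifier-based fd-handler helper;
the read callback becomes a parameter because it differs between queue and
config notifiers (the function name and the read_cb parameter are made up
here):

    static void virtio_notifier_set_fd_handler(EventNotifier *n, bool assign,
                                               bool with_irqfd,
                                               EventNotifierHandler *read_cb)
    {
        if (assign && !with_irqfd) {
            /* let the main loop dispatch the notifier to the callback */
            event_notifier_set_handler(n, read_cb);
        } else {
            event_notifier_set_handler(n, NULL);
        }
        if (!assign) {
            /* flush a pending notification before the notifier is torn down */
            read_cb(n);
        }
    }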
+
static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
PCIDevice *dev = &proxy->pci_dev;
@@ -769,7 +821,8 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
unsigned int vector;
int queue_no;
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
-
+ VirtQueue *vq;
+ EventNotifier *n;
A blank line is needed here.
Thanks
for (queue_no = 0; queue_no < nvqs; queue_no++) {
if (!virtio_queue_get_num(vdev, queue_no)) {
break;
@@ -782,7 +835,9 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
* Otherwise, it was cleaned when masked in the frontend.
*/
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
- kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ vq = virtio_get_queue(vdev, queue_no);
+ n = virtio_queue_get_guest_notifier(vq);
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
}
@@ -823,7 +878,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
event_notifier_set(n);
}
} else {
- ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
+ ret = kvm_virtio_pci_irqfd_use(proxy, n, vector);
}
return ret;
}
@@ -835,13 +890,15 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ VirtQueue *vq = virtio_get_queue(vdev, queue_no);
+ EventNotifier *n = virtio_queue_get_guest_notifier(vq);
/* If guest supports masking, keep irqfd but mask it.
* Otherwise, clean it up now.
*/
if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
k->guest_notifier_mask(vdev, queue_no, true);
} else {
- kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
+ kvm_virtio_pci_irqfd_release(proxy, n, vector);
}
}
@@ -2137,6 +2194,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
k->get_dma_as = virtio_pci_get_dma_as;
k->queue_enabled = virtio_pci_queue_enabled;
+ k->set_config_notifiers = virtio_pci_set_config_notifier;
}
static const TypeInfo virtio_pci_bus_info = {
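For what it's worth, I assume the device side of this series then reaches the
new hook through the usual virtio-bus pattern, roughly like this (hypothetical
caller, not part of this patch):

    VirtioBusState *vbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    DeviceState *proxy = DEVICE(BUS(vbus)->parent);
    if (k->set_config_notifiers) {
        r = k->set_config_notifiers(proxy, true);
    }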