Re: [Xen-devel] [PATCH v4 7/9] vpci/msi: add MSI handlers

2017-08-09 Thread Roger Pau Monné
On Wed, Aug 09, 2017 at 02:21:33AM -0600, Jan Beulich wrote:
> >>> On 08.08.17 at 17:44, Roger Pau Monné wrote:
> > On Wed, Aug 02, 2017 at 07:34:28AM -0600, Jan Beulich wrote:
> >> >>> Roger Pau Monne  06/30/17 5:01 PM >>>
> >> >+/* Get a PIRQ. */
> >> >+rc = allocate_and_map_msi_pirq(pdev->domain, -1, &arch->pirq,
> >> >+   MAP_PIRQ_TYPE_MULTI_MSI, &msi_info);
> >> >+if ( rc )
> >> >+{
> >> >+dprintk(XENLOG_ERR, "%04x:%02x:%02x.%u: failed to map PIRQ: %d\n",
> >> >+pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
> >> >+PCI_FUNC(pdev->devfn), rc);
> >> >+return rc;
> >> >+}
> >> >+
> >> >+for ( i = 0; i < vectors; i++ )
> >> >+{
> >> >+xen_domctl_bind_pt_irq_t bind = {
> >> >+.machine_irq = arch->pirq + i,
> >> >+.irq_type = PT_IRQ_TYPE_MSI,
> >> >+.u.msi.gvec = msi_vector(data) + i,
> >> >+.u.msi.gflags = msi_flags(data, address),
> >> >+};
> >> >+
> >> >+pcidevs_lock();
> >> >+rc = pt_irq_create_bind(pdev->domain, &bind);
> >> >+if ( rc )
> >> >+{
> >> >+dprintk(XENLOG_ERR,
> >> >+"%04x:%02x:%02x.%u: failed to bind PIRQ %u: %d\n",
> >> >+pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
> >> >+PCI_FUNC(pdev->devfn), arch->pirq + i, rc);
> >> >+spin_lock(&pdev->domain->event_lock);
> >> >+unmap_domain_pirq(pdev->domain, arch->pirq);
> >> 
> >> Don't you also need to undo the pt_irq_create_bind() calls here for all
> >> prior successful iterations?
> > 
> > Yes, unmap_domain_pirq calls pirq_guest_force_unbind but better not
> > resort to that.
> 
> I don't understand.

I've added calls to pt_irq_destroy_bind before calling
unmap_domain_pirq.
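
Roughly along these lines (untested sketch, reusing the names from the
patch; the exact unlock/cleanup ordering may still change):

    rc = pt_irq_create_bind(pdev->domain, &bind);
    if ( rc )
    {
        dprintk(XENLOG_ERR,
                "%04x:%02x:%02x.%u: failed to bind PIRQ %u: %d\n",
                pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
                PCI_FUNC(pdev->devfn), arch->pirq + i, rc);
        /* Undo the bindings created by the prior, successful iterations. */
        while ( i-- )
        {
            xen_domctl_bind_pt_irq_t unbind = {
                .machine_irq = arch->pirq + i,
                .irq_type = PT_IRQ_TYPE_MSI,
            };

            pt_irq_destroy_bind(pdev->domain, &unbind);
        }
        spin_lock(&pdev->domain->event_lock);
        unmap_domain_pirq(pdev->domain, arch->pirq);
        spin_unlock(&pdev->domain->event_lock);
        pcidevs_unlock();
        return rc;
    }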

> >> >+int vpci_msi_arch_disable(struct vpci_arch_msi *arch, struct pci_dev *pdev,
> >> >+  unsigned int vectors)
> >> >+{
> >> >+unsigned int i;
> >> >+
> >> >+ASSERT(arch->pirq != -1);
> >> >+
> >> >+for ( i = 0; i < vectors; i++ )
> >> >+{
> >> >+xen_domctl_bind_pt_irq_t bind = {
> >> >+.machine_irq = arch->pirq + i,
> >> >+.irq_type = PT_IRQ_TYPE_MSI,
> >> >+};
> >> >+
> >> >+pcidevs_lock();
> >> >+pt_irq_destroy_bind(pdev->domain, &bind);
> >> 
> >> While I agree that the loop should continue if this fails, I'm not
> >> convinced you should entirely ignore the return value here.
> > 
> > I've added a printk in order to aid debug.
> 
> I've actually tried to hint at you wanting to run the loop to
> completion while returning to the caller the first error you've
> encountered.

Hm, I'm not sure of the best way to proceed here.

If vpci_msi_arch_disable returns once one of the pt_irq_destroy_bind
calls fails, further calls to vpci_msi_arch_disable are also likely to
fail if the previous call managed to destroy some of the bindings but
not all of them.

But then trying to call unmap_domain_pirq without having destroyed all
of the bindings seems likely to fail anyway...
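
For reference, Jan's suggestion would look something like this (untested
sketch; INVALID_PIRQ being the define added in this version):

    int vpci_msi_arch_disable(struct vpci_arch_msi *arch, struct pci_dev *pdev,
                              unsigned int vectors)
    {
        unsigned int i;
        int rc = 0;

        ASSERT(arch->pirq != INVALID_PIRQ);

        pcidevs_lock();
        for ( i = 0; i < vectors; i++ )
        {
            xen_domctl_bind_pt_irq_t bind = {
                .machine_irq = arch->pirq + i,
                .irq_type = PT_IRQ_TYPE_MSI,
            };
            int ret = pt_irq_destroy_bind(pdev->domain, &bind);

            /* Run to completion, but remember the first error. */
            if ( ret && !rc )
                rc = ret;
        }

        spin_lock(&pdev->domain->event_lock);
        unmap_domain_pirq(pdev->domain, arch->pirq);
        spin_unlock(&pdev->domain->event_lock);
        pcidevs_unlock();

        arch->pirq = INVALID_PIRQ;

        return rc;
    }

Whether the caller can do anything sensible with that error is exactly
what I'm unsure about.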

Thanks, Roger.



Re: [Xen-devel] [PATCH v4 7/9] vpci/msi: add MSI handlers

2017-08-09 Thread Jan Beulich
>>> On 08.08.17 at 17:44, Roger Pau Monné wrote:
> On Wed, Aug 02, 2017 at 07:34:28AM -0600, Jan Beulich wrote:
>> >>> Roger Pau Monne  06/30/17 5:01 PM >>>
>> >+/* Get a PIRQ. */
>> >+rc = allocate_and_map_msi_pirq(pdev->domain, -1, &arch->pirq,
>> >+   MAP_PIRQ_TYPE_MULTI_MSI, &msi_info);
>> >+if ( rc )
>> >+{
>> >+dprintk(XENLOG_ERR, "%04x:%02x:%02x.%u: failed to map PIRQ: %d\n",
>> >+pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
>> >+PCI_FUNC(pdev->devfn), rc);
>> >+return rc;
>> >+}
>> >+
>> >+for ( i = 0; i < vectors; i++ )
>> >+{
>> >+xen_domctl_bind_pt_irq_t bind = {
>> >+.machine_irq = arch->pirq + i,
>> >+.irq_type = PT_IRQ_TYPE_MSI,
>> >+.u.msi.gvec = msi_vector(data) + i,
>> >+.u.msi.gflags = msi_flags(data, address),
>> >+};
>> >+
>> >+pcidevs_lock();
>> >+rc = pt_irq_create_bind(pdev->domain, &bind);
>> >+if ( rc )
>> >+{
>> >+dprintk(XENLOG_ERR,
>> >+"%04x:%02x:%02x.%u: failed to bind PIRQ %u: %d\n",
>> >+pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
>> >+PCI_FUNC(pdev->devfn), arch->pirq + i, rc);
>> >+spin_lock(&pdev->domain->event_lock);
>> >+unmap_domain_pirq(pdev->domain, arch->pirq);
>> 
>> Don't you also need to undo the pt_irq_create_bind() calls here for all prior
>> successful iterations?
> 
> Yes, unmap_domain_pirq calls pirq_guest_force_unbind but better not
> resort to that.

I don't understand.

>> >+int vpci_msi_arch_disable(struct vpci_arch_msi *arch, struct pci_dev *pdev,
>> >+  unsigned int vectors)
>> >+{
>> >+unsigned int i;
>> >+
>> >+ASSERT(arch->pirq != -1);
>> >+
>> >+for ( i = 0; i < vectors; i++ )
>> >+{
>> >+xen_domctl_bind_pt_irq_t bind = {
>> >+.machine_irq = arch->pirq + i,
>> >+.irq_type = PT_IRQ_TYPE_MSI,
>> >+};
>> >+
>> >+pcidevs_lock();
>> >+pt_irq_destroy_bind(pdev->domain, &bind);
>> 
>> While I agree that the loop should continue if this fails, I'm not convinced
>> you should entirely ignore the return value here.
> 
> I've added a printk in order to aid debug.

I've actually tried to hint at you wanting to run the loop to
completion while returning to the caller the first error you've
encountered.

>> >+/* Handlers for the MSI control field (PCI_MSI_FLAGS). */
>> >+static void vpci_msi_control_read(struct pci_dev *pdev, unsigned int reg,
>> >+  union vpci_val *val, void *data)
>> >+{
>> >+const struct vpci_msi *msi = data;
>> >+
>> >+/* Set multiple message capable. */
>> >+val->u16 = MASK_INSR(fls(msi->max_vectors) - 1, PCI_MSI_FLAGS_QMASK);
>> 
>> The comment is somewhat misleading - whether the device is multi-message
>> capable depends on msi->max_vectors.
> 
> Better "Set the number of supported messages"?

Yes.

Jan




Re: [Xen-devel] [PATCH v4 7/9] vpci/msi: add MSI handlers

2017-08-08 Thread Roger Pau Monné
On Wed, Aug 02, 2017 at 07:34:28AM -0600, Jan Beulich wrote:
> >>> Roger Pau Monne  06/30/17 5:01 PM >>>
> >+int vpci_msi_arch_enable(struct vpci_arch_msi *arch, struct pci_dev *pdev,
> >+ uint64_t address, uint32_t data, unsigned int vectors)
> >+{
> >+struct msi_info msi_info = {
> >+.seg = pdev->seg,
> >+.bus = pdev->bus,
> >+.devfn = pdev->devfn,
> >+.entry_nr = vectors,
> >+};
> >+unsigned int i;
> >+int rc;
> >+
> >+ASSERT(arch->pirq == -1);
> 
> Please introduce a #define for the -1 here, to allow easily matching up
> producer and consumer side(s).

I've added a define for INVALID_PIRQ to xen/irq.h.
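
For clarity, something like this (sketch; the final placement in the
header may differ):

    /* xen/include/xen/irq.h */
    #define INVALID_PIRQ (-1)

    /* Producer side: */
    int vpci_msi_arch_init(struct vpci_arch_msi *arch)
    {
        arch->pirq = INVALID_PIRQ;
        return 0;
    }

    /* Consumer sides then become: */
    ASSERT(arch->pirq == INVALID_PIRQ);   /* in vpci_msi_arch_enable() */
    ASSERT(arch->pirq != INVALID_PIRQ);   /* in vpci_msi_arch_disable() */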

> >+/* Get a PIRQ. */
> >+rc = allocate_and_map_msi_pirq(pdev->domain, -1, &arch->pirq,
> >+   MAP_PIRQ_TYPE_MULTI_MSI, &msi_info);
> >+if ( rc )
> >+{
> >+dprintk(XENLOG_ERR, "%04x:%02x:%02x.%u: failed to map PIRQ: %d\n",
> >+pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
> >+PCI_FUNC(pdev->devfn), rc);
> >+return rc;
> >+}
> >+
> >+for ( i = 0; i < vectors; i++ )
> >+{
> >+xen_domctl_bind_pt_irq_t bind = {
> >+.machine_irq = arch->pirq + i,
> >+.irq_type = PT_IRQ_TYPE_MSI,
> >+.u.msi.gvec = msi_vector(data) + i,
> >+.u.msi.gflags = msi_flags(data, address),
> >+};
> >+
> >+pcidevs_lock();
> >+rc = pt_irq_create_bind(pdev->domain, &bind);
> >+if ( rc )
> >+{
> >+dprintk(XENLOG_ERR,
> >+"%04x:%02x:%02x.%u: failed to bind PIRQ %u: %d\n",
> >+pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
> >+PCI_FUNC(pdev->devfn), arch->pirq + i, rc);
> >+spin_lock(&pdev->domain->event_lock);
> >+unmap_domain_pirq(pdev->domain, arch->pirq);
> 
> Don't you also need to undo the pt_irq_create_bind() calls here for all prior
> successful iterations?

Yes, unmap_domain_pirq calls pirq_guest_force_unbind but better not
resort to that.

> >+int vpci_msi_arch_disable(struct vpci_arch_msi *arch, struct pci_dev *pdev,
> >+  unsigned int vectors)
> >+{
> >+unsigned int i;
> >+
> >+ASSERT(arch->pirq != -1);
> >+
> >+for ( i = 0; i < vectors; i++ )
> >+{
> >+xen_domctl_bind_pt_irq_t bind = {
> >+.machine_irq = arch->pirq + i,
> >+.irq_type = PT_IRQ_TYPE_MSI,
> >+};
> >+
> >+pcidevs_lock();
> >+pt_irq_destroy_bind(pdev->domain, &bind);
> 
> While I agree that the loop should continue if this fails, I'm not convinced
> you should entirely ignore the return value here.

I've added a printk in order to aid debug.

> >+/* Handlers for the MSI control field (PCI_MSI_FLAGS). */
> >+static void vpci_msi_control_read(struct pci_dev *pdev, unsigned int reg,
> >+  union vpci_val *val, void *data)
> >+{
> >+const struct vpci_msi *msi = data;
> >+
> >+/* Set multiple message capable. */
> >+val->u16 = MASK_INSR(fls(msi->max_vectors) - 1, PCI_MSI_FLAGS_QMASK);
> 
> The comment is somewhat misleading - whether the device is multi-message
> capable depends on msi->max_vectors.

Better "Set the number of supported messages"?

> >+if ( msi->enabled ) {
> 
> Style.
> 
> >+val->u16 |= PCI_MSI_FLAGS_ENABLE;
> >+val->u16 |= MASK_INSR(fls(msi->vectors) - 1, PCI_MSI_FLAGS_QSIZE);
> 
> Why is reading back the proper value here dependent upon MSI being
> enabled?

Right, I've now slightly changed this to always store the number of
enabled vectors, regardless of whether the MSI enable bit is set or
not.
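
I.e. something like (untested sketch; assumes msi->vectors is kept valid
even while MSI is disabled):

    static void vpci_msi_control_read(struct pci_dev *pdev, unsigned int reg,
                                      union vpci_val *val, void *data)
    {
        const struct vpci_msi *msi = data;

        /* Set the number of supported messages. */
        val->u16 = MASK_INSR(fls(msi->max_vectors) - 1, PCI_MSI_FLAGS_QMASK);
        /* Set the number of enabled vectors, regardless of the enable bit. */
        val->u16 |= MASK_INSR(fls(msi->vectors) - 1, PCI_MSI_FLAGS_QSIZE);

        if ( msi->enabled )
            val->u16 |= PCI_MSI_FLAGS_ENABLE;
    }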

> >...
> >+ error:
> >+ASSERT(ret);
> >+xfree(msi);
> >+return ret;
> >+}
> 
> Don't you also need to unregister address handlers you've registered?

vpci_add_handlers already takes care of cleaning up the register
handlers on failure.

> >+void vpci_dump_msi(void)
> >+{
> >+struct domain *d;
> >+
> >+for_each_domain ( d )
> >+{
> >+const struct pci_dev *pdev;
> >+
> >+if ( !has_vpci(d) )
> >+continue;
> >+
> >+printk("vPCI MSI information for guest %u\n", d->domain_id);
> 
> "... for Dom%d" or "... for d%d" please.
> 
> >...
> >+if ( msi->masking )
> >+printk("mask=%#032x\n", msi->mask);
> 
> Why 30 hex digits? And generally # should be used only when not blank or
> zero padding the value (as field width includes the 0x prefix).

Ouch, that should be 8, not 32.
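
So, taking the prefix into account, presumably (sketch):

    printk("mask=%#010x\n", msi->mask);   /* field width includes the "0x" */

or, per Jan's point about not mixing '#' with zero padding:

    printk("mask=0x%08x\n", msi->mask);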

Thanks, Roger.



Re: [Xen-devel] [PATCH v4 7/9] vpci/msi: add MSI handlers

2017-08-02 Thread Jan Beulich
>>> Roger Pau Monne  06/30/17 5:01 PM >>>
>Add handlers for the MSI control, address, data and mask fields in
>order to detect accesses to them and setup the interrupts as requested
>by the guest.
>
>Note that the pending register is not trapped, and the guest can
>freely read/write to it.
>
>Whether Xen is going to provide this functionality to Dom0 (MSI
>emulation) is controlled by the "msi" option in the dom0 field. When
>disabling this option Xen will hide the MSI capability structure from
>Dom0.

Isn't this last paragraph stale now?

>+void vpci_msi_arch_mask(struct vpci_arch_msi *arch, struct pci_dev *pdev,
>+unsigned int entry, bool mask)
>+{
>+struct domain *d = pdev->domain;
>+const struct pirq *pinfo;
>+struct irq_desc *desc;
>+unsigned long flags;
>+int irq;
>+
>+ASSERT(arch->pirq >= 0);
>+pinfo = pirq_info(d, arch->pirq + entry);
>+ASSERT(pinfo);
>+
>+irq = pinfo->arch.irq;
>+ASSERT(irq < nr_irqs && irq >= 0);
>+
>+desc = irq_to_desc(irq);
>+ASSERT(desc);

I know the goal is Dom0 support only at this point, but nevertheless I think
we shouldn't have ASSERT()s in place which could trigger if Dom0
misbehaves (and which would all need to be audited if we were to extend
support to DomU): I'm not convinced all of the ones above could really only
trigger depending on Xen (mis)behavior.
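
E.g. (just to illustrate the direction, not a concrete request):

    pinfo = pirq_info(d, arch->pirq + entry);
    if ( !pinfo )
    {
        gdprintk(XENLOG_WARNING, "%04x:%02x:%02x.%u: no pirq info for %d\n",
                 pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
                 PCI_FUNC(pdev->devfn), arch->pirq + entry);
        return;
    }

    irq = pinfo->arch.irq;
    if ( irq < 0 || irq >= nr_irqs )
        return;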

>+int vpci_msi_arch_enable(struct vpci_arch_msi *arch, struct pci_dev *pdev,
>+ uint64_t address, uint32_t data, unsigned int vectors)
>+{
>+struct msi_info msi_info = {
>+.seg = pdev->seg,
>+.bus = pdev->bus,
>+.devfn = pdev->devfn,
>+.entry_nr = vectors,
>+};
>+unsigned int i;
>+int rc;
>+
>+ASSERT(arch->pirq == -1);

Please introduce a #define for the -1 here, to allow easily matching up
producer and consumer side(s).

>+/* Get a PIRQ. */
>+rc = allocate_and_map_msi_pirq(pdev->domain, -1, &arch->pirq,
>+   MAP_PIRQ_TYPE_MULTI_MSI, &msi_info);
>+if ( rc )
>+{
>+dprintk(XENLOG_ERR, "%04x:%02x:%02x.%u: failed to map PIRQ: %d\n",
>+pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
>+PCI_FUNC(pdev->devfn), rc);
>+return rc;
>+}
>+
>+for ( i = 0; i < vectors; i++ )
>+{
>+xen_domctl_bind_pt_irq_t bind = {
>+.machine_irq = arch->pirq + i,
>+.irq_type = PT_IRQ_TYPE_MSI,
>+.u.msi.gvec = msi_vector(data) + i,
>+.u.msi.gflags = msi_flags(data, address),
>+};
>+
>+pcidevs_lock();
>+rc = pt_irq_create_bind(pdev->domain, &bind);
>+if ( rc )
>+{
>+dprintk(XENLOG_ERR,
>+"%04x:%02x:%02x.%u: failed to bind PIRQ %u: %d\n",
>+pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
>+PCI_FUNC(pdev->devfn), arch->pirq + i, rc);
>+spin_lock(&pdev->domain->event_lock);
>+unmap_domain_pirq(pdev->domain, arch->pirq);

Don't you also need to undo the pt_irq_create_bind() calls here for all prior
successful iterations?

>+int vpci_msi_arch_disable(struct vpci_arch_msi *arch, struct pci_dev *pdev,
>+  unsigned int vectors)
>+{
>+unsigned int i;
>+
>+ASSERT(arch->pirq != -1);
>+
>+for ( i = 0; i < vectors; i++ )
>+{
>+xen_domctl_bind_pt_irq_t bind = {
>+.machine_irq = arch->pirq + i,
>+.irq_type = PT_IRQ_TYPE_MSI,
>+};
>+
>+pcidevs_lock();
>+pt_irq_destroy_bind(pdev->domain, &bind);

While I agree that the loop should continue if this fails, I'm not convinced
you should entirely ignore the return value here.

>+pcidevs_unlock();
>+}
>+
>+pcidevs_lock();

What good does it do to acquire the lock for most of the loop body as well
as for most of the epilogue, instead of just acquiring it once ahead of the
loop?
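
I.e. (sketch, with the lock taken once around the whole sequence):

    pcidevs_lock();
    for ( i = 0; i < vectors; i++ )
    {
        xen_domctl_bind_pt_irq_t bind = {
            .machine_irq = arch->pirq + i,
            .irq_type = PT_IRQ_TYPE_MSI,
        };

        pt_irq_destroy_bind(pdev->domain, &bind);
    }

    spin_lock(&pdev->domain->event_lock);
    unmap_domain_pirq(pdev->domain, arch->pirq);
    spin_unlock(&pdev->domain->event_lock);
    pcidevs_unlock();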

>+int vpci_msi_arch_init(struct vpci_arch_msi *arch)
>+{
>+arch->pirq = -1;
>+return 0;
>+}

At this point I think the function would better return void.

>+void vpci_msi_arch_print(struct vpci_arch_msi *arch, uint16_t data,

const

>+ uint64_t addr)
>+{
>+printk("vec=%#02x%7s%6s%3sassert%5s%7s dest_id=%lu pirq: %d\n",
>+   MASK_EXTR(data, MSI_DATA_VECTOR_MASK),
>+   data & MSI_DATA_DELIVERY_LOWPRI ? "lowest" : "fixed",
>+   data & MSI_DATA_TRIGGER_LEVEL ? "level" : "edge",
>+   data & MSI_DATA_LEVEL_ASSERT ? "" : "de",
>+   addr & MSI_ADDR_DESTMODE_LOGIC ? "log" : "phys",
>+   addr & MSI_ADDR_REDIRECTION_LOWPRI ? "lowest" : "cpu",

Why "cpu"? Elsewhere we call this mode "fixed".

>--- /dev/null
>+++ b/xen/drivers/vpci/msi.c
>@@ -0,0 +1,348 @@
>+/*
>+ * Handlers for accesses to the MSI capability structure.
>+ *
>+ * Copyright (C) 2017 Citrix Systems R&D
>+ *
>+ * This program is free software; you can redistribute it and/or
>+ 

Re: [Xen-devel] [PATCH v4 7/9] vpci/msi: add MSI handlers

2017-07-18 Thread Paul Durrant
> -Original Message-
> From: Roger Pau Monne [mailto:roger@citrix.com]
> Sent: 30 June 2017 16:01
> To: xen-de...@lists.xenproject.org
> Cc: boris.ostrov...@oracle.com; julien.gr...@arm.com;
> konrad.w...@oracle.com; Roger Pau Monne ; Jan
> Beulich ; Andrew Cooper
> ; Paul Durrant 
> Subject: [PATCH v4 7/9] vpci/msi: add MSI handlers
> 
> Add handlers for the MSI control, address, data and mask fields in
> order to detect accesses to them and setup the interrupts as requested
> by the guest.
> 
> Note that the pending register is not trapped, and the guest can
> freely read/write to it.
> 
> Whether Xen is going to provide this functionality to Dom0 (MSI
> emulation) is controlled by the "msi" option in the dom0 field. When
> disabling this option Xen will hide the MSI capability structure from
> Dom0.
> 
> Signed-off-by: Roger Pau Monné 
> ---
> Cc: Jan Beulich 
> Cc: Andrew Cooper 
> Cc: Paul Durrant 
> ---
> Changes since v3:
>  - Propagate changes from previous versions: drop xen_ prefix, drop
>return value from handlers, use the new vpci_val fields.
>  - Use MASK_EXTR.
>  - Remove the usage of GENMASK.
>  - Add GFLAGS_SHIFT_DEST_ID and use it in msi_flags.
>  - Add "arch" to the MSI arch specific functions.
>  - Move the dumping of vPCI MSI information to dump_msi (key 'M').
>  - Remove the guest_vectors field.
>  - Allow the guest to change the number of active vectors without
>having to disable and enable MSI.
>  - Check the number of active vectors when parsing the disable
>mask.
>  - Remove the debug messages from vpci_init_msi.
>  - Move the arch-specific part of the dump handler to x86/hvm/vmsi.c.
>  - Use trylock in the dump handler to get the vpci lock.
> 
> Changes since v2:
>  - Add an arch-specific abstraction layer. Note that this is only implemented
>for x86 currently.
>  - Add a wrapper to detect MSI enabling for vPCI.
> 
> NB: I've only been able to test this with devices using a single MSI interrupt
> and no mask register. I will try to find hardware that supports the mask
> register and more than one vector, but I cannot make any promises.
> 
> If there are doubts about the untested parts we could always force Xen to
> report no per-vector masking support and only 1 available vector, but I would
> rather avoid doing it.
> ---
>  xen/arch/x86/hvm/vmsi.c  | 149 ++
>  xen/arch/x86/msi.c   |   3 +
>  xen/drivers/vpci/Makefile|   2 +-
>  xen/drivers/vpci/msi.c   | 348 +++
>  xen/include/asm-x86/hvm/io.h |  18 +++
>  xen/include/asm-x86/msi.h|   1 +
>  xen/include/xen/hvm/irq.h|   2 +
>  xen/include/xen/vpci.h   |  26 
>  8 files changed, 548 insertions(+), 1 deletion(-)
>  create mode 100644 xen/drivers/vpci/msi.c
> 
> diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
> index a36692c313..5732c70b5c 100644
> --- a/xen/arch/x86/hvm/vmsi.c
> +++ b/xen/arch/x86/hvm/vmsi.c
> @@ -622,3 +622,152 @@ void msix_write_completion(struct vcpu *v)
>  if ( msixtbl_write(v, ctrl_address, 4, 0) != X86EMUL_OKAY )
>  gdprintk(XENLOG_WARNING, "MSI-X write completion failure\n");
>  }
> +
> +static unsigned int msi_vector(uint16_t data)
> +{
> +return MASK_EXTR(data, MSI_DATA_VECTOR_MASK);
> +}
> +
> +static unsigned int msi_flags(uint16_t data, uint64_t addr)
> +{
> +unsigned int rh, dm, dest_id, deliv_mode, trig_mode;
> +
> +rh = MASK_EXTR(addr, MSI_ADDR_REDIRECTION_MASK);
> +dm = MASK_EXTR(addr, MSI_ADDR_DESTMODE_MASK);
> +dest_id = MASK_EXTR(addr, MSI_ADDR_DEST_ID_MASK);
> +deliv_mode = MASK_EXTR(data, MSI_DATA_DELIVERY_MODE_MASK);
> +trig_mode = MASK_EXTR(data, MSI_DATA_TRIGGER_MASK);
> +
> +return (dest_id << GFLAGS_SHIFT_DEST_ID) | (rh << GFLAGS_SHIFT_RH) |
> +   (dm << GFLAGS_SHIFT_DM) | (deliv_mode << GFLAGS_SHIFT_DELIV_MODE) |
> +   (trig_mode << GFLAGS_SHIFT_TRG_MODE);
> +}
> +
> +void vpci_msi_arch_mask(struct vpci_arch_msi *arch, struct pci_dev *pdev,
> +unsigned int entry, bool mask)
> +{
> +struct domain *d = pdev->domain;
> +const struct pirq *pinfo;
> +struct irq_desc *desc;
> +unsigned long flags;
> +int irq;
> +
> +ASSERT(arch->pirq >= 0);
> +pinfo = pirq_info(d, arch->pirq + entry);
> +ASSERT(pinfo);
> +
> +irq = pinfo->arch.irq;
> +ASSERT(irq < nr_irqs && irq >= 0);
> +
> +desc = irq_to_desc(irq);
> +ASSERT(desc);
> +
> +spin_lock_irqsave(&desc->lock, flags);
> +guest_mask_msi_irq(desc, mask);
> +spin_unlock_irqrestore(&desc->lock, flags);
> +}
> +
> +int vpci_msi_arch_enable(struct vpci_arch_msi *arch, struct pci_dev *pdev,
> + uint64_t address, uint32_t data, unsigned int vectors)
> +{
> +struct msi_info