This is a partial linux-headers update enhancing the VFIO user API
with an IOMMU info capability chain, MSI reserved IOVA (msi_resv)
reporting and MSI IOVA window registration.
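
For illustration only (this is not part of the patch; the helper name,
error handling and memory management are simplified), a userspace consumer
such as QEMU could walk the new capability chain to retrieve the MSI
reserved IOVA requirements roughly as follows:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Illustrative helper, not part of this patch */
    static int vfio_get_msi_resv(int container, __u64 *size, __u64 *align)
    {
        struct vfio_iommu_type1_info *info;
        struct vfio_info_cap_header *hdr;

        info = calloc(1, sizeof(*info));
        info->argsz = sizeof(*info);
        /* first call reports the argsz needed to hold the capability chain */
        if (ioctl(container, VFIO_IOMMU_GET_INFO, info) ||
            !(info->flags & VFIO_IOMMU_INFO_CAPS)) {
            free(info);
            return -1;
        }

        info = realloc(info, info->argsz);
        if (ioctl(container, VFIO_IOMMU_GET_INFO, info)) {
            free(info);
            return -1;
        }

        /* cap_offset and hdr->next are offsets from the start of info */
        hdr = (void *)info + info->cap_offset;
        while (1) {
            if (hdr->id == VFIO_IOMMU_TYPE1_INFO_CAP_MSI_RESV) {
                struct vfio_iommu_type1_info_cap_msi_resv *resv = (void *)hdr;

                *size = resv->size;
                *align = resv->alignment;
                free(info);
                return 0;
            }
            if (!hdr->next) {
                break;
            }
            hdr = (void *)info + hdr->next;
        }
        free(info);
        return -1;      /* MSI_RESV capability not exposed */
    }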

The kernel code is not yet upstreamed. It is available at
https://github.com/eauger/linux/tree/generic-v7-passthrough-v14,
corresponding to [PATCH v14 00/16] KVM PCIe/MSI passthrough on ARM/ARM64,
https://lkml.org/lkml/2016/10/12/347
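
Building on the sketch above, the reserved window itself would then be
registered via VFIO_IOMMU_MAP_DMA with the new RESERVED_MSI_IOVA flag.
The function name and the caller-provided IOVA base below are purely
illustrative; vaddr is ignored for this flag:

    /* Illustrative usage, e.g. in the container setup path */
    static int vfio_register_msi_iova(int container, __u64 iova_base)
    {
        __u64 size, align;
        struct vfio_iommu_type1_dma_map map = {
            .argsz = sizeof(map),
            .flags = VFIO_DMA_MAP_FLAG_RESERVED_MSI_IOVA,
        };

        if (vfio_get_msi_resv(container, &size, &align)) {
            return 0;           /* no MSI_RESV requirement reported */
        }

        map.iova = iova_base;   /* caller-chosen base, multiple of 'align' */
        map.size = size;        /* at least the size reported by MSI_RESV */
        /* map.vaddr is ignored when RESERVED_MSI_IOVA is set */

        return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
    }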

Signed-off-by: Eric Auger <eric.au...@redhat.com>

---

v4 -> v5:
- update according to kernel v14 series

v2 -> v3:
- features VFIO_IOMMU_TYPE1_INFO_CAP_MSI_GEOMETRY
---
 linux-headers/linux/vfio.h | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
index 759b850..74c8f02 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -488,7 +488,23 @@ struct vfio_iommu_type1_info {
        __u32   argsz;
        __u32   flags;
 #define VFIO_IOMMU_INFO_PGSIZES (1 << 0)       /* supported page sizes info */
-       __u64   iova_pgsizes;           /* Bitmap of supported page sizes */
+#define VFIO_IOMMU_INFO_CAPS   (1 << 1)        /* Info supports caps */
+       __u64   iova_pgsizes;   /* Bitmap of supported page sizes */
+       __u32   cap_offset;     /* Offset within info struct of first cap */
+       __u32   __resv;
+};
+
+/*
+ * The MSI_RESV capability reports the MSI reserved IOVA requirements:
+ * when this capability is exposed, userspace must provide an IOVA
+ * window characterized by @size and @alignment, using VFIO_IOMMU_MAP_DMA
+ * with the RESERVED_MSI_IOVA flag.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_CAP_MSI_RESV  1
+struct vfio_iommu_type1_info_cap_msi_resv {
+       struct vfio_info_cap_header header;
+       __u64 size;             /* requested IOVA aperture size in bytes */
+       __u64 alignment;        /* requested byte alignment of the window */
 };
 
 #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
@@ -498,12 +514,21 @@ struct vfio_iommu_type1_info {
  *
  * Map process virtual addresses to IO virtual addresses using the
  * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
+ *
+ * If the RESERVED_MSI_IOVA flag is set, the ioctl only registers an
+ * IOVA region that will be used on some platforms to map the host MSI frames.
+ * In that case, vaddr is ignored. Once registered, an MSI reserved
+ * IOVA region persists until the container is closed.
+ * Whether such a reserved IOVA range must be provided can be determined by
+ * checking the VFIO_IOMMU_TYPE1_INFO_CAP_MSI_RESV capability.
  */
 struct vfio_iommu_type1_dma_map {
        __u32   argsz;
        __u32   flags;
 #define VFIO_DMA_MAP_FLAG_READ (1 << 0)                /* readable from device */
 #define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)       /* writable from device */
+/* reserved IOVA for MSI vectors */
+#define VFIO_DMA_MAP_FLAG_RESERVED_MSI_IOVA (1 << 2)
        __u64   vaddr;                          /* Process virtual address */
        __u64   iova;                           /* IO virtual address */
        __u64   size;                           /* Size of mapping (bytes) */
@@ -519,7 +544,8 @@ struct vfio_iommu_type1_dma_map {
  * Caller sets argsz.  The actual unmapped size is returned in the size
  * field.  No guarantee is made to the user that arbitrary unmaps of iova
  * or size different from those used in the original mapping call will
- * succeed.
+ * succeed. Once registered, an MSI reserved IOVA region cannot be unmapped;
+ * it persists until the container is closed.
  */
 struct vfio_iommu_type1_dma_unmap {
        __u32   argsz;
-- 
1.9.1

