Add a helper, vfio_pci_dma_buf_find_pfn(), which a VMA fault handler
can use to find a PFN.

This supports multi-range DMABUFs, which typically would be used to
represent scattered spans but might even represent overlapping or
aliasing spans of PFNs.

Because this is intended to be used in vfio_pci_core.c, we also need
to expose the struct vfio_pci_dma_buf in the vfio_pci_priv.h header.

Signed-off-by: Matt Evans <[email protected]>
---
 drivers/vfio/pci/vfio_pci_dmabuf.c | 102 +++++++++++++++++++++++++----
 drivers/vfio/pci/vfio_pci_priv.h   |  19 ++++++
 2 files changed, 108 insertions(+), 13 deletions(-)

diff --git a/drivers/vfio/pci/vfio_pci_dmabuf.c b/drivers/vfio/pci/vfio_pci_dmabuf.c
index 44558cc2948e..63140528dbea 100644
--- a/drivers/vfio/pci/vfio_pci_dmabuf.c
+++ b/drivers/vfio/pci/vfio_pci_dmabuf.c
@@ -9,19 +9,6 @@
 
 MODULE_IMPORT_NS("DMA_BUF");
 
-struct vfio_pci_dma_buf {
-       struct dma_buf *dmabuf;
-       struct vfio_pci_core_device *vdev;
-       struct list_head dmabufs_elm;
-       size_t size;
-       struct phys_vec *phys_vec;
-       struct p2pdma_provider *provider;
-       u32 nr_ranges;
-       struct kref kref;
-       struct completion comp;
-       u8 revoked : 1;
-};
-
 static int vfio_pci_dma_buf_attach(struct dma_buf *dmabuf,
                                   struct dma_buf_attachment *attachment)
 {
@@ -106,6 +93,95 @@ static const struct dma_buf_ops vfio_pci_dmabuf_ops = {
        .release = vfio_pci_dma_buf_release,
 };
 
+int vfio_pci_dma_buf_find_pfn(struct vfio_pci_dma_buf *vpdmabuf,
+                             struct vm_area_struct *vma,
+                             unsigned long address,
+                             unsigned int order,
+                             unsigned long *out_pfn)
+{
+       /*
+        * Given a VMA (start, end, pgoffs) and a fault address,
+        * search the corresponding DMABUF's phys_vec[] to find the
+        * range representing the address's offset into the VMA, and
+        * its PFN.
+        *
+        * The phys_vec[] ranges represent contiguous spans of VAs
+        * upwards from the buffer offset 0; the actual PFNs might be
+        * in any order, overlap/alias, etc.  Calculate an offset of
+        * the desired page given VMA start/pgoff and address, then
+        * search upwards from 0 to find which span contains it.
+        *
+        * On success, a valid PFN for a page sized by 'order' is
+        * returned into out_pfn.
+        *
+        * Failure occurs if:
+        * - The page would cross the edge of the VMA
+        * - The page isn't entirely contained within a range
+        * - We find a range, but the final PFN isn't aligned to the
+        *   requested order.
+        *
+        * (Upon failure, the caller is expected to try again with a
+        * smaller order; the tests above will always succeed for
+        * order=0 as the limit case.)
+        *
+        * It's suboptimal if DMABUFs are created with neighbouring
+        * ranges that are physically contiguous, since hugepages
+        * can't straddle range boundaries.  (The construction of the
+        * ranges vector should merge such ranges.)
+        */
+
+       const unsigned long pagesize = PAGE_SIZE << order;
+       unsigned long rounded_page_addr = address & ~(pagesize - 1);
+       unsigned long rounded_page_end = rounded_page_addr + pagesize;
+       unsigned long buf_page_offset;
+       unsigned long buf_offset = 0;
+       unsigned int i;
+
+       if (rounded_page_addr < vma->vm_start || rounded_page_end > vma->vm_end)
+               return -EAGAIN;
+
+       if (unlikely(check_add_overflow(rounded_page_addr - vma->vm_start,
+                                       vma->vm_pgoff << PAGE_SHIFT, &buf_page_offset)))
+               return -EFAULT;
+
+       for (i = 0; i < vpdmabuf->nr_ranges; i++) {
+               unsigned long range_len = vpdmabuf->phys_vec[i].len;
+               unsigned long range_start = vpdmabuf->phys_vec[i].paddr;
+
+               if (buf_page_offset >= buf_offset &&
+                   buf_page_offset + pagesize <= buf_offset + range_len) {
+                       /*
+                        * The faulting page is wholly contained
+                        * within the span represented by the range.
+                        * Validate PFN alignment for the order:
+                        */
+                       unsigned long pfn = (range_start >> PAGE_SHIFT) +
+                               ((buf_page_offset - buf_offset) >> PAGE_SHIFT);
+
+                       if (IS_ALIGNED(pfn, 1 << order)) {
+                               *out_pfn = pfn;
+                               return 0;
+                       }
+                       /* Retry with smaller order */
+                       return -EAGAIN;
+               }
+               buf_offset += range_len;
+       }
+
+       /*
+        * If we get here, the address fell outside of the span
+        * represented by the (concatenated) ranges.  Setup of a
+        * mapping must ensure that the VMA is <= the total size of
+        * the ranges, so this should never happen.  But, if it does,
+        * force SIGBUS for the access and warn.
+        */
+       WARN_ONCE(1, "No range for addr 0x%lx, order %d: VMA 0x%lx-0x%lx pgoff 0x%lx, %d ranges, size 0x%lx\n",
+                 address, order, vma->vm_start, vma->vm_end, vma->vm_pgoff,
+                 vpdmabuf->nr_ranges, vpdmabuf->size);
+
+       return -EFAULT;
+}
+
 /*
  * This is a temporary "private interconnect" between VFIO DMABUF and iommufd.
  * It allows the two co-operating drivers to exchange the physical address of
diff --git a/drivers/vfio/pci/vfio_pci_priv.h b/drivers/vfio/pci/vfio_pci_priv.h
index 27ac280f00b9..5cc8c85a2153 100644
--- a/drivers/vfio/pci/vfio_pci_priv.h
+++ b/drivers/vfio/pci/vfio_pci_priv.h
@@ -23,6 +23,19 @@ struct vfio_pci_ioeventfd {
        bool                    test_mem;
 };
 
+struct vfio_pci_dma_buf {
+       struct dma_buf *dmabuf;
+       struct vfio_pci_core_device *vdev;
+       struct list_head dmabufs_elm;
+       size_t size;
+       struct phys_vec *phys_vec;
+       struct p2pdma_provider *provider;
+       u32 nr_ranges;
+       struct kref kref;
+       struct completion comp;
+       u8 revoked : 1;
+};
+
 bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
 void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);
 
@@ -110,6 +123,12 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
        return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
 }
 
+int vfio_pci_dma_buf_find_pfn(struct vfio_pci_dma_buf *vpdmabuf,
+                             struct vm_area_struct *vma,
+                             unsigned long address,
+                             unsigned int order,
+                             unsigned long *out_pfn);
+
 #ifdef CONFIG_VFIO_PCI_DMABUF
 int vfio_pci_core_feature_dma_buf(struct vfio_pci_core_device *vdev, u32 flags,
                                   struct vfio_device_feature_dma_buf __user *arg,
-- 
2.47.3

Reply via email to