Populate the page array to the extent available to enable batching.

Signed-off-by: Alex Williamson <alex.william...@redhat.com>
---
 drivers/vfio/vfio_iommu_type1.c |   10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index e89f11141dee..d499bccfbe3f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -628,6 +628,8 @@ static int vaddr_get_pfns(struct vfio_iommu *iommu, struct vfio_dma *dma,
        vma = find_vma_intersection(mm, vaddr, vaddr + 1);
 
        if (vma && vma->vm_flags & VM_PFNMAP) {
+               unsigned long count, i;
+
                if ((dma->prot & IOMMU_WRITE && !(vma->vm_flags & VM_WRITE)) ||
                    (dma->prot & IOMMU_READ && !(vma->vm_flags & VM_READ))) {
                        ret = -EFAULT;
@@ -678,7 +680,13 @@ static int vaddr_get_pfns(struct vfio_iommu *iommu, struct vfio_dma *dma,
 
                *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) +
                                                        dma->pfnmap->base_pfn;
-               ret = 1;
+               count = min_t(long,
+                             (vma->vm_end - vaddr) >> PAGE_SHIFT, npages);
+
+               for (i = 0; i < count; i++)
+                       pages[i] = pfn_to_page(*pfn + i);
+
+               ret = count;
        }
 done:
        mmap_read_unlock(mm);

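For reference, below is a minimal userspace sketch of the clamp-and-fill logic the
second hunk adds. PAGE_SHIFT, the struct page stand-in, and pfn_to_page_stub() are
illustrative assumptions rather than the kernel definitions, and min_t() is
open-coded; this only demonstrates the arithmetic, not the real implementation:

#include <stdio.h>

#define PAGE_SHIFT 12UL

/* Illustrative stand-in for struct page and pfn_to_page(). */
struct page { unsigned long dummy; };
static struct page fake_pages[256];

static struct page *pfn_to_page_stub(unsigned long pfn)
{
	return &fake_pages[pfn % 256];
}

/*
 * Fill 'pages' with up to 'npages' consecutive pages starting at 'pfn',
 * clamped to the end of the vma -- the same bound the patch computes with
 * min_t(long, (vma->vm_end - vaddr) >> PAGE_SHIFT, npages).
 */
static long fill_page_batch(unsigned long vaddr, unsigned long vm_end,
                            unsigned long pfn, long npages,
                            struct page **pages)
{
	long count = (long)((vm_end - vaddr) >> PAGE_SHIFT);
	long i;

	if (count > npages)
		count = npages;

	for (i = 0; i < count; i++)
		pages[i] = pfn_to_page_stub(pfn + i);

	return count;
}

int main(void)
{
	struct page *pages[16];
	/* The vma has 8 pages left past vaddr, but 16 were requested. */
	long got = fill_page_batch(0x10000UL, 0x10000UL + (8UL << PAGE_SHIFT),
	                           0x1234UL, 16, pages);

	printf("batched %ld pages\n", got); /* prints 8 */
	return 0;
}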