Make use of the new mmap_prepare functionality to perform an I/O remap in
place of the deprecated f_op->mmap hook, using the success hook to
correctly update the owner's users refcount once the mapping is established.

Signed-off-by: Lorenzo Stoakes <[email protected]>
---
 drivers/iommu/iommufd/main.c | 47 ++++++++++++++++++++----------------
 1 file changed, 26 insertions(+), 21 deletions(-)

diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 15af7ced0501..b8b9c0e7520d 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -535,46 +535,51 @@ static const struct vm_operations_struct iommufd_vma_ops = {
        .close = iommufd_fops_vma_close,
 };
 
+static int iommufd_fops_mmap_success(const struct vm_area_struct *vma)
+{
+       struct iommufd_mmap *immap = vma->vm_private_data;
+
+       /* vm_ops.open won't be called for mmap itself. */
+       refcount_inc(&immap->owner->users);
+
+       return 0;
+}
+
 /* The vm_pgoff must be pre-allocated from mt_mmap, and given to user space */
-static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
+static int iommufd_fops_mmap_prepare(struct vm_area_desc *desc)
 {
+       struct file *filp = desc->file;
        struct iommufd_ctx *ictx = filp->private_data;
-       size_t length = vma->vm_end - vma->vm_start;
+       const size_t length = vma_desc_size(desc);
        struct iommufd_mmap *immap;
-       int rc;
 
        if (!PAGE_ALIGNED(length))
                return -EINVAL;
-       if (!(vma->vm_flags & VM_SHARED))
+       if (!(desc->vm_flags & VM_SHARED))
                return -EINVAL;
-       if (vma->vm_flags & VM_EXEC)
+       if (desc->vm_flags & VM_EXEC)
                return -EPERM;
 
-       /* vma->vm_pgoff carries a page-shifted start position to an immap */
-       immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
+       /* desc->pgoff carries a page-shifted start position to an immap */
+       immap = mtree_load(&ictx->mt_mmap, desc->pgoff << PAGE_SHIFT);
        if (!immap)
                return -ENXIO;
        /*
         * mtree_load() returns the immap for any contained mmio_addr, so only
         * allow the exact immap thing to be mapped
         */
-       if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
+       if (desc->pgoff != immap->vm_pgoff || length != immap->length)
                return -ENXIO;
 
-       vma->vm_pgoff = 0;
-       vma->vm_private_data = immap;
-       vma->vm_ops = &iommufd_vma_ops;
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       desc->pgoff = 0;
+       desc->private_data = immap;
+       desc->vm_ops = &iommufd_vma_ops;
+       desc->page_prot = pgprot_noncached(desc->page_prot);
 
-       rc = io_remap_pfn_range(vma, vma->vm_start,
-                               immap->mmio_addr >> PAGE_SHIFT, length,
-                               vma->vm_page_prot);
-       if (rc)
-               return rc;
+       mmap_action_ioremap_full(desc, immap->mmio_addr >> PAGE_SHIFT);
+       desc->action.success_hook = iommufd_fops_mmap_success;
 
-       /* vm_ops.open won't be called for mmap itself. */
-       refcount_inc(&immap->owner->users);
-       return rc;
+       return 0;
 }
 
 static const struct file_operations iommufd_fops = {
@@ -582,7 +587,7 @@ static const struct file_operations iommufd_fops = {
        .open = iommufd_fops_open,
        .release = iommufd_fops_release,
        .unlocked_ioctl = iommufd_fops_ioctl,
-       .mmap = iommufd_fops_mmap,
+       .mmap_prepare = iommufd_fops_mmap_prepare,
 };
 
 /**
-- 
2.51.0


Reply via email to