An upcoming patch will add support for VA mapping/unmapping.
Factor out the PA mapping/unmapping logic first to make the
code more readable.

Suggested-by: Jason Wang <jasow...@redhat.com>
Signed-off-by: Xie Yongji <xieyon...@bytedance.com>
Acked-by: Jason Wang <jasow...@redhat.com>
---
 drivers/vhost/vdpa.c | 53 +++++++++++++++++++++++++++++++++-------------------
 1 file changed, 34 insertions(+), 19 deletions(-)
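
For reviewers, a rough sketch of the structure after this patch (all names
and signatures are taken from the diff below; bodies other than the thin
wrappers are abridged):

	/* PA path: keeps the existing page pinning/unpinning logic. */
	static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last);
	static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
				     u64 iova, u64 size, u64 uaddr, u32 perm);

	/* Generic helper: for now it only forwards to the PA variant;
	 * the upcoming VA support can hook in here. */
	static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
	{
		return vhost_vdpa_pa_unmap(v, start, last);
	}

	static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
						   struct vhost_iotlb_msg *msg)
	{
		/* iova range and overlap checks stay here (abridged) ... */
		return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
					 msg->perm);
	}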

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index f60a513dac7c..3e260038beba 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -511,7 +511,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        return r;
 }
 
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 {
        struct vhost_dev *dev = &v->vdev;
        struct vhost_iotlb *iotlb = dev->iotlb;
@@ -533,6 +533,11 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
        }
 }
 
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+       return vhost_vdpa_pa_unmap(v, start, last);
+}
+
 static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
 {
        struct vhost_dev *dev = &v->vdev;
@@ -613,37 +618,28 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
        }
 }
 
-static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
-                                          struct vhost_iotlb_msg *msg)
+static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+                            u64 iova, u64 size, u64 uaddr, u32 perm)
 {
        struct vhost_dev *dev = &v->vdev;
-       struct vhost_iotlb *iotlb = dev->iotlb;
        struct page **page_list;
        unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
        unsigned int gup_flags = FOLL_LONGTERM;
        unsigned long npages, cur_base, map_pfn, last_pfn = 0;
        unsigned long lock_limit, sz2pin, nchunks, i;
-       u64 iova = msg->iova;
+       u64 start = iova;
        long pinned;
        int ret = 0;
 
-       if (msg->iova < v->range.first ||
-           msg->iova + msg->size - 1 > v->range.last)
-               return -EINVAL;
-
-       if (vhost_iotlb_itree_first(iotlb, msg->iova,
-                                   msg->iova + msg->size - 1))
-               return -EEXIST;
-
        /* Limit the use of memory for bookkeeping */
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;
 
-       if (msg->perm & VHOST_ACCESS_WO)
+       if (perm & VHOST_ACCESS_WO)
                gup_flags |= FOLL_WRITE;
 
-       npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+       npages = PAGE_ALIGN(size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
        if (!npages) {
                ret = -EINVAL;
                goto free;
@@ -657,7 +653,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                goto unlock;
        }
 
-       cur_base = msg->uaddr & PAGE_MASK;
+       cur_base = uaddr & PAGE_MASK;
        iova &= PAGE_MASK;
        nchunks = 0;
 
@@ -688,7 +684,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                                csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
                                ret = vhost_vdpa_map(v, iova, csize,
                                                     map_pfn << PAGE_SHIFT,
-                                                    msg->perm);
+                                                    perm);
                                if (ret) {
                                        /*
                                         * Unpin the pages that are left unmapped
@@ -717,7 +713,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 
        /* Pin the rest chunk */
        ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
-                            map_pfn << PAGE_SHIFT, msg->perm);
+                            map_pfn << PAGE_SHIFT, perm);
 out:
        if (ret) {
                if (nchunks) {
@@ -736,13 +732,32 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                        for (pfn = map_pfn; pfn <= last_pfn; pfn++)
                                unpin_user_page(pfn_to_page(pfn));
                }
-               vhost_vdpa_unmap(v, msg->iova, msg->size);
+               vhost_vdpa_unmap(v, start, size);
        }
 unlock:
        mmap_read_unlock(dev->mm);
 free:
        free_page((unsigned long)page_list);
        return ret;
+
+}
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+                                          struct vhost_iotlb_msg *msg)
+{
+       struct vhost_dev *dev = &v->vdev;
+       struct vhost_iotlb *iotlb = dev->iotlb;
+
+       if (msg->iova < v->range.first ||
+           msg->iova + msg->size - 1 > v->range.last)
+               return -EINVAL;
+
+       if (vhost_iotlb_itree_first(iotlb, msg->iova,
+                                   msg->iova + msg->size - 1))
+               return -EEXIST;
+
+       return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+                                msg->perm);
 }
 
 static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
-- 
2.11.0
