hmm_range_fault() calls find_vma() and walk_page_range() in a loop.
This is unnecessary duplication since walk_page_range() already calls
find_vma() in a loop of its own.

Simplify hmm_range_fault() by defining a test_walk() callback,
hmm_vma_walk_test(), to filter out the vmas it cannot handle.
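
For context, walk_page_range() invokes the test_walk() hook once per
vma before any of the entry callbacks run: a negative errno aborts the
walk, a positive return skips the vma, and 0 walks it normally. Below
is a minimal sketch of the pattern against the struct mm_walk API used
by this patch (later kernels move these hooks into struct mm_walk_ops);
my_walk_test() and my_pmd_entry() are placeholder names, not part of
this patch:

  /* Filter callback: decide per-vma whether to walk it. */
  static int my_walk_test(unsigned long start, unsigned long end,
                          struct mm_walk *walk)
  {
          /* Reject vmas that map I/O or lack struct page backing. */
          if (walk->vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
                  return -EFAULT;
          return 0;       /* 0 = walk this vma; 1 would skip it */
  }

  static int my_pmd_entry(pmd_t *pmdp, unsigned long addr,
                          unsigned long end, struct mm_walk *walk)
  {
          /* ... process [addr, end) under this pmd ... */
          return 0;
  }

  static int do_walk(struct mm_struct *mm, unsigned long start,
                     unsigned long end)
  {
          struct mm_walk mm_walk = {};

          mm_walk.mm = mm;
          mm_walk.test_walk = my_walk_test;
          mm_walk.pmd_entry = my_pmd_entry;
          /*
           * find_vma() happens inside walk_page_range(), so no
           * caller-side vma loop is needed.
           */
          return walk_page_range(start, end, &mm_walk);
  }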

Signed-off-by: Ralph Campbell <rcampb...@nvidia.com>
Cc: "Jérôme Glisse" <jgli...@redhat.com>
Cc: Jason Gunthorpe <j...@mellanox.com>
Cc: Christoph Hellwig <h...@lst.de>
---
 mm/hmm.c | 130 ++++++++++++++++++++++++-------------------------------
 1 file changed, 57 insertions(+), 73 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index 1bc014cddd78..838cd1d50497 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -840,13 +840,44 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 #endif
 }
 
-static void hmm_pfns_clear(struct hmm_range *range,
-                          uint64_t *pfns,
-                          unsigned long addr,
-                          unsigned long end)
+static int hmm_vma_walk_test(unsigned long start,
+                            unsigned long end,
+                            struct mm_walk *walk)
 {
-       for (; addr < end; addr += PAGE_SIZE, pfns++)
-               *pfns = range->values[HMM_PFN_NONE];
+       struct hmm_vma_walk *hmm_vma_walk = walk->private;
+       struct hmm_range *range = hmm_vma_walk->range;
+       struct vm_area_struct *vma = walk->vma;
+
+       /* If range is no longer valid, force retry. */
+       if (!range->valid)
+               return -EBUSY;
+
+       /*
+        * Skip vma ranges that don't have struct page backing them or
+        * map I/O devices directly.
+        * TODO: handle peer-to-peer device mappings.
+        */
+       if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
+               return -EFAULT;
+
+       if (is_vm_hugetlb_page(vma)) {
+               if (huge_page_shift(hstate_vma(vma)) != range->page_shift &&
+                   range->page_shift != PAGE_SHIFT)
+                       return -EINVAL;
+       } else {
+               if (range->page_shift != PAGE_SHIFT)
+                       return -EINVAL;
+       }
+
+       /*
+        * If vma does not allow read access, then assume that it does not
+        * allow write access, either. HMM does not support architectures
+        * that allow write without read.
+        */
+       if (!(vma->vm_flags & VM_READ))
+               return -EPERM;
+
+       return 0;
 }
 
 /*
@@ -965,82 +996,35 @@ EXPORT_SYMBOL(hmm_range_unregister);
  */
 long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
-       const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
-       unsigned long start = range->start, end;
-       struct hmm_vma_walk hmm_vma_walk;
+       unsigned long start = range->start;
+       struct hmm_vma_walk hmm_vma_walk = {};
        struct hmm *hmm = range->hmm;
-       struct vm_area_struct *vma;
-       struct mm_walk mm_walk;
+       struct mm_walk mm_walk = {};
        int ret;
 
        lockdep_assert_held(&hmm->mm->mmap_sem);
 
-       do {
-               /* If range is no longer valid force retry. */
-               if (!range->valid)
-                       return -EBUSY;
+       hmm_vma_walk.range = range;
+       hmm_vma_walk.last = start;
+       hmm_vma_walk.flags = flags;
+       mm_walk.private = &hmm_vma_walk;
 
-               vma = find_vma(hmm->mm, start);
-               if (vma == NULL || (vma->vm_flags & device_vma))
-                       return -EFAULT;
-
-               if (is_vm_hugetlb_page(vma)) {
-                       if (huge_page_shift(hstate_vma(vma)) !=
-                           range->page_shift &&
-                           range->page_shift != PAGE_SHIFT)
-                               return -EINVAL;
-               } else {
-                       if (range->page_shift != PAGE_SHIFT)
-                               return -EINVAL;
-               }
+       mm_walk.mm = hmm->mm;
+       mm_walk.pud_entry = hmm_vma_walk_pud;
+       mm_walk.pmd_entry = hmm_vma_walk_pmd;
+       mm_walk.pte_hole = hmm_vma_walk_hole;
+       mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
+       mm_walk.test_walk = hmm_vma_walk_test;
 
-               if (!(vma->vm_flags & VM_READ)) {
-                       /*
-                        * If vma do not allow read access, then assume that it
-                        * does not allow write access, either. HMM does not
-                        * support architecture that allow write without read.
-                        */
-                       hmm_pfns_clear(range, range->pfns,
-                               range->start, range->end);
-                       return -EPERM;
-               }
+       do {
+               ret = walk_page_range(start, range->end, &mm_walk);
+               start = hmm_vma_walk.last;
 
-               range->vma = vma;
-               hmm_vma_walk.pgmap = NULL;
-               hmm_vma_walk.last = start;
-               hmm_vma_walk.flags = flags;
-               hmm_vma_walk.range = range;
-               mm_walk.private = &hmm_vma_walk;
-               end = min(range->end, vma->vm_end);
-
-               mm_walk.vma = vma;
-               mm_walk.mm = vma->vm_mm;
-               mm_walk.pte_entry = NULL;
-               mm_walk.test_walk = NULL;
-               mm_walk.hugetlb_entry = NULL;
-               mm_walk.pud_entry = hmm_vma_walk_pud;
-               mm_walk.pmd_entry = hmm_vma_walk_pmd;
-               mm_walk.pte_hole = hmm_vma_walk_hole;
-               mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
-
-               do {
-                       ret = walk_page_range(start, end, &mm_walk);
-                       start = hmm_vma_walk.last;
-
-                       /* Keep trying while the range is valid. */
-               } while (ret == -EBUSY && range->valid);
-
-               if (ret) {
-                       unsigned long i;
-
-                       i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
-                       hmm_pfns_clear(range, &range->pfns[i],
-                               hmm_vma_walk.last, range->end);
-                       return ret;
-               }
-               start = end;
+               /* Keep trying while the range is valid. */
+       } while (ret == -EBUSY && range->valid);
 
-       } while (start < range->end);
+       if (ret)
+               return ret;
 
        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
 }
-- 
2.20.1
