We need the ability to split PFN remap between updating the VMA and
performing the actual remap, in order to do away with the legacy f_op->mmap
hook.

To do so, update the PFN remap code to provide shared logic, and also make
remap_pfn_range_notrack() static, as its sole user, io_mapping_map_user(),
was removed in commit 9a4f90e24661 ("mm: remove mm/io-mapping.c").

Then introduce remap_pfn_range_prepare(), which accepts a VMA descriptor
and PFN, and remap_pfn_range_complete(), which accepts the same parameters
as remap_pfn_range().

remap_pfn_range_prepare() will set the CoW vm_pgoff if necessary, so it
must be supplied with the correct PFN to do so.
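
For illustration, the intended two-stage flow looks roughly like the
below sketch - the foo_* hooks and the foo_desc_pfn()/foo_vma_pfn()
helpers are hypothetical stand-ins, not part of this patch:

  /* Hypothetical sketch - the foo_* names are illustrative only. */
  static int foo_mmap_prepare(struct vm_area_desc *desc)
  {
          /* Stage 1: adjust the descriptor (CoW pgoff, VM_REMAP_FLAGS). */
          remap_pfn_range_prepare(desc, foo_desc_pfn(desc));
          return 0;
  }

  static int foo_mmap_complete(struct vm_area_struct *vma)
  {
          /* Stage 2: perform the actual remap once the VMA is in place. */
          return remap_pfn_range_complete(vma, vma->vm_start,
                          foo_vma_pfn(vma),
                          vma->vm_end - vma->vm_start,
                          vma->vm_page_prot);
  }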

While we're here, also clean up the duplicated #ifdef
__HAVE_PFNMAP_TRACKING checks and consolidate them into a single
#ifdef/#else block.

We keep these internal to mm as they should only be used by internal
helpers.

Signed-off-by: Lorenzo Stoakes <lorenzo.stoa...@oracle.com>
Reviewed-by: Jason Gunthorpe <j...@nvidia.com>
Acked-by: Pedro Falcato <pfalc...@suse.de>
---
 include/linux/mm.h |  22 ++++++--
 mm/internal.h      |   4 ++
 mm/memory.c        | 133 ++++++++++++++++++++++++++++++---------------
 3 files changed, 110 insertions(+), 49 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index dd1fec5f028a..8e4006eaf4dd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -489,6 +489,21 @@ extern unsigned int kobjsize(const void *objp);
  */
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 
+/*
+ * Physically remapped pages are special. Tell the
+ * rest of the world about it:
+ *   VM_IO tells people not to look at these pages
+ *     (accesses can have side effects).
+ *   VM_PFNMAP tells the core MM that the base pages are just
+ *     raw PFN mappings, and do not have a "struct page" associated
+ *     with them.
+ *   VM_DONTEXPAND
+ *      Disable vma merging and expanding with mremap().
+ *   VM_DONTDUMP
+ *      Omit vma from core dump, even when VM_IO turned off.
+ */
+#define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
 /* This mask prevents VMA from being scanned with khugepaged */
 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
 
@@ -3622,10 +3637,9 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 
 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
                unsigned long addr);
-int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
-                       unsigned long pfn, unsigned long size, pgprot_t);
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
-               unsigned long pfn, unsigned long size, pgprot_t prot);
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+                   unsigned long pfn, unsigned long size, pgprot_t pgprot);
+
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
                        struct page **pages, unsigned long *num);
diff --git a/mm/internal.h b/mm/internal.h
index 63e3ec8d63be..c6655f76cf69 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1653,4 +1653,8 @@ static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
 void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm);
 int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm);
 
+void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn);
+int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t pgprot);
+
 #endif /* __MM_INTERNAL_H */
diff --git a/mm/memory.c b/mm/memory.c
index 41e641823558..daa7124d371d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2900,6 +2900,25 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
        return 0;
 }
 
+static int get_remap_pgoff(vm_flags_t vm_flags, unsigned long addr,
+               unsigned long end, unsigned long vm_start, unsigned long vm_end,
+               unsigned long pfn, pgoff_t *vm_pgoff_p)
+{
+       /*
+        * There's a horrible special case to handle copy-on-write
+        * behaviour that some programs depend on. We mark the "original"
+        * un-COW'ed pages by matching them up with "vma->vm_pgoff".
+        * See vm_normal_page() for details.
+        */
+       if (is_cow_mapping(vm_flags)) {
+               if (addr != vm_start || end != vm_end)
+                       return -EINVAL;
+               *vm_pgoff_p = pfn;
+       }
+
+       return 0;
+}
+
 static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
                unsigned long pfn, unsigned long size, pgprot_t prot)
 {
@@ -2912,31 +2931,7 @@ static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
        if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
                return -EINVAL;
 
-       /*
-        * Physically remapped pages are special. Tell the
-        * rest of the world about it:
-        *   VM_IO tells people not to look at these pages
-        *      (accesses can have side effects).
-        *   VM_PFNMAP tells the core MM that the base pages are just
-        *      raw PFN mappings, and do not have a "struct page" associated
-        *      with them.
-        *   VM_DONTEXPAND
-        *      Disable vma merging and expanding with mremap().
-        *   VM_DONTDUMP
-        *      Omit vma from core dump, even when VM_IO turned off.
-        *
-        * There's a horrible special case to handle copy-on-write
-        * behaviour that some programs depend on. We mark the "original"
-        * un-COW'ed pages by matching them up with "vma->vm_pgoff".
-        * See vm_normal_page() for details.
-        */
-       if (is_cow_mapping(vma->vm_flags)) {
-               if (addr != vma->vm_start || end != vma->vm_end)
-                       return -EINVAL;
-               vma->vm_pgoff = pfn;
-       }
-
-       vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+       VM_WARN_ON_ONCE((vma->vm_flags & VM_REMAP_FLAGS) != VM_REMAP_FLAGS);
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
@@ -2957,11 +2952,10 @@ static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
  * Variant of remap_pfn_range that does not call track_pfn_remap.  The caller
  * must have pre-validated the caching bits of the pgprot_t.
  */
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
                unsigned long pfn, unsigned long size, pgprot_t prot)
 {
        int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
-
        if (!error)
                return 0;
 
@@ -3002,23 +2996,9 @@ void pfnmap_track_ctx_release(struct kref *ref)
        pfnmap_untrack(ctx->pfn, ctx->size);
        kfree(ctx);
 }
-#endif /* __HAVE_PFNMAP_TRACKING */
 
-/**
- * remap_pfn_range - remap kernel memory to userspace
- * @vma: user vma to map to
- * @addr: target page aligned user address to start at
- * @pfn: page frame number of kernel physical memory address
- * @size: size of mapping area
- * @prot: page protection flags for this mapping
- *
- * Note: this is only safe if the mm semaphore is held when called.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-#ifdef __HAVE_PFNMAP_TRACKING
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
-                   unsigned long pfn, unsigned long size, pgprot_t prot)
+static int remap_pfn_range_track(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t prot)
 {
        struct pfnmap_track_ctx *ctx = NULL;
        int err;
@@ -3054,15 +3034,78 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
        return err;
 }
 
+static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+       return remap_pfn_range_track(vma, addr, pfn, size, prot);
+}
 #else
-int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
-                   unsigned long pfn, unsigned long size, pgprot_t prot)
+static int do_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t prot)
 {
        return remap_pfn_range_notrack(vma, addr, pfn, size, prot);
 }
 #endif
+
+void remap_pfn_range_prepare(struct vm_area_desc *desc, unsigned long pfn)
+{
+       /*
+        * We set addr=VMA start, end=VMA end here, so this won't fail, but we
+        * check it again on complete and will fail there if specified addr is
+        * invalid.
+        */
+       get_remap_pgoff(desc->vm_flags, desc->start, desc->end,
+                       desc->start, desc->end, pfn, &desc->pgoff);
+       desc->vm_flags |= VM_REMAP_FLAGS;
+}
+
+static int remap_pfn_range_prepare_vma(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size)
+{
+       unsigned long end = addr + PAGE_ALIGN(size);
+       int err;
+
+       err = get_remap_pgoff(vma->vm_flags, addr, end,
+                             vma->vm_start, vma->vm_end,
+                             pfn, &vma->vm_pgoff);
+       if (err)
+               return err;
+
+       vm_flags_set(vma, VM_REMAP_FLAGS);
+       return 0;
+}
+
+/**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+ * @addr: target page aligned user address to start at
+ * @pfn: page frame number of kernel physical memory address
+ * @size: size of mapping area
+ * @prot: page protection flags for this mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+                   unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+       int err;
+
+       err = remap_pfn_range_prepare_vma(vma, addr, pfn, size);
+       if (err)
+               return err;
+
+       return do_remap_pfn_range(vma, addr, pfn, size, prot);
+}
 EXPORT_SYMBOL(remap_pfn_range);
 
+int remap_pfn_range_complete(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+       return do_remap_pfn_range(vma, addr, pfn, size, prot);
+}
+
 /**
  * vm_iomap_memory - remap memory to userspace
  * @vma: user vma to map to
-- 
2.51.0
