Now that device-dax and filesystem-dax are guaranteed to unmap all user
mappings of devmap / DAX pages before tearing down the 'struct page'
array, get_user_pages_fast() can rely on its traditional synchronization
method "validate_pte(); get_page(); revalidate_pte()" to catch races with
device shutdown. Specifically the unmap guarantee ensures that gup-fast
either succeeds in taking a page reference (lock-less), or it detects a
need to fall back to the slow path where the device presence can be
revalidated with locks held.

Reported-by: Jason Gunthorpe <j...@ziepe.ca>
Cc: Christoph Hellwig <h...@lst.de>
Cc: Shiyang Ruan <ruansy.f...@fujitsu.com>
Cc: Vishal Verma <vishal.l.ve...@intel.com>
Cc: Dave Jiang <dave.ji...@intel.com>
Cc: Ira Weiny <ira.we...@intel.com>
Cc: Matthew Wilcox <wi...@infradead.org>
Cc: Jan Kara <j...@suse.cz>
Cc: Andrew Morton <a...@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
---
 mm/gup.c |   38 ++++++++++++++++----------------------
 1 file changed, 16 insertions(+), 22 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index e40579624f10..dfeb47e4e8d4 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1996,9 +1996,8 @@ static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                         unsigned int flags, struct page **pages, int *nr)
 {
-       struct dev_pagemap *pgmap = NULL;
-       int nr_start = *nr, ret = 0;
        pte_t *ptep, *ptem;
+       int ret = 0;
 
        ptem = ptep = pte_offset_map(&pmd, addr);
        do {
@@ -2015,16 +2014,10 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
                if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                        goto pte_unmap;
 
-               if (pte_devmap(pte)) {
-                       if (unlikely(flags & FOLL_LONGTERM))
-                               goto pte_unmap;
+               if (pte_devmap(pte) && (flags & FOLL_LONGTERM))
+                       goto pte_unmap;
 
-                       pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
-                       if (unlikely(!pgmap)) {
-                               undo_dev_pagemap(nr, nr_start, flags, pages);
-                               goto pte_unmap;
-                       }
-               } else if (pte_special(pte))
+               if (pte_special(pte))
                        goto pte_unmap;
 
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -2063,8 +2056,6 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
        ret = 1;
 
 pte_unmap:
-       if (pgmap)
-               put_dev_pagemap(pgmap);
        pte_unmap(ptem);
        return ret;
 }
@@ -2087,21 +2078,26 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 
 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+
 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
                             unsigned long end, unsigned int flags,
                             struct page **pages, int *nr)
 {
        int nr_start = *nr;
-       struct dev_pagemap *pgmap = NULL;
 
        do {
-               struct page *page = pfn_to_page(pfn);
+               struct page *page;
+
+               /*
+                * Typically pfn_to_page() on a devmap pfn is not safe
+                * without holding a live reference on the hosting
+                * pgmap. In the gup-fast path it is safe because any
+                * races will be resolved by either gup-fast taking a
+                * reference or the shutdown path unmapping the pte to
+                * trigger gup-fast to fall back to the slow path.
+                */
+               page = pfn_to_page(pfn);
 
-               pgmap = get_dev_pagemap(pfn, pgmap);
-               if (unlikely(!pgmap)) {
-                       undo_dev_pagemap(nr, nr_start, flags, pages);
-                       return 0;
-               }
                SetPageReferenced(page);
                pages[*nr] = page;
                if (unlikely(!try_grab_page(page, flags))) {
@@ -2112,8 +2108,6 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
 
-       if (pgmap)
-               put_dev_pagemap(pgmap);
        return 1;
 }
 
_______________________________________________
Linux-nvdimm mailing list -- linux-nvdimm@lists.01.org
To unsubscribe send an email to linux-nvdimm-le...@lists.01.org

Reply via email to