Rather than decrementing the ref count one by one, we
walk the page array and check which pages belong to the same
compound_head. Later on we decrement the calculated amount
of references in a single write to the head page.

Signed-off-by: Joao Martins <joao.m.mart...@oracle.com>
---
 mm/gup.c | 41 ++++++++++++++++++++++++++++++++---------
 1 file changed, 32 insertions(+), 9 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 194e6981eb03..3a9a7229f418 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -212,6 +212,18 @@ static bool __unpin_devmap_managed_user_page(struct page *page)
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
+static int record_refs(struct page **pages, int npages)
+{
+       struct page *head = compound_head(pages[0]);
+       int refs = 1, index;
+
+       for (index = 1; index < npages; index++, refs++)
+               if (compound_head(pages[index]) != head)
+                       break;
+
+       return refs;
+}
+
 /**
  * unpin_user_page() - release a dma-pinned page
  * @page:            pointer to page to be released
@@ -221,9 +233,9 @@ static bool __unpin_devmap_managed_user_page(struct page *page)
  * that such pages can be separately tracked and uniquely handled. In
  * particular, interactions with RDMA and filesystems need special handling.
  */
-void unpin_user_page(struct page *page)
+static void __unpin_user_page(struct page *page, int refs)
 {
-       int refs = 1;
+       int orig_refs = refs;
 
        page = compound_head(page);
 
@@ -237,14 +249,19 @@ void unpin_user_page(struct page *page)
                return;
 
        if (hpage_pincount_available(page))
-               hpage_pincount_sub(page, 1);
+               hpage_pincount_sub(page, refs);
        else
-               refs = GUP_PIN_COUNTING_BIAS;
+               refs *= GUP_PIN_COUNTING_BIAS;
 
        if (page_ref_sub_and_test(page, refs))
                __put_page(page);
 
-       mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, 1);
+       mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED, orig_refs);
+}
+
+void unpin_user_page(struct page *page)
+{
+       __unpin_user_page(page, 1);
 }
 EXPORT_SYMBOL(unpin_user_page);
 
@@ -274,6 +291,7 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                                 bool make_dirty)
 {
        unsigned long index;
+       int refs = 1;
 
        /*
         * TODO: this can be optimized for huge pages: if a series of pages is
@@ -286,8 +304,9 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                return;
        }
 
-       for (index = 0; index < npages; index++) {
+       for (index = 0; index < npages; index += refs) {
                struct page *page = compound_head(pages[index]);
+
                /*
                 * Checking PageDirty at this point may race with
                 * clear_page_dirty_for_io(), but that's OK. Two key
@@ -310,7 +329,8 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
                 */
                if (!PageDirty(page))
                        set_page_dirty_lock(page);
-               unpin_user_page(page);
+               refs = record_refs(pages + index, npages - index);
+               __unpin_user_page(page, refs);
        }
 }
 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
@@ -327,6 +347,7 @@ EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
 void unpin_user_pages(struct page **pages, unsigned long npages)
 {
        unsigned long index;
+       int refs = 1;
 
        /*
         * If this WARN_ON() fires, then the system *might* be leaking pages (by
@@ -340,8 +361,10 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
         * physically contiguous and part of the same compound page, then a
         * single operation to the head page should suffice.
         */
-       for (index = 0; index < npages; index++)
-               unpin_user_page(pages[index]);
+       for (index = 0; index < npages; index += refs) {
+               refs = record_refs(pages + index, npages - index);
+               __unpin_user_page(pages[index], refs);
+       }
 }
 EXPORT_SYMBOL(unpin_user_pages);
 
-- 
2.17.1
_______________________________________________
Linux-nvdimm mailing list -- linux-nvdimm@lists.01.org
To unsubscribe send an email to linux-nvdimm-le...@lists.01.org

Reply via email to