In the subsequent patch, we will allocate the vmemmap pages when freeing
huge pages. But update_and_free_page() can be called from a non-task
context (and with hugetlb_lock held), so we defer the actual freeing to a
workqueue to avoid having to use GFP_ATOMIC to allocate the vmemmap pages.

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
 mm/hugetlb.c         | 98 +++++++++++++++++++++++++++++++++++++++++++++-------
 mm/hugetlb_vmemmap.c |  5 ---
 mm/hugetlb_vmemmap.h | 10 ++++++
 3 files changed, 96 insertions(+), 17 deletions(-)

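As background, a minimal, self-contained sketch of the defer-to-workqueue
pattern that the new update_hpage_vmemmap_workfn() follows (stash objects on
a lockless llist from atomic context, free them later from process context).
All demo_* names below are illustrative only and are not part of this patch:

  #include <linux/llist.h>
  #include <linux/workqueue.h>
  #include <linux/slab.h>
  #include <linux/sched.h>

  struct demo_item {
          struct llist_node node;
          /* payload ... */
  };

  static LLIST_HEAD(demo_freelist);

  /* Runs in process context, so it may reschedule between items. */
  static void demo_free_workfn(struct work_struct *work)
  {
          struct llist_node *node = llist_del_all(&demo_freelist);

          while (node) {
                  struct demo_item *item = llist_entry(node, struct demo_item, node);

                  node = node->next;
                  kfree(item);
                  cond_resched();
          }
  }
  static DECLARE_WORK(demo_free_work, demo_free_workfn);

  /* Safe from atomic context: only a lockless list add plus schedule_work(). */
  static void demo_defer_free(struct demo_item *item)
  {
          /* llist_add() returns true only if the list was previously empty. */
          if (llist_add(&item->node, &demo_freelist))
                  schedule_work(&demo_free_work);
  }

The patch itself avoids a dedicated llist_node by reusing page->mapping as
the list linkage, since that field is cleared before the page is freed anyway.
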
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a0ce6f33a717..4aabf12aca9b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1221,7 +1221,7 @@ static void destroy_compound_gigantic_page(struct page *page,
        __ClearPageHead(page);
 }
 
-static void free_gigantic_page(struct page *page, unsigned int order)
+static void __free_gigantic_page(struct page *page, unsigned int order)
 {
        /*
         * If the page isn't allocated using the cma allocator,
@@ -1288,20 +1288,100 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
        return NULL;
 }
-static inline void free_gigantic_page(struct page *page, unsigned int order) { }
+static inline void __free_gigantic_page(struct page *page,
+                                       unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned int order) { }
 #endif
 
-static void update_and_free_page(struct hstate *h, struct page *page)
+static void __free_hugepage(struct hstate *h, struct page *page);
+
+/*
+ * As update_and_free_page() can be called from a non-task context (and with
+ * hugetlb_lock held), we defer the actual freeing to a workqueue so that we
+ * do not have to use GFP_ATOMIC to allocate a lot of vmemmap pages.
+ *
+ * update_hpage_vmemmap_workfn() locklessly retrieves the linked list of
+ * pages to be freed and frees them one-by-one. As the page->mapping pointer
+ * is going to be cleared in update_hpage_vmemmap_workfn() anyway, it is
+ * reused as the llist_node structure of a lockless linked list of huge
+ * pages to be freed.
+ */
+static LLIST_HEAD(hpage_update_freelist);
+
+static void update_hpage_vmemmap_workfn(struct work_struct *work)
 {
-       int i;
+       struct llist_node *node;
+       struct page *page;
+
+       node = llist_del_all(&hpage_update_freelist);
+
+       while (node) {
+               page = container_of((struct address_space **)node,
+                                    struct page, mapping);
+               node = node->next;
+               page->mapping = NULL;
+               __free_hugepage(page_hstate(page), page);
+
+               cond_resched();
+       }
+}
+static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);
+
+static inline void __update_and_free_page(struct hstate *h, struct page *page)
+{
+       /* No need to allocate vmemmap pages */
+       if (!free_vmemmap_pages_per_hpage(h)) {
+               __free_hugepage(h, page);
+               return;
+       }
+
+       /*
+        * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap
+        * pages.
+        *
+        * Only call schedule_work() if hpage_update_freelist was previously
+        * empty. Otherwise, schedule_work() has already been called but the
+        * workfn hasn't retrieved the list yet.
+        */
+       if (llist_add((struct llist_node *)&page->mapping,
+                     &hpage_update_freelist))
+               schedule_work(&hpage_update_work);
+}
+
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+static inline void free_gigantic_page(struct hstate *h, struct page *page)
+{
+       __free_gigantic_page(page, huge_page_order(h));
+}
+#else
+static inline void free_gigantic_page(struct hstate *h, struct page *page)
+{
+       /*
+        * Temporarily drop the hugetlb_lock, because
+        * we might block in __free_gigantic_page().
+        */
+       spin_unlock(&hugetlb_lock);
+       __free_gigantic_page(page, huge_page_order(h));
+       spin_lock(&hugetlb_lock);
+}
+#endif
+
+static void update_and_free_page(struct hstate *h, struct page *page)
+{
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                return;
 
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
+
+       __update_and_free_page(h, page);
+}
+
+static void __free_hugepage(struct hstate *h, struct page *page)
+{
+       int i;
+
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
@@ -1313,14 +1393,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
        set_page_refcounted(page);
        if (hstate_is_gigantic(h)) {
-               /*
-                * Temporarily drop the hugetlb_lock, because
-                * we might block in free_gigantic_page().
-                */
-               spin_unlock(&hugetlb_lock);
                destroy_compound_gigantic_page(page, huge_page_order(h));
-               free_gigantic_page(page, huge_page_order(h));
-               spin_lock(&hugetlb_lock);
+               free_gigantic_page(h, page);
        } else {
                __free_pages(page, huge_page_order(h));
        }
@@ -1761,7 +1835,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
 
        if (vmemmap_pgtable_prealloc(h, page)) {
                if (hstate_is_gigantic(h))
-                       free_gigantic_page(page, huge_page_order(h));
+                       free_gigantic_page(h, page);
                else
                        put_page(page);
                return NULL;
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 6f8a735e0dd3..eda7e3a0b67c 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -141,11 +141,6 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
 }
 #endif
 
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
-{
-       return h->nr_free_vmemmap_pages;
-}
-
 static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
 {
        return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index a9425d94ed8b..4175b44f88bc 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -15,6 +15,11 @@ void __init hugetlb_vmemmap_init(struct hstate *h);
 int vmemmap_pgtable_prealloc(struct hstate *h, struct page *page);
 void vmemmap_pgtable_free(struct page *page);
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
+
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+       return h->nr_free_vmemmap_pages;
+}
 #else
 static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
@@ -32,5 +37,10 @@ static inline void vmemmap_pgtable_free(struct page *page)
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
+
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+       return 0;
+}
 #endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
-- 
2.11.0
