When we free a HugeTLB page back to the buddy allocator, we must first
reallocate the vmemmap pages associated with it, restoring the struct
pages that were previously freed by free_huge_page_vmemmap(). We do
that in __free_hugepage().
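
In outline, the call flow this patch adds looks like the following (a
simplified sketch; the reserved head vmemmap pages and the TLB flush
are handled in the code below but elided here):

	__free_hugepage(h, page)
	    alloc_huge_page_vmemmap(h, page)
	        alloc_vmemmap_pages(h, &map_pages)  /* retries until it succeeds */
	        pmd = vmemmap_to_pmd(page)
	        __remap_huge_page_pmd_vmemmap(pmd, start, end, &map_pages)
	            /* for each PTE: copy the shared reuse page into a
	               freshly allocated page and point the PTE at it */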

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
 mm/hugetlb.c         |   2 +
 mm/hugetlb_vmemmap.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/hugetlb_vmemmap.h |   5 +++
 3 files changed, 109 insertions(+)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 41056b4230f1..3fafa39fcac6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1382,6 +1382,8 @@ static void __free_hugepage(struct hstate *h, struct page *page)
 {
        int i;
 
+       alloc_huge_page_vmemmap(h, page);
+
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index f6ba288966d4..d6a1b06c1322 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -95,6 +95,7 @@
 #define pr_fmt(fmt)    "HugeTLB vmemmap: " fmt
 
 #include <linux/bootmem_info.h>
+#include <linux/delay.h>
 #include "hugetlb_vmemmap.h"
 
 /*
@@ -108,6 +109,8 @@
 #define RESERVE_VMEMMAP_NR             2U
 #define RESERVE_VMEMMAP_SIZE           (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 #define TAIL_PAGE_REUSE                        -1
+#define GFP_VMEMMAP_PAGE               \
+       (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_HIGH)
 
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT            HPAGE_SHIFT
@@ -159,6 +162,105 @@ static pmd_t *vmemmap_to_pmd(unsigned long page)
        return pmd_offset(pud, page);
 }
 
+static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
+                                         unsigned long start,
+                                         unsigned long end,
+                                         struct list_head *remap_pages)
+{
+       pgprot_t pgprot = PAGE_KERNEL;
+       void *from = page_to_virt(reuse);
+       unsigned long addr;
+
+       for (addr = start; addr < end; addr += PAGE_SIZE) {
+               void *to;
+               struct page *page;
+               pte_t entry, old = *ptep;
+
+               page = list_first_entry(remap_pages, struct page, lru);
+               list_del(&page->lru);
+               to = page_to_virt(page);
+               copy_page(to, from);
+
+               /*
+                * Make sure that any data written to @to is made
+                * visible to the underlying physical page.
+                */
+               flush_kernel_vmap_range(to, PAGE_SIZE);
+
+               prepare_vmemmap_page(page);
+
+               entry = mk_pte(page, pgprot);
+               set_pte_at(&init_mm, addr, ptep++, entry);
+
+               VM_BUG_ON(!pte_present(old) || pte_page(old) != reuse);
+       }
+}
+
+static void __remap_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+                                         unsigned long end,
+                                         struct list_head *vmemmap_pages)
+{
+       unsigned long next, addr = start;
+       struct page *reuse = NULL;
+
+       do {
+               pte_t *ptep;
+
+               ptep = pte_offset_kernel(pmd, addr);
+               if (!reuse)
+                       reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
+
+               next = vmemmap_hpage_addr_end(addr, end);
+               __remap_huge_page_pte_vmemmap(reuse, ptep, addr, next,
+                                             vmemmap_pages);
+       } while (pmd++, addr = next, addr != end);
+
+       flush_tlb_kernel_range(start, end);
+}
+
+static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
+{
+       unsigned int nr = free_vmemmap_pages_per_hpage(h);
+
+       while (nr--) {
+               struct page *page;
+
+retry:
+               page = alloc_page(GFP_VMEMMAP_PAGE);
+               if (unlikely(!page)) {
+                       msleep(100);
+                       /*
+                        * Retry indefinitely, because an allocation
+                        * failure cannot be handled here: the HugeTLB
+                        * page can only be freed once all of its
+                        * vmemmap pages have been allocated.
+                        */
+                       goto retry;
+               }
+               list_add_tail(&page->lru, list);
+       }
+}
+
+void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+       pmd_t *pmd;
+       unsigned long start, end;
+       unsigned long vmemmap_addr = (unsigned long)head;
+       LIST_HEAD(map_pages);
+
+       if (!free_vmemmap_pages_per_hpage(h))
+               return;
+
+       alloc_vmemmap_pages(h, &map_pages);
+
+       pmd = vmemmap_to_pmd(vmemmap_addr);
+       BUG_ON(!pmd);
+
+       start = vmemmap_addr + RESERVE_VMEMMAP_SIZE;
+       end = vmemmap_addr + vmemmap_pages_size_per_hpage(h);
+       __remap_huge_page_pmd_vmemmap(pmd, start, end, &map_pages);
+}
+
 static inline void free_vmemmap_page_list(struct list_head *list)
 {
        struct page *page, *next;
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 293897b9f1d8..7887095488f4 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -12,6 +12,7 @@
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 void __init hugetlb_vmemmap_init(struct hstate *h);
+void alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
 
 static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
@@ -23,6 +24,10 @@ static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
 }
 
+static inline void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+}
+
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
-- 
2.11.0