We can only free the tail vmemmap pages of a HugeTLB page to the buddy
allocator when the size of struct page is a power of two.
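
To make that constraint concrete, below is a minimal userspace sketch (not
kernel code and not part of the patch) showing the divisibility property the
check relies on: PAGE_SIZE is itself a power of two, so sizeof(struct page)
divides it evenly exactly when it is also a power of two, i.e. no struct page
straddles a vmemmap page boundary. The 4 KiB page size and the candidate
struct sizes are illustrative assumptions.

	#include <stdio.h>
	#include <stdbool.h>

	#define PAGE_SIZE 4096UL	/* assumed 4 KiB base page */

	/* same idea as the kernel's is_power_of_2() helper */
	static bool is_power_of_2(unsigned long n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	int main(void)
	{
		/* hypothetical sizeof(struct page) values */
		unsigned long sizes[] = { 64, 56, 80 };

		for (int i = 0; i < 3; i++) {
			unsigned long sz = sizes[i];

			/*
			 * Only a power-of-two size gives PAGE_SIZE % sz == 0,
			 * so every vmemmap page holds a whole number of
			 * struct pages and the tail vmemmap pages can be
			 * remapped/freed page by page.
			 */
			printf("size %lu: power of two? %s, PAGE_SIZE %% size = %lu\n",
			       sz, is_power_of_2(sz) ? "yes" : "no",
			       PAGE_SIZE % sz);
		}
		return 0;
	}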

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
---
 mm/hugetlb_vmemmap.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index fad760483e01..fd60cfdf3d40 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -111,6 +111,11 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
        unsigned int order = huge_page_order(h);
        unsigned int vmemmap_pages;
 
+       if (!is_power_of_2(sizeof(struct page))) {
+               pr_info("disable freeing vmemmap pages for %s\n", h->name);
+               return;
+       }
+
        vmemmap_pages = ((1 << order) * sizeof(struct page)) >> PAGE_SHIFT;
        /*
         * The head page and the first tail page are not to be freed to buddy
-- 
2.11.0
