dissolve_free_huge_pages() will either run into the VM_BUG_ON() or into
a list corruption and addressing exception when trying to offline a
memory block that is part (but not the first part) of a gigantic
hugetlb page with a size > memory block size.
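
As an illustration, assuming (made-up, architecture-dependent) sizes of
a 256 MB memory block and a 2 GB gigantic page, every memory block but
the first one starts its pfn range at a tail page of the compound page.
A minimal user-space sketch of the arithmetic:

	#include <stdio.h>

	/* Illustrative values only; real sizes depend on the architecture. */
	#define PAGE_SHIFT	12
	#define MEMBLOCK_SIZE	(256UL << 20)	/* 256 MB memory block */
	#define GIGANTIC_SIZE	(2UL << 30)	/* 2 GB gigantic hugetlb page */

	int main(void)
	{
		unsigned long head_pfn = 0;	/* pfn of the compound head page */
		unsigned long pfns_per_block = MEMBLOCK_SIZE >> PAGE_SHIFT;
		unsigned long block;

		/* The gigantic page spans 8 memory blocks; only the first
		 * one starts at the head page, all others start at a tail
		 * page inside the compound page. */
		for (block = 0; block < GIGANTIC_SIZE / MEMBLOCK_SIZE; block++) {
			unsigned long start_pfn = head_pfn + block * pfns_per_block;

			printf("block %lu: start_pfn %lu (%s page)\n", block,
			       start_pfn, start_pfn == head_pfn ? "head" : "tail");
		}
		return 0;
	}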

When no other, smaller hugetlb page sizes are present, the VM_BUG_ON()
will trigger directly, because the start pfn of such a memory block is
then not aligned to the gigantic page order. In the other case we will
run into an addressing exception later, because dissolve_free_huge_page()
will be called with a tail page instead of the head page of the compound
hugetlb page, and page_hstate() returns a NULL hstate for tail pages.
list_del() on a tail page would also corrupt the hstate free list.
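
For reference, this is (slightly simplified) how the hstate lookup goes
wrong; the helpers below are sketched from the kernel source of this
era, with comments added:

	/* include/linux/mm.h: a tail page reports a compound order of 0 */
	static inline unsigned int compound_order(struct page *page)
	{
		if (!PageHead(page))
			return 0;
		return page[1].compound_order;
	}

	/* include/linux/hugetlb.h: for a tail page this degenerates to
	 * size_to_hstate(PAGE_SIZE), and since no hstate has a size of
	 * PAGE_SIZE, the result is a NULL hstate */
	static inline struct hstate *page_hstate(struct page *page)
	{
		VM_BUG_ON_PAGE(!PageHuge(page), page);
		return size_to_hstate(PAGE_SIZE << compound_order(page));
	}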

To fix this, remove the VM_BUG_ON(), which is wrong in this scenario,
move the PageHuge() check out to the loop in dissolve_free_huge_pages(),
and use the compound head page in dissolve_free_huge_page().

Signed-off-by: Gerald Schaefer <gerald.schae...@de.ibm.com>
---
 mm/hugetlb.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 87e11d8..65e723c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1441,15 +1441,17 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
  */
 static void dissolve_free_huge_page(struct page *page)
 {
+       struct page *head = compound_head(page);
+
        spin_lock(&hugetlb_lock);
-       if (PageHuge(page) && !page_count(page)) {
-               struct hstate *h = page_hstate(page);
-               int nid = page_to_nid(page);
-               list_del(&page->lru);
+       if (!page_count(head)) {
+               struct hstate *h = page_hstate(head);
+               int nid = page_to_nid(head);
+               list_del(&head->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                h->max_huge_pages--;
-               update_and_free_page(h, page);
+               update_and_free_page(h, head);
        }
        spin_unlock(&hugetlb_lock);
 }
@@ -1466,9 +1468,9 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
        if (!hugepages_supported())
                return;
 
-       VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
-               dissolve_free_huge_page(pfn_to_page(pfn));
+               if (PageHuge(pfn_to_page(pfn)))
+                       dissolve_free_huge_page(pfn_to_page(pfn));
 }
 
 /*
-- 
2.8.4
