Because we reuse the first tail vmemmap page frame and remap it
read-only, we cannot set PageHWPoison on a tail page. Instead, use
head[4].private to record the index of the raw error page, and set
PageHWPoison on the raw error page later, from __free_hugepage(),
once the vmemmap has been restored.
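
For illustration only, a condensed sketch of the two halves of the
patch below (the code is lifted from the patch itself, not new):

	/* dissolve path: the tail struct pages are read-only, so only
	 * record the index of the raw error page in head[4].private.
	 */
	set_page_private(head + 4, page - head);

	/* free path: the vmemmap is writable again, so the flag can be
	 * moved from the head page to the raw error page.
	 */
	page = head + page_private(head + 4);
	if (page != head) {
		SetPageHWPoison(page);
		ClearPageHWPoison(head);
	}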

Signed-off-by: Muchun Song <songmuc...@bytedance.com>
Reviewed-by: Oscar Salvador <osalva...@suse.de>
---
 mm/hugetlb.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 61 insertions(+), 8 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d11c32fcdb38..6caaa7e5dd2a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1358,6 +1358,63 @@ static inline void __update_and_free_page(struct hstate *h, struct page *page)
                schedule_work(&hpage_update_work);
 }
 
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
+{
+       struct page *page;
+
+       if (!PageHWPoison(head) || !free_vmemmap_pages_per_hpage(h))
+               return;
+
+       page = head + page_private(head + 4);
+
+       /*
+        * Move PageHWPoison flag from head page to the raw error page,
+        * which makes any subpage other than the error page reusable.
+        */
+       if (page != head) {
+               SetPageHWPoison(page);
+               ClearPageHWPoison(head);
+       }
+}
+
+static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
+                                       struct page *page)
+{
+       if (!PageHWPoison(head))
+               return;
+
+       if (free_vmemmap_pages_per_hpage(h)) {
+               set_page_private(head + 4, page - head);
+       } else if (page != head) {
+               /*
+                * Move PageHWPoison flag from head page to the raw error page,
+                * which makes any subpage other than the error page reusable.
+                */
+               SetPageHWPoison(page);
+               ClearPageHWPoison(head);
+       }
+}
+
+#else
+static inline void hwpoison_subpage_deliver(struct hstate *h, struct page *head)
+{
+}
+
+static inline void hwpoison_subpage_set(struct hstate *h, struct page *head,
+                                       struct page *page)
+{
+       if (PageHWPoison(head) && page != head) {
+               /*
+                * Move PageHWPoison flag from head page to the raw error page,
+                * which makes any subpage other than the error page reusable.
+                */
+               SetPageHWPoison(page);
+               ClearPageHWPoison(head);
+       }
+}
+#endif
+
 static void update_and_free_page(struct hstate *h, struct page *page)
 {
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1373,6 +1430,8 @@ static void __free_hugepage(struct hstate *h, struct page *page)
 {
        int i;
 
+       hwpoison_subpage_deliver(h, page);
+
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
@@ -1845,14 +1904,8 @@ int dissolve_free_huge_page(struct page *page)
                int nid = page_to_nid(head);
                if (h->free_huge_pages - h->resv_huge_pages == 0)
                        goto out;
-               /*
-                * Move PageHWPoison flag from head page to the raw error page,
-                * which makes any subpages rather than the error page reusable.
-                */
-               if (PageHWPoison(head) && page != head) {
-                       SetPageHWPoison(page);
-                       ClearPageHWPoison(head);
-               }
+
+               hwpoison_subpage_set(h, head, page);
                list_del(&head->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
-- 
2.11.0
