Always increase surplus_huge_pages before the overcommit check so that
free_huge_page() can decrease it at free time. This removes the need to
abuse the temporary-page flag to work around the free_huge_page() code
flow.

Signed-off-by: Wei Yang <[email protected]>
---
 mm/hugetlb.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
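
For reference, a condensed sketch of the resulting allocation path in
alloc_surplus_huge_page() after this patch (simplified; the actual page
allocation and the out_unlock path are omitted):

	spin_lock(&hugetlb_lock);

	/* Account the new page as surplus up front ... */
	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[page_to_nid(page)]++;

	/*
	 * ... so that, if we raced with a pool size change and are now
	 * over the overcommit limit, put_page() -> free_huge_page() can
	 * undo the accounting for us, without the PageHugeTemporary
	 * workaround.
	 */
	if (h->surplus_huge_pages > h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		put_page(page);
		return NULL;
	}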

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1f2010c9dd8d..a0eb81e0e4c5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1913,21 +1913,19 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
                return NULL;
 
        spin_lock(&hugetlb_lock);
+
+       h->surplus_huge_pages++;
+       h->surplus_huge_pages_node[page_to_nid(page)]++;
+
        /*
         * We could have raced with the pool size change.
         * Double check that and simply deallocate the new page
-        * if we would end up overcommiting the surpluses. Abuse
-        * temporary page to workaround the nasty free_huge_page
-        * codeflow
+        * if we would end up overcommiting the surpluses.
         */
-       if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
-               SetPageHugeTemporary(page);
+       if (h->surplus_huge_pages > h->nr_overcommit_huge_pages) {
                spin_unlock(&hugetlb_lock);
                put_page(page);
                return NULL;
-       } else {
-               h->surplus_huge_pages++;
-               h->surplus_huge_pages_node[page_to_nid(page)]++;
        }
 
 out_unlock:
-- 
2.20.1 (Apple Git-117)
