Support MAP_NORESERVE accounting as part of the new hugetlb_cgroup
reservation counter.

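For context, a MAP_NORESERVE hugetlb mapping takes no reservation at
mmap() time, so every page is allocated (and, with this patch, charged)
at fault time. An illustrative userspace snippet, assuming the default
2MB huge page size:

    #include <sys/mman.h>

    /* No reservation is taken for this mapping; each huge page is
     * allocated and charged when it is first touched. */
    void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
                   MAP_NORESERVE, -1, 0);
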
For each hugetlb page allocation, we check at allocation time whether
there is a reservation backing it. If there is, the allocation was
already charged at reservation time and we don't account it again. If
there is no reservation backing this allocation, we charge the
appropriate hugetlb_cgroup.

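The check in alloc_huge_page() below boils down to a single predicate.
As a sketch only (this helper does not exist in the patch; the names
come from alloc_huge_page()'s locals):

    /* True when the allocation consumes no reservation and must be
     * charged against the reservation counter now:
     * - map_chg: vma_needs_reservation() found no reservation,
     * - avoid_reserve: the caller asked not to use reserves,
     * - !vma_resv_map(vma): no resv_map at all (e.g. MAP_NORESERVE).
     */
    static bool charge_reservation_now(long map_chg, int avoid_reserve,
                                       struct vm_area_struct *vma)
    {
            return map_chg || avoid_reserve || !vma_resv_map(vma);
    }
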
The hugetlb_cgroup to uncharge for this allocation is stored in
page[3].private. We use new APIs added in an earlier patch to set this
pointer.

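A rough sketch of those accessors (the exact names and guards live in
the earlier patch and may differ slightly): the rsvd flag selects
between the usage pointer in page[2].private and the reservation
pointer in page[3].private of the compound page:

    static inline struct hugetlb_cgroup *
    __hugetlb_cgroup_from_page(struct page *page, bool rsvd)
    {
            VM_BUG_ON_PAGE(!PageHuge(page), page);

            if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                    return NULL;
            if (rsvd)
                    return (struct hugetlb_cgroup *)page[3].private;
            return (struct hugetlb_cgroup *)page[2].private;
    }
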
---
 mm/hugetlb.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index af336bf227fb6..79b99878ce6f9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1217,6 +1217,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
                                1 << PG_writeback);
        }
        VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page, false), page);
+       VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page, true), page);
        set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
        set_page_refcounted(page);
        if (hstate_is_gigantic(h)) {
@@ -1328,6 +1329,9 @@ void free_huge_page(struct page *page)
        clear_page_huge_active(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h), pages_per_huge_page(h),
                                     page, false);
+       hugetlb_cgroup_uncharge_page(hstate_index(h), pages_per_huge_page(h),
+                                    page, true);
+
        if (restore_reserve)
                h->resv_huge_pages++;

@@ -1354,6 +1358,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
        spin_lock(&hugetlb_lock);
        set_hugetlb_cgroup(page, NULL, false);
+       set_hugetlb_cgroup(page, NULL, true);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
@@ -2155,10 +2160,19 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
                        gbl_chg = 1;
        }

+       /* If this allocation is not consuming a reservation, charge it now.
+        */
+       if (map_chg || avoid_reserve || !vma_resv_map(vma)) {
+               ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h),
+                                                  &h_cg, true);
+               if (ret)
+                       goto out_subpool_put;
+       }
+
        ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg,
                                           false);
        if (ret)
-               goto out_subpool_put;
+               goto out_uncharge_cgroup_reservation;

        spin_lock(&hugetlb_lock);
        /*
@@ -2182,6 +2196,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
        }
        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page,
                                     false);
+       if (map_chg || avoid_reserve || !vma_resv_map(vma)) {
+               hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg,
+                                            page, true);
+       }
+
        spin_unlock(&hugetlb_lock);

        set_page_private(page, (unsigned long)spool);
@@ -2207,6 +2226,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 out_uncharge_cgroup:
        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg,
                                       false);
+out_uncharge_cgroup_reservation:
+       if (map_chg || avoid_reserve || !vma_resv_map(vma))
+               hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h),
+                                              h_cg, true);
 out_subpool_put:
        if (map_chg || avoid_reserve)
                hugepage_subpool_put_pages(spool, 1);
--
2.23.0.700.g56cf767bdb-goog
