Re: [PATCH v4 2/9] hugetlb_cgroup: add interface for charge/uncharge hugetlb reservations

2019-09-16 Thread shuah

On 9/10/19 5:31 PM, Mina Almasry wrote:

Augements hugetlb_cgroup_charge_cgroup to be able to charge hugetlb
usage or hugetlb reservation counter.



Augments?


Adds a new interface to uncharge a hugetlb_cgroup counter via
hugetlb_cgroup_uncharge_counter.

Integrates the counter with hugetlb_cgroup, via hugetlb_cgroup_init,
hugetlb_cgroup_have_usage, and hugetlb_cgroup_css_offline.

Signed-off-by: Mina Almasry 
---
  include/linux/hugetlb_cgroup.h | 13 --
  mm/hugetlb.c   |  6 ++-
  mm/hugetlb_cgroup.c| 82 +++---
  3 files changed, 80 insertions(+), 21 deletions(-)

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 063962f6dfc6a..c467715dd8fb8 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -52,14 +52,19 @@ static inline bool hugetlb_cgroup_disabled(void)
  }

  extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
-   struct hugetlb_cgroup **ptr);
+   struct hugetlb_cgroup **ptr,
+   bool reserved);
  extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 struct hugetlb_cgroup *h_cg,
 struct page *page);
  extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 struct page *page);
  extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
-  struct hugetlb_cgroup *h_cg);
+  struct hugetlb_cgroup *h_cg,
+  bool reserved);
+extern void hugetlb_cgroup_uncharge_counter(struct page_counter *p,
+   unsigned long nr_pages);
+
  extern void hugetlb_cgroup_file_init(void) __init;
  extern void hugetlb_cgroup_migrate(struct page *oldhpage,
   struct page *newhpage);
@@ -83,7 +88,7 @@ static inline bool hugetlb_cgroup_disabled(void)

  static inline int
  hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
-struct hugetlb_cgroup **ptr)
+struct hugetlb_cgroup **ptr, bool reserved)

Please line up the arguments.


  {
return 0;
  }
@@ -102,7 +107,7 @@ hugetlb_cgroup_uncharge_page(int idx, unsigned long 
nr_pages, struct page *page)

  static inline void
  hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
-  struct hugetlb_cgroup *h_cg)
+  struct hugetlb_cgroup *h_cg, bool reserved)


Same here.


  {
  }

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6d7296dd11b83..e975f55aede94 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2078,7 +2078,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
gbl_chg = 1;
}

-   ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
+   ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg,
+  false);
if (ret)
goto out_subpool_put;

@@ -2126,7 +2127,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
return page;

  out_uncharge_cgroup:
-   hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
+   hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg,
+   false);


Please be consistent with indentation. Line this up like you did above.


  out_subpool_put:
if (map_chg || avoid_reserve)
hugepage_subpool_put_pages(spool, 1);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 51a72624bd1ff..2ab36a98d834e 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -38,8 +38,8 @@ struct hugetlb_cgroup {
  static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

  static inline
-struct page_counter *hugetlb_cgroup_get_counter(struct hugetlb_cgroup *h_cg, 
int idx,
-bool reserved)
+struct page_counter *hugetlb_cgroup_get_counter(struct hugetlb_cgroup *h_cg,
+   int idx, bool reserved)


Same here.


  {
if (reserved)
return &h_cg->reserved_hugepage[idx];
@@ -74,8 +74,12 @@ static inline bool hugetlb_cgroup_have_usage(struct 
hugetlb_cgroup *h_cg)
int idx;

for (idx = 0; idx < hugetlb_max_hstate; idx++) {
-   if (page_counter_read(&h_cg->hugepage[idx]))
+   if (page_counter_read(hugetlb_cgroup_get_counter(h_cg, idx,
+   true)) ||
+   page_counter_read(hugetlb_cgroup_get_counter(h_cg, idx,
+   false))) {
return true;
+   }
}

[PATCH v4 2/9] hugetlb_cgroup: add interface for charge/uncharge hugetlb reservations

2019-09-10 Thread Mina Almasry
Augments hugetlb_cgroup_charge_cgroup to be able to charge hugetlb
usage or hugetlb reservation counter.

Adds a new interface to uncharge a hugetlb_cgroup counter via
hugetlb_cgroup_uncharge_counter.

Integrates the counter with hugetlb_cgroup, via hugetlb_cgroup_init,
hugetlb_cgroup_have_usage, and hugetlb_cgroup_css_offline.

Signed-off-by: Mina Almasry 
---
 include/linux/hugetlb_cgroup.h | 13 --
 mm/hugetlb.c   |  6 ++-
 mm/hugetlb_cgroup.c| 82 +++---
 3 files changed, 80 insertions(+), 21 deletions(-)

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 063962f6dfc6a..c467715dd8fb8 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -52,14 +52,19 @@ static inline bool hugetlb_cgroup_disabled(void)
 }

 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
-   struct hugetlb_cgroup **ptr);
+   struct hugetlb_cgroup **ptr,
+   bool reserved);
 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 struct hugetlb_cgroup *h_cg,
 struct page *page);
 extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 struct page *page);
 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
-  struct hugetlb_cgroup *h_cg);
+  struct hugetlb_cgroup *h_cg,
+  bool reserved);
+extern void hugetlb_cgroup_uncharge_counter(struct page_counter *p,
+   unsigned long nr_pages);
+
 extern void hugetlb_cgroup_file_init(void) __init;
 extern void hugetlb_cgroup_migrate(struct page *oldhpage,
   struct page *newhpage);
@@ -83,7 +88,7 @@ static inline bool hugetlb_cgroup_disabled(void)

 static inline int
 hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
-struct hugetlb_cgroup **ptr)
+struct hugetlb_cgroup **ptr, bool reserved)
 {
return 0;
 }
@@ -102,7 +107,7 @@ hugetlb_cgroup_uncharge_page(int idx, unsigned long 
nr_pages, struct page *page)

 static inline void
 hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
-  struct hugetlb_cgroup *h_cg)
+  struct hugetlb_cgroup *h_cg, bool reserved)
 {
 }

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6d7296dd11b83..e975f55aede94 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2078,7 +2078,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
gbl_chg = 1;
}

-   ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
+   ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg,
+  false);
if (ret)
goto out_subpool_put;

@@ -2126,7 +2127,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
return page;

 out_uncharge_cgroup:
-   hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
+   hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg,
+   false);
 out_subpool_put:
if (map_chg || avoid_reserve)
hugepage_subpool_put_pages(spool, 1);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 51a72624bd1ff..2ab36a98d834e 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -38,8 +38,8 @@ struct hugetlb_cgroup {
 static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

 static inline
-struct page_counter *hugetlb_cgroup_get_counter(struct hugetlb_cgroup *h_cg, 
int idx,
-bool reserved)
+struct page_counter *hugetlb_cgroup_get_counter(struct hugetlb_cgroup *h_cg,
+   int idx, bool reserved)
 {
if (reserved)
return &h_cg->reserved_hugepage[idx];
@@ -74,8 +74,12 @@ static inline bool hugetlb_cgroup_have_usage(struct 
hugetlb_cgroup *h_cg)
int idx;

for (idx = 0; idx < hugetlb_max_hstate; idx++) {
-   if (page_counter_read(&h_cg->hugepage[idx]))
+   if (page_counter_read(hugetlb_cgroup_get_counter(h_cg, idx,
+   true)) ||
+   page_counter_read(hugetlb_cgroup_get_counter(h_cg, idx,
+   false))) {
return true;
+   }
}
return false;
 }
@@ -86,18 +90,30 @@ static void hugetlb_cgroup_init(struct hugetlb_cgroup 
*h_cgroup,
int idx;

for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
-   struct