Now that nr_deferred is per memcg for memcg aware shrinkers, the deferred
work accumulated by a memcg would be lost when it goes offline.  Add the
child memcg's nr_deferred counts to the parent's corresponding nr_deferred
when the memcg is offlined.

Acked-by: Vlastimil Babka <vba...@suse.cz>
Acked-by: Kirill Tkhai <ktk...@virtuozzo.com>
Acked-by: Roman Gushchin <g...@fb.com>
Reviewed-by: Shakeel Butt <shake...@google.com>
Signed-off-by: Yang Shi <shy828...@gmail.com>
---
 include/linux/memcontrol.h |  1 +
 mm/memcontrol.c            |  1 +
 mm/vmscan.c                | 24 ++++++++++++++++++++++++
 3 files changed, 26 insertions(+)
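
For reviewers who want to play with the arithmetic outside the kernel: the
reparenting below is just "add every (node, shrinker id) counter of the
dying group into the parent's".  A toy user-space model of that bookkeeping
(hypothetical group_info type and plain C11 atomics here, not the kernel's
shrinker_info/atomic_long_t API):

#include <stdatomic.h>
#include <stdio.h>

#define NR_NODES     2
#define NR_SHRINKERS 4

/* Toy stand-in for shrinker_info: one deferred count per (node, shrinker). */
struct group_info {
	atomic_long nr_deferred[NR_NODES][NR_SHRINKERS];
};

/* Fold the child's deferred work into the parent, as the patch does on offline. */
static void reparent_deferred(struct group_info *child, struct group_info *parent)
{
	for (int nid = 0; nid < NR_NODES; nid++)
		for (int i = 0; i < NR_SHRINKERS; i++) {
			long nr = atomic_load(&child->nr_deferred[nid][i]);
			/* Add rather than overwrite: the parent may have its own backlog. */
			atomic_fetch_add(&parent->nr_deferred[nid][i], nr);
		}
}

int main(void)
{
	struct group_info child = {0}, parent = {0};

	atomic_store(&child.nr_deferred[0][1], 128);
	atomic_store(&parent.nr_deferred[0][1], 32);

	reparent_deferred(&child, &parent);
	/* Prints 160: the child's backlog now lives in the parent. */
	printf("parent nr_deferred[0][1] = %ld\n",
	       atomic_load(&parent.nr_deferred[0][1]));
	return 0;
}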

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 42a4facb5b7c..2c76fe53fb6d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1569,6 +1569,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 int alloc_shrinker_info(struct mem_cgroup *memcg);
 void free_shrinker_info(struct mem_cgroup *memcg);
 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
+void reparent_shrinker_deferred(struct mem_cgroup *memcg);
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index edd8a06c751f..dacb1c6087ea 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5262,6 +5262,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        page_counter_set_low(&memcg->memory, 0);
 
        memcg_offline_kmem(memcg);
+       reparent_shrinker_deferred(memcg);
        wb_memcg_offline(memcg);
 
        drain_all_stock(memcg);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cf25c78661d1..9a2dfeaa79f4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -395,6 +395,30 @@ static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
        return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
 }
 
+void reparent_shrinker_deferred(struct mem_cgroup *memcg)
+{
+       int i, nid;
+       long nr;
+       struct mem_cgroup *parent;
+       struct shrinker_info *child_info, *parent_info;
+
+       parent = parent_mem_cgroup(memcg);
+       if (!parent)
+               parent = root_mem_cgroup;
+
+       /* Prevent concurrent shrinker_info expansion */
+       down_read(&shrinker_rwsem);
+       for_each_node(nid) {
+               child_info = shrinker_info_protected(memcg, nid);
+               parent_info = shrinker_info_protected(parent, nid);
+               for (i = 0; i < shrinker_nr_max; i++) {
+                       nr = atomic_long_read(&child_info->nr_deferred[i]);
+                       atomic_long_add(nr, &parent_info->nr_deferred[i]);
+               }
+       }
+       up_read(&shrinker_rwsem);
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
        return sc->target_mem_cgroup;
-- 
2.26.2
