Re: [v8 PATCH 10/13] mm: vmscan: use per memcg nr_deferred of shrinker

2021-03-08 Thread Shakeel Butt
On Tue, Feb 16, 2021 at 4:13 PM Yang Shi wrote:
>
> Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's own
> nr_deferred will still be used in the following cases:
> 1. Non memcg-aware shrinkers
> 2. !CONFIG_MEMCG
> 3. memcg is disabled by a boot parameter
>
> Signed-off-by: Yang Shi 

Reviewed-by: Shakeel Butt 


Re: [v8 PATCH 10/13] mm: vmscan: use per memcg nr_deferred of shrinker

2021-02-16 Thread Kirill Tkhai
On 17.02.2021 03:13, Yang Shi wrote:
> Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's own
> nr_deferred will still be used in the following cases:
> 1. Non memcg-aware shrinkers
> 2. !CONFIG_MEMCG
> 3. memcg is disabled by a boot parameter
> 
> Signed-off-by: Yang Shi 

Acked-by: Kirill Tkhai 


Re: [v8 PATCH 10/13] mm: vmscan: use per memcg nr_deferred of shrinker

2021-02-16 Thread Roman Gushchin
On Tue, Feb 16, 2021 at 04:13:19PM -0800, Yang Shi wrote:
> Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's own
> nr_deferred will still be used in the following cases:
> 1. Non memcg-aware shrinkers
> 2. !CONFIG_MEMCG
> 3. memcg is disabled by a boot parameter
> 
> Signed-off-by: Yang Shi 

LGTM!

Acked-by: Roman Gushchin 

Thanks!


[v8 PATCH 10/13] mm: vmscan: use per memcg nr_deferred of shrinker

2021-02-16 Thread Yang Shi
Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's own
nr_deferred will still be used in the following cases:
1. Non memcg-aware shrinkers
2. !CONFIG_MEMCG
3. memcg is disabled by a boot parameter

Signed-off-by: Yang Shi 
---
 mm/vmscan.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 66 insertions(+), 12 deletions(-)
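
As an illustration of the dispatch rule introduced below (the per-memcg
nr_deferred is consulted only when sc->memcg is set and the shrinker has
SHRINKER_MEMCG_AWARE; everything else falls back to the shrinker's own
per-node array), here is a minimal userspace sketch.  It is not part of
the patch; the demo_* names and flag values are simplified stand-ins for
the kernel types:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_SHRINKER_NUMA_AWARE	(1U << 0)
#define DEMO_SHRINKER_MEMCG_AWARE	(1U << 1)

/*
 * Mirrors the patch's test: the per-memcg nr_deferred is used only
 * when reclaim has a memcg (sc->memcg != NULL) and the shrinker is
 * memcg aware.  !CONFIG_MEMCG builds and "memcg disabled at boot"
 * both leave sc->memcg NULL, so they fall through here too.
 */
static bool uses_memcg_nr_deferred(unsigned int flags, bool have_memcg)
{
	return have_memcg && (flags & DEMO_SHRINKER_MEMCG_AWARE);
}

/* Non NUMA-aware shrinkers always use slot 0, as in the patch. */
static int effective_nid(unsigned int flags, int nid)
{
	return (flags & DEMO_SHRINKER_NUMA_AWARE) ? nid : 0;
}

int main(void)
{
	printf("plain shrinker, memcg reclaim:       %s, nid %d\n",
	       uses_memcg_nr_deferred(0, true) ? "per-memcg" : "per-shrinker",
	       effective_nid(0, 1));
	printf("memcg-aware shrinker, memcg reclaim: %s, nid %d\n",
	       uses_memcg_nr_deferred(DEMO_SHRINKER_MEMCG_AWARE, true)
			? "per-memcg" : "per-shrinker",
	       effective_nid(DEMO_SHRINKER_MEMCG_AWARE, 1));
	printf("memcg-aware shrinker, no memcg:      %s\n",
	       uses_memcg_nr_deferred(DEMO_SHRINKER_MEMCG_AWARE, false)
			? "per-memcg" : "per-shrinker");
	return 0;
}

The three fallback cases from the changelog map onto have_memcg == false
(both !CONFIG_MEMCG and a boot-time disable leave sc->memcg NULL) and a
clear DEMO_SHRINKER_MEMCG_AWARE bit.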

diff --git a/mm/vmscan.c b/mm/vmscan.c
index fcb399e18fc3..57cbc6bc8a49 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -374,6 +374,24 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
idr_remove(&shrinker_idr, id);
 }
 
+static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+  struct mem_cgroup *memcg)
+{
+   struct shrinker_info *info;
+
+   info = shrinker_info_protected(memcg, nid);
+   return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
+}
+
+static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+   struct shrinker_info *info;
+
+   info = shrinker_info_protected(memcg, nid);
+   return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
return sc->target_mem_cgroup;
@@ -412,6 +430,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
 {
 }
 
+static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+  struct mem_cgroup *memcg)
+{
+   return 0;
+}
+
+static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+ struct mem_cgroup *memcg)
+{
+   return 0;
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
return false;
@@ -423,6 +453,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 }
 #endif
 
+static long xchg_nr_deferred(struct shrinker *shrinker,
+struct shrink_control *sc)
+{
+   int nid = sc->nid;
+
+   if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+   nid = 0;
+
+   if (sc->memcg &&
+   (shrinker->flags & SHRINKER_MEMCG_AWARE))
+   return xchg_nr_deferred_memcg(nid, shrinker,
+ sc->memcg);
+
+   return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+}
+
+
+static long add_nr_deferred(long nr, struct shrinker *shrinker,
+   struct shrink_control *sc)
+{
+   int nid = sc->nid;
+
+   if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+   nid = 0;
+
+   if (sc->memcg &&
+   (shrinker->flags & SHRINKER_MEMCG_AWARE))
+   return add_nr_deferred_memcg(nr, nid, shrinker,
+sc->memcg);
+
+   return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
+}
+
 /*
  * This misses isolated pages which are not accounted for to save counters.
  * As the data only determines if reclaim or compaction continues, it is
@@ -558,14 +621,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
long freeable;
long nr;
long new_nr;
-   int nid = shrinkctl->nid;
long batch_size = shrinker->batch ? shrinker->batch
  : SHRINK_BATCH;
long scanned = 0, next_deferred;
 
-   if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
-   nid = 0;
-
freeable = shrinker->count_objects(shrinker, shrinkctl);
if (freeable == 0 || freeable == SHRINK_EMPTY)
return freeable;
@@ -575,7 +634,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 * and zero it so that other concurrent shrinker invocations
 * don't also do this scanning work.
 */
-   nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+   nr = xchg_nr_deferred(shrinker, shrinkctl);
 
total_scan = nr;
if (shrinker->seeks) {
@@ -666,14 +725,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
next_deferred = 0;
/*
 * move the unused scan count back into the shrinker in a
-* manner that handles concurrent updates. If we exhausted the
-* scan, there is no need to do an update.
+* manner that handles concurrent updates.
 */
-   if (next_deferred > 0)
-   new_nr = atomic_long_add_return(next_deferred,
-   &shrinker->nr_deferred[nid]);
-   else
-   new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+   new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);
 
	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
return freed;
-- 
2.26.2
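
The handoff pattern preserved by the new helpers (atomic_long_xchg() to
claim all deferred work, atomic_long_add_return() to give back the
unscanned remainder) can be modeled standalone.  A minimal C11 sketch,
with stdatomic standing in for the kernel's atomic_long_t and
DEMO_BATCH/demo_shrink() as hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

#define DEMO_BATCH 64	/* stand-in for shrinker->batch / SHRINK_BATCH */

static atomic_long nr_deferred;	/* stand-in for one nr_deferred slot */

/*
 * One modeled do_shrink_slab() pass: claim and zero the deferred count
 * so concurrent invocations do not scan the same work twice, add the
 * newly arrived work, scan up to a batch, and atomically give the
 * unused remainder back.
 */
static long demo_shrink(long new_work)
{
	long nr = atomic_exchange(&nr_deferred, 0);
	long total = nr + new_work;
	long scanned = total < DEMO_BATCH ? total : DEMO_BATCH;
	long unused = total - scanned;

	if (unused > 0)
		atomic_fetch_add(&nr_deferred, unused);
	return scanned;
}

int main(void)
{
	long scanned;

	atomic_store(&nr_deferred, 100);	/* pretend earlier passes deferred 100 */
	scanned = demo_shrink(28);
	printf("scanned %ld, still deferred %ld\n",
	       scanned, atomic_load(&nr_deferred));	/* scanned 64, still deferred 64 */
	return 0;
}

The exchange-then-add-back shape is what makes racing invocations safe:
each claimant sees either the full deferred count or zero, never a
partial value, and returned work is never lost.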