On 22.01.2021 02:06, Yang Shi wrote:
> Use per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's own
> nr_deferred will be used in the following cases:
>     1. Non memcg aware shrinkers
>     2. !CONFIG_MEMCG
>     3. memcg is disabled by boot parameter
> 
> Signed-off-by: Yang Shi <[email protected]>
> ---
>  mm/vmscan.c | 81 +++++++++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 69 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 722aa71b13b2..d8e77ea13815 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -359,6 +359,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
>       up_write(&shrinker_rwsem);
>  }
>  
> +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> +                                 struct mem_cgroup *memcg)
> +{
> +     struct shrinker_info *info;
> +
> +     info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> +                                      true);

Since these rcu_dereference_protected() calls now live in separate functions
with no lock taken anywhere near them, it would be better to spell out the
required lock in the rcu_dereference_protected() condition, i.e.
rcu_dereference_protected(..., lockdep_is_held(&lock_you_need_here));
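
For example, a minimal sketch of what I mean, assuming shrinker_rwsem is the
lock that actually protects shrinker_info on this path (as it is in the
register/unregister paths):

static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
				    struct mem_cgroup *memcg)
{
	struct shrinker_info *info;

	/* lockdep verifies the caller really holds shrinker_rwsem */
	info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
					 lockdep_is_held(&shrinker_rwsem));

	return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
}

That way a CONFIG_PROVE_RCU build complains when the helper is called without
the lock, instead of the bare "true" silently accepting any caller.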


> +     return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
> +}
> +
> +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> +                               struct mem_cgroup *memcg)
> +{
> +     struct shrinker_info *info;
> +
> +     info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
> +                                      true);
> +
> +     return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
> +}
> +
>  static bool cgroup_reclaim(struct scan_control *sc)
>  {
>       return sc->target_mem_cgroup;
> @@ -397,6 +418,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
>  {
>  }
>  
> +static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
> +                                 struct mem_cgroup *memcg)
> +{
> +     return 0;
> +}
> +
> +static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
> +                               struct mem_cgroup *memcg)
> +{
> +     return 0;
> +}
> +
>  static bool cgroup_reclaim(struct scan_control *sc)
>  {
>       return false;
> @@ -408,6 +441,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
>  }
>  #endif
>  
> +static long count_nr_deferred(struct shrinker *shrinker,
> +                           struct shrink_control *sc)
> +{
> +     int nid = sc->nid;
> +
> +     if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> +             nid = 0;
> +
> +     if (sc->memcg &&
> +         (shrinker->flags & SHRINKER_MEMCG_AWARE))
> +             return count_nr_deferred_memcg(nid, shrinker,
> +                                            sc->memcg);
> +
> +     return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> +}
> +
> +static long set_nr_deferred(long nr, struct shrinker *shrinker,
> +                         struct shrink_control *sc)
> +{
> +     int nid = sc->nid;
> +
> +     if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> +             nid = 0;
> +
> +     if (sc->memcg &&
> +         (shrinker->flags & SHRINKER_MEMCG_AWARE))
> +             return set_nr_deferred_memcg(nr, nid, shrinker,
> +                                          sc->memcg);
> +
> +     return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
> +}
> +
>  /*
>   * This misses isolated pages which are not accounted for to save counters.
>   * As the data only determines if reclaim or compaction continues, it is
> @@ -544,14 +610,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>       long freeable;
>       long nr;
>       long new_nr;
> -     int nid = shrinkctl->nid;
>       long batch_size = shrinker->batch ? shrinker->batch
>                                         : SHRINK_BATCH;
>       long scanned = 0, next_deferred;
>  
> -     if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
> -             nid = 0;
> -
>       freeable = shrinker->count_objects(shrinker, shrinkctl);
>       if (freeable == 0 || freeable == SHRINK_EMPTY)
>               return freeable;
> @@ -561,7 +623,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>        * and zero it so that other concurrent shrinker invocations
>        * don't also do this scanning work.
>        */
> -     nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
> +     nr = count_nr_deferred(shrinker, shrinkctl);
>  
>       total_scan = nr;
>       if (shrinker->seeks) {
> @@ -652,14 +714,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
>               next_deferred = 0;
>       /*
>        * move the unused scan count back into the shrinker in a
> -      * manner that handles concurrent updates. If we exhausted the
> -      * scan, there is no need to do an update.
> +      * manner that handles concurrent updates.
>        */
> -     if (next_deferred > 0)
> -             new_nr = atomic_long_add_return(next_deferred,
> -                                             &shrinker->nr_deferred[nid]);
> -     else
> -             new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
> +     new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
>  
>       trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
>       return freed;
> 
