Re: [v8 PATCH 09/13] mm: vmscan: add per memcg shrinker nr_deferred

2021-03-08 Thread Shakeel Butt
On Mon, Mar 8, 2021 at 12:30 PM Yang Shi  wrote:
>
> On Mon, Mar 8, 2021 at 11:12 AM Shakeel Butt  wrote:
> >
> > On Tue, Feb 16, 2021 at 4:13 PM Yang Shi  wrote:
> > >
> > > Currently the number of deferred objects are per shrinker, but some 
> > > slabs, for example,
> > > vfs inode/dentry cache are per memcg, this would result in poor isolation 
> > > among memcgs.
> > >
> > > The deferred objects typically are generated by __GFP_NOFS allocations, 
> > > one memcg with
> > > excessive __GFP_NOFS allocations may blow up deferred objects, then other 
> > > innocent memcgs
> > > may suffer from over shrink, excessive reclaim latency, etc.
> > >
> > > For example, two workloads run in memcgA and memcgB respectively, 
> > > workload in B is vfs
> > > heavy workload.  Workload in A generates excessive deferred objects, then 
> > > B's vfs cache
> > > might be hit heavily (drop half of caches) by B's limit reclaim or global 
> > > reclaim.
> > >
> > > We observed this hit in our production environment which was running vfs 
> > > heavy workload
> > > shown as the below tracing log:
> > >
> > > <...>-409454 [016]  28286961.747146: mm_shrink_slab_start: 
> > > super_cache_scan+0x0/0x1a0 9a83046f3458:
> > > nid: 1 objects to shrink 3641681686040 gfp_flags 
> > > GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721
> > > cache items 246404277 delta 31345 total_scan 123202138
> > > <...>-409454 [022]  28287105.928018: mm_shrink_slab_end: 
> > > super_cache_scan+0x0/0x1a0 9a83046f3458:
> > > nid: 1 unused scan count 3641681686040 new scan count 3641798379189 
> > > total_scan 602
> > > last shrinker return val 123186855
> > >
> > > The vfs cache and page cache ratio was 10:1 on this machine, and half of 
> > > caches were dropped.
> > > This also resulted in significant amount of page caches were dropped due 
> > > to inodes eviction.
> > >
> > > Make nr_deferred per memcg for memcg aware shrinkers would solve the 
> > > unfairness and bring
> > > better isolation.
> > >
> > > When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the 
> > > shrinker's nr_deferred
> > > would be used.  And non memcg aware shrinkers use shrinker's nr_deferred 
> > > all the time.
> > >
> > > Signed-off-by: Yang Shi 
> > > ---
> > >  include/linux/memcontrol.h |  7 +++--
> > >  mm/vmscan.c| 60 ++
> > >  2 files changed, 46 insertions(+), 21 deletions(-)
> > >
> > > diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> > > index 4c9253896e25..c457fc7bc631 100644
> > > --- a/include/linux/memcontrol.h
> > > +++ b/include/linux/memcontrol.h
> > > @@ -93,12 +93,13 @@ struct lruvec_stat {
> > >  };
> > >
> > >  /*
> > > - * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
> > > - * which have elements charged to this memcg.
> > > + * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
> > > + * shrinkers, which have elements charged to this memcg.
> > >   */
> > >  struct shrinker_info {
> > > struct rcu_head rcu;
> > > -   unsigned long map[];
> > > +   atomic_long_t *nr_deferred;
> > > +   unsigned long *map;
> > >  };
> > >
> > >  /*
> > > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > > index a1047ea60ecf..fcb399e18fc3 100644
> > > --- a/mm/vmscan.c
> > > +++ b/mm/vmscan.c
> > > @@ -187,11 +187,17 @@ static DECLARE_RWSEM(shrinker_rwsem);
> > >  #ifdef CONFIG_MEMCG
> > >  static int shrinker_nr_max;
> > >
> > > +/* The shrinker_info is expanded in a batch of BITS_PER_LONG */
> > >  static inline int shrinker_map_size(int nr_items)
> > >  {
> > > return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned 
> > > long));
> > >  }
> > >
> > > +static inline int shrinker_defer_size(int nr_items)
> > > +{
> > > +   return (round_up(nr_items, BITS_PER_LONG) * 
> > > sizeof(atomic_long_t));
> > > +}
> > > +
> > >  static struct shrinker_info *shrinker_info_protected(struct mem_cgroup 
> > > *memcg,
> > >  int nid)
> > >  {
> > > @@ -200,10 +206,12 @@ static struct shrinker_info 
> > > *shrinker_info_protected(struct mem_cgroup *memcg,
> > >  }
> > >
> > >  static int expand_one_shrinker_info(struct mem_cgroup *memcg,
> > > -   int size, int old_size)
> > > +   int map_size, int defer_size,
> > > +   int old_map_size, int old_defer_size)
> > >  {
> > > struct shrinker_info *new, *old;
> > > int nid;
> > > +   int size = map_size + defer_size;
> > >
> > > for_each_node(nid) {
> > > old = shrinker_info_protected(memcg, nid);
> > > @@ -215,9 +223,16 @@ static int expand_one_shrinker_info(struct 
> > > mem_cgroup *memcg,
> > > if (!new)
> > > return -ENOMEM;
> > >
> > > -   /* Set all old bits, clear all new bits */
> > > -   

Re: [v8 PATCH 09/13] mm: vmscan: add per memcg shrinker nr_deferred

2021-03-08 Thread Yang Shi
On Mon, Mar 8, 2021 at 11:12 AM Shakeel Butt  wrote:
>
> On Tue, Feb 16, 2021 at 4:13 PM Yang Shi  wrote:
> > [full patch quote trimmed; see the original posting below]

Re: [v8 PATCH 09/13] mm: vmscan: add per memcg shrinker nr_deferred

2021-03-08 Thread Shakeel Butt
On Tue, Feb 16, 2021 at 4:13 PM Yang Shi  wrote:
>
> [full patch quote trimmed; see the original posting below]

Re: [v8 PATCH 09/13] mm: vmscan: add per memcg shrinker nr_deferred

2021-02-16 Thread Kirill Tkhai
On 17.02.2021 03:13, Yang Shi wrote:
> [full patch quote trimmed; see the original posting below]
> Signed-off-by: Yang Shi 

Acked-by: Kirill Tkhai 


Re: [v8 PATCH 09/13] mm: vmscan: add per memcg shrinker nr_deferred

2021-02-16 Thread Roman Gushchin
On Tue, Feb 16, 2021 at 04:13:18PM -0800, Yang Shi wrote:
> [full patch quote trimmed; see the original posting below]
> Signed-off-by: Yang Shi 

Acked-by: Roman Gushchin 

Thanks!


[v8 PATCH 09/13] mm: vmscan: add per memcg shrinker nr_deferred

2021-02-16 Thread Yang Shi
Currently the number of deferred objects is tracked per shrinker, but some
slabs, for example the vfs inode/dentry caches, are per memcg.  This results
in poor isolation among memcgs.

The deferred objects are typically generated by __GFP_NOFS allocations: one
memcg with excessive __GFP_NOFS allocations can blow up the deferred count,
and other, innocent memcgs may then suffer from over shrinking, excessive
reclaim latency, etc.
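
As context, a minimal sketch of where the deferral comes from, paraphrased
from fs/super.c and mm/vmscan.c (abbreviated, not the literal upstream code):

  /* fs/super.c: the superblock shrinker refuses to run without __GFP_FS,
   * to avoid recursing into the filesystem from a __GFP_NOFS allocation */
  static unsigned long super_cache_scan(struct shrinker *shrink,
                                        struct shrink_control *sc)
  {
          if (!(sc->gfp_mask & __GFP_FS))
                  return SHRINK_STOP;
          /* ... scan dentries and inodes ... */
  }

  /* mm/vmscan.c, do_shrink_slab() before this patch: the skipped work is
   * accumulated in a single per-shrinker counter shared by every memcg */
  nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
  total_scan = nr + delta;
  /* ... scanning loop; scan_objects() may return SHRINK_STOP ... */
  if (next_deferred >= scanned)
          next_deferred -= scanned;
  else
          next_deferred = 0;
  atomic_long_add(next_deferred, &shrinker->nr_deferred[nid]);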

For example, suppose two workloads run in memcgA and memcgB respectively, and
the workload in B is vfs heavy.  If the workload in A generates excessive
deferred objects, B's vfs cache may be hit hard (dropping half of its caches)
by B's limit reclaim or by global reclaim.

We observed exactly this in our production environment, which was running a
vfs heavy workload, as shown in the tracing log below:

<...>-409454 [016]  28286961.747146: mm_shrink_slab_start: super_cache_scan+0x0/0x1a0 9a83046f3458:
  nid: 1 objects to shrink 3641681686040 gfp_flags GFP_HIGHUSER_MOVABLE|__GFP_ZERO pgs_scanned 1 lru_pgs 15721
  cache items 246404277 delta 31345 total_scan 123202138
<...>-409454 [022]  28287105.928018: mm_shrink_slab_end: super_cache_scan+0x0/0x1a0 9a83046f3458:
  nid: 1 unused scan count 3641681686040 new scan count 3641798379189 total_scan 602
  last shrinker return val 123186855

The vfs cache to page cache ratio was 10:1 on this machine, and half of the
vfs caches were dropped (note that total_scan 123202138 is half of the
246404277 cache items above).  This in turn caused a significant amount of
page cache to be dropped through inode eviction.

Making nr_deferred per memcg for memcg aware shrinkers solves this unfairness
and brings better isolation.

When memcg is not enabled (!CONFIG_MEMCG or memcg disabled), the shrinker's
own nr_deferred is used.  Non memcg aware shrinkers use the shrinker's
nr_deferred all the time; a sketch of this dispatch follows the diff below.

Signed-off-by: Yang Shi 
---
 include/linux/memcontrol.h |  7 +++--
 mm/vmscan.c                | 60 ++
 2 files changed, 46 insertions(+), 21 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4c9253896e25..c457fc7bc631 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -93,12 +93,13 @@ struct lruvec_stat {
 };
 
 /*
- * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
- * which have elements charged to this memcg.
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to this memcg.
  */
 struct shrinker_info {
struct rcu_head rcu;
-   unsigned long map[];
+   atomic_long_t *nr_deferred;
+   unsigned long *map;
 };
 
 /*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a1047ea60ecf..fcb399e18fc3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -187,11 +187,17 @@ static DECLARE_RWSEM(shrinker_rwsem);
 #ifdef CONFIG_MEMCG
 static int shrinker_nr_max;
 
+/* The shrinker_info is expanded in a batch of BITS_PER_LONG */
 static inline int shrinker_map_size(int nr_items)
 {
return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
 }
 
+static inline int shrinker_defer_size(int nr_items)
+{
+   return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
+}
+
 static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
 int nid)
 {
@@ -200,10 +206,12 @@ static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
 }
 
 static int expand_one_shrinker_info(struct mem_cgroup *memcg,
-   int size, int old_size)
+   int map_size, int defer_size,
+   int old_map_size, int old_defer_size)
 {
struct shrinker_info *new, *old;
int nid;
+   int size = map_size + defer_size;
 
for_each_node(nid) {
old = shrinker_info_protected(memcg, nid);
@@ -215,9 +223,16 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
if (!new)
return -ENOMEM;
 
-   /* Set all old bits, clear all new bits */
-   memset(new->map, (int)0xff, old_size);
-   memset((void *)new->map + old_size, 0, size - old_size);
+   new->nr_deferred = (atomic_long_t *)(new + 1);
+   new->map = (void *)new->nr_deferred + defer_size;
+
+   /* map: set all old bits, clear all new bits */
+   memset(new->map, (int)0xff, old_map_size);
+   memset((void *)new->map + old_map_size, 0, map_size - old_map_size);
+   /* nr_deferred: copy old values, clear all new values */
+   memcpy(new->nr_deferred, old->nr_deferred, old_defer_size);
+   memset((void *)new->nr_deferred + old_defer_size, 0,
+  defer_size - old_defer_size);
 
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, new);
kvfree_rcu(old);
@@ -232,9 +247,6 @@ void free_shrinker_info(struct
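
The rest of the diff is truncated in this archive.  The truncated hunks route
do_shrink_slab()'s nr_deferred accesses through per-memcg helpers; the sketch
below follows the shape of the upstream version of this patch (abbreviated,
not the literal hunks):

  /* One allocation per (memcg, node), laid out as:
   *
   *   [struct shrinker_info][nr_deferred[0..N-1]][shrinker id bitmap]
   *          header            defer_size bytes      map_size bytes
   *
   * e.g. for shrinker_nr_max <= 64 on a 64-bit kernel:
   * defer_size = 64 * sizeof(atomic_long_t) = 512 bytes, map_size = 8 bytes */

  static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
                                     struct mem_cgroup *memcg)
  {
          struct shrinker_info *info;

          info = shrinker_info_protected(memcg, nid);
          return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
  }

  /* Pick the right nr_deferred: per memcg for memcg aware shrinkers when a
   * memcg is being reclaimed, the shrinker's own counter otherwise */
  static long xchg_nr_deferred(struct shrinker *shrinker,
                               struct shrink_control *sc)
  {
          int nid = sc->nid;

          if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
                  nid = 0;

          if (sc->memcg && (shrinker->flags & SHRINKER_MEMCG_AWARE))
                  return xchg_nr_deferred_memcg(nid, shrinker, sc->memcg);

          return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
  }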