On Tue, 2 Jul 2019, Waiman Long wrote:

> diff --git a/Documentation/ABI/testing/sysfs-kernel-slab 
> b/Documentation/ABI/testing/sysfs-kernel-slab
> index 29601d93a1c2..2a3d0fc4b4ac 100644
> --- a/Documentation/ABI/testing/sysfs-kernel-slab
> +++ b/Documentation/ABI/testing/sysfs-kernel-slab
> @@ -429,10 +429,12 @@ KernelVersion:  2.6.22
>  Contact:     Pekka Enberg <[email protected]>,
>               Christoph Lameter <[email protected]>
>  Description:
> -             The shrink file is written when memory should be reclaimed from
> -             a cache.  Empty partial slabs are freed and the partial list is
> -             sorted so the slabs with the fewest available objects are used
> -             first.
> +             A value of '1' is written to the shrink file when memory should
> +             be reclaimed from a cache.  Empty partial slabs are freed and
> +             the partial list is sorted so the slabs with the fewest
> +             available objects are used first.  When a value of '2' is
> +             written, all the corresponding child memory cgroup caches
> +             should be shrunk as well.  All other values are invalid.
>  

This should likely call out that '2' also implies '1'; as written, that might not 
be clear enough.

>  What:                /sys/kernel/slab/cache/slab_size
>  Date:                May 2007
> diff --git a/mm/slab.h b/mm/slab.h
> index 3b22931bb557..a16b2c7ff4dd 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -174,6 +174,7 @@ int __kmem_cache_shrink(struct kmem_cache *);
>  void __kmemcg_cache_deactivate(struct kmem_cache *s);
>  void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
>  void slab_kmem_cache_release(struct kmem_cache *);
> +int kmem_cache_shrink_all(struct kmem_cache *s);
>  
>  struct seq_file;
>  struct file;
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 464faaa9fd81..493697ba1da5 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -981,6 +981,49 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
>  }
>  EXPORT_SYMBOL(kmem_cache_shrink);
>  
> +/**
> + * kmem_cache_shrink_all - shrink a cache and all its memcg children
> + * @s: The root cache to shrink.
> + *
> + * Return: 0 if successful, -EINVAL if not a root cache
> + */
> +int kmem_cache_shrink_all(struct kmem_cache *s)
> +{
> +     struct kmem_cache *c;
> +
> +     if (!IS_ENABLED(CONFIG_MEMCG_KMEM)) {
> +             kmem_cache_shrink(s);
> +             return 0;
> +     }
> +     if (!is_root_cache(s))
> +             return -EINVAL;
> +
> +     /*
> +      * The caller should have a reference to the root cache and so
> +      * we don't need to take the slab_mutex. We have to take the
> +      * slab_mutex, however, to iterate the memcg caches.
> +      */
> +     get_online_cpus();
> +     get_online_mems();
> +     kasan_cache_shrink(s);
> +     __kmem_cache_shrink(s);
> +
> +     mutex_lock(&slab_mutex);
> +     for_each_memcg_cache(c, s) {
> +             /*
> +              * Don't need to shrink deactivated memcg caches.
> +              */
> +             if (s->flags & SLAB_DEACTIVATED)
> +                     continue;
> +             kasan_cache_shrink(c);
> +             __kmem_cache_shrink(c);
> +     }
> +     mutex_unlock(&slab_mutex);
> +     put_online_mems();
> +     put_online_cpus();
> +     return 0;
> +}
> +
>  bool slab_is_available(void)
>  {
>       return slab_state >= UP;

I'm wondering how long this could take, i.e. how long we hold slab_mutex 
while we traverse each cache and shrink it.

Acked-by: David Rientjes <[email protected]>

Reply via email to