On Tue, 19 Jun 2018, Shakeel Butt wrote:

> diff --git a/mm/slub.c b/mm/slub.c
> index a3b8467c14af..731c02b371ae 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3673,9 +3673,23 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
>  
>  bool __kmem_cache_empty(struct kmem_cache *s)
>  {
> -     int node;
> +     int cpu, node;

Nit: wouldn't cpu be unused if CONFIG_SLUB_DEBUG is enabled? The loop that
uses it only runs in the !CONFIG_SLUB_DEBUG case.

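If that ever matters in practice, the iterator could simply be scoped to the
block that needs it. A minimal, untested variant of the quoted hunk (same
names as in the patch, nothing new assumed):

	if (!IS_ENABLED(CONFIG_SLUB_DEBUG)) {
		int cpu;

		/* slabs_node() is always 0 here, check cpu slabs directly */
		for_each_online_cpu(cpu) {
			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

			if (c->page || slub_percpu_partial(c))
				return false;
		}
	}
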
>       struct kmem_cache_node *n;
>  
> +     /*
> +      * slabs_node will always be 0 for !CONFIG_SLUB_DEBUG. So, manually
> +      * check slabs for all cpus.
> +      */
> +     if (!IS_ENABLED(CONFIG_SLUB_DEBUG)) {
> +             for_each_online_cpu(cpu) {
> +                     struct kmem_cache_cpu *c;
> +
> +                     c = per_cpu_ptr(s->cpu_slab, cpu);
> +                     if (c->page || slub_percpu_partial(c))
> +                             return false;
> +             }
> +     }
> +
>       for_each_kmem_cache_node(s, node, n)
>               if (n->nr_partial || slabs_node(s, node))
>                       return false;

Wouldn't it just be better to allow {inc,dec}_slabs_node() to adjust the
nr_slabs counter even for CONFIG_SLUB_DEBUG=n, so that slabs_node() stays
accurate, instead of doing the per-cpu iteration on every shutdown?

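For reference, a rough, untested sketch of that alternative, modelled on the
existing CONFIG_SLUB_DEBUG versions of these helpers in mm/slub.c (it would
also require moving nr_slabs/total_objects in struct kmem_cache_node out of
the #ifdef CONFIG_SLUB_DEBUG block):

/* Account slabs per node unconditionally, not only for CONFIG_SLUB_DEBUG. */
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * n may still be NULL early during boot, see
	 * early_kmem_cache_node_alloc().
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}

static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

With slabs_node() then returning atomic_long_read(&n->nr_slabs) in all
configurations, __kmem_cache_empty() could keep the plain
for_each_kmem_cache_node() loop and drop the per-cpu walk entirely.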