On 2/21/24 20:40, Suren Baghdasaryan wrote:
> Currently slab pages can store only vectors of obj_cgroup pointers in
> page->memcg_data. Introduce slabobj_ext structure to allow more data
> to be stored for each slab object. Wrap obj_cgroup into slabobj_ext
> to support current functionality while allowing to extend slabobj_ext
> in the future.
> 
> Signed-off-by: Suren Baghdasaryan <[email protected]>

Hi, mostly good from the slab perspective, just some fixups below.
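
Before the nits, a quick sketch of what the new per-object metadata
amounts to, as I read the changelog (rough sketch only; the exact layout
in slab.h may differ and is expected to grow later):

  struct slabobj_ext {
          struct obj_cgroup *objcg;  /* memcg charge info, previously a bare
                                      * obj_cgroup pointer per object */
          /* later series can add more per-object fields here */
  };

i.e. a vector of these hangs off the slab where the plain obj_cgroup
pointer vector used to live.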

> --- a/mm/slab.h
> +++ b/mm/slab.h
> -int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
> -                              gfp_t gfp, bool new_slab);
> -void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
> -                  enum node_stat_item idx, int nr);
> -#else /* CONFIG_MEMCG_KMEM */
> -static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
> +int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> +                     gfp_t gfp, bool new_slab);
>

We could remove this declaration and make the function static in mm/slub.c.
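
I.e. drop the declaration from slab.h and keep the definition local to
mm/slub.c, roughly like this (body elided, unchanged from the patch):

  /* mm/slub.c */
  static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
                                 gfp_t gfp, bool new_slab)
  {
          /* ... body unchanged from this patch ... */
          return 0;
  }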

> +#else /* CONFIG_SLAB_OBJ_EXT */
> +
> +static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
>  {
>       return NULL;
>  }
>  
> -static inline int memcg_alloc_slab_cgroups(struct slab *slab,
> -                                            struct kmem_cache *s, gfp_t gfp,
> -                                            bool new_slab)
> +static inline int alloc_slab_obj_exts(struct slab *slab,
> +                                   struct kmem_cache *s, gfp_t gfp,
> +                                   bool new_slab)
>  {
>       return 0;
>  }

Ditto

> -#endif /* CONFIG_MEMCG_KMEM */
> +
> +static inline struct slabobj_ext *
> +prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> +{
> +     return NULL;
> +}

Same here (and the definition and usage even appear only in a later patch).

> +#endif /* CONFIG_SLAB_OBJ_EXT */
> +
> +#ifdef CONFIG_MEMCG_KMEM
> +void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
> +                  enum node_stat_item idx, int nr);
> +#endif
>  
>  size_t __ksize(const void *objp);
>  
> diff --git a/mm/slub.c b/mm/slub.c
> index d31b03a8d9d5..76fb600fbc80 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -683,10 +683,10 @@ static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *sla
>  
>       if (s->flags & __CMPXCHG_DOUBLE) {
>               ret = __update_freelist_fast(slab, freelist_old, counters_old,
> -                                         freelist_new, counters_new);
> +                                         freelist_new, counters_new);
>       } else {
>               ret = __update_freelist_slow(slab, freelist_old, counters_old,
> -                                         freelist_new, counters_new);
> +                                         freelist_new, counters_new);
>       }
>       if (likely(ret))
>               return true;
> @@ -710,13 +710,13 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
>  
>       if (s->flags & __CMPXCHG_DOUBLE) {
>               ret = __update_freelist_fast(slab, freelist_old, counters_old,
> -                                         freelist_new, counters_new);
> +                                         freelist_new, counters_new);
>       } else {
>               unsigned long flags;
>  
>               local_irq_save(flags);
>               ret = __update_freelist_slow(slab, freelist_old, counters_old,
> -                                         freelist_new, counters_new);
> +                                          freelist_new, counters_new);

Please, no drive-by whitespace fixups in code you're not actually
changing. I thought you agreed to drop these in v3?

>  static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
>                                            struct list_lru *lru,
>                                            struct obj_cgroup **objcgp,
> @@ -2314,7 +2364,7 @@ static __always_inline void account_slab(struct slab *slab, int order,
>                                        struct kmem_cache *s, gfp_t gfp)
>  {
>       if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
> -             memcg_alloc_slab_cgroups(slab, s, gfp, true);
> +             alloc_slab_obj_exts(slab, s, gfp, true);

This is still guarded by the memcg_kmem_online() static key, which is good.

>  
>       mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
>                           PAGE_SIZE << order);
> @@ -2323,8 +2373,7 @@ static __always_inline void unaccount_slab(struct slab *slab, int order,
>  static __always_inline void unaccount_slab(struct slab *slab, int order,
>                                          struct kmem_cache *s)
>  {
> -     if (memcg_kmem_online())
> -             memcg_free_slab_cgroups(slab);
> +     free_slab_obj_exts(slab);

But this call is no longer guarded, even though it still could be? See
the sketch below for what I mean.

>  
>       mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
>                           -(PAGE_SIZE << order));
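
To illustrate: something along these lines would keep the fast path
cheap (just a sketch; either keep the static-key check while memcg is
the only user of obj_exts, or bail out early on the obj_exts pointer if
free_slab_obj_exts() doesn't already do that internally):

  static __always_inline void unaccount_slab(struct slab *slab, int order,
                                             struct kmem_cache *s)
  {
          /* nothing to free when no extension vector was ever attached */
          if (slab_obj_exts(slab))
                  free_slab_obj_exts(slab);

          mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                              -(PAGE_SIZE << order));
  }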

