Re: [PATCH v6 08/19] mm: memcg/slab: save obj_cgroup for non-root slab objects

2020-06-19 Thread Roman Gushchin
On Fri, Jun 19, 2020 at 05:16:02PM -0700, Shakeel Butt wrote:
> On Mon, Jun 8, 2020 at 4:07 PM Roman Gushchin  wrote:
> >
> > Store the obj_cgroup pointer in the corresponding place of
> > page->obj_cgroups for each allocated non-root slab object.
> > Make sure that each allocated object holds a reference to obj_cgroup.
> >
> > Objcg pointer is obtained from the memcg->objcg dereferencing
> > in memcg_kmem_get_cache() and passed from pre_alloc_hook to
> > post_alloc_hook. Then in case of successful allocation(s) it's
> > getting stored in the page->obj_cgroups vector.
> >
> > The objcg obtaining part looks a bit bulky now, but it will be simplified
> > by next commits in the series.
> >
> > Signed-off-by: Roman Gushchin 
> > Reviewed-by: Vlastimil Babka 
> 
> One nit below otherwise:
> 
> Reviewed-by: Shakeel Butt 
> 
> > ---
> [snip]
> > +static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
> > + struct obj_cgroup *objcg,
> > + size_t size, void **p)
> > +{
> > +   struct page *page;
> > +   unsigned long off;
> > +   size_t i;
> > +
> > +   for (i = 0; i < size; i++) {
> > +   if (likely(p[i])) {
> > +   page = virt_to_head_page(p[i]);
> > +   off = obj_to_index(s, page, p[i]);
> > +   obj_cgroup_get(objcg);
> > +   page_obj_cgroups(page)[off] = objcg;
> > +   }
> > +   }
> > +   obj_cgroup_put(objcg);
> 
> Nit: we get the objcg reference in memcg_kmem_get_cache(), doesn't it
> look cleaner to put that reference in memcg_kmem_put_cache() instead
> of here.

memcg_kmem_put_cache() will go away completely later in the series.

I know the code might look sub-optimal and messy on some stages,
but it's only because there is a big transition from the original
to the final state, and I don't want to increase intermediate diffs.

Please, take a look at the final result.


Re: [PATCH v6 08/19] mm: memcg/slab: save obj_cgroup for non-root slab objects

2020-06-19 Thread Shakeel Butt
On Mon, Jun 8, 2020 at 4:07 PM Roman Gushchin  wrote:
>
> Store the obj_cgroup pointer in the corresponding place of
> page->obj_cgroups for each allocated non-root slab object.
> Make sure that each allocated object holds a reference to obj_cgroup.
>
> Objcg pointer is obtained from the memcg->objcg dereferencing
> in memcg_kmem_get_cache() and passed from pre_alloc_hook to
> post_alloc_hook. Then in case of successful allocation(s) it's
> getting stored in the page->obj_cgroups vector.
>
> The objcg obtaining part looks a bit bulky now, but it will be simplified
> by next commits in the series.
>
> Signed-off-by: Roman Gushchin 
> Reviewed-by: Vlastimil Babka 

One nit below otherwise:

Reviewed-by: Shakeel Butt 

> ---
[snip]
> +static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
> + struct obj_cgroup *objcg,
> + size_t size, void **p)
> +{
> +   struct page *page;
> +   unsigned long off;
> +   size_t i;
> +
> +   for (i = 0; i < size; i++) {
> +   if (likely(p[i])) {
> +   page = virt_to_head_page(p[i]);
> +   off = obj_to_index(s, page, p[i]);
> +   obj_cgroup_get(objcg);
> +   page_obj_cgroups(page)[off] = objcg;
> +   }
> +   }
> +   obj_cgroup_put(objcg);

Nit: we get the objcg reference in memcg_kmem_get_cache(), doesn't it
look cleaner to put that reference in memcg_kmem_put_cache() instead
of here.

> +   memcg_kmem_put_cache(s);
> +}
> +


[PATCH v6 08/19] mm: memcg/slab: save obj_cgroup for non-root slab objects

2020-06-08 Thread Roman Gushchin
Store the obj_cgroup pointer in the corresponding place of
page->obj_cgroups for each allocated non-root slab object.
Make sure that each allocated object holds a reference to obj_cgroup.

Objcg pointer is obtained from the memcg->objcg dereferencing
in memcg_kmem_get_cache() and passed from pre_alloc_hook to
post_alloc_hook. Then in case of successful allocation(s) it's
getting stored in the page->obj_cgroups vector.

The objcg obtaining part looks a bit bulky now, but it will be simplified
by next commits in the series.

Signed-off-by: Roman Gushchin 
Reviewed-by: Vlastimil Babka 
---
 include/linux/memcontrol.h |  3 +-
 mm/memcontrol.c| 14 +++--
 mm/slab.c  | 18 +++-
 mm/slab.h  | 60 ++
 mm/slub.c  | 14 +
 5 files changed, 88 insertions(+), 21 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index c69e66fe4f12..c63473fffdda 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1404,7 +1404,8 @@ static inline void memcg_set_shrinker_bit(struct 
mem_cgroup *memcg,
 }
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
+   struct obj_cgroup **objcgp);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 
 #ifdef CONFIG_MEMCG_KMEM
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2020c7542aa1..f0ea0ce6bea5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2971,7 +2971,8 @@ static inline bool memcg_kmem_bypass(void)
  * done with it, memcg_kmem_put_cache() must be called to release the
  * reference.
  */
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
+struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep,
+   struct obj_cgroup **objcgp)
 {
struct mem_cgroup *memcg;
struct kmem_cache *memcg_cachep;
@@ -3027,8 +3028,17 @@ struct kmem_cache *memcg_kmem_get_cache(struct 
kmem_cache *cachep)
 */
if (unlikely(!memcg_cachep))
memcg_schedule_kmem_cache_create(memcg, cachep);
-   else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt))
+   else if (percpu_ref_tryget(&memcg_cachep->memcg_params.refcnt)) {
+   struct obj_cgroup *objcg = rcu_dereference(memcg->objcg);
+
+   if (!objcg || !obj_cgroup_tryget(objcg)) {
+   percpu_ref_put(&memcg_cachep->memcg_params.refcnt);
+   goto out_unlock;
+   }
+
+   *objcgp = objcg;
cachep = memcg_cachep;
+   }
 out_unlock:
rcu_read_unlock();
return cachep;
diff --git a/mm/slab.c b/mm/slab.c
index 9350062ffc1a..02b4363930c1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3222,9 +3222,10 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, 
int nodeid,
unsigned long save_flags;
void *ptr;
int slab_node = numa_mem_id();
+   struct obj_cgroup *objcg = NULL;
 
flags &= gfp_allowed_mask;
-   cachep = slab_pre_alloc_hook(cachep, flags);
+   cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
if (unlikely(!cachep))
return NULL;
 
@@ -3260,7 +3261,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, 
int nodeid,
if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
memset(ptr, 0, cachep->object_size);
 
-   slab_post_alloc_hook(cachep, flags, 1, &ptr);
+   slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
return ptr;
 }
 
@@ -3301,9 +3302,10 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, 
unsigned long caller)
 {
unsigned long save_flags;
void *objp;
+   struct obj_cgroup *objcg = NULL;
 
flags &= gfp_allowed_mask;
-   cachep = slab_pre_alloc_hook(cachep, flags);
+   cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
if (unlikely(!cachep))
return NULL;
 
@@ -3317,7 +3319,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, 
unsigned long caller)
if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
memset(objp, 0, cachep->object_size);
 
-   slab_post_alloc_hook(cachep, flags, 1, &objp);
+   slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
return objp;
 }
 
@@ -3439,6 +3441,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
memset(objp, 0, cachep->object_size);
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
+   memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
 
/*
 * Skip calling cache_free_alien() when the platform is not numa.
@@ -3504,8 +3507,9 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t 
flags, size_t size,
  void **p)
 {
size_t i;
+   struct