From: Kent Overstreet <kent.overstr...@linux.dev>

Upcoming alloc tagging patches require a place to stash per-allocation
metadata.

We already do this when memcg is enabled, so this patch generalizes the
obj_cgroup * vector in struct pcpu_chunk by creating a pcpuobj_ext
type, which we will be adding to in an upcoming patch - similar to the
previous slabobj_ext patch.
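
The resulting change in access pattern, taken from
pcpu_memcg_post_alloc_hook() in the diff below:

	/* before: bare obj_cgroup * vector */
	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;

	/* after: the same slot, now one field of the extension vector */
	chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = objcg;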

Signed-off-by: Kent Overstreet <kent.overstr...@linux.dev>
Signed-off-by: Suren Baghdasaryan <sur...@google.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Dennis Zhou <den...@kernel.org>
Cc: Tejun Heo <t...@kernel.org>
Cc: Christoph Lameter <c...@linux.com>
Cc: linux...@kvack.org
---
 mm/percpu-internal.h | 19 +++++++++++++++++--
 mm/percpu.c          | 30 +++++++++++++++---------------
 2 files changed, 32 insertions(+), 17 deletions(-)

diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index cdd0aa597a81..e62d582f4bf3 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -32,6 +32,16 @@ struct pcpu_block_md {
        int                     nr_bits;        /* total bits responsible for */
 };
 
+struct pcpuobj_ext {
+#ifdef CONFIG_MEMCG_KMEM
+       struct obj_cgroup       *cgroup;
+#endif
+};
+
+#ifdef CONFIG_MEMCG_KMEM
+#define NEED_PCPUOBJ_EXT
+#endif
+
 struct pcpu_chunk {
 #ifdef CONFIG_PERCPU_STATS
        int                     nr_alloc;       /* # of allocations */
@@ -64,8 +74,8 @@ struct pcpu_chunk {
        int                     end_offset;     /* additional area required to
                                                   have the region end page
                                                   aligned */
-#ifdef CONFIG_MEMCG_KMEM
-       struct obj_cgroup       **obj_cgroups;  /* vector of object cgroups */
+#ifdef NEED_PCPUOBJ_EXT
+       struct pcpuobj_ext      *obj_exts;      /* vector of object extensions */
 #endif
 
        int                     nr_pages;       /* # of pages served by this chunk */
@@ -74,6 +84,11 @@ struct pcpu_chunk {
        unsigned long           populated[];    /* populated bitmap */
 };
 
+static inline bool need_pcpuobj_ext(void)
+{
+       return !mem_cgroup_kmem_disabled();
+}
+
 extern spinlock_t pcpu_lock;
 
 extern struct list_head *pcpu_chunk_lists;
diff --git a/mm/percpu.c b/mm/percpu.c
index a7665de8485f..5a6202acffa3 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1392,9 +1392,9 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
                panic("%s: Failed to allocate %zu bytes\n", __func__,
                      alloc_size);
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef NEED_PCPUOBJ_EXT
        /* first chunk is free to use */
-       chunk->obj_cgroups = NULL;
+       chunk->obj_exts = NULL;
 #endif
        pcpu_init_md_blocks(chunk);
 
@@ -1463,12 +1463,12 @@ static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
        if (!chunk->md_blocks)
                goto md_blocks_fail;
 
-#ifdef CONFIG_MEMCG_KMEM
-       if (!mem_cgroup_kmem_disabled()) {
-               chunk->obj_cgroups =
+#ifdef NEED_PCPUOBJ_EXT
+       if (need_pcpuobj_ext()) {
+               chunk->obj_exts =
                        pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
-                                       sizeof(struct obj_cgroup *), gfp);
-               if (!chunk->obj_cgroups)
+                                       sizeof(struct pcpuobj_ext), gfp);
+               if (!chunk->obj_exts)
                        goto objcg_fail;
        }
 #endif
@@ -1480,7 +1480,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
 
        return chunk;
 
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef NEED_PCPUOBJ_EXT
 objcg_fail:
        pcpu_mem_free(chunk->md_blocks);
 #endif
@@ -1498,8 +1498,8 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 {
        if (!chunk)
                return;
-#ifdef CONFIG_MEMCG_KMEM
-       pcpu_mem_free(chunk->obj_cgroups);
+#ifdef NEED_PCPUOBJ_EXT
+       pcpu_mem_free(chunk->obj_exts);
 #endif
        pcpu_mem_free(chunk->md_blocks);
        pcpu_mem_free(chunk->bound_map);
@@ -1648,8 +1648,8 @@ static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
        if (!objcg)
                return;
 
-       if (likely(chunk && chunk->obj_cgroups)) {
-               chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
+       if (likely(chunk && chunk->obj_exts)) {
+               chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = objcg;
 
                rcu_read_lock();
                mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
@@ -1665,13 +1665,13 @@ static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
 {
        struct obj_cgroup *objcg;
 
-       if (unlikely(!chunk->obj_cgroups))
+       if (unlikely(!chunk->obj_exts))
                return;
 
-       objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
+       objcg = chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup;
        if (!objcg)
                return;
-       chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
+       chunk->obj_exts[off >> PCPU_MIN_ALLOC_SHIFT].cgroup = NULL;
 
        obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
 
-- 
2.42.0.758.gaed0368e0e-goog

