The cpu_slab field 'partial' is only used when CONFIG_SLUB_CPU_PARTIAL is
set; when that option is unset we can therefore save a pointer's worth of
space per cpu for every slab cache.

This patch wraps cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL and wraps its
sysfs use too. Accessor macros are introduced so that call sites need no
#ifdefs of their own.

Signed-off-by: Wei Yang <richard.weiy...@gmail.com>

---
v2: define slub_percpu_partial() to make the code more elegant
---
 include/linux/slub_def.h | 19 +++++++++++++++++++
 mm/slub.c                | 16 +++++++++-------
 2 files changed, 28 insertions(+), 7 deletions(-)
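
For reference only (this note sits below the '---' and is not part of the
commit): a minimal userspace sketch of the accessor pattern, using
simplified stand-ins for struct page and struct kmem_cache_cpu and a local
CONFIG_SLUB_CPU_PARTIAL toggle in place of the kernel's Kconfig machinery.
It illustrates why call sites such as ___slab_alloc() compile unchanged in
both configurations:

#include <stdio.h>

#define CONFIG_SLUB_CPU_PARTIAL 1	/* comment out to build the !CONFIG variant */

struct page {
	struct page *next;		/* singly linked list of partial slabs */
};

struct kmem_cache_cpu {
	struct page *page;		/* slab we are allocating from */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;		/* field exists only in this config */
#endif
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)
#define slub_set_percpu_partial(c, p)	((c)->partial = (p)->next)
#else
#define slub_percpu_partial(c)		NULL
#define slub_set_percpu_partial(c, p)	do { } while (0)
#endif

int main(void)
{
	struct page p2 = { NULL };
	struct page p1 = { &p2 };
	struct kmem_cache_cpu c = { NULL };

#ifdef CONFIG_SLUB_CPU_PARTIAL
	c.partial = &p1;		/* seed a two-entry partial list */
#endif

	/* Identical call-site code in both configs: no #ifdef needed. */
	while (slub_percpu_partial(&c)) {
		struct page *page = slub_percpu_partial(&c);

		slub_set_percpu_partial(&c, page);	/* pop the head */
		printf("took partial slab %p\n", (void *)page);
	}
	return 0;
}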

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ec13aab32647..f882a34bb9aa 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -41,12 +41,31 @@ struct kmem_cache_cpu {
        void **freelist;        /* Pointer to next available object */
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct page *partial;   /* Partially allocated frozen slabs */
+#endif
 #ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
 };
 
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+#define slub_percpu_partial(c)         ((c)->partial)
+
+#define slub_set_percpu_partial(c, p)          \
+({                                             \
+       slub_percpu_partial(c) = (p)->next;     \
+})
+
+#define slub_percpu_partial_read_once(c)     READ_ONCE(slub_percpu_partial(c))
+#else
+#define slub_percpu_partial(c)                 NULL
+
+#define slub_set_percpu_partial(c, p)
+
+#define slub_percpu_partial_read_once(c)       NULL
+#endif /* CONFIG_SLUB_CPU_PARTIAL */
+
 /*
  * Word size structure that can be atomically updated or read and that
  * contains both the order and the number of objects that a slab of the
diff --git a/mm/slub.c b/mm/slub.c
index 7f4bc7027ed5..ae6166533261 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2302,7 +2302,7 @@ static bool has_cpu_slab(int cpu, void *info)
        struct kmem_cache *s = info;
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-       return c->page || c->partial;
+       return c->page || slub_percpu_partial(c);
 }
 
 static void flush_all(struct kmem_cache *s)
@@ -2568,9 +2568,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
 new_slab:
 
-       if (c->partial) {
-               page = c->page = c->partial;
-               c->partial = page->next;
+       if (slub_percpu_partial(c)) {
+               page = c->page = slub_percpu_partial(c);
+               slub_set_percpu_partial(c, page);
                stat(s, CPU_PARTIAL_ALLOC);
                c->freelist = NULL;
                goto redo;
@@ -4760,7 +4760,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        total += x;
                        nodes[node] += x;
 
-                       page = READ_ONCE(c->partial);
+                       page = slub_percpu_partial_read_once(c);
                        if (page) {
                                node = page_to_nid(page);
                                if (flags & SO_TOTAL)
@@ -4988,7 +4988,8 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
        int len;
 
        for_each_online_cpu(cpu) {
-               struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+               struct page *page =
+                       slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
                if (page) {
                        pages += page->pages;
@@ -5000,7 +5001,8 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 
 #ifdef CONFIG_SMP
        for_each_online_cpu(cpu) {
-               struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+               struct page *page =
+                       slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
 
                if (page && len < PAGE_SIZE - 20)
                        len += sprintf(buf + len, " C%d=%d(%d)", cpu,
-- 
2.11.0
