Currently, kvfree_rcu_barrier() flushes RCU sheaves across all slab
caches when a cache is destroyed. This is unnecessary; only the RCU
sheaves belonging to the cache being destroyed need to be flushed.

As suggested by Vlastimil Babka, introduce a weaker form of
kvfree_rcu_barrier() that operates on a specific slab cache.

Factor out flush_rcu_sheaves_on_cache() from flush_all_rcu_sheaves() and
call it from flush_all_rcu_sheaves() and kvfree_rcu_barrier_on_cache().

Call kvfree_rcu_barrier_on_cache() instead of kvfree_rcu_barrier() on
cache destruction.
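
To illustrate the effect (a hypothetical example, not part of this
patch): a module that frees objects from its own cache with kfree_rcu()
and then destroys that cache no longer waits for every cache's RCU
sheaves to be flushed, only for its own:

  struct foo {
          struct rcu_head rcu;
          int data;
  };

  static struct kmem_cache *foo_cache;

  static void foo_free(struct foo *f)
  {
          /* May be batched into foo_cache's per-CPU rcu_free sheaf. */
          kfree_rcu(f, rcu);
  }

  static void __exit foo_exit(void)
  {
          /*
           * kmem_cache_destroy() now performs
           * kvfree_rcu_barrier_on_cache(foo_cache) instead of the
           * global kvfree_rcu_barrier().
           */
          kmem_cache_destroy(foo_cache);
  }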

The performance benefit is evaluated on a 12-core, 24-thread AMD Ryzen
5900X machine (1 socket) by loading the slub_kunit module.

Before:
  Total calls: 19
  Average latency (us): 18127
  Total time (us): 344414

After:
  Total calls: 19
  Average latency (us): 10066
  Total time (us): 191264
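
One way such numbers can be collected (a rough sketch, assuming a
ktime-based wrapper around the barrier; not necessarily the harness
actually used):

  /* Hypothetical instrumentation, for illustration only. */
  static u64 total_us;
  static unsigned int calls;

  static void measured_barrier(struct kmem_cache *s)
  {
          ktime_t t0 = ktime_get();

          kvfree_rcu_barrier_on_cache(s);
          total_us += ktime_us_delta(ktime_get(), t0);
          calls++;        /* average latency = total_us / calls */
  }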

Two performance regressions have been reported:
  - stress module loader test's runtime increases by 50-60% (Daniel)
  - internal graphics test's runtime on Tegra23 increases by 35% (Jon)

They are fixed by this change.

Suggested-by: Vlastimil Babka <[email protected]>
Fixes: ec66e0d59952 ("slab: add sheaf support for batching kfree_rcu() operations")
Cc: <[email protected]>
Link: https://lore.kernel.org/linux-mm/[email protected]
Reported-and-tested-by: Daniel Gomez <[email protected]>
Closes: https://lore.kernel.org/linux-mm/[email protected]
Reported-and-tested-by: Jon Hunter <[email protected]>
Closes: https://lore.kernel.org/linux-mm/[email protected]
Signed-off-by: Harry Yoo <[email protected]>
---

v2 -> v3:
- Addressed Suren's comment [1], thanks!

[1] https://lore.kernel.org/linux-mm/CAJuCfpE+g65Dm8-r=psdjqf_o1rfbg62dozx4me1mb+ottu...@mail.gmail.com

 include/linux/slab.h |  7 ++++++
 mm/slab.h            |  1 +
 mm/slab_common.c     | 52 +++++++++++++++++++++++++++++------------
 mm/slub.c            | 55 ++++++++++++++++++++++++--------------------
 4 files changed, 75 insertions(+), 40 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index cf443f064a66..2482992248dc 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -1150,10 +1150,17 @@ static inline void kvfree_rcu_barrier(void)
        rcu_barrier();
 }
 
+static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+       rcu_barrier();
+}
+
 static inline void kfree_rcu_scheduler_running(void) { }
 #else
 void kvfree_rcu_barrier(void);
 
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
+
 void kfree_rcu_scheduler_running(void);
 #endif
 
diff --git a/mm/slab.h b/mm/slab.h
index f730e012553c..e767aa7e91b0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -422,6 +422,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
 
 bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
 void flush_all_rcu_sheaves(void);
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
 
 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 84dfff4f7b1f..dd8a49d6f9cc 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
                return;
 
        /* in-flight kfree_rcu()'s may include objects from our cache */
-       kvfree_rcu_barrier();
+       kvfree_rcu_barrier_on_cache(s);
 
        if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
            (s->flags & SLAB_TYPESAFE_BY_RCU)) {
@@ -2038,25 +2038,13 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 
-/**
- * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
- *
- * Note that a single argument of kvfree_rcu() call has a slow path that
- * triggers synchronize_rcu() following by freeing a pointer. It is done
- * before the return from the function. Therefore for any single-argument
- * call that will result in a kfree() to a cache that is to be destroyed
- * during module exit, it is developer's responsibility to ensure that all
- * such calls have returned before the call to kmem_cache_destroy().
- */
-void kvfree_rcu_barrier(void)
+static inline void __kvfree_rcu_barrier(void)
 {
        struct kfree_rcu_cpu_work *krwp;
        struct kfree_rcu_cpu *krcp;
        bool queued;
        int i, cpu;
 
-       flush_all_rcu_sheaves();
-
        /*
         * Firstly we detach objects and queue them over an RCU-batch
         * for all CPUs. Finally queued works are flushed for each CPU.
@@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void)
                }
        }
 }
+
+/**
+ * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
+ *
+ * Note that a single argument of kvfree_rcu() call has a slow path that
+ * triggers synchronize_rcu() following by freeing a pointer. It is done
+ * before the return from the function. Therefore for any single-argument
+ * call that will result in a kfree() to a cache that is to be destroyed
+ * during module exit, it is developer's responsibility to ensure that all
+ * such calls have returned before the call to kmem_cache_destroy().
+ */
+void kvfree_rcu_barrier(void)
+{
+       flush_all_rcu_sheaves();
+       __kvfree_rcu_barrier();
+}
 EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
 
+/**
+ * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
+ *                               specific slab cache.
+ * @s: slab cache to wait for
+ *
+ * See the description of kvfree_rcu_barrier() for details.
+ */
+void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
+{
+       if (s->cpu_sheaves)
+               flush_rcu_sheaves_on_cache(s);
+       /*
+        * TODO: Introduce a version of __kvfree_rcu_barrier() that works
+        * on a specific slab cache.
+        */
+       __kvfree_rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
+
 static unsigned long
 kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
@@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void)
 }
 
 #endif /* CONFIG_KVFREE_RCU_BATCHED */
-
diff --git a/mm/slub.c b/mm/slub.c
index 785e25a14999..7cec2220712b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4118,42 +4118,47 @@ static void flush_rcu_sheaf(struct work_struct *w)
 
 
 /* needed for kvfree_rcu_barrier() */
-void flush_all_rcu_sheaves(void)
+void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
 {
        struct slub_flush_work *sfw;
-       struct kmem_cache *s;
        unsigned int cpu;
 
-       cpus_read_lock();
-       mutex_lock(&slab_mutex);
+       mutex_lock(&flush_lock);
 
-       list_for_each_entry(s, &slab_caches, list) {
-               if (!s->cpu_sheaves)
-                       continue;
+       for_each_online_cpu(cpu) {
+               sfw = &per_cpu(slub_flush, cpu);
 
-               mutex_lock(&flush_lock);
+               /*
+                * we don't check if rcu_free sheaf exists - racing
+                * __kfree_rcu_sheaf() might have just removed it.
+                * by executing flush_rcu_sheaf() on the cpu we make
+                * sure the __kfree_rcu_sheaf() finished its call_rcu()
+                */
 
-               for_each_online_cpu(cpu) {
-                       sfw = &per_cpu(slub_flush, cpu);
+               INIT_WORK(&sfw->work, flush_rcu_sheaf);
+               sfw->s = s;
+               queue_work_on(cpu, flushwq, &sfw->work);
+       }
 
-                       /*
-                        * we don't check if rcu_free sheaf exists - racing
-                        * __kfree_rcu_sheaf() might have just removed it.
-                        * by executing flush_rcu_sheaf() on the cpu we make
-                        * sure the __kfree_rcu_sheaf() finished its call_rcu()
-                        */
+       for_each_online_cpu(cpu) {
+               sfw = &per_cpu(slub_flush, cpu);
+               flush_work(&sfw->work);
+       }
 
-                       INIT_WORK(&sfw->work, flush_rcu_sheaf);
-                       sfw->s = s;
-                       queue_work_on(cpu, flushwq, &sfw->work);
-               }
+       mutex_unlock(&flush_lock);
+}
 
-               for_each_online_cpu(cpu) {
-                       sfw = &per_cpu(slub_flush, cpu);
-                       flush_work(&sfw->work);
-               }
+void flush_all_rcu_sheaves(void)
+{
+       struct kmem_cache *s;
+
+       cpus_read_lock();
+       mutex_lock(&slab_mutex);
 
-               mutex_unlock(&flush_lock);
+       list_for_each_entry(s, &slab_caches, list) {
+               if (!s->cpu_sheaves)
+                       continue;
+               flush_rcu_sheaves_on_cache(s);
        }
 
        mutex_unlock(&slab_mutex);
-- 
2.43.0

