Move the percpu_up_read() slowpath out of the inline function into a new
__percpu_up_read() to avoid binary size increase from adding a
tracepoint to an inlined function.

Signed-off-by: Dmitry Ilvokhin <[email protected]>
---
 include/linux/percpu-rwsem.h  | 15 +++------------
 kernel/locking/percpu-rwsem.c | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index c8cb010d655e..39d5bf8e6562 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -107,6 +107,8 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
        return ret;
 }
 
+extern void __percpu_up_read(struct percpu_rw_semaphore *sem);
+
 static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
        rwsem_release(&sem->dep_map, _RET_IP_);
@@ -118,18 +120,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
        if (likely(rcu_sync_is_idle(&sem->rss))) {
                this_cpu_dec(*sem->read_count);
        } else {
-               /*
-                * slowpath; reader will only ever wake a single blocked
-                * writer.
-                */
-               smp_mb(); /* B matches C */
-               /*
-                * In other words, if they see our decrement (presumably to
-                * aggregate zero, as that is the only time it matters) they
-                * will also see our critical section.
-                */
-               this_cpu_dec(*sem->read_count);
-               rcuwait_wake_up(&sem->writer);
+               __percpu_up_read(sem);
        }
        preempt_enable();
 }
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index ef234469baac..f3ee7a0d6047 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -288,3 +288,21 @@ void percpu_up_write(struct percpu_rw_semaphore *sem)
        rcu_sync_exit(&sem->rss);
 }
 EXPORT_SYMBOL_GPL(percpu_up_write);
+
+void __percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+       lockdep_assert_preemption_disabled();
+       /*
+        * slowpath; reader will only ever wake a single blocked
+        * writer.
+        */
+       smp_mb(); /* B matches C */
+       /*
+        * In other words, if they see our decrement (presumably to
+        * aggregate zero, as that is the only time it matters) they
+        * will also see our critical section.
+        */
+       this_cpu_dec(*sem->read_count);
+       rcuwait_wake_up(&sem->writer);
+}
+EXPORT_SYMBOL_GPL(__percpu_up_read);
-- 
2.52.0


Reply via email to