The common code between both versions of __local_bh_disable_ip(), whether
CONFIG_TRACE_IRQFLAGS is on or off, is going to grow in order to
support vector masking granularity.

Merge these versions together to prepare for that.

Reviewed-by: David S. Miller <[email protected]>
Signed-off-by: Frederic Weisbecker <[email protected]>
Cc: Mauro Carvalho Chehab <[email protected]>
Cc: Joel Fernandes <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Pavan Kondeti <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
---
 include/linux/bottom_half.h | 10 ----------
 kernel/softirq.c            | 10 ++++++----
 2 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 240419382978..ef9e4c752f56 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -28,17 +28,7 @@ enum
 
 #define SOFTIRQ_DATA_INIT (SOFTIRQ_ALL_MASK << SOFTIRQ_ENABLED_SHIFT)
 
-
-
-#ifdef CONFIG_TRACE_IRQFLAGS
 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
-#else
-static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
-{
-       preempt_count_add(cnt);
-       barrier();
-}
-#endif
 
 static inline void local_bh_disable(void)
 {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 60d1706ad47e..40aa915c5e4a 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -104,14 +104,14 @@ static bool ksoftirqd_running(unsigned long pending)
  * softirq and whether we just have bh disabled.
  */
 
-#ifdef CONFIG_TRACE_IRQFLAGS
 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
+#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned long flags;
 
-       WARN_ON_ONCE(in_irq());
-
        raw_local_irq_save(flags);
+#endif
+       WARN_ON_ONCE(in_irq());
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
@@ -125,7 +125,10 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_off(ip);
+
+#ifdef CONFIG_TRACE_IRQFLAGS
        raw_local_irq_restore(flags);
+#endif
 
        if (preempt_count() == cnt) {
 #ifdef CONFIG_DEBUG_PREEMPT
@@ -135,7 +138,6 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
        }
 }
 EXPORT_SYMBOL(__local_bh_disable_ip);
-#endif /* CONFIG_TRACE_IRQFLAGS */
 
 static void __local_bh_enable_no_softirq(unsigned int cnt)
 {
-- 
2.21.0

Reply via email to