Commit-ID:  0fd7d86285290ccebc0dc6eb536b6b043dd6a1e4
Gitweb:     https://git.kernel.org/tip/0fd7d86285290ccebc0dc6eb536b6b043dd6a1e4
Author:     Frederic Weisbecker <frede...@kernel.org>
AuthorDate: Tue, 8 May 2018 15:38:20 +0200
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Mon, 14 May 2018 11:25:27 +0200

softirq/core: Consolidate default local_softirq_pending() implementations

Consolidate and optimize the default softirq mask API implementations:
per-CPU operations are expected to be faster, and a few architectures
already rely on them to implement local_softirq_pending() and the
related accessors/mutators. Those architectures will be migrated to
the new generic code.
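
As an illustration (not part of this patch), an architecture that keeps
its softirq pending bits in its own per-CPU variable could hook into the
new generic accessors simply by overriding local_softirq_pending_ref.
A minimal sketch, assuming a hypothetical per-CPU variable named
arch_softirq_pending:

  /* Hypothetical asm/hardirq.h snippet -- illustrative sketch only. */
  #include <linux/percpu.h>

  /* Architecture-private per-CPU softirq pending mask (made-up name). */
  DECLARE_PER_CPU(unsigned int, arch_softirq_pending);

  /*
   * Point the generic accessors at this variable: with the override in
   * place, <linux/interrupt.h> expands local_softirq_pending() to
   * __this_cpu_read(arch_softirq_pending), and set_softirq_pending() /
   * or_softirq_pending() to the matching __this_cpu_write() /
   * __this_cpu_or() operations on it.
   */
  #define local_softirq_pending_ref arch_softirq_pending

Architectures that do not override anything are unaffected: the fallback
keeps pointing at irq_stat.__softirq_pending.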

Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
Acked-by: Thomas Gleixner <t...@linutronix.de>
Acked-by: Peter Zijlstra <pet...@infradead.org>
Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: David S. Miller <da...@davemloft.net>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: Heiko Carstens <heiko.carst...@de.ibm.com>
Cc: Helge Deller <del...@gmx.de>
Cc: James E.J. Bottomley <j...@parisc-linux.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Martin Schwidefsky <schwidef...@de.ibm.com>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Paul Mackerras <pau...@samba.org>
Cc: Rich Felker <dal...@libc.org>
Cc: Sebastian Andrzej Siewior <bige...@linutronix.de>
Cc: Tony Luck <tony.l...@intel.com>
Cc: Yoshinori Sato <ys...@users.sourceforge.jp>
Link:       http://lkml.kernel.org/r/1525786706-22846-6-git-send-email-frede...@kernel.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 include/linux/interrupt.h   | 14 ++++++++++++++
 include/linux/irq_cpustat.h |  6 +-----
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5426627f9c55..7a11f73c5c3b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -432,11 +432,25 @@ extern bool force_irqthreads;
 #define force_irqthreads       (0)
 #endif
 
+#ifndef local_softirq_pending
+
+#ifndef local_softirq_pending_ref
+#define local_softirq_pending_ref irq_stat.__softirq_pending
+#endif
+
+#define local_softirq_pending()        (__this_cpu_read(local_softirq_pending_ref))
+#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x)))
+#define or_softirq_pending(x)  (__this_cpu_or(local_softirq_pending_ref, (x)))
+
+#else /* local_softirq_pending */
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
 #endif
 
+#endif /* local_softirq_pending */
+
 /* Some architectures might implement lazy enabling/disabling of
  * interrupts. In some cases, such as stop_machine, we might want
  * to ensure that after a local_irq_disable(), interrupts have
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
index ddea03c7c39d..6e8895cd4d92 100644
--- a/include/linux/irq_cpustat.h
+++ b/include/linux/irq_cpustat.h
@@ -22,11 +22,7 @@ DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);    /* defined in asm/hardirq.h */
 #define __IRQ_STAT(cpu, member)        (per_cpu(irq_stat.member, cpu))
 #endif
 
-  /* arch independent irq_stat fields */
-#define local_softirq_pending() \
-       __IRQ_STAT(smp_processor_id(), __softirq_pending)
-
-  /* arch dependent irq_stat fields */
+/* arch dependent irq_stat fields */
 #define nmi_count(cpu)         __IRQ_STAT((cpu), __nmi_count)  /* i386 */
 
 #endif /* __irq_cpustat_h */
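
For context, here is a simplified sketch (not verbatim kernel code) of
how the softirq core consumes these accessors; the *_sketch names are
placeholders for the real __raise_softirq_irqoff()/__do_softirq() logic:

  #include <linux/interrupt.h>

  /*
   * Mark softirq 'nr' pending on the local CPU, as __raise_softirq_irqoff()
   * does, by setting its bit in this CPU's pending mask.
   */
  static void raise_softirq_sketch(unsigned int nr)
  {
          or_softirq_pending(1UL << nr);
  }

  /* Consume the pending mask, roughly as __do_softirq() does. */
  static void run_pending_softirqs_sketch(void)
  {
          u32 pending = local_softirq_pending();  /* snapshot this CPU's mask */

          set_softirq_pending(0);                 /* clear before handling */

          /* ... invoke the handler for each bit set in 'pending' ... */
          (void)pending;
  }

Because the accessors expand to __this_cpu_*() operations, callers are
expected to run with preemption (or interrupts) disabled, as the softirq
core already does in both contexts above.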
