From: Boqun Feng <[email protected]>

In order to use preempt_count() to track the interrupt disable nesting
level, __preempt_count_{add,sub}_return() are introduced. As their
names suggest, these primitives return the new value of preempt_count()
after changing it. The following example shows their usage in
local_interrupt_disable():

        // increment the HARDIRQ_DISABLE count
        new_count = __preempt_count_add_return(HARDIRQ_DISABLE_OFFSET);

        // if this is the first-time increment, disable interrupts at
        // the hardware level.
        if ((new_count & HARDIRQ_DISABLE_MASK) == HARDIRQ_DISABLE_OFFSET) {
                local_irq_save(flags);
                raw_cpu_write(local_interrupt_disable_state.flags, flags);
        }
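
The matching enable path would decrement the count with
__preempt_count_sub_return() and only re-enable interrupts at the
hardware level once the outermost nesting level is dropped. A sketch,
assuming local_interrupt_enable() mirrors the example above (the exact
shape of that function is not part of this patch):

        // decrease the HARDIRQ_DISABLE count
        new_count = __preempt_count_sub_return(HARDIRQ_DISABLE_OFFSET);

        // if that was the outermost decrement, restore the saved
        // interrupt state at the hardware level.
        if ((new_count & HARDIRQ_DISABLE_MASK) == 0)
                local_irq_restore(raw_cpu_read(local_interrupt_disable_state.flags));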

Having these primitives avoids a separate read of preempt_count() after
changing it on certain architectures.
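
For comparison, without the _return variants a caller would have to
issue a second read of preempt_count() after the update; a sketch of
that less efficient shape:

        __preempt_count_add(HARDIRQ_DISABLE_OFFSET);
        // a second load of preempt_count(), which the new primitive
        // folds into the modification itself on e.g. x86
        new_count = preempt_count();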

Signed-off-by: Boqun Feng <[email protected]>
Link: https://patch.msgid.link/[email protected]
---
 arch/arm64/include/asm/preempt.h | 18 ++++++++++++++++++
 arch/s390/include/asm/preempt.h  | 10 ++++++++++
 arch/x86/include/asm/preempt.h   | 10 ++++++++++
 include/asm-generic/preempt.h    | 14 ++++++++++++++
 4 files changed, 52 insertions(+)

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 932ea4b62042..0dd8221d1bef 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -55,6 +55,24 @@ static inline void __preempt_count_sub(int val)
        WRITE_ONCE(current_thread_info()->preempt.count, pc);
 }
 
+static inline int __preempt_count_add_return(int val)
+{
+       u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+       pc += val;
+       WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+       return pc;
+}
+
+static inline int __preempt_count_sub_return(int val)
+{
+       u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+       pc -= val;
+       WRITE_ONCE(current_thread_info()->preempt.count, pc);
+
+       return pc;
+}
+
 static inline bool __preempt_count_dec_and_test(void)
 {
        struct thread_info *ti = current_thread_info();
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 6e5821bb047e..0a25d4648b4c 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -139,6 +139,16 @@ static __always_inline bool should_resched(int preempt_offset)
        return unlikely(READ_ONCE(get_lowcore()->preempt_count) == preempt_offset);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+       return val + __atomic_add(val, &get_lowcore()->preempt_count);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+       return __preempt_count_add_return(-val);
+}
+
 #define init_task_preempt_count(p)     do { } while (0)
 /* Deferred to CPU bringup time */
 #define init_idle_preempt_count(p, cpu)        do { } while (0)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 578441db09f0..1220656f3370 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -85,6 +85,16 @@ static __always_inline void __preempt_count_sub(int val)
        raw_cpu_add_4(__preempt_count, -val);
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+       return raw_cpu_add_return_4(__preempt_count, val);
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+       return raw_cpu_add_return_4(__preempt_count, -val);
+}
+
 /*
  * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
  * a decrement which hits zero means we have no preempt_count and should
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 51f8f3881523..c8683c046615 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -59,6 +59,20 @@ static __always_inline void __preempt_count_sub(int val)
        *preempt_count_ptr() -= val;
 }
 
+static __always_inline int __preempt_count_add_return(int val)
+{
+       *preempt_count_ptr() += val;
+
+       return *preempt_count_ptr();
+}
+
+static __always_inline int __preempt_count_sub_return(int val)
+{
+       *preempt_count_ptr() -= val;
+
+       return *preempt_count_ptr();
+}
+
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
        /*
-- 
2.50.1 (Apple Git-155)

