Provide the same horrible semantics as smp_call_function_single_async();
doing so allows skipping a bunch of atomic ops.

API-wise this is horrible crap, as it relies on external serialization.
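
As a sketch of the intended usage (not part of this patch; every name
below is made up for illustration): all queueing of a given irq_work
must be serialized externally, here by a raw spinlock, which also gives
the preemption-disable the smp_processor_id() check relies on. If the
work is still pending, queueing returns -EBUSY and the already-queued
run covers the request.

  static void kick_func(struct irq_work *work)
  {
          /* Runs in IRQ context on the CPU the work was queued to. */
  }

  static DEFINE_PER_CPU(struct irq_work, kick_work) = IRQ_WORK_INIT(kick_func);
  static DEFINE_RAW_SPINLOCK(kick_lock);  /* the external serialization */

  /* Caller guarantees @cpu != smp_processor_id(). */
  static void kick_cpu(int cpu)
  {
          /*
           * The raw spinlock both disables preemption and serializes
           * all queueing of the work; that serialization is what this
           * variant relies on instead of the atomic claim done by
           * irq_work_queue_remote().
           */
          raw_spin_lock(&kick_lock);
          if (irq_work_queue_remote_static(cpu, per_cpu_ptr(&kick_work, cpu)))
                  ; /* -EBUSY: still pending, the queued run suffices */
          raw_spin_unlock(&kick_lock);
  }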

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
---
 include/linux/irq_work.h |    1 +
 kernel/irq_work.c        |   19 +++++++++++++++++++
 2 files changed, 20 insertions(+)

--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -68,6 +68,7 @@ static inline bool irq_work_needs_cpu(vo
 
 #ifdef CONFIG_SMP
 extern int irq_work_queue_remote(int cpu, struct irq_work *work);
+extern int irq_work_queue_remote_static(int cpu, struct irq_work *work);
 extern void irq_work_single(void *arg);
 #endif
 
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -63,6 +63,9 @@ void irq_work_single(void *arg)
        work->func(work);
        lockdep_irq_work_exit(flags);
 
+       if (!(flags & IRQ_WORK_BUSY))
+               return;
+
        /*
         * Clear the BUSY bit, if set, and return to the free state if no-one
         * else claimed it meanwhile.
@@ -108,6 +111,22 @@ int irq_work_queue_remote(int cpu, struc
 
        return 0;
 }
+
+int irq_work_queue_remote_static(int cpu, struct irq_work *work)
+{
+       /*
+        * smp_processor_id() ensures preemption is disabled in the caller.
+        */
+       WARN_ON_ONCE(cpu == smp_processor_id());
+
+       if (atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING)
+               return -EBUSY;
+
+       atomic_set(&work->node.a_flags, IRQ_WORK_PENDING);
+       __smp_call_single_queue(cpu, &work->node.llist);
+
+       return 0;
+}
 
 #endif /* CONFIG_SMP */
 

