Split it up as a preparatory step to move the heavy lifting out of
interrupt context.

Signed-off-by: Thomas Gleixner <[email protected]>
---
 kernel/time/posix-cpu-timers.c |   38 ++++++++++++++++++++++----------------
 1 file changed, 22 insertions(+), 16 deletions(-)

--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1080,27 +1080,12 @@ static inline bool fastpath_timer_check(
        return false;
 }
 
-/*
- * This is called from the timer interrupt handler.  The irq handler has
- * already updated our counts.  We need to check if any timers fire now.
- * Interrupts are disabled.
- */
-void run_posix_cpu_timers(void)
+static void __run_posix_cpu_timers(struct task_struct *tsk)
 {
-       struct task_struct *tsk = current;
        struct k_itimer *timer, *next;
        unsigned long flags;
        LIST_HEAD(firing);
 
-       lockdep_assert_irqs_disabled();
-
-       /*
-        * The fast path checks that there are no expired thread or thread
-        * group timers.  If that's so, just return.
-        */
-       if (!fastpath_timer_check(tsk))
-               return;
-
        lockdep_posixtimer_enter();
        if (!lock_task_sighand(tsk, &flags)) {
                lockdep_posixtimer_exit();
@@ -1151,6 +1136,27 @@ void run_posix_cpu_timers(void)
 }
 
 /*
+ * This is called from the timer interrupt handler.  The irq handler has
+ * already updated our counts.  We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+void run_posix_cpu_timers(void)
+{
+       struct task_struct *tsk = current;
+
+       lockdep_assert_irqs_disabled();
+
+       /*
+        * The fast path checks that there are no expired thread or thread
+        * group timers.  If that's so, just return.
+        */
+       if (!fastpath_timer_check(tsk))
+               return;
+
+       __run_posix_cpu_timers(tsk);
+}
+
+/*
  * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
  */

Reply via email to