Add idle notifier callback to x86_32.

Wire enter_idle()/exit_idle() into the x86_32 idle paths (cpu_idle() and
apm_cpu_idle()), mirroring the existing x86_64 implementation, so that
IDLE_START/IDLE_END events are delivered through notify_idle(). The stub
versions of enter_idle()/exit_idle() in <asm/idle.h> are dropped in favour
of the real declarations, and HAVE_IDLE_NOTIFIER is now selected
unconditionally on x86.
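
For reference, a subscriber attaches to these events with an ordinary
notifier block. The sketch below is illustrative only and is not part of
this patch; it assumes the generic layer introduced earlier in this series
exposes idle_notifier_register()/idle_notifier_unregister() through
<linux/idle.h> (names modeled on the existing x86_64 API), alongside the
IDLE_START/IDLE_END events dispatched by notify_idle().

/* Illustrative consumer module (sketch, not part of this patch). */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/idle.h>

static int example_idle_notify(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	/* May run from the idle task or from interrupt context. */
	switch (action) {
	case IDLE_START:
		/* CPU is about to enter its low-power idle routine. */
		break;
	case IDLE_END:
		/* CPU has left idle (interrupt or other wakeup). */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_notify,
};

static int __init example_idle_init(void)
{
	/* Assumed API, modeled on the existing x86_64 idle notifier. */
	idle_notifier_register(&example_idle_nb);
	return 0;
}

static void __exit example_idle_exit(void)
{
	idle_notifier_unregister(&example_idle_nb);
}

module_init(example_idle_init);
module_exit(example_idle_exit);
MODULE_LICENSE("GPL");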

Changelog since v1:
* Add CONFIG_HAVE_IDLE_NOTIFIER.

Signed-off-by: Mathieu Desnoyers <[email protected]>
---
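Note for reviewers (below the cut line, dropped by git-am): the x86 code in
this patch only calls notify_idle(); the notifier chain itself lives in the
generic CONFIG_HAVE_IDLE_NOTIFIER layer added earlier in the series. That
layer is assumed to look roughly like the x86_64 code it generalizes, i.e.
an atomic notifier chain along these lines (a sketch under that assumption,
not the actual implementation):

/* Sketch of the assumed generic layer, modeled on x86_64. */
#include <linux/notifier.h>
#include <linux/module.h>

#define IDLE_START	1
#define IDLE_END	2

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void notify_idle(unsigned long val)
{
	/* Atomic chain: callable with interrupts disabled. */
	atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
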
 arch/x86/Kconfig             |    2 +-
 arch/x86/include/asm/idle.h  |    5 -----
 arch/x86/kernel/apm_32.c     |    6 ++++++
 arch/x86/kernel/process_32.c |   33 +++++++++++++++++++++++++++++++++
 4 files changed, 40 insertions(+), 6 deletions(-)

Index: linux.trees.git/arch/x86/kernel/process_32.c
===================================================================
--- linux.trees.git.orig/arch/x86/kernel/process_32.c
+++ linux.trees.git/arch/x86/kernel/process_32.c
@@ -38,6 +38,8 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
+#include <linux/notifier.h>
+#include <linux/idle.h>
 
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -61,6 +63,30 @@
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
+static DEFINE_PER_CPU(unsigned char, is_idle);
+
+void enter_idle(void)
+{
+       percpu_write(is_idle, 1);
+       notify_idle(IDLE_START);
+}
+
+static void __exit_idle(void)
+{
+       if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
+               return;
+       notify_idle(IDLE_END);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+       /* idle loop has pid 0 */
+       if (current->pid)
+               return;
+       __exit_idle();
+}
+
 /*
  * Return saved PC of a blocked thread.
  */
@@ -109,10 +135,17 @@ void cpu_idle(void)
                                play_dead();
 
                        local_irq_disable();
+                       enter_idle();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
                        pm_idle();
                        start_critical_timings();
+                       /*
+                        * In many cases the interrupt that ended idle
+                        * has already called exit_idle. But some idle loops can
+                        * be woken up without interrupt.
+                        */
+                       __exit_idle();
 
                        trace_power_end(smp_processor_id());
                }
Index: linux.trees.git/arch/x86/include/asm/idle.h
===================================================================
--- linux.trees.git.orig/arch/x86/include/asm/idle.h
+++ linux.trees.git/arch/x86/include/asm/idle.h
@@ -1,13 +1,8 @@
 #ifndef _ASM_X86_IDLE_H
 #define _ASM_X86_IDLE_H
 
-#ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
-#else /* !CONFIG_X86_64 */
-static inline void enter_idle(void) { }
-static inline void exit_idle(void) { }
-#endif /* CONFIG_X86_64 */
 
 void c1e_remove_cpu(int cpu);
 
Index: linux.trees.git/arch/x86/kernel/apm_32.c
===================================================================
--- linux.trees.git.orig/arch/x86/kernel/apm_32.c
+++ linux.trees.git/arch/x86/kernel/apm_32.c
@@ -227,6 +227,7 @@
 #include <linux/suspend.h>
 #include <linux/kthread.h>
 #include <linux/jiffies.h>
+#include <linux/idle.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -947,10 +948,15 @@ recalc:
                                break;
                        }
                }
+               enter_idle();
                if (original_pm_idle)
                        original_pm_idle();
                else
                        default_idle();
+               /* In many cases the interrupt that ended idle
+                  has already called exit_idle. But some idle
+                  loops can be woken up without interrupt. */
+               exit_idle();
                local_irq_disable();
                jiffies_since_last_check = jiffies - last_jiffies;
                if (jiffies_since_last_check > idle_period)
Index: linux.trees.git/arch/x86/Kconfig
===================================================================
--- linux.trees.git.orig/arch/x86/Kconfig
+++ linux.trees.git/arch/x86/Kconfig
@@ -45,7 +45,7 @@ config X86
        select HAVE_ARCH_TRACEHOOK
        select HAVE_GENERIC_DMA_COHERENT if X86_32
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
-       select HAVE_IDLE_NOTIFIER if X86_64
+       select HAVE_IDLE_NOTIFIER
        select USER_STACKTRACE_SUPPORT
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_DMA_API_DEBUG
-- 
Mathieu Desnoyers
Operating System Efficiency R&D Consultant
EfficiOS Inc.
http://www.efficios.com
