KASAN poisons stack redzones on function entry and unpoisons them prior to
return. So when a CPU goes offline, the idle task's stack is left poisoned.
When the CPU comes back online, it re-enters the kernel via a different path
and starts using the idle task's stack again. It can therefore hit stale
poison values, which results in false-positive KASAN splats.

This patch registers a CPU hotplug notifier which unpoisons the idle task's
stack prior to onlining the CPU.
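
Unpoisoning here means clearing the KASAN shadow bytes that cover the idle
task's stack. As a simplified sketch of what kasan_unpoison_shadow() does for
a region whose size is a multiple of KASAN_SHADOW_SCALE_SIZE (as THREAD_SIZE
is) -- the helper name below is illustrative only:

	/* Zero the shadow bytes covering [addr, addr + size): each shadow
	 * byte describes KASAN_SHADOW_SCALE_SIZE (8) bytes of real memory,
	 * and a zero shadow byte marks those bytes as fully addressable. */
	static void unpoison_range(const void *addr, size_t size)
	{
		memset(kasan_mem_to_shadow(addr), 0,
		       size >> KASAN_SHADOW_SCALE_SHIFT);
	}

The hotplug callback added below applies this to the whole idle stack
(task_stack_page(tidle), THREAD_SIZE) on CPU_UP_PREPARE, before the onlined
CPU starts running on that stack.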

Signed-off-by: Andrey Ryabinin <[email protected]>
---
 include/linux/sched.h |  6 ++++++
 kernel/smpboot.h      |  2 --
 mm/kasan/kasan.c      | 33 +++++++++++++++++++++++++++------
 3 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a10494a..18e526d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -337,6 +337,12 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
+#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
+extern struct task_struct *idle_thread_get(unsigned int cpu);
+#else
+static inline struct task_struct *idle_thread_get(unsigned int cpu) { return 
NULL; }
+#endif
+
 extern cpumask_var_t cpu_isolated_map;
 
 extern int runqueue_is_locked(int cpu);
diff --git a/kernel/smpboot.h b/kernel/smpboot.h
index 72415a0..eebf9ec 100644
--- a/kernel/smpboot.h
+++ b/kernel/smpboot.h
@@ -4,11 +4,9 @@
 struct task_struct;
 
 #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
-struct task_struct *idle_thread_get(unsigned int cpu);
 void idle_thread_set_boot_cpu(void);
 void idle_threads_init(void);
 #else
-static inline struct task_struct *idle_thread_get(unsigned int cpu) { return NULL; }
 static inline void idle_thread_set_boot_cpu(void) { }
 static inline void idle_threads_init(void) { }
 #endif
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index bc0a8d8..c4ffd82 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -16,6 +16,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #define DISABLE_BRANCH_PROFILING
 
+#include <linux/cpu.h>
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -537,16 +538,36 @@ static int kasan_mem_notifier(struct notifier_block *nb,
 {
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
 }
+#endif
+
+static int kasan_cpu_callback(struct notifier_block *nfb,
+                       unsigned long action, void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+
+       if ((action == CPU_UP_PREPARE) || (action == CPU_UP_PREPARE_FROZEN)) {
+               struct task_struct *tidle = idle_thread_get(cpu);
+               kasan_unpoison_shadow(task_stack_page(tidle), THREAD_SIZE);
+       }
+       return NOTIFY_OK;
+}
 
-static int __init kasan_memhotplug_init(void)
+static struct notifier_block kasan_cpu_notifier =
 {
-       pr_err("WARNING: KASAN doesn't support memory hot-add\n");
-       pr_err("Memory hot-add will be disabled\n");
+       .notifier_call = kasan_cpu_callback,
+};
 
-       hotplug_memory_notifier(kasan_mem_notifier, 0);
+static int __init kasan_notifiers_init(void)
+{
+       if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
+               pr_err("WARNING: KASAN doesn't support memory hot-add\n");
+               pr_err("Memory hot-add will be disabled\n");
+               hotplug_memory_notifier(kasan_mem_notifier, 0);
+       }
+
+       register_hotcpu_notifier(&kasan_cpu_notifier);
 
        return 0;
 }
 
-module_init(kasan_memhotplug_init);
-#endif
+module_init(kasan_notifiers_init);
-- 
2.4.10
