We already have the `async' printk_emergency_begin(), which returns immediately, with no guarantee that `printk_kthread' has stopped by the time it returns. Add a `sync' version, which waits for `printk_kthread' to stop.
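For illustration only, a minimal sketch of how a caller that must not race with `printk_kthread' could use the new pair. The function below is hypothetical and not part of this patch; it only assumes the declarations added to include/linux/console.h:

    #include <linux/kernel.h>
    #include <linux/console.h>

    /* Hypothetical caller, illustration only. */
    static void enter_quiet_console_path(void)
    {
            /*
             * Unlike printk_emergency_begin(), which only sets the
             * emergency flag and returns, the sync variant also parks
             * printk_kthread, so a 0 return means the kthread is idle.
             */
            if (printk_emergency_begin_sync())
                    pr_warn("printk_kthread unavailable, emergency flag set only\n");

            /* ... work that must not run concurrently with printk_kthread ... */

            printk_emergency_end_sync();
    }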
Signed-off-by: Sergey Senozhatsky <[email protected]>
---
 include/linux/console.h |  2 ++
 kernel/printk/printk.c  | 52 ++++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 49 insertions(+), 5 deletions(-)

diff --git a/include/linux/console.h b/include/linux/console.h
index 07005db4c788..8ce29b2381d2 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -189,6 +189,8 @@ extern void resume_console(void);
 
 extern void printk_emergency_begin(void);
 extern void printk_emergency_end(void);
+extern int printk_emergency_begin_sync(void);
+extern int printk_emergency_end_sync(void);
 
 int mda_console_init(void);
 void prom_con_init(void);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 3d4df3f02854..16f5f5c7e541 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -472,6 +472,13 @@ static inline bool printk_offloading_enabled(void)
                 atomic_read(&printk_emergency) == 0;
 }
 
+static inline bool printk_kthread_should_stop(bool emergency)
+{
+        if (current != printk_kthread)
+                return false;
+        return emergency || kthread_should_park();
+}
+
 /*
  * This disables printing offloading and instead attempts
  * to do the usual console_trylock()->console_unlock().
@@ -492,6 +499,34 @@ void printk_emergency_end(void)
 }
 EXPORT_SYMBOL_GPL(printk_emergency_end);
 
+/*
+ * This disables printing offloading and instead attempts
+ * to do the usual console_trylock()->console_unlock().
+ *
+ * Note, this does wait for printk_kthread to stop.
+ */
+int printk_emergency_begin_sync(void)
+{
+        atomic_inc(&printk_emergency);
+        if (!printk_kthread)
+                return -EINVAL;
+
+        return kthread_park(printk_kthread);
+}
+EXPORT_SYMBOL_GPL(printk_emergency_begin_sync);
+
+/* This re-enables printk_kthread offloading. */
+int printk_emergency_end_sync(void)
+{
+        atomic_dec(&printk_emergency);
+        if (!printk_kthread)
+                return -EINVAL;
+
+        kthread_unpark(printk_kthread);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(printk_emergency_end_sync);
+
 /*
  * Adjust max timeout value in the following order:
  * a) 1/2 of RCU stall timeout - it is usually the largest
@@ -531,13 +566,14 @@ static inline bool should_handoff_printing(u64 printing_start_ts)
         static struct task_struct *printing_task;
         static u64 printing_elapsed;
         u64 now = local_clock();
+        bool emergency = !printk_offloading_enabled();
+
+        /* We are in emergency mode, disable printk_kthread */
+        if (printk_kthread_should_stop(emergency))
+                return true;
 
-        if (!printk_offloading_enabled()) {
-                /* We are in emergency mode, disable printk_kthread */
-                if (current == printk_kthread)
-                        return true;
+        if (emergency)
                 return false;
-        }
 
         /* A new task - reset the counters. */
         if (printing_task != current) {
@@ -2017,6 +2053,12 @@ EXPORT_SYMBOL_GPL(printk_emergency_begin);
 void printk_emergency_end(void) {}
 EXPORT_SYMBOL_GPL(printk_emergency_end);
 
+int printk_emergency_begin_sync(void) { return 0; }
+EXPORT_SYMBOL_GPL(printk_emergency_begin_sync);
+
+int printk_emergency_end_sync(void) { return 0; }
+EXPORT_SYMBOL_GPL(printk_emergency_end_sync);
+
 static bool should_handoff_printing(u64 printing_start_ts) { return false; }
 #endif /* CONFIG_PRINTK */
-- 
2.15.1
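
A note on the parking side, for context: kthread_park() in printk_emergency_begin_sync() only returns once the target thread has actually parked, which relies on the printing loop cooperating. Below is a simplified sketch of that standard pattern; it is not the real printk_kthread function from this series, just the shape such a loop needs to have:

    #include <linux/kthread.h>

    /* Simplified stand-in for the printing kthread loop, illustration only. */
    static int printk_kthread_sketch(void *arg)
    {
            while (!kthread_should_stop()) {
                    if (kthread_should_park()) {
                            /*
                             * should_handoff_printing() returns true once a
                             * park is requested, so the loop gets back here
                             * promptly; the parked state is what
                             * kthread_park() waits for.
                             */
                            kthread_parkme();
                            continue;
                    }

                    /* ... wait for new messages and push them to consoles ... */
            }
            return 0;
    }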

