Add a blocking notifier chain that lets subsystems be notified before
kexec execution. This enables modules to perform necessary cleanup or
validation before the system transitions to a new kernel, or to block
the kexec entirely if it cannot proceed under the current conditions.
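
A subsystem would register a notifier and return notifier_from_errno()
to veto the transition. A minimal sketch of the intended usage (the
example_kexec_blocker() callback and the example_device_busy()
condition below are hypothetical, for illustration only):

  static int example_kexec_blocker(struct notifier_block *nb,
                                   unsigned long action, void *data)
  {
          /* Refuse kexec while the (hypothetical) device is busy. */
          if (example_device_busy())
                  return notifier_from_errno(-EBUSY);

          return NOTIFY_DONE;
  }

  static struct notifier_block example_kexec_nb = {
          .notifier_call = example_kexec_blocker,
  };

  /* at module init / exit */
  kexec_block_notifier_register(&example_kexec_nb);
  ...
  kexec_block_notifier_unregister(&example_kexec_nb);

With notifier_to_errno() in kexec_check_blockers(), the errno carried
by notifier_from_errno() is what kernel_kexec() returns to its caller
when the transition is blocked.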

Signed-off-by: Stanislav Kinsburskii <[email protected]>
---
 include/linux/kexec.h |    6 ++++++
 kernel/kexec_core.c   |   24 ++++++++++++++++++++++++
 2 files changed, 30 insertions(+)

diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index ff7e231b0485..311037d30f9e 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -35,6 +35,7 @@ extern note_buf_t __percpu *crash_notes;
 #include <linux/ioport.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/notifier.h>
 #include <asm/kexec.h>
 #include <linux/crash_core.h>
 
@@ -532,10 +533,13 @@ extern bool kexec_file_dbg_print;
 
 extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
 extern void kimage_unmap_segment(void *buffer);
+extern int kexec_block_notifier_register(struct notifier_block *nb);
+extern int kexec_block_notifier_unregister(struct notifier_block *nb);
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
 struct task_struct;
 struct kimage;
+struct notifier_block;
 static inline void __crash_kexec(struct pt_regs *regs) { }
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
@@ -543,6 +547,8 @@ static inline int kexec_crash_loaded(void) { return 0; }
 static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
 { return NULL; }
 static inline void kimage_unmap_segment(void *buffer) { }
+static inline int kexec_block_notifier_register(struct notifier_block *nb) { return 0; }
+static inline int kexec_block_notifier_unregister(struct notifier_block *nb) { return 0; }
 #define kexec_in_progress false
 #endif /* CONFIG_KEXEC_CORE */
 
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 0f92acdd354d..1e86a6f175f0 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -57,6 +57,20 @@ bool kexec_in_progress = false;
 
 bool kexec_file_dbg_print;
 
+static BLOCKING_NOTIFIER_HEAD(kexec_block_list);
+
+int kexec_block_notifier_register(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(&kexec_block_list, nb);
+}
+EXPORT_SYMBOL_GPL(kexec_block_notifier_register);
+
+int kexec_block_notifier_unregister(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(&kexec_block_list, nb);
+}
+EXPORT_SYMBOL_GPL(kexec_block_notifier_unregister);
+
 /*
  * When kexec transitions to the new kernel there is a one-to-one
  * mapping between physical and virtual addresses.  On processors
@@ -1124,6 +1138,12 @@ bool kexec_load_permitted(int kexec_image_type)
        return true;
 }
 
+static int kexec_check_blockers(void)
+{
+       /* Notify subsystems of the impending kexec; a notifier may veto it */
+       return notifier_to_errno(blocking_notifier_call_chain(&kexec_block_list, 0, NULL));
+}
+
 /*
  * Move into place and start executing a preloaded standalone
  * executable.  If nothing was preloaded return an error.
@@ -1139,6 +1159,10 @@ int kernel_kexec(void)
                goto Unlock;
        }
 
+       error = kexec_check_blockers();
+       if (error)
+               goto Unlock;
+
        error = liveupdate_reboot();
        if (error)
                goto Unlock;


