From: Vegard Nossum <[email protected]>

To enable loading a crypto module early, before the file system is
ready, add a new helper function, load_crypto_module_mem(), which can
load a kernel module from a byte array in memory. When loading this
way, signature verification is skipped, because the crypto machinery
needed to verify signatures is not yet available at that point. To mark
a module loaded this way, add a new module loader flag,
MODULE_INIT_CRYPTO_FROM_MEM.
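
As a purely illustrative sketch (not part of this patch), a hypothetical
early-boot caller could look roughly like the snippet below. The
_binary_* symbols and the initcall level are assumptions for the
example; they stand in for whatever mechanism embeds the .ko bytes into
the kernel image (e.g. an .incbin stub):

  /* Hypothetical example only -- not added by this patch. */
  extern const char _binary_crypto_mod_ko_start[];  /* assumed embedded blob */
  extern const char _binary_crypto_mod_ko_end[];

  static int __init early_crypto_module_load(void)
  {
          size_t size = _binary_crypto_mod_ko_end - _binary_crypto_mod_ko_start;

          /* Loads the ELF image straight from memory, bypassing module_sig_check(). */
          return load_crypto_module_mem(_binary_crypto_mod_ko_start, size);
  }
  /* Assumed ordering: after param_sysfs_init(), which this patch moves to arch_initcall. */
  subsys_initcall(early_crypto_module_load);
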
Co-developed-by: Saeed Mirzamohammadi <[email protected]>
Signed-off-by: Vegard Nossum <[email protected]>
[With code change and revise commit message]
Signed-off-by: Jay Wang <[email protected]>
---
 include/linux/module.h      |   2 +
 include/uapi/linux/module.h |   5 ++
 kernel/module/main.c        | 100 +++++++++++++++++++++++++-----------
 kernel/params.c             |   3 +-
 4 files changed, 79 insertions(+), 31 deletions(-)

diff --git a/include/linux/module.h b/include/linux/module.h
index 20ddfd97630d..22a1d8459ce4 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -589,6 +589,8 @@ struct module {
 
 #ifdef CONFIG_MODULES
 
+extern int load_crypto_module_mem(const char *mem, size_t size);
+
 /* Get/put a kernel symbol (calls must be symmetric) */
 void *__symbol_get(const char *symbol);
 void *__symbol_get_gpl(const char *symbol);
diff --git a/include/uapi/linux/module.h b/include/uapi/linux/module.h
index 03a33ffffcba..30e9a7813eac 100644
--- a/include/uapi/linux/module.h
+++ b/include/uapi/linux/module.h
@@ -7,4 +7,9 @@
 #define MODULE_INIT_IGNORE_VERMAGIC	2
 #define MODULE_INIT_COMPRESSED_FILE	4
 
+#ifdef __KERNEL__
+/* Internal flags */
+#define MODULE_INIT_CRYPTO_FROM_MEM	30
+#endif
+
 #endif /* _UAPI_LINUX_MODULE_H */
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 710ee30b3bea..2914e7619766 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -2572,11 +2572,14 @@ static void module_augment_kernel_taints(struct module *mod, struct load_info *i
 
 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 {
-	const char *modmagic = get_modinfo(info, "vermagic");
+	const char *modmagic = NULL;
 	int err;
 
-	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
-		modmagic = NULL;
+	if (flags & MODULE_INIT_CRYPTO_FROM_MEM)
+		return 0;
+
+	if (!(flags & MODULE_INIT_IGNORE_VERMAGIC))
+		modmagic = get_modinfo(info, "vermagic");
 
 	/* This is allowed: modprobe --force will invalidate it. */
 	if (!modmagic) {
@@ -3007,7 +3010,7 @@ module_param(async_probe, bool, 0644);
  * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
  * helper command 'lx-symbols'.
  */
-static noinline int do_init_module(struct module *mod)
+static noinline int do_init_module(struct module *mod, int flags)
 {
 	int ret = 0;
 	struct mod_initfree *freeinit;
@@ -3070,8 +3073,10 @@ static noinline int do_init_module(struct module *mod)
 	ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base,
 			mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size);
 	mutex_lock(&module_mutex);
-	/* Drop initial reference. */
-	module_put(mod);
+	/* Drop initial reference for normal modules to allow unloading.
+	 * Keep reference for MODULE_INIT_CRYPTO_FROM_MEM modules to prevent unloading. */
+	if (!(flags & MODULE_INIT_CRYPTO_FROM_MEM))
+		module_put(mod);
 	trim_init_extable(mod);
 #ifdef CONFIG_KALLSYMS
 	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
@@ -3347,31 +3352,17 @@ static int early_mod_check(struct load_info *info, int flags)
 /*
  * Allocate and load the module: note that size of section 0 is always
  * zero, and we rely on this for optional sections.
+ *
+ * NOTE: module signature verification must have been done already.
  */
-static int load_module(struct load_info *info, const char __user *uargs,
-		       int flags)
+static int _load_module(struct load_info *info, const char __user *uargs,
+		       int flags)
 {
 	struct module *mod;
 	bool module_allocated = false;
 	long err = 0;
 	char *after_dashes;
 
-	/*
-	 * Do the signature check (if any) first. All that
-	 * the signature check needs is info->len, it does
-	 * not need any of the section info. That can be
-	 * set up later. This will minimize the chances
-	 * of a corrupt module causing problems before
-	 * we even get to the signature check.
-	 *
-	 * The check will also adjust info->len by stripping
-	 * off the sig length at the end of the module, making
-	 * checks against info->len more correct.
-	 */
-	err = module_sig_check(info, flags);
-	if (err)
-		goto free_copy;
-
 	/*
 	 * Do basic sanity checks against the ELF header and
 	 * sections. Cache useful sections and set the
@@ -3405,7 +3396,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	 * We are tainting your kernel if your module gets into
 	 * the modules linked list somehow.
 	 */
-	module_augment_kernel_taints(mod, info);
+	if (!(flags & MODULE_INIT_CRYPTO_FROM_MEM))
+		module_augment_kernel_taints(mod, info);
 
 	/* To avoid stressing percpu allocator, do this once we're unique. */
 	err = percpu_modalloc(mod, info);
@@ -3452,7 +3444,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	flush_module_icache(mod);
 	/* Now copy in args */
-	mod->args = strndup_user(uargs, ~0UL >> 1);
+	if ((flags & MODULE_INIT_CRYPTO_FROM_MEM))
+		mod->args = kstrdup("", GFP_KERNEL);
+	else
+		mod->args = strndup_user(uargs, ~0UL >> 1);
+
 	if (IS_ERR(mod->args)) {
 		err = PTR_ERR(mod->args);
 		goto free_arch_cleanup;
 	}
@@ -3500,13 +3496,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	if (codetag_load_module(mod))
 		goto sysfs_cleanup;
 
-	/* Get rid of temporary copy. */
-	free_copy(info, flags);
-
 	/* Done! */
 	trace_module_load(mod);
 
-	return do_init_module(mod);
+	return do_init_module(mod, flags);
 
 sysfs_cleanup:
 	mod_sysfs_teardown(mod);
@@ -3562,7 +3555,54 @@ static int load_module(struct load_info *info, const char __user *uargs,
 		audit_log_kern_module(info->name ? info->name : "?");
 		mod_stat_bump_becoming(info, flags);
 	}
 
+	return err;
+}
+
+/*
+ * Load crypto module from kernel memory without signature check.
+ */
+int __init load_crypto_module_mem(const char *mem, size_t size)
+{
+	int err;
+	struct load_info info = { };
+
+	if (!mem) {
+		pr_err("load_crypto_module_mem: mem parameter is NULL\n");
+		return -EINVAL;
+	}
+
+	info.sig_ok = true;
+	info.hdr = (Elf_Ehdr *) mem;
+	info.len = size;
+
+	err = _load_module(&info, NULL, MODULE_INIT_CRYPTO_FROM_MEM);
+	return err;
+}
+
+static int load_module(struct load_info *info, const char __user *uargs,
+		       int flags)
+{
+	int err;
+
+	/*
+	 * Do the signature check (if any) first. All that
+	 * the signature check needs is info->len, it does
+	 * not need any of the section info. That can be
+	 * set up later. This will minimize the chances
+	 * of a corrupt module causing problems before
+	 * we even get to the signature check.
+	 *
+	 * The check will also adjust info->len by stripping
+	 * off the sig length at the end of the module, making
+	 * checks against info->len more correct.
+	 */
+	err = module_sig_check(info, flags);
+	if (!err)
+		err = _load_module(info, uargs, flags);
+
+	/* Get rid of temporary copy. */
 	free_copy(info, flags);
+
 	return err;
 }
diff --git a/kernel/params.c b/kernel/params.c
index 7c2242f64bf0..b0671d752ff1 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -967,7 +967,8 @@ static int __init param_sysfs_init(void)
 
 	return 0;
 }
-subsys_initcall(param_sysfs_init);
+/* Use arch_initcall instead of subsys_initcall for early module loading */
+arch_initcall(param_sysfs_init);
 
 /*
  * param_sysfs_builtin_init - add sysfs version and parameter
-- 
2.47.3
