Re: [PATCH v3 04/13] mm/execmem, arch: convert remaining overrides of module_alloc to execmem

2023-11-07 Thread Will Deacon
On Mon, Oct 30, 2023 at 09:00:53AM +0200, Mike Rapoport wrote:
> On Thu, Oct 26, 2023 at 11:24:39AM +0100, Will Deacon wrote:
> > On Thu, Oct 26, 2023 at 11:58:00AM +0300, Mike Rapoport wrote:
> > > On Mon, Oct 23, 2023 at 06:14:20PM +0100, Will Deacon wrote:
> > > > On Mon, Sep 18, 2023 at 10:29:46AM +0300, Mike Rapoport wrote:
> > > > > diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> > > > > index dd851297596e..cd6320de1c54 100644
> > > > > --- a/arch/arm64/kernel/module.c
> > > > > +++ b/arch/arm64/kernel/module.c
> 
> ...
> 
> > > > > - if (module_direct_base) {
> > > > > - p = __vmalloc_node_range(size, MODULE_ALIGN,
> > > > > -  module_direct_base,
> > > > > -  module_direct_base + SZ_128M,
> > > > > -  GFP_KERNEL | __GFP_NOWARN,
> > > > > -  PAGE_KERNEL, 0, NUMA_NO_NODE,
> > > > > -  __builtin_return_address(0));
> > > > > - }
> > > > > + module_init_limits();
> > > > 
> > > > Hmm, this used to be run from subsys_initcall(), but now you're running
> > > > it _really_ early, before random_init(), so randomization of the module
> > > > space is no longer going to be very random if we don't have early 
> > > > entropy
> > > > from the firmware or the CPU, which is likely to be the case on most 
> > > > SoCs.
> > > 
> > > Well, it will be as random as KASLR. Won't that be enough?
> > 
> > I don't think that's true -- we have the 'kaslr-seed' property for KASLR,
> > but I'm not seeing anything like that for the module randomisation and I
> > also don't see why we need to set these limits so early.
> 
> x86 needs execmem initialized before ftrace_init() so I thought it would be
> best to setup execmem along with most of MM in mm_core_init().
> 
> I'll move execmem initialization for !x86 to a later point, say
> core_initcall.

Thanks, Mike.

Will



Re: [PATCH v3 04/13] mm/execmem, arch: convert remaining overrides of module_alloc to execmem

2023-10-30 Thread Mike Rapoport
On Thu, Oct 26, 2023 at 11:24:39AM +0100, Will Deacon wrote:
> On Thu, Oct 26, 2023 at 11:58:00AM +0300, Mike Rapoport wrote:
> > On Mon, Oct 23, 2023 at 06:14:20PM +0100, Will Deacon wrote:
> > > On Mon, Sep 18, 2023 at 10:29:46AM +0300, Mike Rapoport wrote:
> > > > diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> > > > index dd851297596e..cd6320de1c54 100644
> > > > --- a/arch/arm64/kernel/module.c
> > > > +++ b/arch/arm64/kernel/module.c

...

> > > > -   if (module_direct_base) {
> > > > -   p = __vmalloc_node_range(size, MODULE_ALIGN,
> > > > -module_direct_base,
> > > > -module_direct_base + SZ_128M,
> > > > -GFP_KERNEL | __GFP_NOWARN,
> > > > -PAGE_KERNEL, 0, NUMA_NO_NODE,
> > > > -__builtin_return_address(0));
> > > > -   }
> > > > +   module_init_limits();
> > > 
> > > Hmm, this used to be run from subsys_initcall(), but now you're running
> > > it _really_ early, before random_init(), so randomization of the module
> > > space is no longer going to be very random if we don't have early entropy
> > > from the firmware or the CPU, which is likely to be the case on most SoCs.
> > 
> > Well, it will be as random as KASLR. Won't that be enough?
> 
> I don't think that's true -- we have the 'kaslr-seed' property for KASLR,
> but I'm not seeing anything like that for the module randomisation and I
> also don't see why we need to set these limits so early.

x86 needs execmem initialized before ftrace_init() so I thought it would be
best to setup execmem along with most of MM in mm_core_init().

I'll move execmem initialization for !x86 to a later point, say
core_initcall.
 
> Will

-- 
Sincerely yours,
Mike.



Re: [PATCH v3 04/13] mm/execmem, arch: convert remaining overrides of module_alloc to execmem

2023-10-26 Thread Will Deacon
On Thu, Oct 26, 2023 at 11:58:00AM +0300, Mike Rapoport wrote:
> On Mon, Oct 23, 2023 at 06:14:20PM +0100, Will Deacon wrote:
> > On Mon, Sep 18, 2023 at 10:29:46AM +0300, Mike Rapoport wrote:
> > > diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> > > index dd851297596e..cd6320de1c54 100644
> > > --- a/arch/arm64/kernel/module.c
> > > +++ b/arch/arm64/kernel/module.c
> > > @@ -20,6 +20,7 @@
> > >  #include 
> > >  #include 
> > >  #include 
> > > +#include <linux/execmem.h>
> > >  
> > >  #include 
> > >  #include 
> > > @@ -108,46 +109,38 @@ static int __init module_init_limits(void)
> > >  
> > >   return 0;
> > >  }
> > > -subsys_initcall(module_init_limits);
> > >  
> > > -void *module_alloc(unsigned long size)
> > > +static struct execmem_params execmem_params __ro_after_init = {
> > > + .ranges = {
> > > + [EXECMEM_DEFAULT] = {
> > > + .flags = EXECMEM_KASAN_SHADOW,
> > > + .alignment = MODULE_ALIGN,
> > > + },
> > > + },
> > > +};
> > > +
> > > +struct execmem_params __init *execmem_arch_params(void)
> > >  {
> > > - void *p = NULL;
> > > + struct execmem_range *r = &execmem_params.ranges[EXECMEM_DEFAULT];
> > >  
> > > - /*
> > > -  * Where possible, prefer to allocate within direct branch range of the
> > > -  * kernel such that no PLTs are necessary.
> > > -  */
> > 
> > Why are you removing this comment? I think you could just move it next
> > to the part where we set a 128MiB range.
>  
> Oops, my bad. Will add it back.

Thanks.

> > > - if (module_direct_base) {
> > > - p = __vmalloc_node_range(size, MODULE_ALIGN,
> > > -  module_direct_base,
> > > -  module_direct_base + SZ_128M,
> > > -  GFP_KERNEL | __GFP_NOWARN,
> > > -  PAGE_KERNEL, 0, NUMA_NO_NODE,
> > > -  __builtin_return_address(0));
> > > - }
> > > + module_init_limits();
> > 
> > Hmm, this used to be run from subsys_initcall(), but now you're running
> > it _really_ early, before random_init(), so randomization of the module
> > space is no longer going to be very random if we don't have early entropy
> > from the firmware or the CPU, which is likely to be the case on most SoCs.
> 
> Well, it will be as random as KASLR. Won't that be enough?

I don't think that's true -- we have the 'kaslr-seed' property for KASLR,
but I'm not seeing anything like that for the module randomisation and I
also don't see why we need to set these limits so early.

Will



Re: [PATCH v3 04/13] mm/execmem, arch: convert remaining overrides of module_alloc to execmem

2023-10-26 Thread Mike Rapoport
Hi Will,

On Mon, Oct 23, 2023 at 06:14:20PM +0100, Will Deacon wrote:
> Hi Mike,
> 
> On Mon, Sep 18, 2023 at 10:29:46AM +0300, Mike Rapoport wrote:
> > From: "Mike Rapoport (IBM)" 
> > 
> > Extend execmem parameters to accommodate more complex overrides of
> > module_alloc() by architectures.
> > 
> > This includes specification of a fallback range required by arm, arm64
> > and powerpc and support for allocation of KASAN shadow required by
> > arm64, s390 and x86.
> > 
> > The core implementation of execmem_alloc() takes care of suppressing
> > warnings when the initial allocation fails but there is a fallback range
> > defined.
> > 
> > Signed-off-by: Mike Rapoport (IBM) 
> > ---
> >  arch/arm/kernel/module.c | 38 -
> >  arch/arm64/kernel/module.c   | 57 ++--
> >  arch/powerpc/kernel/module.c | 52 ++---
> >  arch/s390/kernel/module.c| 52 +++--
> >  arch/x86/kernel/module.c | 64 +++-
> >  include/linux/execmem.h  | 14 
> >  mm/execmem.c | 43 ++--
> >  7 files changed, 167 insertions(+), 153 deletions(-)
> 
> [...]
> 
> > diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> > index dd851297596e..cd6320de1c54 100644
> > --- a/arch/arm64/kernel/module.c
> > +++ b/arch/arm64/kernel/module.c
> > @@ -20,6 +20,7 @@
> >  #include 
> >  #include 
> >  #include 
> > +#include <linux/execmem.h>
> >  
> >  #include 
> >  #include 
> > @@ -108,46 +109,38 @@ static int __init module_init_limits(void)
> >  
> > return 0;
> >  }
> > -subsys_initcall(module_init_limits);
> >  
> > -void *module_alloc(unsigned long size)
> > +static struct execmem_params execmem_params __ro_after_init = {
> > +   .ranges = {
> > +   [EXECMEM_DEFAULT] = {
> > +   .flags = EXECMEM_KASAN_SHADOW,
> > +   .alignment = MODULE_ALIGN,
> > +   },
> > +   },
> > +};
> > +
> > +struct execmem_params __init *execmem_arch_params(void)
> >  {
> > -   void *p = NULL;
> > +   struct execmem_range *r = &execmem_params.ranges[EXECMEM_DEFAULT];
> >  
> > -   /*
> > -* Where possible, prefer to allocate within direct branch range of the
> > -* kernel such that no PLTs are necessary.
> > -*/
> 
> Why are you removing this comment? I think you could just move it next
> to the part where we set a 128MiB range.
 
Oops, my bad. Will add it back.

> > -   if (module_direct_base) {
> > -   p = __vmalloc_node_range(size, MODULE_ALIGN,
> > -module_direct_base,
> > -module_direct_base + SZ_128M,
> > -GFP_KERNEL | __GFP_NOWARN,
> > -PAGE_KERNEL, 0, NUMA_NO_NODE,
> > -__builtin_return_address(0));
> > -   }
> > +   module_init_limits();
> 
> Hmm, this used to be run from subsys_initcall(), but now you're running
> it _really_ early, before random_init(), so randomization of the module
> space is no longer going to be very random if we don't have early entropy
> from the firmware or the CPU, which is likely to be the case on most SoCs.

Well, it will be as random as KASLR. Won't that be enough?
 
> > diff --git a/mm/execmem.c b/mm/execmem.c
> > index f25a5e064886..a8c2f44d0133 100644
> > --- a/mm/execmem.c
> > +++ b/mm/execmem.c
> > @@ -11,12 +11,46 @@ static void *execmem_alloc(size_t size, struct 
> > execmem_range *range)
> >  {
> > unsigned long start = range->start;
> > unsigned long end = range->end;
> > +   unsigned long fallback_start = range->fallback_start;
> > +   unsigned long fallback_end = range->fallback_end;
> > unsigned int align = range->alignment;
> > pgprot_t pgprot = range->pgprot;
> > +   bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
> > +   unsigned long vm_flags  = VM_FLUSH_RESET_PERMS;
> > +   bool fallback  = !!fallback_start;
> > +   gfp_t gfp_flags = GFP_KERNEL;
> > +   void *p;
> >  
> > -   return __vmalloc_node_range(size, align, start, end,
> > -  GFP_KERNEL, pgprot, VM_FLUSH_RESET_PERMS,
> > -  NUMA_NO_NODE, __builtin_return_address(0));
> > +   if (PAGE_ALIGN(size) > (end - start))
> > +   return NULL;
> > +
> > +   if (kasan)
> > +   vm_flags |= VM_DEFER_KMEMLEAK;
> 
> Hmm, I don't think we passed this before on arm64, should we have done?

It was there on arm64 before commit 8339f7d8e178 ("arm64: module: remove
old !KASAN_VMALLOC logic").
There's no need to pass VM_DEFER_KMEMLEAK when KASAN_VMALLOC is enabled and
arm64 always selects KASAN_VMALLOC with KASAN.

And for the generic case, I should have made the condition to check for
KASAN_VMALLOC as well.
 
> Will

-- 
Sincerely yours,
Mike.



Re: [PATCH v3 04/13] mm/execmem, arch: convert remaining overrides of module_alloc to execmem

2023-10-23 Thread Will Deacon
Hi Mike,

On Mon, Sep 18, 2023 at 10:29:46AM +0300, Mike Rapoport wrote:
> From: "Mike Rapoport (IBM)" 
> 
> Extend execmem parameters to accommodate more complex overrides of
> module_alloc() by architectures.
> 
> This includes specification of a fallback range required by arm, arm64
> and powerpc and support for allocation of KASAN shadow required by
> arm64, s390 and x86.
> 
> The core implementation of execmem_alloc() takes care of suppressing
> warnings when the initial allocation fails but there is a fallback range
> defined.
> 
> Signed-off-by: Mike Rapoport (IBM) 
> ---
>  arch/arm/kernel/module.c | 38 -
>  arch/arm64/kernel/module.c   | 57 ++--
>  arch/powerpc/kernel/module.c | 52 ++---
>  arch/s390/kernel/module.c| 52 +++--
>  arch/x86/kernel/module.c | 64 +++-
>  include/linux/execmem.h  | 14 
>  mm/execmem.c | 43 ++--
>  7 files changed, 167 insertions(+), 153 deletions(-)

[...]

> diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> index dd851297596e..cd6320de1c54 100644
> --- a/arch/arm64/kernel/module.c
> +++ b/arch/arm64/kernel/module.c
> @@ -20,6 +20,7 @@
>  #include 
>  #include 
>  #include 
> +#include <linux/execmem.h>
>  
>  #include 
>  #include 
> @@ -108,46 +109,38 @@ static int __init module_init_limits(void)
>  
>   return 0;
>  }
> -subsys_initcall(module_init_limits);
>  
> -void *module_alloc(unsigned long size)
> +static struct execmem_params execmem_params __ro_after_init = {
> + .ranges = {
> + [EXECMEM_DEFAULT] = {
> + .flags = EXECMEM_KASAN_SHADOW,
> + .alignment = MODULE_ALIGN,
> + },
> + },
> +};
> +
> +struct execmem_params __init *execmem_arch_params(void)
>  {
> - void *p = NULL;
> + struct execmem_range *r = &execmem_params.ranges[EXECMEM_DEFAULT];
>  
> - /*
> -  * Where possible, prefer to allocate within direct branch range of the
> -  * kernel such that no PLTs are necessary.
> -  */

Why are you removing this comment? I think you could just move it next
to the part where we set a 128MiB range.

> - if (module_direct_base) {
> - p = __vmalloc_node_range(size, MODULE_ALIGN,
> -  module_direct_base,
> -  module_direct_base + SZ_128M,
> -  GFP_KERNEL | __GFP_NOWARN,
> -  PAGE_KERNEL, 0, NUMA_NO_NODE,
> -  __builtin_return_address(0));
> - }
> + module_init_limits();

Hmm, this used to be run from subsys_initcall(), but now you're running
it _really_ early, before random_init(), so randomization of the module
space is no longer going to be very random if we don't have early entropy
from the firmware or the CPU, which is likely to be the case on most SoCs.

>  
> - if (!p && module_plt_base) {
> - p = __vmalloc_node_range(size, MODULE_ALIGN,
> -  module_plt_base,
> -  module_plt_base + SZ_2G,
> -  GFP_KERNEL | __GFP_NOWARN,
> -  PAGE_KERNEL, 0, NUMA_NO_NODE,
> -  __builtin_return_address(0));
> - }
> + r->pgprot = PAGE_KERNEL;
>  
> - if (!p) {
> - pr_warn_ratelimited("%s: unable to allocate memory\n",
> - __func__);
> - }
> + if (module_direct_base) {
> + r->start = module_direct_base;
> + r->end = module_direct_base + SZ_128M;
>  
> - if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
> - vfree(p);
> - return NULL;
> + if (module_plt_base) {
> + r->fallback_start = module_plt_base;
> + r->fallback_end = module_plt_base + SZ_2G;
> + }
> + } else if (module_plt_base) {
> + r->start = module_plt_base;
> + r->end = module_plt_base + SZ_2G;
>   }
>  
> - /* Memory is intended to be executable, reset the pointer tag. */
> - return kasan_reset_tag(p);
> + return &execmem_params;
>  }
>  
>  enum aarch64_reloc_op {

[...]

> diff --git a/include/linux/execmem.h b/include/linux/execmem.h
> index 44e213625053..806ad1a0088d 100644
> --- a/include/linux/execmem.h
> +++ b/include/linux/execmem.h
> @@ -32,19 +32,33 @@ enum execmem_type {
>   EXECMEM_TYPE_MAX,
>  };
>  
> +/**
> + * enum execmem_module_flags - options for executable memory allocations
> + * @EXECMEM_KASAN_SHADOW: allocate kasan shadow
> + */
> +enum execmem_range_flags {
> + EXECMEM_KASAN_SHADOW= (1 << 0),
> +};
> +
>  /**
>   * struct execmem_range - definition of a memory range suitable for code and

Re: [PATCH v3 04/13] mm/execmem, arch: convert remaining overrides of module_alloc to execmem

2023-10-04 Thread Mike Rapoport
On Wed, Oct 04, 2023 at 12:29:36AM +, Edgecombe, Rick P wrote:
> On Mon, 2023-09-18 at 10:29 +0300, Mike Rapoport wrote:
> > diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
> > index 5f71a0cf4399..9d37375e2f05 100644
> > --- a/arch/x86/kernel/module.c
> > +++ b/arch/x86/kernel/module.c
> >
> > -void *module_alloc(unsigned long size)
> > +struct execmem_params __init *execmem_arch_params(void)
> >  {
> > -   gfp_t gfp_mask = GFP_KERNEL;
> > -   void *p;
> > -
> > -   if (PAGE_ALIGN(size) > MODULES_LEN)
> > -   return NULL;
> > +   unsigned long module_load_offset = 0;
> > +   unsigned long start;
> >  
> > -   p = __vmalloc_node_range(size, MODULE_ALIGN,
> > -    MODULES_VADDR +
> > get_module_load_offset(),
> > -    MODULES_END, gfp_mask, PAGE_KERNEL,
> > -    VM_FLUSH_RESET_PERMS |
> > VM_DEFER_KMEMLEAK,
> > -    NUMA_NO_NODE,
> > __builtin_return_address(0));
> > +   if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled())
> > +   module_load_offset =
> > +   get_random_u32_inclusive(1, 1024) *
> > PAGE_SIZE;
> 
> Minor:
> I think you can skip the IS_ENABLED(CONFIG_RANDOMIZE_BASE) part because
> CONFIG_RANDOMIZE_MEMORY depends on CONFIG_RANDOMIZE_BASE (which is
> checked in kaslr_enabled()).

Thanks, I'll look into it.

-- 
Sincerely yours,
Mike.



Re: [PATCH v3 04/13] mm/execmem, arch: convert remaining overrides of module_alloc to execmem

2023-10-03 Thread Edgecombe, Rick P
On Mon, 2023-09-18 at 10:29 +0300, Mike Rapoport wrote:
> diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
> index 5f71a0cf4399..9d37375e2f05 100644
> --- a/arch/x86/kernel/module.c
> +++ b/arch/x86/kernel/module.c
> @@ -19,6 +19,7 @@
>  #include 
>  #include 
>  #include 
> +#include <linux/execmem.h>
>  
>  #include 
>  #include 
> @@ -36,55 +37,30 @@ do
> {\
>  } while (0)
>  #endif
>  
> -#ifdef CONFIG_RANDOMIZE_BASE
> -static unsigned long module_load_offset;
> +static struct execmem_params execmem_params __ro_after_init = {
> +   .ranges = {
> +   [EXECMEM_DEFAULT] = {
> +   .flags = EXECMEM_KASAN_SHADOW,
> +   .alignment = MODULE_ALIGN,
> +   },
> +   },
> +};
>  
> -/* Mutex protects the module_load_offset. */
> -static DEFINE_MUTEX(module_kaslr_mutex);
> -
> -static unsigned long int get_module_load_offset(void)
> -{
> -   if (kaslr_enabled()) {
> -   mutex_lock(&module_kaslr_mutex);
> -   /*
> -    * Calculate the module_load_offset the first time
> this
> -    * code is called. Once calculated it stays the same
> until
> -    * reboot.
> -    */
> -   if (module_load_offset == 0)
> -   module_load_offset =
> -   get_random_u32_inclusive(1, 1024) *
> PAGE_SIZE;
> -   mutex_unlock(&module_kaslr_mutex);
> -   }
> -   return module_load_offset;
> -}
> -#else
> -static unsigned long int get_module_load_offset(void)
> -{
> -   return 0;
> -}
> -#endif
> -
> -void *module_alloc(unsigned long size)
> +struct execmem_params __init *execmem_arch_params(void)
>  {
> -   gfp_t gfp_mask = GFP_KERNEL;
> -   void *p;
> -
> -   if (PAGE_ALIGN(size) > MODULES_LEN)
> -   return NULL;
> +   unsigned long module_load_offset = 0;
> +   unsigned long start;
>  
> -   p = __vmalloc_node_range(size, MODULE_ALIGN,
> -    MODULES_VADDR +
> get_module_load_offset(),
> -    MODULES_END, gfp_mask, PAGE_KERNEL,
> -    VM_FLUSH_RESET_PERMS |
> VM_DEFER_KMEMLEAK,
> -    NUMA_NO_NODE,
> __builtin_return_address(0));
> +   if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled())
> +   module_load_offset =
> +   get_random_u32_inclusive(1, 1024) *
> PAGE_SIZE;

Minor:
I think you can skip the IS_ENABLED(CONFIG_RANDOMIZE_BASE) part because
CONFIG_RANDOMIZE_MEMORY depends on CONFIG_RANDOMIZE_BASE (which is
checked in kaslr_enabled()).