Hi Will and all:
>
> The following is a schematic of arm64_memblock_init() before and after
> the modification.
>
> Before:
> if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {} --(a)
> if (memory_limit != PHYS_ADDR_MAX) {}                               --(b)
> if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {}       --(c)
> if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {}                           --(d)*
>
> After:
> if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {} --(a)
> if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {}                           --(d)*
> if (memory_limit != PHYS_ADDR_MAX) {}                               --(b)
> if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {}       --(c)
>
> By grouping the modifications of memstart_addr, i.e. moving the linear
> region randomization (d) ahead of the memory_limit handling (b), drivers
> can safely use the __phys_to_virt macro in (b) or (c), if necessary.
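>
> The reason is that __phys_to_virt() is derived from memstart_addr;
> roughly (a simplified sketch of arch/arm64/include/asm/memory.h; the
> exact definition varies by kernel version):
>
>         extern s64 memstart_addr;
>         /* PHYS_OFFSET reads memstart_addr at use time */
>         #define PHYS_OFFSET           ({ memstart_addr; })
>         #define __phys_to_virt(x)     ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
>
> A translation done in (b) or (c) is therefore only stable once (d) has
> already made its final adjustment of memstart_addr.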
>
> Signed-off-by: pierre Kuo <vichy....@gmail.com>
> ---
> Changes in v2:
> - add Fixes tag
>
> Changes in v3:
> - add a patch shifting the linear region randomization ahead of
>   memory_limit handling
>
>  arch/arm64/mm/init.c | 33 +++++++++++++++++----------------
>  1 file changed, 17 insertions(+), 16 deletions(-)
>
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 7205a9085b4d..5142020fc146 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -389,6 +389,23 @@ void __init arm64_memblock_init(void)
>                 memblock_remove(0, memstart_addr);
>         }
>
> +       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
> +               extern u16 memstart_offset_seed;
> +               u64 range = linear_region_size -
> +                           (memblock_end_of_DRAM() - memblock_start_of_DRAM());
> +
> +               /*
> +                * If the size of the linear region exceeds, by a sufficient
> +                * margin, the size of the region that the available physical
> +                * memory spans, randomize the linear region as well.
> +                */
> +               if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
> +                       range /= ARM64_MEMSTART_ALIGN;
> +                       memstart_addr -= ARM64_MEMSTART_ALIGN *
> +                                        ((range * memstart_offset_seed) >> 16);
> +               }
> +       }
> +
>         /*
>          * Apply the memory limit if it was set. Since the kernel may be loaded
>          * high up in memory, add back the kernel region that must be accessible
> @@ -428,22 +445,6 @@ void __init arm64_memblock_init(void)
>                 }
>         }
>
> -       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
> -               extern u16 memstart_offset_seed;
> -               u64 range = linear_region_size -
> -                           (memblock_end_of_DRAM() - memblock_start_of_DRAM());
> -
> -               /*
> -                * If the size of the linear region exceeds, by a sufficient
> -                * margin, the size of the region that the available physical
> -                * memory spans, randomize the linear region as well.
> -                */
> -               if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
> -                       range /= ARM64_MEMSTART_ALIGN;
> -                       memstart_addr -= ARM64_MEMSTART_ALIGN *
> -                                        ((range * memstart_offset_seed) >> 16);
> -               }
> -       }
>
>         /*
>          * Register the kernel text, kernel data, initrd, and initial

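As a side note, the arithmetic of the moved block can be sanity-checked
with a small stand-alone sketch; all of the values below, including the
1 GiB ARM64_MEMSTART_ALIGN, are assumed purely for illustration:

        /* Stand-alone illustration of the randomization arithmetic;
         * the sizes and the seed are made up for the example. */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t linear_region_size = 256ULL << 30; /* 256 GiB linear map */
                uint64_t dram_span = 4ULL << 30;            /* 4 GiB of physical RAM */
                uint64_t align = 1ULL << 30;                /* assumed ARM64_MEMSTART_ALIGN */
                uint16_t seed = 0x8000;                     /* memstart_offset_seed */
                uint64_t range = linear_region_size - dram_span;

                if (seed > 0 && range >= align) {
                        range /= align; /* 252 alignment-sized slots of slack */
                        /* the seed scales the slack: 252 * (0x8000 / 65536) = 126 */
                        printf("memstart_addr drops by %llu GiB\n",
                               (unsigned long long)((align * ((range * seed) >> 16)) >> 30));
                }
                return 0;
        }

Since memstart_offset_seed is a u16, the computed offset is always less
than the slack between the linear region and the DRAM span, so the
shifted linear map still covers all of DRAM.
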
Would you mind giving some comments and suggestions on these v3 patches?
https://lkml.org/lkml/2019/4/8/682
https://lkml.org/lkml/2019/4/8/683

I sincerely appreciate your kind help,
