Do not offset the mmap base address for stack randomization if the
current task does not want randomization (i.e. PF_RANDOMIZE is not
set). Note that x86 already implements this behaviour.
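
For reference, x86 gates the random part of the pad on PF_RANDOMIZE
inside a helper called from its mmap_base(); a rough sketch of that
helper (simplified from arch/x86/mm/mmap.c) looks like:

	/* Simplified sketch of the x86 helper: the randomization pad is
	 * only accounted for when the task has PF_RANDOMIZE set. */
	static unsigned long stack_maxrandom_size(unsigned long task_size)
	{
		unsigned long max = 0;

		if (current->flags & PF_RANDOMIZE) {
			max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
			max <<= PAGE_SHIFT;
		}

		return max;
	}

This patch applies the same logic to arm64's mmap_base().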

Signed-off-by: Alexandre Ghiti <a...@ghiti.fr>
Acked-by: Catalin Marinas <catalin.mari...@arm.com>
Acked-by: Kees Cook <keesc...@chromium.org>
Reviewed-by: Christoph Hellwig <h...@lst.de>
Reviewed-by: Luis Chamberlain <mcg...@kernel.org>
---
 arch/arm64/mm/mmap.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index bb0140afed66..e4acaead67de 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -54,7 +54,11 @@ unsigned long arch_mmap_rnd(void)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
        unsigned long gap = rlim_stack->rlim_cur;
-       unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
+       unsigned long pad = stack_guard_gap;
+
+       /* Account for stack randomization if necessary */
+       if (current->flags & PF_RANDOMIZE)
+               pad += (STACK_RND_MASK << PAGE_SHIFT);
 
        /* Values close to RLIM_INFINITY can overflow. */
        if (gap + pad > gap)
-- 
2.20.1