For book3s/32 it is assumed that TASK_SIZE is a multiple of 256 Mbytes,
but Kconfig allows any value for TASK_SIZE.

In all relevant calculations, round TASK_SIZE up to the next 256 Mbytes
boundary.

Also use ASM_CONST() in the definition of TASK_SIZE to ensure it is
seen as an unsigned constant.

Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/include/asm/book3s/32/mmu-hash.h | 5 ++++-
 arch/powerpc/include/asm/task_size_32.h       | 2 +-
 arch/powerpc/kernel/asm-offsets.c             | 2 +-
 arch/powerpc/kernel/head_book3s_32.S          | 6 +++---
 arch/powerpc/mm/book3s32/mmu.c                | 2 +-
 arch/powerpc/mm/ptdump/segment_regs.c         | 2 +-
 6 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 78c6a5fde1d6..df00be5b4044 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -192,12 +192,15 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
 extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
 extern s32 patch__flush_hash_B;
 
+#include <linux/sizes.h>
+#include <linux/align.h>
+
 #include <asm/reg.h>
 #include <asm/task_size_32.h>
 
 static __always_inline void update_user_segment(u32 n, u32 val)
 {
-       if (n << 28 < TASK_SIZE)
+       if (n << 28 < ALIGN(TASK_SIZE, SZ_256M))
                mtsr(val + n * 0x111, n << 28);
 }
 
diff --git a/arch/powerpc/include/asm/task_size_32.h b/arch/powerpc/include/asm/task_size_32.h
index de7290ee770f..30edc21f71fb 100644
--- a/arch/powerpc/include/asm/task_size_32.h
+++ b/arch/powerpc/include/asm/task_size_32.h
@@ -6,7 +6,7 @@
 #error User TASK_SIZE overlaps with KERNEL_START address
 #endif
 
-#define TASK_SIZE (CONFIG_TASK_SIZE)
+#define TASK_SIZE ASM_CONST(CONFIG_TASK_SIZE)
 
 /*
  * This decides where the kernel will search for a free chunk of vm space during
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b3048f6d3822..2c7fadddae4a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -330,7 +330,7 @@ int main(void)
 
 #ifndef CONFIG_PPC64
        DEFINE(TASK_SIZE, TASK_SIZE);
-       DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
+       DEFINE(NUM_USER_SEGMENTS, ALIGN(TASK_SIZE, SZ_256M) >> 28);
 #endif /* ! CONFIG_PPC64 */
 
        /* datapage offsets for use by vdso */
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index cb2bca76be53..c1779455ea32 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -420,7 +420,7 @@ InstructionTLBMiss:
        lwz     r2,0(r2)                /* get pmd entry */
 #ifdef CONFIG_EXECMEM
        rlwinm  r3, r0, 4, 0xf
-       subi    r3, r3, (TASK_SIZE >> 28) & 0xf
+       subi    r3, r3, NUM_USER_SEGMENTS
 #endif
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
        beq-    InstructionAddressInvalid       /* return if no mapping */
@@ -475,7 +475,7 @@ DataLoadTLBMiss:
        lwz     r2,0(r1)                /* get pmd entry */
        rlwinm  r3, r0, 4, 0xf
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
-       subi    r3, r3, (TASK_SIZE >> 28) & 0xf
+       subi    r3, r3, NUM_USER_SEGMENTS
        beq-    2f                      /* bail if no mapping */
 1:     rlwimi  r2,r0,22,20,29          /* insert next 10 bits of address */
        lwz     r2,0(r2)                /* get linux-style pte */
@@ -554,7 +554,7 @@ DataStoreTLBMiss:
        lwz     r2,0(r1)                /* get pmd entry */
        rlwinm  r3, r0, 4, 0xf
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
-       subi    r3, r3, (TASK_SIZE >> 28) & 0xf
+       subi    r3, r3, NUM_USER_SEGMENTS
        beq-    2f                      /* bail if no mapping */
 1:
        rlwimi  r2,r0,22,20,29          /* insert next 10 bits of address */
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index be9c4106e22f..afc9b5cac5a6 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -225,7 +225,7 @@ int mmu_mark_initmem_nx(void)
 
        BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE);
 
-       for (i = TASK_SIZE >> 28; i < 16; i++) {
+       for (i = ALIGN(TASK_SIZE, SZ_256M) >> 28; i < 16; i++) {
                /* Do not set NX on VM space for modules */
                if (is_module_segment(i << 28))
                        continue;
diff --git a/arch/powerpc/mm/ptdump/segment_regs.c b/arch/powerpc/mm/ptdump/segment_regs.c
index 9df3af8d481f..c06704b18a2c 100644
--- a/arch/powerpc/mm/ptdump/segment_regs.c
+++ b/arch/powerpc/mm/ptdump/segment_regs.c
@@ -31,7 +31,7 @@ static int sr_show(struct seq_file *m, void *v)
        int i;
 
        seq_puts(m, "---[ User Segments ]---\n");
-       for (i = 0; i < TASK_SIZE >> 28; i++)
+       for (i = 0; i < ALIGN(TASK_SIZE, SZ_256M) >> 28; i++)
                seg_show(m, i);
 
        seq_puts(m, "\n---[ Kernel Segments ]---\n");
-- 
2.49.0


Reply via email to