Le 21/01/2019 à 12:34, Christophe Leroy a écrit :
Some debug setups, such as CONFIG_KASAN, generate huge
kernels whose text size exceeds the 8M limit.

This patch maps a second 8M page when _einittext is over 8M.

This is not enough for CONFIG_KASAN_INLINE. I'll send a v3 which maps up to 32M based on _einittext.



Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
---
  v2: Using IS_ENABLED() instead of #ifdef in 8xx_mmu.c

  arch/powerpc/kernel/head_8xx.S | 27 +++++++++++++++++++++++++--
  arch/powerpc/mm/8xx_mmu.c      |  3 +++
  2 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 20cc816b3508..3b3b7846247f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -337,8 +337,8 @@ InstructionTLBMiss:
        rlwinm  r10, r10, 16, 0xfff8
        cmpli   cr0, r10, PAGE_OFFSET@h
  #ifndef CONFIG_PIN_TLB_TEXT
-       /* It is assumed that kernel code fits into the first 8M page */
-0:     cmpli   cr7, r10, (PAGE_OFFSET + 0x0800000)@h
+       /* It is assumed that kernel code fits into the first two 8M pages */
+0:     cmpli   cr7, r10, (PAGE_OFFSET + 0x1000000)@h
        patch_site      0b, patch__itlbmiss_linmem_top
  #endif
  #endif
@@ -908,6 +908,29 @@ initial_mmu:
        li      r8, MI_BOOTINIT         /* Create RPN for address 0 */
        mtspr   SPRN_MI_RPN, r8         /* Store TLB entry */
+ /* Map a second 8M page if needed */
+       lis     r9, _einittext@h
+       oris    r9, r9, _einittext@l
+       cmpli   cr0, r9, (PAGE_OFFSET + 0x8000000)@h

Should be 0x800000 (8M) here, not 0x8000000 (128M): the test is meant to
check whether _einittext lies beyond the first 8M page, so the comparison
limit must be (PAGE_OFFSET + 0x800000)@h.

Christophe

+       blt     1f
+
+#ifdef CONFIG_PIN_TLB_TEXT
+       lis     r8, MI_RSV4I@h
+       ori     r8, r8, 0x1d00
+
+       mtspr   SPRN_MI_CTR, r8 /* Set instruction MMU control */
+#endif
+
+       lis     r8, (KERNELBASE + 0x800000)@h   /* Create vaddr for TLB */
+       ori     r8, r8, MI_EVALID       /* Mark it valid */
+       mtspr   SPRN_MI_EPN, r8
+       li      r8, MI_PS8MEG /* Set 8M byte page */
+       ori     r8, r8, MI_SVALID       /* Make it valid */
+       mtspr   SPRN_MI_TWC, r8
+       li      r8, MI_BOOTINIT         /* Create RPN for address 0 */
+       addis   r8, r8, 0x80
+       mtspr   SPRN_MI_RPN, r8         /* Store TLB entry */
+1:
        lis     r8, MI_APG_INIT@h       /* Set protection modes */
        ori     r8, r8, MI_APG_INIT@l
        mtspr   SPRN_MI_AP, r8
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 92b677faea8c..b5f6d794281d 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -112,6 +112,9 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
                        mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
        } else {
                mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
+               if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
+                       mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top,
+                                           _ALIGN(__pa(_einittext), 8 << 20));
        }
mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);

Reply via email to