This prepares the kernel to operate with an IAMR value different from the
userspace one. For this, IAMR needs to be saved and restored on entry to and
return from the kernel.

If MMU_FTR_PKEY is enabled we always use the key mechanism to implement the
KUEP feature. If MMU_FTR_PKEY is not supported but MMU_FTR_KUEP is
(radix translation on POWER9), we can skip restoring IAMR on return
to userspace, because userspace won't be using IAMR in that specific config.

We don't need to save/restore IAMR on re-entry into the kernel due to an
interrupt, because the kernel doesn't modify IAMR internally.
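
Roughly, the intended flow is the following simplified sketch (names as used
in the patch below; the real implementation is in the kup.h asm macros and in
system_call_exception(), with feature checks and the KUEP-only case elided):

	/* kernel entry from userspace */
	if (mmu_has_feature(MMU_FTR_PKEY)) {
		regs->kuep = mfspr(SPRN_IAMR);      /* save user IAMR    */
		mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED); /* kernel IAMR value */
		isync();
	}

	/* return to userspace */
	if (mmu_has_feature(MMU_FTR_PKEY))
		mtspr(SPRN_IAMR, regs->kuep);       /* restore user IAMR */

	/* re-entry from kernel context: nothing to save/restore, the
	 * kernel does not modify IAMR internally */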

Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/kup.h | 51 +++++++++++++++++++++++-
 arch/powerpc/include/asm/ptrace.h        |  6 ++-
 arch/powerpc/kernel/asm-offsets.c        |  4 ++
 arch/powerpc/kernel/syscall_64.c         |  5 ++-
 4 files changed, 62 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 85645d1b7102..717c2c4d3681 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -25,6 +25,13 @@
        b       100f  // skip_restore_amr
        ALT_MMU_FTR_SECTION_END_NESTED_IFSET(MMU_FTR_KUAP, 68)
 
+       /*
+        * We don't check KUEP feature here, because if FTR_PKEY
+        * is not enabled we don't need to restore IAMR on
+        * return to userspace. That is handled by either
+        * handle_kuap_restore_amr or skip_restore_amr
+        */
+
 99:  // handle_kuap_restore_amr
        /*
         * No key support, don't bother to restore AMR
@@ -37,7 +44,17 @@
        ld      \gpr, STACK_REGS_KUAP(r1)
        isync
        mtspr   SPRN_AMR, \gpr
-       /*  No isync here because we will return to a different context  */
+
+       /*
+        * Restore IAMR only when returning to userspace
+        */
+       .ifnb \to_user
+       ld      \gpr, STACK_REGS_KUEP(r1)
+       mtspr   SPRN_IAMR, \gpr
+       .endif
+       /*
+        * No isync here because we will return to a different context
+        */
 100:  // skip_restore_amr
 #endif
 .endm
@@ -75,6 +92,12 @@
        b       100f  // skip_save_amr
        ALT_MMU_FTR_SECTION_END_NESTED_IFSET(MMU_FTR_KUAP, 68)
 
+       /*
+        * We don't check KUEP feature here, because if FTR_PKEY
+        * is not enabled we don't need to save IAMR on
+        * entry from userspace. That is handled by either
+        * handle_kuap_save_amr or skip_save_amr
+        */
 
 99: // handle_kuap_save_amr
        .ifnb \msr_pr_cr
@@ -103,7 +126,26 @@
        mtspr   SPRN_AMR, \gpr2
        isync
 102:
-        END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 69)
+       END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 69)
+
+       .ifnb \msr_pr_cr
+       beq     \msr_pr_cr, 103f // from kernel space
+       mfspr   \gpr1, SPRN_IAMR
+       std     \gpr1, STACK_REGS_KUEP(r1)
+
+       /*
+        * update kernel IAMR with AMR_KUEP_BLOCKED only
+        * if KUEP feature is enabled
+        */
+       BEGIN_MMU_FTR_SECTION_NESTED(70)
+       LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
+       cmpd    \use_cr, \gpr1, \gpr2
+       beq     \use_cr, 103f
+       mtspr   SPRN_IAMR, \gpr2
+       isync
+103:
+       END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUEP, 70)
+       .endif
 
 100: // skip_save_amr
 #endif
@@ -129,6 +171,11 @@ static inline void kuap_restore_amr(struct pt_regs *regs, bool to_user)
                 * returning to a different context using rfid
                 */
        }
+       /*
+        * We do support FTR_PKEY here; restore IAMR when returning to userspace
+        */
+       if (to_user)
+               mtspr(SPRN_IAMR, regs->kuep);
 }
 
 static inline void kuap_check_amr(void)
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index e0195e6b892b..2bfd2b6a72ab 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -56,8 +56,12 @@ struct pt_regs
 #ifdef CONFIG_PPC_KUAP
                        unsigned long kuap;
 #endif
+#ifdef CONFIG_PPC_KUEP
+                       unsigned long kuep;
+#endif
+
                };
-               unsigned long __pad[2]; /* Maintain 16 byte interrupt stack alignment */
+               unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
        };
 };
 #endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index fcf24a365fc0..6c7326fc73b9 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -353,6 +353,10 @@ int main(void)
 #ifdef CONFIG_PPC_KUAP
        STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
 #endif
+#ifdef CONFIG_PPC_KUEP
+       STACK_PT_REGS_OFFSET(STACK_REGS_KUEP, kuep);
+#endif
+
 
 #if defined(CONFIG_PPC32)
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index f69b4774f3b3..458f2bb80c6d 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -36,11 +36,14 @@ notrace long system_call_exception(long r3, long r4, long r5,
        BUG_ON(regs->softe != IRQS_ENABLED);
 
        if (mmu_has_feature(MMU_FTR_PKEY)) {
-               unsigned long amr;
+               unsigned long amr, iamr;
 
                amr = mfspr(SPRN_AMR);
+               iamr = mfspr(SPRN_IAMR);
                regs->kuap = amr;
+               regs->kuep = iamr;
                mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+               mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
                isync();
        } else
                kuap_check_amr();
-- 
2.25.3
