The ISA 3.1B hashst and hashchk instructions use a per-cpu SPR HASHKEYR
to hold a key used in the hash calculation. This key should be different
for each process to make it harder for a malicious process to recreate
valid hash values for a victim process.

Add support for storing a per-thread hash key, and setting/clearing
HASHKEYR appropriately.

Signed-off-by: Benjamin Gray <bg...@linux.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/kexec.h |  3 +++
 arch/powerpc/include/asm/processor.h       |  1 +
 arch/powerpc/include/asm/reg.h             |  1 +
 arch/powerpc/kernel/process.c              | 12 ++++++++++++
 4 files changed, 17 insertions(+)

diff --git a/arch/powerpc/include/asm/book3s/64/kexec.h b/arch/powerpc/include/asm/book3s/64/kexec.h
index 563baf94a962..163de935df28 100644
--- a/arch/powerpc/include/asm/book3s/64/kexec.h
+++ b/arch/powerpc/include/asm/book3s/64/kexec.h
@@ -24,6 +24,9 @@ static inline void reset_sprs(void)
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                mtspr(SPRN_DEXCR, 0);
 
+       if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE))
+               mtspr(SPRN_HASHKEYR, 0);
+
        /*  Do we need isync()? We are going via a kexec reset */
        isync();
 }
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index c17ec1e44c86..2381217c95dc 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -264,6 +264,7 @@ struct thread_struct {
        unsigned long   mmcr3;
        unsigned long   sier2;
        unsigned long   sier3;
+       unsigned long   hashkeyr;
 
 #endif
 };
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index cdd1f174c399..854664cf844f 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -384,6 +384,7 @@
 #define SPRN_HRMOR     0x139   /* Real mode offset register */
 #define SPRN_HSRR0     0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1     0x13B   /* Hypervisor Save/Restore 1 */
+#define SPRN_HASHKEYR  0x1D4   /* Non-privileged hashst/hashchk key register */
 #define SPRN_ASDR      0x330   /* Access segment descriptor register */
 #define SPRN_DEXCR     0x33C   /* Dynamic execution control register */
 #define   DEXCR_PRO_MASK(aspect)       __MASK(63 - (32 + (aspect)))    /* Aspect number to problem state aspect mask */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 17d26f652b80..4d7b0c7641d0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1229,6 +1229,9 @@ static inline void restore_sprs(struct thread_struct *old_thread,
            old_thread->tidr != new_thread->tidr)
                mtspr(SPRN_TIDR, new_thread->tidr);
 
+       if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE))
+               mtspr(SPRN_HASHKEYR, new_thread->hashkeyr);
+
        if (cpu_has_feature(CPU_FTR_ARCH_31)) {
                unsigned long new_dexcr = get_thread_dexcr(new_thread);
 
@@ -1818,6 +1821,10 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
                childregs->ppr = DEFAULT_PPR;
 
        p->thread.tidr = 0;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE))
+               p->thread.hashkeyr = current->thread.hashkeyr;
 #endif
        /*
         * Run with the current AMR value of the kernel
@@ -1947,6 +1954,11 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 #ifdef CONFIG_PPC_BOOK3S_64
+       if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
+               current->thread.hashkeyr = get_random_long();
+               mtspr(SPRN_HASHKEYR, current->thread.hashkeyr);
+       }
+
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                mtspr(SPRN_DEXCR, get_thread_dexcr(&current->thread));
 #endif /* CONFIG_PPC_BOOK3S_64 */
-- 
2.38.1

Reply via email to