Gustavo Romero <grom...@linux.ibm.com> writes:

> Although the AMR is stashed in the checkpoint area, currently we don't save
> it to the per-thread checkpoint struct after a treclaim, and so we don't
> restore it from that struct either when we trechkpt. As a consequence, when
> the transaction is later rolled back, the kernel-space AMR value that was
> live when the trechkpt was done becomes visible in userspace.
>
> This commit saves and restores the AMR accordingly on treclaim and trechkpt.
> Since the AMR value is also used elsewhere in kernel space, it also takes
> care of stashing the kernel live AMR on the stack before treclaim and before
> trechkpt, restoring it just before returning from tm_reclaim and
> __tm_recheckpoint.
>
> It also fixes two unrelated comments about CR and MSR.
>

Tested-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
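
FWIW, below is a rough userspace sketch of the failure mode the patch
addresses (my own illustration, not the in-tree selftest): write a known AMR
value, spin in a transaction until the kernel reclaims/recheckpoints it and
the rollback path runs, then check that the AMR seen after the abort still
matches. It assumes a TM-capable CPU (POWER8/POWER9 with TM enabled), GCC
with -mhtm, and that the bits written are ones the kernel lets userspace keep
(UAMOR); names and values are illustrative.

/* amr-rollback-check.c: illustrative only, not the in-tree selftest. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t read_amr(void)
{
	uint64_t v;

	/* SPR 13 is the problem-state (userspace) alias of the AMR. */
	asm volatile("mfspr %0, 13" : "=r" (v));
	return v;
}

static inline void write_amr(uint64_t v)
{
	asm volatile("mtspr 13, %0" : : "r" (v));
}

int main(void)
{
	/* Arbitrary pattern; the kernel may mask bits not enabled in UAMOR. */
	uint64_t before = 0x0c00000000000000ULL;
	uint64_t after;

	write_amr(before);

	for (;;) {
		if (__builtin_tbegin(0)) {
			/*
			 * Spin transactionally until we get preempted, which
			 * makes the kernel treclaim and later trechkpt us.
			 */
			for (volatile int i = 0; i < (1 << 22); i++)
				;
			__builtin_tend(0);
		} else {
			/* Rolled back: checkpointed state, AMR included, is restored. */
			break;
		}
	}

	after = read_amr();
	printf("AMR before: %016" PRIx64 " after rollback: %016" PRIx64 " -> %s\n",
	       before, after, before == after ? "ok" : "kernel AMR leaked");
	return before == after ? 0 : 1;
}

Built with something like gcc -O2 -mhtm amr-rollback-check.c, the expectation
per the commit message is that the value read after the rollback differs from
what was written without this patch, and matches with it applied.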

> Signed-off-by: Gustavo Romero <grom...@linux.ibm.com>
> ---
>  arch/powerpc/include/asm/processor.h |  1 +
>  arch/powerpc/kernel/asm-offsets.c    |  1 +
>  arch/powerpc/kernel/tm.S             | 35 ++++++++++++++++++++++++----
>  3 files changed, 33 insertions(+), 4 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/processor.h 
> b/arch/powerpc/include/asm/processor.h
> index ed0d633ab5aa..9f4f6cc033ac 100644
> --- a/arch/powerpc/include/asm/processor.h
> +++ b/arch/powerpc/include/asm/processor.h
> @@ -220,6 +220,7 @@ struct thread_struct {
>       unsigned long   tm_tar;
>       unsigned long   tm_ppr;
>       unsigned long   tm_dscr;
> +     unsigned long   tm_amr;
>  
>       /*
>        * Checkpointed FP and VSX 0-31 register set.
> diff --git a/arch/powerpc/kernel/asm-offsets.c 
> b/arch/powerpc/kernel/asm-offsets.c
> index 8711c2164b45..c2722ff36e98 100644
> --- a/arch/powerpc/kernel/asm-offsets.c
> +++ b/arch/powerpc/kernel/asm-offsets.c
> @@ -176,6 +176,7 @@ int main(void)
>       OFFSET(THREAD_TM_TAR, thread_struct, tm_tar);
>       OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr);
>       OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr);
> +     OFFSET(THREAD_TM_AMR, thread_struct, tm_amr);
>       OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs);
>       OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr);
>       OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave);
> diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
> index 6ba0fdd1e7f8..2b91f233b05d 100644
> --- a/arch/powerpc/kernel/tm.S
> +++ b/arch/powerpc/kernel/tm.S
> @@ -122,6 +122,13 @@ _GLOBAL(tm_reclaim)
>       std     r3, STK_PARAM(R3)(r1)
>       SAVE_NVGPRS(r1)
>  
> +     /*
> +      * Save kernel live AMR since it will be clobbered by treclaim
> +      * but can be used elsewhere later in kernel space.
> +      */
> +     mfspr   r3, SPRN_AMR
> +     std     r3, TM_FRAME_L1(r1)
> +
>       /* We need to setup MSR for VSX register save instructions. */
>       mfmsr   r14
>       mr      r15, r14
> @@ -245,7 +252,7 @@ _GLOBAL(tm_reclaim)
>        * but is used in signal return to 'wind back' to the abort handler.
>        */
>  
> -     /* ******************** CR,LR,CCR,MSR ********** */
> +     /* ***************** CTR, LR, CR, XER ********** */
>       mfctr   r3
>       mflr    r4
>       mfcr    r5
> @@ -256,7 +263,6 @@ _GLOBAL(tm_reclaim)
>       std     r5, _CCR(r7)
>       std     r6, _XER(r7)
>  
> -
>       /* ******************** TAR, DSCR ********** */
>       mfspr   r3, SPRN_TAR
>       mfspr   r4, SPRN_DSCR
> @@ -264,6 +270,10 @@ _GLOBAL(tm_reclaim)
>       std     r3, THREAD_TM_TAR(r12)
>       std     r4, THREAD_TM_DSCR(r12)
>  
> +	/* ******************** AMR **************** */
> +	mfspr	r3, SPRN_AMR
> +	std	r3, THREAD_TM_AMR(r12)
> +
>       /*
>        * MSR and flags: We don't change CRs, and we don't need to alter MSR.
>        */
> @@ -308,7 +318,9 @@ _GLOBAL(tm_reclaim)
>       std     r3, THREAD_TM_TFHAR(r12)
>       std     r4, THREAD_TM_TFIAR(r12)
>  
> -     /* AMR is checkpointed too, but is unsupported by Linux. */
> +     /* Restore kernel live AMR */
> +     ld      r8, TM_FRAME_L1(r1)
> +     mtspr   SPRN_AMR, r8
>  
>       /* Restore original MSR/IRQ state & clear TM mode */
>       ld      r14, TM_FRAME_L0(r1)            /* Orig MSR */
> @@ -355,6 +367,13 @@ _GLOBAL(__tm_recheckpoint)
>        */
>       SAVE_NVGPRS(r1)
>  
> +     /*
> +      * Save kernel live AMR since it will be clobbered for trechkpt
> +      * but can be used elsewhere later in kernel space.
> +      */
> +     mfspr   r8, SPRN_AMR
> +     std     r8, TM_FRAME_L0(r1)
> +
>       /* Load complete register state from ts_ckpt* registers */
>  
>       addi    r7, r3, PT_CKPT_REGS            /* Thread's ckpt_regs */
> @@ -404,7 +423,7 @@ _GLOBAL(__tm_recheckpoint)
>  
>  restore_gprs:
>  
> -     /* ******************** CR,LR,CCR,MSR ********** */
> +     /* ****************** CTR, LR, XER ************* */
>       ld      r4, _CTR(r7)
>       ld      r5, _LINK(r7)
>       ld      r8, _XER(r7)
> @@ -417,6 +436,10 @@ restore_gprs:
>       ld      r4, THREAD_TM_TAR(r3)
>       mtspr   SPRN_TAR,       r4
>  
> +     /* ******************** AMR ******************** */
> +     ld      r4, THREAD_TM_AMR(r3)
> +     mtspr   SPRN_AMR, r4
> +
>       /* Load up the PPR and DSCR in GPRs only at this stage */
>       ld      r5, THREAD_TM_DSCR(r3)
>       ld      r6, THREAD_TM_PPR(r3)
> @@ -509,6 +532,10 @@ restore_gprs:
>       li      r4, MSR_RI
>       mtmsrd  r4, 1
>  
> +     /* Restore kernel live AMR */
> +     ld      r8, TM_FRAME_L0(r1)
> +     mtspr   SPRN_AMR, r8
> +
>       REST_NVGPRS(r1)
>  
>       addi    r1, r1, TM_FRAME_SIZE
> -- 
> 2.25.1
