Re: [RFC PATCH 01/11] powerpc/tm: Reclaim transaction on kernel entry

2018-09-27 Thread Breno Leitao
Hi Mikey,

On 09/17/2018 10:31 PM, Michael Neuling wrote:
> On Wed, 2018-09-12 at 16:40 -0300, Breno Leitao wrote:
>> This patch creates a macro that will be invoked on every entrance to the
>> kernel, so that, in kernel space, the transaction will be completely
>> reclaimed and no longer merely suspended.
> 
> There are still some calls to tm_reclaim_current() in process.c. These
> should probably go now, right?

Yes, we shouldn't call tm_reclaim_current() from anywhere other than
TM_KERNEL_ENTRY anymore.

That said, I think we can still keep some references to tm_reclaim_current()
at some specific checkpoints. Something like:

	if (WARN_ON(MSR_TM_SUSPENDED(mfmsr())))
		tm_reclaim_current(0);

I initially wrote something like BUG_ON(MSR_TM_SUSPENDED(mfmsr())), but
scripts/checkpatch.pl suggested writing a WARN_ON() and creating a recovery
path instead, which seems fair.
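
For illustration, a minimal sketch of how such a defensive checkpoint could
look; the wrapper function name is hypothetical, while MSR_TM_SUSPENDED(),
mfmsr() and tm_reclaim_current() are the existing helpers:

	/*
	 * Hypothetical sanity check, placed after TM_KERNEL_ENTRY has run:
	 * no transaction should still be suspended at this point. If one
	 * is, warn and recover by reclaiming it instead of crashing.
	 */
	static void tm_assert_reclaimed(void)
	{
		if (WARN_ON(MSR_TM_SUSPENDED(mfmsr())))
			tm_reclaim_current(0);
	}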

Anyway, if you think it is not a good strategy, I can get rid of them in v2.

Thank you!


Re: [RFC PATCH 01/11] powerpc/tm: Reclaim transaction on kernel entry

2018-09-17 Thread Michael Neuling
On Wed, 2018-09-12 at 16:40 -0300, Breno Leitao wrote:
> This patch creates a macro that will be invoked on every entrance to the
> kernel, so that, in kernel space, the transaction will be completely
> reclaimed and no longer merely suspended.

There are still some calls to tm_reclaim_current() in process.c. These
should probably go now, right?

Mikey

> This patch checks if we are coming from PR; if not, the reclaim is skipped.
> This is useful when irq_replay() is called after a recheckpoint, when the
> IRQs are re-enabled. In this case, we do not want to re-reclaim and
> re-recheckpoint, thus, if not coming from PR, it is skipped completely.
> 
> This macro also does not touch the TM SPRs; they will only be saved and
> restored in the context switch code from now on.
> 
> This macro returns 0 or 1 in the r3 register, to specify whether a reclaim
> was executed or not.
> 
> This patchset is based on initial work done by Cyril:
> https://patchwork.ozlabs.org/cover/875341/
> 
> Signed-off-by: Breno Leitao 
> ---
>  arch/powerpc/include/asm/exception-64s.h | 46 
>  arch/powerpc/kernel/entry_64.S   | 10 ++
>  arch/powerpc/kernel/exceptions-64s.S | 12 +--
>  3 files changed, 66 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/exception-64s.h
> b/arch/powerpc/include/asm/exception-64s.h
> index a86fead0..db90b6d7826e 100644
> --- a/arch/powerpc/include/asm/exception-64s.h
> +++ b/arch/powerpc/include/asm/exception-64s.h
> @@ -36,6 +36,7 @@
>   */
>  #include 
>  #include 
> +#include 
>  
>  /* PACA save area offsets (exgen, exmc, etc) */
>  #define EX_R9  0
> @@ -686,10 +687,54 @@ BEGIN_FTR_SECTION   \
>   beql	ppc64_runlatch_on_trampoline;   \
>  END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
>  
> +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
> +
> +/*
> + * This macro will reclaim a transaction if called when coming from userspace
> + * (MSR.PR = 1) and if the transaction state is active or suspended.
> + *
> + * Since we don't want to reclaim when coming from the kernel, for instance
> + * after a trechkpt. or an IRQ replay, the live MSR is not useful; instead,
> + * the MSR from the thread stack is used to check the MSR.PR bit.
> + *
> + * This macro takes one argument, the cause that will be used by treclaim,
> + * and returns '1' in r3 if the reclaim happened or '0' if it didn't, which
> + * is useful to know which registers were clobbered.
> + *
> + * NOTE: If additional registers are clobbered here, make sure the callee
> + * function restores them before proceeding.
> + */
> +#define TM_KERNEL_ENTRY(cause)					\
> + ld  r3, _MSR(r1);   \
> + andi.   r0, r3, MSR_PR; /* Coming from userspace? */\
> + beq 1f; /* Skip reclaim if MSR.PR != 1 */   \
> + rldicl. r0, r3, (64-MSR_TM_LG), 63; /* Is TM enabled? */\
> + beq 1f; /* Skip reclaim if TM is off */ \
> + rldicl. r0, r3, (64-MSR_TS_LG), 62; /* Is active */ \
> + beq 1f; /* Skip reclaim if neither */   \
> + /*  \
> +  * If there is a transaction active or suspended, save the  \
> +  * non-volatile GPRs if they are not already saved. \
> +  */ \
> + bl  save_nvgprs;\
> + /*  \
> +  * Soft disable the IRQs, otherwise it might cause a CPU hang.  \
> +  */ \
> + RECONCILE_IRQ_STATE(r10, r11);  \
> + li  r3, cause;  \
> + bl  tm_reclaim_current; \
> + li  r3, 1;  /* Reclaim happened */  \
> + b   2f; \
> +1:   li  r3, 0;  /* Reclaim didn't happen */ \
> +2:
> +#else
> +#define TM_KERNEL_ENTRY(cause)
> +#endif
> +
>  #define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions) \
>   EXCEPTION_PROLOG_COMMON(trap, area);\
>   /* Volatile regs are potentially clobbered here */  \
>   additions;  \
> + TM_KERNEL_ENTRY(TM_CAUSE_MISC); \
> + addi	r3,r1,STACK_FRAME_OVERHEAD; \
>   bl  hdlr;   \
>   b   ret
> @@ -704,6 +749,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
>   EXCEPTION_PROLOG_COMMON_3(trap);\
>   /* Volatile regs are potentially clobbered here */  \
>   additions;  \
> + TM_KERNEL_ENTRY(TM_CAUSE_MISC); \
>   addi	r3,r1,STACK_FRAME_OVERHEAD; \
>   bl  hdlr

[RFC PATCH 01/11] powerpc/tm: Reclaim transaction on kernel entry

2018-09-12 Thread Breno Leitao
This patch creates a macro that will be invoked on every entrance to the
kernel, so that, in kernel space, the transaction will be completely
reclaimed and no longer merely suspended.

This patch checks if we are coming from PR; if not, the reclaim is skipped.
This is useful when irq_replay() is called after a recheckpoint, when the
IRQs are re-enabled. In this case, we do not want to re-reclaim and
re-recheckpoint, thus, if not coming from PR, it is skipped completely.

This macro also does not touch the TM SPRs; they will only be saved and
restored in the context switch code from now on.

This macro returns 0 or 1 in the r3 register, to specify whether a reclaim
was executed or not.
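
As a rough C-level sketch of the control flow the macro implements
(illustrative only: tm_kernel_entry() and reconcile_irq_state() are not real
C functions, they stand in for the assembly sequences in the patch below):

	static int tm_kernel_entry(unsigned long stack_msr, int cause)
	{
		/* Use the MSR saved on the thread stack, not the live MSR. */
		if (!(stack_msr & MSR_PR))	/* not coming from userspace */
			return 0;
		if (!(stack_msr & MSR_TM))	/* TM facility disabled */
			return 0;
		if (!MSR_TM_ACTIVE(stack_msr))	/* no active/suspended transaction */
			return 0;

		save_nvgprs();			/* save non-volatile GPRs */
		reconcile_irq_state();		/* soft-disable IRQs */
		tm_reclaim_current(cause);
		return 1;			/* reported back in r3 */
	}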

This patchset is based on initial work done by Cyril:
https://patchwork.ozlabs.org/cover/875341/

Signed-off-by: Breno Leitao 
---
 arch/powerpc/include/asm/exception-64s.h | 46 
 arch/powerpc/kernel/entry_64.S   | 10 ++
 arch/powerpc/kernel/exceptions-64s.S | 12 +--
 3 files changed, 66 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/exception-64s.h 
b/arch/powerpc/include/asm/exception-64s.h
index a86fead0..db90b6d7826e 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -36,6 +36,7 @@
  */
 #include 
 #include 
+#include 
 
 /* PACA save area offsets (exgen, exmc, etc) */
 #define EX_R9  0
@@ -686,10 +687,54 @@ BEGIN_FTR_SECTION \
	beql	ppc64_runlatch_on_trampoline;   \
 END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+
+/*
+ * This macro will reclaim a transaction if called when coming from userspace
+ * (MSR.PR = 1) and if the transaction state is active or suspended.
+ *
+ * Since we don't want to reclaim when coming from the kernel, for instance
+ * after a trechkpt. or an IRQ replay, the live MSR is not useful; instead,
+ * the MSR from the thread stack is used to check the MSR.PR bit.
+ *
+ * This macro takes one argument, the cause that will be used by treclaim,
+ * and returns '1' in r3 if the reclaim happened or '0' if it didn't, which
+ * is useful to know which registers were clobbered.
+ *
+ * NOTE: If additional registers are clobbered here, make sure the callee
+ * function restores them before proceeding.
+ */
+#define TM_KERNEL_ENTRY(cause) \
+   ld  r3, _MSR(r1);   \
+   andi.   r0, r3, MSR_PR; /* Coming from userspace? */\
+   beq 1f; /* Skip reclaim if MSR.PR != 1 */   \
+   rldicl. r0, r3, (64-MSR_TM_LG), 63; /* Is TM enabled? */\
+   beq 1f; /* Skip reclaim if TM is off */ \
+   rldicl. r0, r3, (64-MSR_TS_LG), 62; /* Is active */ \
+   beq 1f; /* Skip reclaim if neither */   \
+   /*  \
+* If there is a transaction active or suspended, save the  \
+* non-volatile GPRs if they are not already saved. \
+*/ \
+   bl  save_nvgprs;\
+   /*  \
+* Soft disable the IRQs, otherwise it might cause a CPU hang.  \
+*/ \
+   RECONCILE_IRQ_STATE(r10, r11);  \
+   li  r3, cause;  \
+   bl  tm_reclaim_current; \
+   li  r3, 1;  /* Reclaim happened */  \
+   b   2f; \
+1: li  r3, 0;  /* Reclaim didn't happen */ \
+2:
+#else
+#define TM_KERNEL_ENTRY(cause)
+#endif
+
 #define EXCEPTION_COMMON(area, trap, label, hdlr, ret, additions) \
EXCEPTION_PROLOG_COMMON(trap, area);\
/* Volatile regs are potentially clobbered here */  \
additions;  \
+   TM_KERNEL_ENTRY(TM_CAUSE_MISC); \
	addi	r3,r1,STACK_FRAME_OVERHEAD; \
bl  hdlr;   \
b   ret
@@ -704,6 +749,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
EXCEPTION_PROLOG_COMMON_3(trap);\
/* Volatile regs are potentially clobbered here */  \
additions;  \
+   TM_KERNEL_ENTRY(TM_CAUSE_MISC); \
	addi	r3,r1,STACK_FRAME_OVERHEAD; \
bl  hdlr
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2206912ea4f0..c38677b7442c 100644