On Tue, 2018-11-06 at 10:40 -0200, Breno Leitao wrote: > In the past, TIF_RESTORE_TM was being handled with the rest of the TIF > workers, > but, that was too early, and can cause some IRQ to be replayed in suspended > state (after recheckpoint). > > This patch moves TIF_RESTORE_TM handler to as late as possible, it also > forces the IRQ to be disabled, and it will continue to be until RFID, so, > no IRQ will be replayed at all. I.e, if trecheckpoint happens, it will RFID > to userspace. > > This makes TIF_RESTORE_TM a special case that should not be handled > similarly to the _TIF_USER_WORK_MASK. > > Since _TIF_RESTORE_TM is not part of _TIF_USER_WORK_MASK anymore, we > need to force system_call_exit to continue to leave through > fast_exception_return, so, we add the flags together with > _TIF_USER_WORK_MASK at the system_call_exit path. > > If this flag is set at system_call_exit, it means that recheckpoint > will be called, and doing it through fast_exception_return is the only > way to do so. 
> > Signed-off-by: Breno Leitao <lei...@debian.org> > --- > arch/powerpc/include/asm/thread_info.h | 2 +- > arch/powerpc/kernel/entry_64.S | 23 ++++++++++++++--------- > 2 files changed, 15 insertions(+), 10 deletions(-) > > diff --git a/arch/powerpc/include/asm/thread_info.h > b/arch/powerpc/include/asm/thread_info.h > index 544cac0474cb..2835d60bc9ef 100644 > --- a/arch/powerpc/include/asm/thread_info.h > +++ b/arch/powerpc/include/asm/thread_info.h > @@ -139,7 +139,7 @@ void arch_setup_new_exec(void); > > #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ > _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ > - _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \ > + _TIF_PATCH_PENDING | \ > _TIF_FSCHECK) > #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) > > diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S > index 17484ebda66c..a86619edf29d 100644 > --- a/arch/powerpc/kernel/entry_64.S > +++ b/arch/powerpc/kernel/entry_64.S > @@ -255,7 +255,7 @@ system_call_exit: > > ld r9,TI_FLAGS(r12) > li r11,-MAX_ERRNO > - andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MAS > K|_TIF_PERSYSCALL_MASK) > + andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK| > _TIF_PERSYSCALL_MASK |_TIF_RESTORE_TM) > bne- .Lsyscall_exit_work > > andi. r0,r8,MSR_FP > @@ -784,14 +784,6 @@ _GLOBAL(ret_from_except_lite) > SCHEDULE_USER > b ret_from_except_lite > 2: > -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM > - andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM > - bne 3f /* only restore TM if nothing else to do */ > - addi r3,r1,STACK_FRAME_OVERHEAD > - bl restore_tm_state > - b restore > -3: > -#endif > bl save_nvgprs > /* > * Use a non volatile GPR to save and restore our thread_info flags > @@ -938,6 +930,19 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) > */ > .globl fast_exception_return > fast_exception_return: > + > +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM > + CURRENT_THREAD_INFO(r4, r1) > + ld r4,TI_FLAGS(r4) > + andi. 
r0,r4,_TIF_RESTORE_TM > + beq 22f > + ld r4,_MSR(r1) /* TODO: MSR[!PR] shouldn't be here */ > + andi. r0,r4,MSR_PR > + beq 22f /* Skip if Kernel thread */ > + addi r3,r1,STACK_FRAME_OVERHEAD > + bl restore_tm_state
Calling out to C this late is a bit concerning. The main thing you are calling out to is asm anyway (with a small C wrapper). We might want to adapt the asm in this case, rather than the old model of getting it called from __switch_to(). We shouldn't need to call tm_reclaim/tm_recheckpoint from C anymore (especially if we use BUG_ON() rather than WARN_ON() in the cases already mentioned). Same for patch 1. > +22: > +#endif > ld r3,_MSR(r1) > ld r4,_CTR(r1) > ld r0,_LINK(r1)