Masking the daif flags is done very early before returning to EL0.

Only toggle the interrupt masking in the EL0 return paths (ret_fast_syscall,
ret_to_user) and mask all daif flags once, in kernel_exit.
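
For reference, the masking macros used below are defined in
arch/arm64/include/asm/assembler.h. A rough sketch of their definitions
(as of this patch; the exact form may differ across kernel versions):

	.macro	disable_daif		// mask all of D, A, I and F
	msr	daifset, #0xf
	.endm

	.macro	enable_daif		// unmask all of D, A, I and F
	msr	daifclr, #0xf
	.endm

	.macro	disable_irq		// mask PSR.I (IRQs) only
	msr	daifset, #2
	.endm

	.macro	enable_irq		// unmask PSR.I (IRQs) only
	msr	daifclr, #2
	.endm

So the EL0 return paths now only toggle IRQ masking, and the full daif
masking happens exactly once, in kernel_exit.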

Signed-off-by: Julien Thierry <julien.thie...@arm.com>
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Mark Rutland <mark.rutl...@arm.com>
Cc: James Morse <james.mo...@arm.com>
---
 arch/arm64/kernel/entry.S | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ec2ee72..20252d5 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -240,9 +240,9 @@ alternative_else_nop_endif
        .endm
 
        .macro  kernel_exit, el
-       .if     \el != 0
        disable_daif
 
+       .if     \el != 0
        /* Restore the task's original addr_limit. */
        ldr     x20, [sp, #S_ORIG_ADDR_LIMIT]
        str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
@@ -872,7 +872,7 @@ ENDPROC(el0_error)
  * and this includes saving x0 back into the kernel stack.
  */
 ret_fast_syscall:
-       disable_daif
+       disable_irq                             // disable interrupts
        str     x0, [sp, #S_X0]                 // returned x0
        ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for syscall tracing
        and     x2, x1, #_TIF_SYSCALL_WORK
@@ -882,7 +882,7 @@ ret_fast_syscall:
        enable_step_tsk x1, x2
        kernel_exit 0
 ret_fast_syscall_trace:
-       enable_daif
+       enable_irq                              // enable interrupts
        b       __sys_trace_return_skipped      // we already saved x0
 
 /*
@@ -900,7 +900,7 @@ work_pending:
  * "slow" syscall return path.
  */
 ret_to_user:
-       disable_daif
+       disable_irq                             // disable interrupts
        ldr     x1, [tsk, #TSK_TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, work_pending
-- 
1.9.1
