This adds the function implementations necessary to support the exception
extensions on AArch64 and enables the RTEMS_EXCEPTION_EXTENSIONS build
option for the architecture.
---
 .../cpu/aarch64/aarch64-exception-default.S   | 131 +++++---------
 .../cpu/aarch64/aarch64-exception-default.c   |  59 ++++++-
 .../cpu/aarch64/aarch64-exception-interrupt.S | 165 ++++++++++++++++++
 .../cpu/aarch64/include/rtems/score/cpu.h     |  21 +++
 spec/build/cpukit/optexceptionextensions.yml  |   1 +
 5 files changed, 293 insertions(+), 84 deletions(-)
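
For context, the new entry points declared in cpu.h below are intended to be
driven by higher-level exception handling code. The following is a minimal
sketch of such a caller; the handler and the recovery routine my_recover are
assumptions for illustration only, only the _CPU_* functions come from this
patch, and the exact hand-off from the vector code into such a handler is not
shown here:

#include <signal.h>
#include <rtems/fatal.h>
#include <rtems/score/cpu.h>

void my_recover( void ); /* assumed recovery routine, not part of this patch */

void my_exception_handler( CPU_Exception_frame *ef )
{
  /* Keep the scheduler quiescent while the frame is inspected */
  _CPU_Exception_disable_thread_dispatch();

  switch ( _CPU_Exception_frame_get_signal( ef ) ) {
    case SIGFPE:
      /* Skip the faulting instruction and continue */
      _CPU_Exception_frame_make_resume_next_instruction( ef );
      break;
    case SIGSEGV:
      /* Redirect execution to a recovery routine */
      _CPU_Exception_frame_set_resume( ef, (void *) my_recover );
      break;
    default:
      /* Anything unrecognized remains fatal */
      rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) ef );
  }

  /* Undo the dispatch disable and return to the (possibly modified) frame */
  _CPU_Exception_dispatch_and_resume( ef );
}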

diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
index 2a4ddbcc61..c7c9d03465 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
@@ -72,7 +72,7 @@
  * * The exception returns to the previous execution state
  */
 
-       .macro  JUMP_HANDLER_SHORT
+       .macro  JUMP_HANDLER
 /* Mask to use in BIC, lower 7 bits */
        mov x0, #0x7f
 /* LR contains PC, mask off to the base of the current vector */
@@ -109,10 +109,6 @@
        nop
        nop
        nop
-       .endm
-
-       .macro  JUMP_HANDLER
-       JUMP_HANDLER_SHORT
        nop
        .endm
 
@@ -144,11 +140,48 @@ Vector_table_el3:
  * using SP0.
  */
 curr_el_sp0_sync:
-       stp x0, lr,     [sp, #-0x10]!   /* Push x0,lr on to the stack */
-       bl curr_el_sp0_sync_get_pc      /* Get current execution address */
-curr_el_sp0_sync_get_pc:               /* The current PC is now in LR */
-       JUMP_HANDLER
-       JUMP_TARGET_SP0
+       sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE                       /* reserve space for CEF */
+       str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]       /* shove lr into CEF */
+       bl .push_exception_context_start                                /* bl to CEF store routine */
+/* Save original sp in x0 for .push_exception_context_finish */
+       add x0, sp, #AARCH64_EXCEPTION_FRAME_SIZE                       /* save original sp */
+/* Push the remainder of the context */
+       bl .push_exception_context_finish
+/* get jump target and branch/link */
+       bl curr_el_sp0_sync_get_pc              /* Get current execution address */
+curr_el_sp0_sync_get_pc:                       /* The current PC is now in LR */
+       mov x0, #0x7f                           /* Mask to use in BIC, lower 7 bits */
+       bic x0, lr,     x0                      /* Mask LR to base of current vector */
+       ldr x1, [x0,    #0x78]                  /* Load target from last word in vector */
+       and lr, lr, #0x780                      /* Mask off bits for vector number */
+       lsr lr, lr, #7                          /* Shift the vector bits down */
+/* Store the vector */
+       str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
+       mov x0, sp
+       blr x1
+       b twiddle
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+       nop
+/* Takes up the space of 2 instructions */
+#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
+       .word _AArch64_Exception_default
+       .word 0x0
+#else
+       .dword _AArch64_Exception_default
+#endif
 .balign 0x80
 /* The exception handler for IRQ exceptions from the current EL using SP0. */
 curr_el_sp0_irq:
@@ -204,13 +237,11 @@ curr_el_spx_sync_get_pc:                  /* The current PC is now in LR */
        str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_VECTOR_OFFSET]
        mov x0, sp
        blr x1
-/* bl to CEF restore routine (doesn't restore lr) */
-       bl .pop_exception_context
-       ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]       /* get lr from CEF */
-/* drop space reserved for CEF and clear exclusive */
-       add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
-       msr spsel, #1                           /* switch to thread stack */
-       eret                                    /* exception return */
+       b twiddle
+       nop
+       nop
+       nop
+       nop
        nop
        nop
        nop
@@ -475,69 +506,3 @@ twiddle:
        stp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
 /* Done, return to exception handler */
        ret
-
-/*
- * Apply the exception frame to the current register status, SP points to the EF
- */
-.pop_exception_context:
-/* Pop daif and spsr */
-       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
-/* Restore daif and spsr */
-       msr DAIF, x2
-       msr SPSR_EL1, x3
-/* Pop FAR and ESR */
-       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
-/* Restore ESR and FAR */
-       msr ESR_EL1, x2
-       msr FAR_EL1, x3
-/* Pop fpcr and fpsr */
-       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
-/* Restore fpcr and fpsr */
-       msr FPSR, x2
-       msr FPCR, x3
-/* Pop VFP registers */
-       ldp q0,  q1,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
-       ldp q2,  q3,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
-       ldp q4,  q5,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
-       ldp q6,  q7,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
-       ldp q8,  q9,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
-       ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
-       ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
-       ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
-       ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
-       ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
-       ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
-       ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
-       ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
-       ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
-       ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
-       ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
-/* Pop x0-x29(fp) */
-       ldp x2,  x3,  [sp, #0x10]
-       ldp x4,  x5,  [sp, #0x20]
-       ldp x6,  x7,  [sp, #0x30]
-       ldp x8,  x9,  [sp, #0x40]
-       ldp x10, x11, [sp, #0x50]
-       ldp x12, x13, [sp, #0x60]
-       ldp x14, x15, [sp, #0x70]
-       ldp x16, x17, [sp, #0x80]
-       ldp x18, x19, [sp, #0x90]
-       ldp x20, x21, [sp, #0xa0]
-       ldp x22, x23, [sp, #0xb0]
-       ldp x24, x25, [sp, #0xc0]
-       ldp x26, x27, [sp, #0xd0]
-       ldp x28, x29, [sp, #0xe0]
-/* Pop sp and ELR */
-       ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
-/* Restore thread SP */
-       msr spsel, #1
-       mov sp, x0
-       msr spsel, #0
-/* Restore exception LR */
-       msr ELR_EL1, x1
-       ldp x0,  x1,  [sp, #0x00]
-
-/* We must clear reservations to ensure consistency with atomic operations */
-       clrex
-
-       ret
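
The address arithmetic in curr_el_sp0_sync above relies on the standard
AArch64 vector table layout: 16 entries of 0x80 bytes each in an
0x800-byte-aligned table, with the handler pointer stored in the last
doubleword of each entry (loaded via ldr x1, [x0, #0x78]). A rough C
rendering of that arithmetic, for illustration only:

#include <stdint.h>

/* lr holds the return address produced by the bl inside the vector entry */
static inline uint64_t vector_entry_base( uint64_t lr )
{
  return lr & ~UINT64_C( 0x7f );           /* bic x0, lr, #0x7f */
}

static inline uint64_t vector_number( uint64_t lr )
{
  return ( lr & UINT64_C( 0x780 ) ) >> 7;  /* and lr, lr, #0x780; lsr lr, lr, #7 */
}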
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.c b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
index 2ebb3dee9f..3494c88ea6 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.c
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.c
@@ -41,10 +41,67 @@
 #include "config.h"
 #endif
 
-#include <rtems/score/cpu.h>
 #include <rtems/fatal.h>
+#include <rtems/score/aarch64-system-registers.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/percpu.h>
 
 void _AArch64_Exception_default( CPU_Exception_frame *frame )
 {
   rtems_fatal( RTEMS_FATAL_SOURCE_EXCEPTION, (rtems_fatal_code) frame );
 }
+
+void _CPU_Exception_disable_thread_dispatch( void )
+{
+  Per_CPU_Control *cpu_self = _Per_CPU_Get();
+
+  /* Increment interrupt nest and thread dispatch disable level */
+  ++cpu_self->thread_dispatch_disable_level;
+  ++cpu_self->isr_nest_level;
+}
+
+void _AArch64_Exception_frame_copy(
+  CPU_Exception_frame *new_ef,
+  CPU_Exception_frame *old_ef
+)
+{
+  *new_ef = *old_ef;
+}
+
+int _CPU_Exception_frame_get_signal( CPU_Exception_frame *ef )
+{
+  uint64_t EC = AARCH64_ESR_EL1_EC_GET( ef->register_syndrome );
+
+  switch ( EC ) {
+    case 0x1:  /* WFI */
+    case 0x7:  /* SVE/SIMD/FP */
+    case 0xa:  /* LD64B/ST64B* */
+    case 0x18: /* MSR/MRS/system instruction */
+    case 0x19: /* SVE */
+    case 0x15: /* Supervisor call */
+    case 0x26: /* SP Alignment */
+    case 0x31: /* Breakpoint */
+    case 0x33: /* Step */
+    case 0x35: /* Watchpoint */
+    case 0x3c: /* Break Instruction */
+      return -1;
+    case 0x2c: /* FPU */
+      return SIGFPE;
+    case 0x21: /* Instruction Abort */
+    case 0x25: /* Data Abort */
+      return SIGSEGV;
+    default:
+      return SIGILL;
+  }
+}
+
+void _CPU_Exception_frame_set_resume( CPU_Exception_frame *ef, void *address )
+{
+  ef->register_pc = address;
+}
+
+#define AARCH64_INSTRUCTION_SIZE 4
+void  _CPU_Exception_frame_make_resume_next_instruction( CPU_Exception_frame *ef )
+{
+  ef->register_pc += AARCH64_INSTRUCTION_SIZE;
+}
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
index b206f5764b..6344dce63a 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
@@ -45,6 +45,8 @@
 
 .globl _AArch64_Exception_interrupt_no_nest
 .globl _AArch64_Exception_interrupt_nest
+.globl _CPU_Exception_dispatch_and_resume
+.globl _CPU_Exception_resume
 
 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
   #ifdef RTEMS_SMP
@@ -324,3 +326,166 @@ Return to embedded exception vector code
        pop_interrupt_context
 /* Return to vector for final cleanup */
        ret
+
+/*
+ * This function is expected to resume execution using the CPU_Exception_frame
+ * provided in x0. This function  does not adhere to the AAPCS64 calling
+ * convention because all necessary state is contained within the exception
+ * frame.
+ */
+_CPU_Exception_resume:
+/* Reset stack pointer */
+       mov     sp, x0
+
+/* call CEF restore routine (doesn't restore lr) */
+       bl .pop_exception_context
+
+/* get lr from CEF */
+       ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
+
+/* drop space reserved for CEF */
+       add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+
+/* switch to thread stack */
+       msr spsel, #1
+       eret
+
+/*
+ * This function is expected to undo dispatch disabling, perform dispatch, and
+ * resume execution using the CPU_Exception_frame provided in x0. This function
+ * does not adhere to the AAPCS64 calling convention because all necessary
+ * state is contained within the exception frame.
+ */
+_CPU_Exception_dispatch_and_resume:
+/* Get per-CPU control of current processor */
+       GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG
+
+/* Reset stack pointer */
+       mov     sp, x0
+
+/* Check dispatch disable and perform dispatch if necessary */
+/* Load some per-CPU variables */
+       ldr     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+       ldrb    w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
+       ldr     w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
+       ldr     w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
+
+/* Decrement levels and determine thread dispatch state */
+       eor     w1, w1, w0
+       sub     w0, w0, #1
+       orr     w1, w1, w0
+       orr     w1, w1, w2
+       sub     w3, w3, #1
+
+/* Store thread dispatch disable and ISR nest levels */
+       str     w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
+       str     w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
+
+/* store should_skip_thread_dispatch in x22 */
+       mov x22, x1
+
+/*
+ * It is now safe to assume that the source of the exception has been resolved.
+ * Copy the exception frame to the thread stack to be compatible with thread
+ * dispatch. This may arbitrarily clobber corruptible registers since all
+ * important state is contained in the exception frame.
+ *
+ * No need to save current LR since this will never return to the caller.
+ */
+       bl .move_exception_frame_and_switch_to_thread_stack
+
+/*
+ * Check thread dispatch necessary, ISR dispatch disable and thread dispatch
+ * disable level.
+ */
+       cmp     x22, #0
+       bne     .Lno_need_thread_dispatch_resume
+       bl .AArch64_Perform_Thread_Dispatch
+.Lno_need_thread_dispatch_resume:
+/* call CEF restore routine (doesn't restore lr) */
+       bl .pop_exception_context
+
+/* get lr from CEF */
+       ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
+
+/* drop space reserved for CEF */
+       add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
+       eret
+
+/* Assumes sp currently points to the EF on the exception stack and SPSel is 0 */
+.move_exception_frame_and_switch_to_thread_stack:
+       mov x1, sp                                                      /* Set x1 to the current exception frame */
+       msr spsel, #1                                                   /* switch to thread stack */
+       ldr x0, [x1, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]       /* Get thread SP from exception frame since it may have been updated */
+       mov sp, x0
+       sub sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE                       /* reserve space for CEF */
+       mov x0, sp                                                      /* Set x0 to the new exception frame */
+       mov x20, lr                                                     /* Save LR */
+       bl _AArch64_Exception_frame_copy                                /* Copy exception frame to reserved thread stack space */
+       mov lr, x20                                                     /* Restore LR */
+       msr spsel, #0                                                   /* switch to exception stack */
+       add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE                       /* release space for CEF on exception stack */
+       msr spsel, #1                                                   /* switch to thread stack */
+       ret
+
+/*
+ * Apply the exception frame to the current register status, SP points to the EF
+ */
+.pop_exception_context:
+/* Pop daif and spsr */
+       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
+/* Restore daif and spsr */
+       msr DAIF, x2
+       msr SPSR_EL1, x3
+/* Pop FAR and ESR */
+       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
+/* Restore ESR and FAR */
+       msr ESR_EL1, x2
+       msr FAR_EL1, x3
+/* Pop fpcr and fpsr */
+       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
+/* Restore fpcr and fpsr */
+       msr FPSR, x2
+       msr FPCR, x3
+/* Pop VFP registers */
+       ldp q0,  q1,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
+       ldp q2,  q3,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
+       ldp q4,  q5,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
+       ldp q6,  q7,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
+       ldp q8,  q9,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
+       ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
+       ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
+       ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
+       ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
+       ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
+       ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
+       ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
+       ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
+       ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
+       ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
+       ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
+/* Pop x0-x29(fp) */
+       ldp x2,  x3,  [sp, #0x10]
+       ldp x4,  x5,  [sp, #0x20]
+       ldp x6,  x7,  [sp, #0x30]
+       ldp x8,  x9,  [sp, #0x40]
+       ldp x10, x11, [sp, #0x50]
+       ldp x12, x13, [sp, #0x60]
+       ldp x14, x15, [sp, #0x70]
+       ldp x16, x17, [sp, #0x80]
+       ldp x18, x19, [sp, #0x90]
+       ldp x20, x21, [sp, #0xa0]
+       ldp x22, x23, [sp, #0xb0]
+       ldp x24, x25, [sp, #0xc0]
+       ldp x26, x27, [sp, #0xd0]
+       ldp x28, x29, [sp, #0xe0]
+/* Pop ELR, SP already popped */
+       ldr x1, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET + 0x8)]
+/* Restore exception LR */
+       msr ELR_EL1, x1
+       ldp x0,  x1,  [sp, #0x00]
+
+/* We must clear reservations to ensure consistency with atomic operations */
+       clrex
+
+       ret
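
The branch-free test in _CPU_Exception_dispatch_and_resume above combines the
per-CPU variables into a single "skip thread dispatch" word. A C sketch of the
same computation, using names that mirror the Per_CPU_Control fields (the
helper itself is illustrative only):

#include <stdbool.h>
#include <stdint.h>

static inline bool should_skip_thread_dispatch(
  uint32_t disable_level,        /* thread dispatch disable level, pre-decrement */
  uint32_t dispatch_needed,      /* 0 or 1 */
  uint32_t isr_dispatch_disable
)
{
  /*
   * Dispatch only when the disable level drops to zero, a dispatch is
   * actually needed and ISR dispatch is not disabled; any other combination
   * leaves at least one bit set in the result.
   */
  return ( ( dispatch_needed ^ disable_level )
         | ( disable_level - 1 )
         | isr_dispatch_disable ) != 0;
}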
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index ae7e2bdcba..e1d9f0a5c2 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -524,6 +524,27 @@ typedef struct {
 
 void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
 
+RTEMS_NO_RETURN void _CPU_Exception_resume( CPU_Exception_frame *frame );
+
+RTEMS_NO_RETURN void
+_CPU_Exception_dispatch_and_resume( CPU_Exception_frame *frame );
+
+void _CPU_Exception_disable_thread_dispatch( void );
+
+int _CPU_Exception_frame_get_signal( CPU_Exception_frame *frame );
+
+void _CPU_Exception_frame_set_resume( CPU_Exception_frame *frame,
+                                      void *address );
+
+void _CPU_Exception_frame_make_resume_next_instruction(
+  CPU_Exception_frame *frame
+);
+
+void _AArch64_Exception_frame_copy(
+  CPU_Exception_frame *new_ef,
+  CPU_Exception_frame *old_ef
+);
+
 void _AArch64_Exception_default( CPU_Exception_frame *frame );
 
 /** Type that can store a 32-bit integer or a pointer. */
diff --git a/spec/build/cpukit/optexceptionextensions.yml b/spec/build/cpukit/optexceptionextensions.yml
index 1bc6d8686f..67c787e7dd 100644
--- a/spec/build/cpukit/optexceptionextensions.yml
+++ b/spec/build/cpukit/optexceptionextensions.yml
@@ -13,6 +13,7 @@ description: |
   Enable the RTEMS Exception Extensions for manipulating and acting on exception
   frames.
 enabled-by:
+- aarch64
 links: []
 name: RTEMS_EXCEPTION_EXTENSIONS
 type: build
-- 
2.30.2
