This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new 10b40abecc arm64_task/pthread_start: Convert the C / inline ASM code 
to assembly
10b40abecc is described below

commit 10b40abeccf6c61a1dbef79204cf630ab25d4752
Author: Ville Juven <[email protected]>
AuthorDate: Fri Sep 13 13:20:56 2024 +0300

    arm64_task/pthread_start: Convert the C / inline ASM code to assembly
    
    The aforementioned functions can fail if the C compiler decides
    to spill the incoming parameters (entrypt, arg, etc.) to the stack.
    
    Fix this issue by converting the jump to user part into pure assembly,
    ensuring the stack is NOT used for the parameters.
---
 arch/arm64/src/common/arm64_internal.h      |  2 ++
 arch/arm64/src/common/arm64_pthread_start.c | 26 ++-----------------------
 arch/arm64/src/common/arm64_task_start.c    | 26 ++-----------------------
 arch/arm64/src/common/arm64_vectors.S       | 30 +++++++++++++++++++++++++++++
 4 files changed, 36 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/src/common/arm64_internal.h 
b/arch/arm64/src/common/arm64_internal.h
index 35e51575bd..19c8b9eb69 100644
--- a/arch/arm64/src/common/arm64_internal.h
+++ b/arch/arm64/src/common/arm64_internal.h
@@ -270,6 +270,8 @@ EXTERN uint8_t g_idle_topstack[];   /* End+1 of heap */
  ****************************************************************************/
 
 void arm64_new_task(struct tcb_s *tak_new);
+void arm64_jump_to_user(uint64_t entry, uint64_t x0, uint64_t x1,
+                        uint64_t *regs) noreturn_function;
 
 /* Low level initialization provided by chip logic */
 
diff --git a/arch/arm64/src/common/arm64_pthread_start.c 
b/arch/arm64/src/common/arm64_pthread_start.c
index db80db228b..8132884cf8 100644
--- a/arch/arm64/src/common/arm64_pthread_start.c
+++ b/arch/arm64/src/common/arm64_pthread_start.c
@@ -68,12 +68,6 @@
 void up_pthread_start(pthread_trampoline_t startup,
                       pthread_startroutine_t entrypt, pthread_addr_t arg)
 {
-  uint64_t *regs = this_task()->xcp.initregs;
-
-  /* This must be performed atomically, the C-section ends upon user entry */
-
-  enter_critical_section();
-
   /* Set up to enter the user-space pthread start-up function in
    * unprivileged mode. We need:
    *
@@ -83,24 +77,8 @@ void up_pthread_start(pthread_trampoline_t startup,
    *   SPSR = user mode
    */
 
-  regs[REG_ELR]  = (uint64_t)startup;
-  regs[REG_X0]   = (uint64_t)entrypt;
-  regs[REG_X1]   = (uint64_t)arg;
-  regs[REG_SPSR] = (regs[REG_SPSR] & ~SPSR_MODE_MASK) | SPSR_MODE_EL0T;
-
-  /* Fully unwind the kernel stack and drop to user space */
-
-  asm
-  (
-    "mov x0, %0\n" /* Get context registers */
-    "mov sp, x0\n" /* Stack pointer = context */
-    "b arm64_exit_exception\n"
-    :
-    : "r" (regs)
-    : "x0", "memory"
-  );
-
-  PANIC();
+  arm64_jump_to_user((uint64_t)startup, (uint64_t)entrypt, (uint64_t)arg,
+                     this_task()->xcp.initregs);
 }
 
 #endif /* !CONFIG_BUILD_FLAT && __KERNEL__ && !CONFIG_DISABLE_PTHREAD */
diff --git a/arch/arm64/src/common/arm64_task_start.c 
b/arch/arm64/src/common/arm64_task_start.c
index 5e3501eee6..0d20f693e5 100644
--- a/arch/arm64/src/common/arm64_task_start.c
+++ b/arch/arm64/src/common/arm64_task_start.c
@@ -65,12 +65,6 @@
 
 void up_task_start(main_t taskentry, int argc, char *argv[])
 {
-  uint64_t *regs = this_task()->xcp.initregs;
-
-  /* This must be performed atomically, the C-section ends upon user entry */
-
-  enter_critical_section();
-
   /* Set up to return to the user-space _start function in
    * unprivileged mode.  We need:
    *
@@ -80,24 +74,8 @@ void up_task_start(main_t taskentry, int argc, char *argv[])
    *   SPSR = user mode
    */
 
-  regs[REG_ELR]  = (uint64_t)taskentry;
-  regs[REG_X0]   = (uint64_t)argc;
-  regs[REG_X1]   = (uint64_t)argv;
-  regs[REG_SPSR] = (regs[REG_SPSR] & ~SPSR_MODE_MASK) | SPSR_MODE_EL0T;
-
-  /* Fully unwind the kernel stack and drop to user space */
-
-  asm
-  (
-    "mov x0, %0\n" /* Get context registers */
-    "mov sp, x0\n" /* Stack pointer = context */
-    "b arm64_exit_exception\n"
-    :
-    : "r" (regs)
-    : "x0", "memory"
-  );
-
-  PANIC();
+  arm64_jump_to_user((uint64_t)taskentry, (uint64_t)argc, (uint64_t)argv,
+                     this_task()->xcp.initregs);
 }
 
 #endif /* !CONFIG_BUILD_FLAT */
diff --git a/arch/arm64/src/common/arm64_vectors.S 
b/arch/arm64/src/common/arm64_vectors.S
index 16f75106b2..d2d1e5c95a 100644
--- a/arch/arm64/src/common/arm64_vectors.S
+++ b/arch/arm64/src/common/arm64_vectors.S
@@ -156,6 +156,36 @@ restore_new:
 
     ret
 
+/****************************************************************************
+ * Function: arm64_jump_to_user
+ *
+ * Description:
+ *  Routine to jump to user space, called when a user process is started and
+ *  the kernel is ready to give control to the user task in user space.
+ *
+ * arm64_jump_to_user(entry, x0, x1, regs)
+ *     entry: process entry point
+ *     x0:    parameter 0 for process
+ *     x1:    parameter 1 for process
+ *     regs:  integer register save area to use
+ *
+ ****************************************************************************/
+
+#ifndef CONFIG_BUILD_FLAT
+GTEXT(arm64_jump_to_user)
+SECTION_FUNC(text, arm64_jump_to_user)
+    msr daifset, #IRQ_DAIF_MASK
+    mov sp,  x3
+    str x0,  [sp, #8 * REG_ELR]
+    str x1,  [sp, #8 * REG_X0]
+    str x2,  [sp, #8 * REG_X1]
+    mrs x0,  spsr_el1
+    and x0,  x0, #~SPSR_MODE_MASK
+    #orr x0, x0, #SPSR_MODE_EL0T # EL0T=0x00, out of range for orr
+    str x0,  [sp, #8 * REG_SPSR]
+    b arm64_exit_exception
+#endif
+
 /****************************************************************************
  * Function: arm64_sync_exc
  *

Reply via email to