This is an automated email from the ASF dual-hosted git repository.

ligd pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit be9d0665e41b4eb1d71fb68b95af915a088b6c7c
Author: hujun5 <[email protected]>
AuthorDate: Tue May 13 14:48:05 2025 +0800

    arm64: support protected build

    Add CONFIG_BUILD_PROTECTED support alongside
    CONFIG_BUILD_KERNEL for the saved register
    pointers and the task startup functions. Fix
    arm64_fork() by dropping the AArch32-style
    Thumb-bit masking of the link register, which
    does not apply to AArch64. Mark
    up_allocate_kheap() as a weak_function so that
    board logic can provide its own kernel heap
    allocation.
    
    Signed-off-by: hujun5 <[email protected]>
---
 arch/arm64/include/irq.h                    |  2 +-
 arch/arm64/src/common/CMakeLists.txt        |  2 +-
 arch/arm64/src/common/Make.defs             |  2 +-
 arch/arm64/src/common/arm64_allocateheap.c  |  2 +-
 arch/arm64/src/common/arm64_fork.c          |  2 +-
 arch/arm64/src/common/arm64_internal.h      |  4 ++-
 arch/arm64/src/common/arm64_mpu.h           | 53 ++++++++++++++++++++++++-----
 arch/arm64/src/common/arm64_pthread_start.c |  9 ++++-
 arch/arm64/src/common/arm64_syscall.c       | 10 ++++--
 arch/arm64/src/common/arm64_task_start.c    | 19 ++++++++---
 arch/arm64/src/common/arm64_vectors.S       |  7 ++--
 11 files changed, 87 insertions(+), 25 deletions(-)

diff --git a/arch/arm64/include/irq.h b/arch/arm64/include/irq.h
index 0cc12ebc8d6..fb08ce0df1b 100644
--- a/arch/arm64/include/irq.h
+++ b/arch/arm64/include/irq.h
@@ -268,7 +268,7 @@ struct xcptcontext
 
   uint64_t *saved_regs;
 
-#ifdef CONFIG_BUILD_KERNEL
+#if defined(CONFIG_BUILD_KERNEL) || defined(CONFIG_BUILD_PROTECTED)
   /* This is the saved address to use when returning from a user-space
    * signal handler.
    */
diff --git a/arch/arm64/src/common/CMakeLists.txt b/arch/arm64/src/common/CMakeLists.txt
index 3ebe4abb3f9..28b9f589de9 100644
--- a/arch/arm64/src/common/CMakeLists.txt
+++ b/arch/arm64/src/common/CMakeLists.txt
@@ -91,7 +91,7 @@ if(CONFIG_SMP)
   list(APPEND SRCS arm64_smpcall.c)
 endif()
 
-if(CONFIG_BUILD_KERNEL)
+if(CONFIG_BUILD_KERNEL OR CONFIG_BUILD_PROTECTED)
   list(APPEND SRCS arm64_task_start.c arm64_pthread_start.c)
   if(CONFIG_ENABLE_ALL_SIGNALS)
     list(APPEND SRCS arm64_signal_dispatch.c)
diff --git a/arch/arm64/src/common/Make.defs b/arch/arm64/src/common/Make.defs
index fca07f4e864..28608e8ae4b 100644
--- a/arch/arm64/src/common/Make.defs
+++ b/arch/arm64/src/common/Make.defs
@@ -100,7 +100,7 @@ CMN_CSRCS += arm64_cpuidlestack.c arm64_cpustart.c
 CMN_CSRCS += arm64_smpcall.c
 endif
 
-ifeq ($(CONFIG_BUILD_KERNEL),y)
+ifeq ($(CONFIG_BUILD_PROTECTED)$(CONFIG_BUILD_KERNEL),y)
 CMN_CSRCS += arm64_task_start.c arm64_pthread_start.c
   ifeq ($(CONFIG_ENABLE_ALL_SIGNALS),y)
     CMN_CSRCS += arm64_signal_dispatch.c
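
Note on the Make.defs condition: $(CONFIG_BUILD_PROTECTED)$(CONFIG_BUILD_KERNEL)
concatenates the two variables as a logical OR. The build modes are a mutually
exclusive Kconfig choice, so when either option is enabled the concatenation
evaluates to exactly "y" and the task/pthread start sources are compiled; in a
flat build both are empty and the ifeq fails.
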
diff --git a/arch/arm64/src/common/arm64_allocateheap.c b/arch/arm64/src/common/arm64_allocateheap.c
index 184ff5db07b..fa065623ac7 100644
--- a/arch/arm64/src/common/arm64_allocateheap.c
+++ b/arch/arm64/src/common/arm64_allocateheap.c
@@ -145,7 +145,7 @@ void up_allocate_heap(void **heap_start, size_t *heap_size)
  ****************************************************************************/
 
 #if defined(CONFIG_BUILD_PROTECTED) && defined(CONFIG_MM_KERNEL_HEAP)
-void up_allocate_kheap(void **heap_start, size_t *heap_size)
+void weak_function up_allocate_kheap(void **heap_start, size_t *heap_size)
 {
   /* Get the unaligned size and position of the user-space heap.
    * This heap begins after the user-space .bss section at an offset
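
With up_allocate_kheap() declared as a weak_function, board logic can override
where the kernel heap lives in a protected build. A minimal sketch of such an
override, assuming hypothetical board-defined BOARD_KHEAP_BASE and
BOARD_KHEAP_SIZE macros (not part of this patch):

    #include <nuttx/config.h>
    #include <sys/types.h>

    #include <nuttx/arch.h>

    #if defined(CONFIG_BUILD_PROTECTED) && defined(CONFIG_MM_KERNEL_HEAP)
    /* Board-specific override: this strong definition takes precedence over
     * the weak default in arm64_allocateheap.c.
     */

    void up_allocate_kheap(void **heap_start, size_t *heap_size)
    {
      /* Place the kernel heap in a board-chosen region instead of the
       * default region derived from the user-space blob.
       */

      *heap_start = (void *)BOARD_KHEAP_BASE;  /* hypothetical board macro */
      *heap_size  = BOARD_KHEAP_SIZE;          /* hypothetical board macro */
    }
    #endif
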
diff --git a/arch/arm64/src/common/arm64_fork.c b/arch/arm64/src/common/arm64_fork.c
index ec20e02a088..73fbd70922b 100644
--- a/arch/arm64/src/common/arm64_fork.c
+++ b/arch/arm64/src/common/arm64_fork.c
@@ -131,7 +131,7 @@ pid_t arm64_fork(const struct fork_s *context)
 
   /* Allocate and initialize a TCB for the child task. */
 
-  child = nxtask_setup_fork((start_t)(context->lr & ~1));
+  child = nxtask_setup_fork((start_t)context->lr);
   if (!child)
     {
       serr("ERROR: nxtask_setup_fork failed\n");
diff --git a/arch/arm64/src/common/arm64_internal.h b/arch/arm64/src/common/arm64_internal.h
index e377ffe1a76..a99491ab26c 100644
--- a/arch/arm64/src/common/arm64_internal.h
+++ b/arch/arm64/src/common/arm64_internal.h
@@ -252,7 +252,9 @@ EXTERN uint8_t g_idle_topstack[];   /* End+1 of heap */
  ****************************************************************************/
 
 void arm64_new_task(struct tcb_s *tak_new);
-void arm64_jump_to_user(uint64_t entry, uint64_t x0, uint64_t x1,
+
+void arm64_jump_to_user(uint64_t entry, uint64_t x0,
+                        uint64_t x1, uint64_t x2,
                         uint64_t sp_el0, uint64_t *regs) noreturn_function;
 
 /* Low level initialization provided by chip logic */
diff --git a/arch/arm64/src/common/arm64_mpu.h b/arch/arm64/src/common/arm64_mpu.h
index 59f62d7ae2d..ff7256ab991 100644
--- a/arch/arm64/src/common/arm64_mpu.h
+++ b/arch/arm64/src/common/arm64_mpu.h
@@ -213,12 +213,19 @@
   }
 
 #ifdef CONFIG_SMP
-#  define REGION_RAM_ATTR                                        \
-    {                                                            \
-      /* AP, XN, SH */                                           \
-      .rbar = (NOT_EXEC | P_RW_U_NA_MSK | INNER_SHAREABLE_MSK) , \
-      /* Cache-ability */                                        \
-      .mair_idx = MPU_MAIR_INDEX_SRAM,                           \
+#  define REGION_RAM_ATTR                                       \
+    {                                                           \
+      /* AP, XN, SH */                                          \
+      .rbar = (NOT_EXEC | P_RW_U_NA_MSK | INNER_SHAREABLE_MSK), \
+      /* Cache-ability */                                       \
+      .mair_idx = MPU_MAIR_INDEX_SRAM,                          \
+    }
+#  define REGION_URAM_ATTR                                      \
+    {                                                           \
+      /* AP, XN, SH */                                          \
+      .rbar = (NOT_EXEC | P_RW_U_RW_MSK | INNER_SHAREABLE_MSK), \
+      /* Cache-ability */                                       \
+      .mair_idx = MPU_MAIR_INDEX_SRAM,                          \
     }
 #else
 #  define REGION_RAM_ATTR                                   \
@@ -228,15 +235,45 @@
       /* Cache-ability */                                   \
       .mair_idx = MPU_MAIR_INDEX_SRAM,                      \
     }
+#  define REGION_URAM_ATTR                                  \
+    {                                                       \
+      /* AP, XN, SH */                                      \
+      .rbar = NOT_EXEC | P_RW_U_RW_MSK | NON_SHAREABLE_MSK, \
+      /* Cache-ability */                                   \
+      .mair_idx = MPU_MAIR_INDEX_SRAM,                      \
+    }
 #endif
-
-#define REGION_RAM_TEXT_ATTR                   \
+#ifdef CONFIG_SMP
+#  define REGION_RAM_TEXT_ATTR                   \
+  {                                              \
+    /* AP, XN, SH */                             \
+    .rbar = P_RO_U_NA_MSK | INNER_SHAREABLE_MSK, \
+    /* Cache-ability */                          \
+    .mair_idx = MPU_MAIR_INDEX_SRAM,             \
+  }
+#  define REGION_RAM_UTEXT_ATTR                  \
+  {                                              \
+    /* AP, XN, SH */                             \
+    .rbar = P_RO_U_RO_MSK | INNER_SHAREABLE_MSK, \
+    /* Cache-ability */                          \
+    .mair_idx = MPU_MAIR_INDEX_SRAM,             \
+  }
+#else
+#  define REGION_RAM_TEXT_ATTR                 \
+  {                                            \
+    /* AP, XN, SH */                           \
+    /* Cache-ability */                        \
+    .rbar = P_RO_U_NA_MSK | NON_SHAREABLE_MSK, \
+    .mair_idx = MPU_MAIR_INDEX_SRAM,           \
+  }
+#  define REGION_RAM_UTEXT_ATTR                \
   {                                            \
     /* AP, XN, SH */                           \
     .rbar = P_RO_U_RO_MSK | NON_SHAREABLE_MSK, \
     /* Cache-ability */                        \
     .mair_idx = MPU_MAIR_INDEX_SRAM,           \
   }
+#endif
 
 #define REGION_RAM_RO_ATTR                                \
   {                                                       \
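
The new REGION_URAM_ATTR and REGION_RAM_UTEXT_ATTR attributes differ from the
existing ones only in the access-permission mask: P_RW_U_RW_MSK and
P_RO_U_RO_MSK grant EL0 access so that the protected build's user-space image
can execute from and read/write its own regions. A rough sketch of how a
board's protected-mode region table might use them; the struct name, the
MPU_REGION_ENTRY()-style helper, and the linker symbols below are assumptions
for illustration, not part of this patch:

    /* Hypothetical MPU table: kernel RAM stays privileged-only while the
     * user text and user RAM regions pick up the EL0-accessible attributes.
     */

    static const struct arm64_mpu_region g_mpu_regions[] =
    {
      MPU_REGION_ENTRY("kram",  (uint64_t)_skernel_ram,
                       (uint64_t)_ekernel_ram, REGION_RAM_ATTR),
      MPU_REGION_ENTRY("utext", (uint64_t)_suser_text,
                       (uint64_t)_euser_text,  REGION_RAM_UTEXT_ATTR),
      MPU_REGION_ENTRY("uram",  (uint64_t)_suser_ram,
                       (uint64_t)_euser_ram,   REGION_URAM_ATTR),
    };
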
diff --git a/arch/arm64/src/common/arm64_pthread_start.c b/arch/arm64/src/common/arm64_pthread_start.c
index b97ddc528af..c92a55480a7 100644
--- a/arch/arm64/src/common/arm64_pthread_start.c
+++ b/arch/arm64/src/common/arm64_pthread_start.c
@@ -81,8 +81,15 @@ void up_pthread_start(pthread_trampoline_t startup,
    *   SPSR = user mode
    */
 
-  arm64_jump_to_user((uint64_t)startup, (uint64_t)entrypt, (uint64_t)arg,
+#ifdef CONFIG_BUILD_KERNEL
+  arm64_jump_to_user((uint64_t)startup, (uint64_t)entrypt,
+                     (uint64_t)arg, 0,
                      (uint64_t)rtcb->xcp.ustkptr, rtcb->xcp.initregs);
+#elif defined(CONFIG_BUILD_PROTECTED)
+  arm64_jump_to_user((uint64_t)USERSPACE->task_startup,
+                     (uint64_t)startup, (uint64_t)entrypt, (uint64_t)arg,
+                     (uint64_t)rtcb->xcp.ustkptr, rtcb->xcp.initregs);
+#endif
 }
 
 #endif /* !CONFIG_BUILD_FLAT && __KERNEL__ && !CONFIG_DISABLE_PTHREAD */
diff --git a/arch/arm64/src/common/arm64_syscall.c b/arch/arm64/src/common/arm64_syscall.c
index 3884faa4284..d9804df1f46 100644
--- a/arch/arm64/src/common/arm64_syscall.c
+++ b/arch/arm64/src/common/arm64_syscall.c
@@ -159,7 +159,7 @@ uint64_t *arm64_syscall(uint64_t *regs)
   struct tcb_s **running_task = &g_running_tasks[cpu];
   struct tcb_s *tcb = this_task();
   uint64_t cmd;
-#ifdef CONFIG_BUILD_KERNEL
+#if defined(CONFIG_BUILD_KERNEL) || defined(CONFIG_BUILD_PROTECTED)
   uint64_t             spsr;
 #endif
 
@@ -217,7 +217,8 @@ uint64_t *arm64_syscall(uint64_t *regs)
         restore_critical_section(tcb, cpu);
         break;
 
-#if defined(CONFIG_BUILD_KERNEL) && defined(CONFIG_ENABLE_ALL_SIGNALS)
+#if (defined(CONFIG_BUILD_KERNEL) || defined(CONFIG_BUILD_PROTECTED)) \
+    && defined(CONFIG_ENABLE_ALL_SIGNALS)
       /* R0=SYS_signal_handler:  This a user signal handler callback
        *
        * void signal_handler(_sa_sigaction_t sighand, int signo,
@@ -245,7 +246,12 @@ uint64_t *arm64_syscall(uint64_t *regs)
            * unprivileged mode.
            */
 
+#if defined(CONFIG_BUILD_KERNEL)
           regs[REG_ELR]  = (uint64_t)ARCH_DATA_RESERVE->ar_sigtramp;
+#elif defined(CONFIG_BUILD_PROTECTED)
+          regs[REG_ELR]  = (uint64_t)(USERSPACE->signal_handler);
+#endif
+
           spsr           = regs[REG_SPSR] & ~SPSR_MODE_MASK;
           regs[REG_SPSR] = spsr | SPSR_MODE_EL0T;
 
diff --git a/arch/arm64/src/common/arm64_task_start.c b/arch/arm64/src/common/arm64_task_start.c
index f5b6340a723..ced3c7186fb 100644
--- a/arch/arm64/src/common/arm64_task_start.c
+++ b/arch/arm64/src/common/arm64_task_start.c
@@ -72,14 +72,23 @@ void up_task_start(main_t taskentry, int argc, char *argv[])
   /* Set up to return to the user-space _start function in
    * unprivileged mode.  We need:
    *
-   *   R0   = argc
-   *   R1   = argv
-   *   ELR  = taskentry
-   *   SPSR = user mode
+   *   X0 = REG_ELR
+   *   X1 = param1
+   *   X2 = param2
+   *   X3 = param3
+   *   X4 = SP_EL0
+   *   X5 = REGS
    */
 
-  arm64_jump_to_user((uint64_t)taskentry, (uint64_t)argc, (uint64_t)argv,
+#ifdef CONFIG_BUILD_KERNEL
+  arm64_jump_to_user((uint64_t)taskentry, (uint64_t)argc,
+                     (uint64_t)argv, 0,
                      (uint64_t)rtcb->xcp.ustkptr, rtcb->xcp.initregs);
+#elif defined(CONFIG_BUILD_PROTECTED)
+  arm64_jump_to_user((uint64_t)USERSPACE->task_startup,
+                     (uint64_t)taskentry, (uint64_t)argc, (uint64_t)argv,
+                     (uint64_t)rtcb->xcp.ustkptr, rtcb->xcp.initregs);
+#endif
 }
 
 #endif /* !CONFIG_BUILD_FLAT */
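
For the protected build the kernel no longer jumps straight to the task entry
point: it enters user space at USERSPACE->task_startup with the real entry and
its arguments in X0-X2. Conceptually the user-side trampoline behaves like the
sketch below (a simplification written for illustration; the real C library
startup also runs C++ initializers and related hooks):

    #include <stdlib.h>

    typedef int (*main_t)(int argc, char *argv[]);  /* local stand-in for
                                                      * NuttX's main_t */

    static void task_trampoline(main_t entrypt, int argc, char *argv[])
    {
      /* Call the task's main() at EL0 and hand its return value to exit(),
       * which terminates the task through the normal user->kernel syscall
       * path.
       */

      exit(entrypt(argc, argv));
    }
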
diff --git a/arch/arm64/src/common/arm64_vectors.S b/arch/arm64/src/common/arm64_vectors.S
index e94fafc5050..d623d5f8575 100644
--- a/arch/arm64/src/common/arm64_vectors.S
+++ b/arch/arm64/src/common/arm64_vectors.S
@@ -105,15 +105,16 @@ SECTION_FUNC(text, up_saveusercontext)
  *
  ****************************************************************************/
 
-#ifndef CONFIG_BUILD_FLAT
+#if defined(CONFIG_BUILD_KERNEL) || defined(CONFIG_BUILD_PROTECTED)
 GTEXT(arm64_jump_to_user)
 SECTION_FUNC(text, arm64_jump_to_user)
     msr daifset, #IRQ_DAIF_MASK
-    mov sp,  x4
+    mov sp,  x5
     str x0,  [sp, #8 * REG_ELR]
     str x1,  [sp, #8 * REG_X0]
     str x2,  [sp, #8 * REG_X1]
-    str x3,  [sp, #8 * REG_SP_EL0]
+    str x3,  [sp, #8 * REG_X2]
+    str x4,  [sp, #8 * REG_SP_EL0]
     mrs x0,  spsr_el1
     and x0,  x0, #~SPSR_MODE_MASK
     #orr x0, x0, #SPSR_MODE_EL0T # EL0T=0x00, out of range for orr
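
With the extra argument, the register layout consumed by arm64_jump_to_user()
is: X0 = user entry (stored to REG_ELR), X1-X3 = the first three user-space
arguments (stored to REG_X0..REG_X2), X4 = the user stack pointer (stored to
REG_SP_EL0), and X5 = the register context frame that the routine switches SP
to. The added X2 slot is what lets the protected build pass (startup, entrypt,
arg) or (taskentry, argc, argv) through the user-space trampoline in a single
jump.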
