On Tue, Aug 19, 2025 at 09:15:15PM +0200, Peter Zijlstra wrote:
> On Sun, Jul 20, 2025 at 01:21:20PM +0200, Jiri Olsa wrote:

> > +void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr)
> > +{
> > +   struct mm_struct *mm = current->mm;
> > +   uprobe_opcode_t insn[5];
> > +
> > +   /*
> > +    * Do not optimize if shadow stack is enabled, the return address hijack
> > +    * code in arch_uretprobe_hijack_return_addr updates wrong frame when
> > +    * the entry uprobe is optimized and the shadow stack crashes the app.
> > +    */
> > +   if (shstk_is_enabled())
> > +           return;
> 
> Kernel should be able to fix up userspace shadow stack just fine.
> 
> > +   if (!should_optimize(auprobe))
> > +           return;
> > +
> > +   mmap_write_lock(mm);
> > +
> > +   /*
> > +    * Check if some other thread already optimized the uprobe for us,
> > +    * if it's the case just go away silently.
> > +    */
> > +   if (copy_from_vaddr(mm, vaddr, &insn, 5))
> > +           goto unlock;
> > +   if (!is_swbp_insn((uprobe_opcode_t*) &insn))
> > +           goto unlock;
> > +
> > +   /*
> > +    * If we fail to optimize the uprobe we set the fail bit so the
> > +    * above should_optimize will fail from now on.
> > +    */
> > +   if (__arch_uprobe_optimize(auprobe, mm, vaddr))
> > +           set_bit(ARCH_UPROBE_FLAG_OPTIMIZE_FAIL, &auprobe->flags);
> > +
> > +unlock:
> > +   mmap_write_unlock(mm);
> > +}

Something a little like this should do, I suppose: pop and verify the shadow stack frame left by the optimized call when entering the uprobe syscall, then push the (possibly updated) return address back before returning...

--- a/arch/x86/include/asm/shstk.h
+++ b/arch/x86/include/asm/shstk.h
@@ -23,6 +23,8 @@ int setup_signal_shadow_stack(struct ksi
 int restore_signal_shadow_stack(void);
 int shstk_update_last_frame(unsigned long val);
 bool shstk_is_enabled(void);
+int shstk_pop(u64 *val);
+int shstk_push(u64 val);
 #else
 static inline long shstk_prctl(struct task_struct *task, int option,
                               unsigned long arg2) { return -EINVAL; }
@@ -35,6 +37,8 @@ static inline int setup_signal_shadow_st
 static inline int restore_signal_shadow_stack(void) { return 0; }
 static inline int shstk_update_last_frame(unsigned long val) { return 0; }
 static inline bool shstk_is_enabled(void) { return false; }
+static inline int shstk_pop(u64 *val) { return -ENOTSUPP; }
+static inline int shstk_push(u64 val) { return -ENOTSUPP; }
 #endif /* CONFIG_X86_USER_SHADOW_STACK */
 
 #endif /* __ASSEMBLER__ */
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -246,6 +246,53 @@ static unsigned long get_user_shstk_addr
        return ssp;
 }
 
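+/*
+ * Pop the top frame off the current task's userspace shadow stack and,
+ * when @val is non-NULL, return its value through @val.
+ */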
+int shstk_pop(u64 *val)
+{
+       int ret = 0;
+       u64 ssp;
+
+       if (!features_enabled(ARCH_SHSTK_SHSTK))
+               return -ENOTSUPP;
+
+       fpregs_lock_and_load();
+
+       rdmsrq(MSR_IA32_PL3_SSP, ssp);
+       if (val && get_user(*val, (u64 __user *)ssp))
+               ret = -EFAULT;
+       ssp += SS_FRAME_SIZE;
+       wrmsrq(MSR_IA32_PL3_SSP, ssp);
+
+       fpregs_unlock();
+
+       return ret;
+}
+
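+/*
+ * Push @val as a new frame onto the current task's userspace shadow stack.
+ */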
+int shstk_push(u64 val)
+{
+       u64 ssp;
+       int ret;
+
+       if (!features_enabled(ARCH_SHSTK_SHSTK))
+               return -ENOTSUPP;
+
+       fpregs_lock_and_load();
+
+       rdmsrq(MSR_IA32_PL3_SSP, ssp);
+       ssp -= SS_FRAME_SIZE;
+       wrmsrq(MSR_IA32_PL3_SSP, ssp);
+       ret = write_user_shstk_64((void __user *)ssp, val);
+       fpregs_unlock();
+
+       return ret;
+}
+
 #define SHSTK_DATA_BIT BIT(63)
 
 static int put_shstk_data(u64 __user *addr, u64 data)
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -804,7 +804,7 @@ SYSCALL_DEFINE0(uprobe)
 {
        struct pt_regs *regs = task_pt_regs(current);
        struct uprobe_syscall_args args;
-       unsigned long ip, sp;
+       unsigned long ip, sp, sret;
        int err;
 
        /* Allow execution only from uprobe trampolines. */
@@ -831,6 +831,13 @@ SYSCALL_DEFINE0(uprobe)
 
        sp = regs->sp;
 
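+       /*
+        * The optimized uprobe's call into the trampoline pushed a frame
+        * on the shadow stack; pop it and check it against args.retaddr.
+        */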
+       if (shstk_pop(&sret) == 0 && sret != args.retaddr)
+               goto sigill;
+
        handle_syscall_uprobe(regs, regs->ip);
 
        /*
@@ -855,6 +862,13 @@
        if (args.retaddr - 5 != regs->ip)
                args.retaddr = regs->ip;
 
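+       /*
+        * Put the (possibly updated) return address back on the shadow
+        * stack so the return from the trampoline passes verification.
+        */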
+       if (shstk_push(args.retaddr) == -EFAULT)
+               goto sigill;
+
        regs->ip = ip;
 
        err = copy_to_user((void __user *)regs->sp, &args, sizeof(args));
@@ -1124,14 +1138,6 @@ void arch_uprobe_optimize(struct arch_up
        struct mm_struct *mm = current->mm;
        uprobe_opcode_t insn[5];
 
-       /*
-        * Do not optimize if shadow stack is enabled, the return address hijack
-        * code in arch_uretprobe_hijack_return_addr updates wrong frame when
-        * the entry uprobe is optimized and the shadow stack crashes the app.
-        */
-       if (shstk_is_enabled())
-               return;
-
        if (!should_optimize(auprobe))
                return;
 
