Module: xenomai-head
Branch: master
Commit: b3457b347d91577f457d496faa1881f09f549766
URL:    
http://git.xenomai.org/?p=xenomai-head.git;a=commit;h=b3457b347d91577f457d496faa1881f09f549766

Author: Philippe Gerum <r...@xenomai.org>
Date:   Mon Aug 31 22:25:45 2009 +0200

x86_64: introduce support for CC_STACKPROTECTOR

---

 include/asm-x86/bits/pod_64.h |   27 ++++++++----
 include/asm-x86/switch_64.h   |   94 ++++++++++++++++++++++++++++++++++++++--
 include/asm-x86/system_64.h   |    4 +-
 3 files changed, 111 insertions(+), 14 deletions(-)

diff --git a/include/asm-x86/bits/pod_64.h b/include/asm-x86/bits/pod_64.h
index 3eea0f5..88e049d 100644
--- a/include/asm-x86/bits/pod_64.h
+++ b/include/asm-x86/bits/pod_64.h
@@ -62,15 +62,17 @@ static inline void xnarch_enter_root(xnarchtcb_t * rootcb)
 {
 }
 
-static inline void xnarch_switch_to(xnarchtcb_t * out_tcb, xnarchtcb_t * 
in_tcb)
+static inline void xnarch_switch_to(xnarchtcb_t *out_tcb, xnarchtcb_t *in_tcb)
 {
        struct task_struct *prev = out_tcb->active_task;
        struct task_struct *next = in_tcb->user_task;
 
        if (likely(next != NULL)) {
                if (task_thread_info(prev)->status & TS_USEDFPU)
-                       /* __switch_to will try and use __unlazy_fpu, so we 
need to
-                          clear the ts bit. */
+                       /*
+                        * __switch_to will try and use __unlazy_fpu,
+                        * so we need to clear the ts bit.
+                        */
                        clts();
                in_tcb->active_task = next;
                rthal_clear_foreign_stack(&rthal_domain);
@@ -82,15 +84,21 @@ static inline void xnarch_switch_to(xnarchtcb_t * out_tcb, 
xnarchtcb_t * in_tcb)
 
        if (next && next != prev) {
                struct mm_struct *oldmm = prev->active_mm;
-
                switch_mm(oldmm, next->active_mm, next);
-
-               if (!next->mm)
+               if (next->mm == NULL)
                        enter_lazy_tlb(oldmm, next);
        }
 
-       xnarch_switch_threads(prev, next, out_tcb->rspp, in_tcb->rspp, 
out_tcb->ripp, in_tcb->ripp);
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define xnarch_switch_canary  in_tcb->canary
+#else
+#define xnarch_switch_canary  0
+#endif
 
+       xnarch_switch_threads(prev, next,
+                             out_tcb->rspp, in_tcb->rspp,
+                             out_tcb->ripp, in_tcb->ripp,
+                             xnarch_switch_canary);
        stts();
 }
 
@@ -126,7 +134,10 @@ static inline void xnarch_init_thread(xnarchtcb_t *tcb,
        childregs->eflags = flags & ~X86_EFLAGS_IF;
        childregs->arg = (unsigned long)tcb;
        childregs->entry = (unsigned long)&xnarch_thread_trampoline;
-
+#ifdef CONFIG_CC_STACKPROTECTOR
+       tcb->canary = (unsigned long)xnarch_get_cpu_tsc() ^ childregs->arg;
+       childregs->canary = tcb->canary;
+#endif
        tcb->rsp = (unsigned long)childregs;
        tcb->rip = (unsigned long)&__thread_head; /* Will branch there at 
startup. */
        tcb->entry = entry;
diff --git a/include/asm-x86/switch_64.h b/include/asm-x86/switch_64.h
index 4e65a9e..a716ad6 100644
--- a/include/asm-x86/switch_64.h
+++ b/include/asm-x86/switch_64.h
@@ -27,17 +27,91 @@
 
 struct xnarch_x8664_initstack {
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+       unsigned long canary;
+#endif
        unsigned long rbp;
        unsigned long eflags;
        unsigned long arg;
        unsigned long entry;
 };
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+/*
+ * We have an added complexity with -fstack-protector, due to the
+ * hybrid scheduling between user- and kernel-based Xenomai threads,
+ * for which we do not have any underlying task_struct.  We update the
+ * current stack canary for incoming user-based threads exactly the
+ * same way as Linux does. However, we handle the case of incoming
+ * kernel-based Xenomai threads differently: in that case, the canary
+ * value shall be given by our caller.
+ *
+ * In the latter case, the update logic is jointly handled by
+ * xeno_switch_kernel_prologue and xeno_switch_kernel_canary; the
+ * former clears %rax whenever the incoming thread is kernel-based,
+ * and with that information, the latter checks %rax to determine
+ * whether the canary should be picked from the current task struct,
+ * or from %r8. %r8 is set earlier to the proper canary value passed by
+ * our caller (i.e. in_tcb->canary).
+ *
+ * This code is based on the assumption that no scheduler exchange can
+ * happen between Linux and Xenomai for kernel-based Xenomai threads,
+ * i.e. the only way to schedule a kernel-based Xenomai thread in goes
+ * through xnarch_switch_to(), never via schedule(). This in turn
+ * means that neither %rax nor %r8 could be clobbered once set by
+ * xeno_switch_kernel_prologue, since the only way of branching to the
+ * incoming kernel-based thread is via retq. Besides, we may take for
+ * granted that %rax can't be null upon return from __switch_to, since
+ * the latter returns prev_p, which cannot be NULL, so the logic
+ * cannot be confused by incoming user-based threads.
+ *
+ * Yeah, I know, it's awfully convoluted, but I'm not even sorry for
+ * this. --rpm
+ */
+#define xeno_init_kernel_canary                                                
\
+       "popq   %%r8\n\t"                                               \
+       "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define xeno_init_canary_oparam                                                
\
+       [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+#define xeno_switch_canary_setup(c)                                    \
+       register long __kcanary __asm__ ("r8") = (c)
+#define xeno_switch_kernel_prologue                                    \
+       "xor %%rax, %%rax\n\t"
+#define xeno_switch_kernel_canary                                      \
+       "testq %%rax, %%rax\n\t"                                        \
+       "jnz 8f\n\t"                                                    \
+       "movq %%r8,"__percpu_arg([gs_canary])"\n\t"                     \
+       "jmp 9f\n\t"
+#define xeno_switch_user_canary                                                
\
+       "movq "__percpu_arg([current_task])",%%rsi\n\t"                 \
+       "movq %P[user_canary](%%rsi),%%r8\n\t"                          \
+       "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define xeno_switch_canary_oparam                                      \
+       , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+#define xeno_switch_canary_iparam                                      \
+       , [user_canary] "i" (offsetof(struct task_struct, stack_canary)) \
+       , [current_task] "m" (per_cpu_var(current_task))                \
+       , "r" (__kcanary)
+#define __SWITCH_CLOBBER_LIST  , "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#define __HEAD_CLOBBER_LIST    , "rdi", "r8"
+#else  /* CC_STACKPROTECTOR */
+#define xeno_switch_canary_setup(c)    (void)(c)
+#define xeno_init_kernel_canary
+#define xeno_init_canary_oparam
+#define xeno_switch_kernel_prologue
+#define xeno_switch_kernel_canary
+#define xeno_switch_user_canary
+#define xeno_switch_canary_oparam
+#define xeno_switch_canary_iparam
 #define __SWITCH_CLOBBER_LIST  , "r8", "r9", "r10", "r11", "r12", "r13", 
"r14", "r15"
+#define __HEAD_CLOBBER_LIST    , "rdi"
+#endif /* CC_STACKPROTECTOR */
 
-#define xnarch_switch_threads(prev,next,p_rsp,n_rsp,p_rip,n_rip)       \
+#define xnarch_switch_threads(prev,next,p_rsp,n_rsp,p_rip,n_rip,kcanary)       
\
        ({                                                              \
                long __rdi, __rsi, __rax, __rbx, __rcx, __rdx;          \
+               xeno_switch_canary_setup(kcanary);                      \
+                                                                       \
                asm volatile("pushfq\n\t"                               \
                             "pushq     %%rbp\n\t"                      \
                             "movq      %%rsi, %%rbp\n\t"               \
@@ -49,27 +123,37 @@ struct xnarch_x8664_initstack {
                             "jz        0f\n\t"                         \
                             "testq     %%rsi, %%rsi\n\t"               \
                             "jnz       __switch_to\n\t"                \
-                            "0:ret\n\t"                                \
-                            "1: movq   %%rbp, %%rsi\n\t"               \
+                            xeno_switch_kernel_prologue                \
+                            "0:\n\t"                                   \
+                            "ret\n\t"                                  \
+                            "1:\n\t"                                   \
+                            xeno_switch_kernel_canary                  \
+                            "8:\n\t"                                   \
+                            xeno_switch_user_canary                    \
+                            "9:\n\t"                                   \
+                            "movq      %%rbp, %%rsi\n\t"               \
                             "popq      %%rbp\n\t"                      \
                             "popfq\n\t"                                \
                             : "=S" (__rsi), "=D" (__rdi), "=a" (__rax), \
                               "=b" (__rbx), "=c" (__rcx), "=d" (__rdx) \
+                              xeno_switch_canary_oparam                \
                             : "0" (next), "1" (prev), "5" (p_rsp), "4" 
(n_rsp), \
                               "2" (p_rip), "3" (n_rip)                 \
+                              xeno_switch_canary_iparam                \
                             : "memory", "cc" __SWITCH_CLOBBER_LIST);   \
        })
 
 #define xnarch_thread_head()                                           \
        asm volatile(".globl __thread_head\n\t"                         \
                     "__thread_head:\n\t"                               \
+                    xeno_init_kernel_canary                            \
                     "popq      %%rbp\n\t"                              \
                     "popfq\n\t"                                        \
                     "popq      %%rdi\n\t"                              \
                     "ret\n\t"                                          \
-                    : /* no output */                                  \
+                    : xeno_init_canary_oparam                          \
                     : /* no input */                                   \
-                    : "cc", "memory", "rdi")
+                    : "cc", "memory" __HEAD_CLOBBER_LIST)
 
 asmlinkage void __thread_head(void);
 
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 18310a4..4de8693 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -52,7 +52,9 @@ typedef struct xnarchtcb {      /* Per-thread arch-dependent 
block */
        x86_fpustate i387;
        unsigned long rsp;
        unsigned long rip;
-
+#ifdef CONFIG_CC_STACKPROTECTOR
+       unsigned long canary;
+#endif
        /* FPU context bits for the root thread. */
        unsigned long is_root: 1;
        unsigned long ts_usedfpu: 1;


_______________________________________________
Xenomai-git mailing list
Xenomai-git@gna.org
https://mail.gna.org/listinfo/xenomai-git

Reply via email to