Module: xenomai-gch
Branch: for-forge
Commit: 8fba3216e130c62e44dad61929517d04eb7327e2
URL: http://git.xenomai.org/?p=xenomai-gch.git;a=commit;h=8fba3216e130c62e44dad61929517d04eb7327e2

Author: Gilles Chanteperdrix <gilles.chanteperd...@xenomai.org>
Date:   Sat Oct 26 15:16:50 2013 +0200

cobalt/arm: lazy XNFPU bit

---

 .../cobalt/arch/arm/include/asm/xenomai/thread.h   |   46 ++--
 kernel/cobalt/arch/arm/thread.c                    |  267 ++++++--------------
 2 files changed, 95 insertions(+), 218 deletions(-)

diff --git a/kernel/cobalt/arch/arm/include/asm/xenomai/thread.h b/kernel/cobalt/arch/arm/include/asm/xenomai/thread.h
index dd582d7..cf16b7c 100644
--- a/kernel/cobalt/arch/arm/include/asm/xenomai/thread.h
+++ b/kernel/cobalt/arch/arm/include/asm/xenomai/thread.h
@@ -26,36 +26,21 @@
 #include <asm-generic/xenomai/thread.h>
 
 #ifdef CONFIG_XENO_HW_FPU
-
 #ifdef CONFIG_VFP
 #include <asm/vfp.h>
 #endif /* CONFIG_VFP */
-
-struct arm_fpustate {
-       /*
-        * This layout must follow exactely the definition of the FPU
-        * area in the ARM thread_info structure. 'tp_value' is also
-        * saved even if it is not needed, but it shouldn't matter.
-        */
-       __u8                    used_cp[16];    /* thread used copro */
-       unsigned long           tp_value;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) || defined(CONFIG_CRUNCH)
-       struct crunch_state     crunchstate;
-#endif
-       union fp_state          fpstate __attribute__((aligned(8)));
-       union vfp_state         vfpstate;
-};
-
 #endif /* !CONFIG_XENO_HW_FPU */
 
 struct xnarchtcb {
        struct xntcb core;
 #ifdef CONFIG_XENO_HW_FPU
-       struct arm_fpustate *fpup;
+#ifdef CONFIG_VFP
+       union vfp_state *fpup;
 #define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
 #else
 #define xnarch_fpu_ptr(tcb)     NULL
 #endif
+#endif
        struct {
                unsigned long pc;
                unsigned long r0;
@@ -79,17 +64,13 @@ struct xnarchtcb {
 
 #define xnarch_fault_notify(d) (!xnarch_fault_bp_p(d))
 
-void xnarch_save_fpu(struct xnthread *thread);
-
-void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread);
-
 void xnarch_switch_to(struct xnthread *out, struct xnthread *in);
 
 static inline void xnarch_enter_root(struct xnthread *root) { }
 
 int xnarch_escalate(void);
 
-#ifdef CONFIG_XENO_HW_FPU
+#if defined(CONFIG_XENO_HW_FPU) && defined(CONFIG_VFP)
 
 static inline void xnarch_init_root_tcb(struct xnthread *thread)
 {
@@ -103,7 +84,14 @@ int xnarch_fault_fpu_p(struct ipipe_trap_data *d);
 
 void xnarch_leave_root(struct xnthread *root);
 
-#else /* !CONFIG_XENO_HW_FPU */
+void xnarch_save_fpu(struct xnthread *thread);
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread);
+
+int xnarch_handle_fpu_fault(struct xnthread *from, 
+                       struct xnthread *to, struct ipipe_trap_data *d);
+
+#else /* !CONFIG_XENO_HW_FPU || !CONFIG_VFP */
 
 static inline void xnarch_init_root_tcb(struct xnthread *thread) { }
 static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { }
@@ -120,13 +108,15 @@ static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
 
 static inline void xnarch_leave_root(struct xnthread *root) { }
 
-#endif /* !CONFIG_XENO_HW_FPU */
+static inline void xnarch_save_fpu(struct xnthread *thread) { }
+
+static inline void xnarch_switch_fpu(struct xnthread *f, struct xnthread *t) { }
 
-static inline int 
-xnarch_handle_fpu_fault(struct xnthread *from, 
-                       struct xnthread *to, struct ipipe_trap_data *d)
+static inline int xnarch_handle_fpu_fault(struct xnthread *from, 
+                                       struct ipipe_trap_data *d)
 {
        return 0;
 }
+#endif /*  !CONFIG_XENO_HW_FPU || !CONFIG_VFP */
 
 #endif /* !_COBALT_ARM_ASM_THREAD_H */
diff --git a/kernel/cobalt/arch/arm/thread.c b/kernel/cobalt/arch/arm/thread.c
index 40f088a..8f142b3 100644
--- a/kernel/cobalt/arch/arm/thread.c
+++ b/kernel/cobalt/arch/arm/thread.c
@@ -31,26 +31,11 @@ asmlinkage void __asm_thread_switch(struct thread_info *out,
 
 asmlinkage void __asm_thread_trampoline(void);
 
-#ifdef CONFIG_XENO_HW_FPU
-
-#define task_fpenv(task)                                               \
-       ((struct arm_fpustate *) &task_thread_info(task)->used_cp[0])
-
-#ifdef CONFIG_VFP
+#if defined(CONFIG_XENO_HW_FPU) && defined(CONFIG_VFP)
 asmlinkage void __asm_vfp_save(union vfp_state *vfp, unsigned int fpexc);
 
 asmlinkage void __asm_vfp_load(union vfp_state *vfp, unsigned int cpu);
 
-static inline void do_save_fpu(struct arm_fpustate *fpuenv, unsigned int fpexc)
-{
-       __asm_vfp_save(&fpuenv->vfpstate, fpexc);
-}
-
-static inline void do_restore_fpu(struct arm_fpustate *fpuenv)
-{
-       __asm_vfp_load(&fpuenv->vfpstate, ipipe_processor_id());
-}
-
 #define do_vfp_fmrx(_vfp_)                                             \
        ({                                                              \
                u32 __v;                                                \
@@ -67,7 +52,7 @@ static inline void do_restore_fpu(struct arm_fpustate *fpuenv)
 
 extern union vfp_state *vfp_current_hw_state[NR_CPUS];
 
-static inline struct arm_fpustate *get_fpu_owner(void)
+static inline union vfp_state *get_fpu_owner(void)
 {
        union vfp_state *vfp_owner;
        unsigned int cpu;
@@ -84,13 +69,12 @@ static inline struct arm_fpustate *get_fpu_owner(void)
        if (!vfp_owner)
                return NULL;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)                     \
-     || defined(CONFIG_VFP_3_2_BACKPORT)) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
        if (vfp_owner->hard.cpu != cpu)
                return NULL;
-#endif /* linux >= 3.2.0 */
+#endif /* SMP */
 
-       return container_of(vfp_owner, struct arm_fpustate, vfpstate);
+       return vfp_owner;
 }
 
 #define do_disable_fpu()                                       \
@@ -106,27 +90,6 @@ static inline struct arm_fpustate *get_fpu_owner(void)
                _fpexc;                                                 \
        })
 
-#else /* !CONFIG_VFP */
-
-static inline void do_save_fpu(struct arm_fpustate *fpuenv) { }
-
-static inline void do_restore_fpu(struct arm_fpustate *fpuenv) { }
-
-#define get_fpu_owner(cur)                                             \
-       ({                                                              \
-               struct task_struct * _cur = (cur);                      \
-               ((task_thread_info(_cur)->used_cp[1] | 
task_thread_info(_cur)->used_cp[2]) \
-               ((task_thread_info(_cur)->used_cp[1] | task_thread_info(_cur)->used_cp[2]) \
-
-#define do_disable_fpu()                                               \
-       task_thread_info(current)->used_cp[1] = task_thread_info(current)->used_cp[2] = 0;
-
-#define do_enable_fpu()                                                        \
-       task_thread_info(current)->used_cp[1] = task_thread_info(current)->used_cp[2] = 1;
-
-#endif /* !CONFIG_VFP */
-
 int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
 {
        /* This function does the same thing to decode the faulting instruct as
@@ -156,16 +119,10 @@ int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
        if (d->exception == IPIPE_TRAP_FPU)
                return 1;
 
-#ifdef CONFIG_VFP
        if (d->exception == IPIPE_TRAP_VFP)
                goto trap_vfp;
-#endif
 
-       /*
-        * When an FPU fault occurs in user-mode, it will be properly
-        * resolved before __ipipe_report_trap() is called.
-        */
-       if (d->exception != IPIPE_TRAP_UNDEFINSTR || user_mode(d->regs))
+       if (d->exception != IPIPE_TRAP_UNDEFINSTR)
                return 0;
 
        pc = (char *) xnarch_fault_pc(d);
@@ -217,7 +174,6 @@ int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
 #endif
 
        exc = copro_to_exc[cp];
-#ifdef CONFIG_VFP
        if (exc == IPIPE_TRAP_VFP) {
          trap_vfp:
                /* If an exception is pending, the VFP fault is not really an
@@ -230,7 +186,7 @@ int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
                else
                        exc = IPIPE_TRAP_VFP;
        }
-#endif
+
        d->exception = exc;
        return exc != IPIPE_TRAP_UNDEFINSTR;
 }
@@ -238,17 +194,79 @@ int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
 void xnarch_leave_root(struct xnthread *root)
 {
        struct xnarchtcb *rootcb = xnthread_archtcb(root);
-#ifdef CONFIG_VFP
        rootcb->fpup = get_fpu_owner();
-#else /* !CONFIG_VFP */
-       rootcb->core.user_fpu_owner = get_fpu_owner(rootcb->core.host_task);
-       /* So that xnarch_save_fpu() will operate on the right FPU area. */
-       rootcb->fpup = (rootcb->core.user_fpu_owner
-                       ? task_fpenv(rootcb->core.user_fpu_owner) : NULL);
-#endif /* !CONFIG_VFP */
 }
 
-#endif /* CONFIG_XENO_HW_FPU */
+void xnarch_save_fpu(struct xnthread *thread)
+{
+       struct xnarchtcb *tcb = &thread->tcb;
+       if (tcb->fpup)
+               __asm_vfp_save(tcb->fpup, do_enable_fpu());
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+       union vfp_state *const from_fpup = from ? from->tcb.fpup : NULL;
+       union vfp_state *const to_fpup = to->tcb.fpup;
+       unsigned cpu = ipipe_processor_id();
+       
+       if (xnthread_test_state(to, XNROOT) == 0) {
+               unsigned fpexc = do_enable_fpu();
+
+               if (from_fpup == to_fpup)
+                       return;
+
+               if (from_fpup)
+                       __asm_vfp_save(from_fpup, fpexc);
+
+               __asm_vfp_load(to_fpup, cpu);
+       } else {
+               /*
+                * We are restoring the Linux current thread. If it
+                * does not own the FPU context, the FPU must be
+                * disabled, so that a fault will occur if the newly
+                * switched thread uses the FPU, to allow the kernel
+                * handler to pick the correct FPU context, and save 
+                * the last RT context used in the same move.
+                */
+#ifdef CONFIG_SMP
+               unsigned fpexc = do_enable_fpu();
+               if (from_fpup)
+                       __asm_vfp_save(from_fpup, fpexc);
+#endif
+              vfp_current_hw_state[cpu] = from_fpup;
+              if (from_fpup != to_fpup)
+                      do_disable_fpu();
+       }
+}
+
+int xnarch_handle_fpu_fault(struct xnthread *from, 
+                       struct xnthread *to, struct ipipe_trap_data *d)
+{
+       unsigned fpexc;
+
+       fpexc = do_vfp_fmrx(FPEXC);
+       if (fpexc & FPEXC_EN)
+               /* FPU is already enabled, probably an exception */
+               return 0;
+
+       xnthread_set_state(to, XNFPU);
+       xnarch_switch_fpu(from, to);
+
+       /* Retry faulting instruction */
+       d->regs->ARM_pc = xnarch_fault_pc(d);
+       return 1;
+}
+
+void xnarch_init_shadow_tcb(struct xnthread *thread)
+{
+       struct xnarchtcb *tcb = xnthread_archtcb(thread);
+       tcb->fpup = &task_thread_info(tcb->core.host_task)->vfpstate;
+
+       /* XNFPU is set upon first FPU fault */
+       xnthread_clear_state(thread, XNFPU);
+}
+#endif /* CONFIG_XENO_HW_FPU && CONFIG_VFP*/
 
 void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
 {
@@ -284,130 +302,6 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
 #endif
 }
 
-void xnarch_enable_fpu(struct xnthread *thread)
-{
-       struct xnarchtcb *tcb = &thread->tcb;
-#ifdef CONFIG_XENO_HW_FPU
-#ifdef CONFIG_VFP
-       /* If we are restoring the Linux current thread which does not own the
-          FPU context, we keep FPU disabled, so that a fault will occur if the
-          newly switched thread uses the FPU, to allow the kernel handler to
-          pick the correct FPU context.
-       */
-       if (likely(!xnthread_test_state(thread, XNROOT))) {
-               do_enable_fpu();
-               /* No exception should be pending, since it should have caused
-                  a trap earlier.
-               */
-       } else if (tcb->fpup && tcb->fpup == task_fpenv(tcb->core.host_task)) {
-               unsigned fpexc = do_enable_fpu();
-#ifndef CONFIG_SMP
-               if (likely(!(fpexc & XNARCH_VFP_ANY_EXC)
-                          && !(do_vfp_fmrx(FPSCR) & FPSCR_IXE)))
-                       return;
-               /*
-                 If current process has pending exceptions it is
-                 illegal to restore the FPEXC register with them, we must
-                 save the fpu state and disable them, to get linux
-                 fpu fault handler take care of them correctly.
-               */
-#endif
-               /*
-                 On SMP systems, if we are restoring the root
-                 thread, running the task holding the FPU context at
-                 the time when we switched to real-time domain,
-                 forcibly save the FPU context. It seems to fix SMP
-                 systems for still unknown reasons.
-               */
-               do_save_fpu(tcb->fpup, fpexc);
-               vfp_current_hw_state[ipipe_processor_id()] = NULL;
-               do_disable_fpu();
-       }
-#else /* !CONFIG_VFP */
-       if (!tcb->core.host_task)
-               do_enable_fpu();
-#endif /* !CONFIG_VFP */
-#endif /* CONFIG_XENO_HW_FPU */
-}
-
-void xnarch_save_fpu(struct xnthread *thread)
-{
-       struct xnarchtcb *tcb = &thread->tcb;
-#ifdef CONFIG_XENO_HW_FPU
-#ifdef CONFIG_VFP
-       if (tcb->fpup)
-               do_save_fpu(tcb->fpup, do_enable_fpu());
-#else /* !CONFIG_VFP */
-       if (tcb->fpup) {
-               do_save_fpu(tcb->fpup);
-
-               if (tcb->core.user_fpu_owner && task_thread_info(tcb->core.user_fpu_owner)) {
-                       task_thread_info(tcb->core.user_fpu_owner)->used_cp[1] = 0;
-                       task_thread_info(tcb->core.user_fpu_owner)->used_cp[2] = 0;
-               }
-       }
-#endif /* !CONFIG_VFP */
-#endif /* CONFIG_XENO_HW_FPU */
-}
-
-void xnarch_restore_fpu(struct xnthread *thread)
-{
-       struct xnarchtcb *tcb = &thread->tcb;
-#ifdef CONFIG_XENO_HW_FPU
-#ifdef CONFIG_VFP
-       if (likely(!xnthread_test_state(thread, XNROOT))) {
-               do_enable_fpu();
-               do_restore_fpu(tcb->fpup);
-       } else {
-               /*
-                * We are restoring the Linux current thread which
-                * does not own the FPU context, so the FPU must be
-                * disabled, so that a fault will occur if the newly
-                * switched thread uses the FPU, to allow the kernel
-                * handler to pick the correct FPU context.
-                *
-                * Further set vfp_current_hw_state to NULL to avoid
-                * the Linux kernel to save, when the fault occur, the
-                * current FPU context, the one of an RT task, into
-                * the FPU area of the last non RT task which used the
-                * FPU before the preemption by Xenomai.
-               */
-               vfp_current_hw_state[ipipe_processor_id()] = NULL;
-               do_disable_fpu();
-       }
-#else /* !CONFIG_VFP */
-       if (tcb->fpup) {
-               do_restore_fpu(tcb->fpup);
-
-               if (tcb->core.user_fpu_owner && task_thread_info(tcb->core.user_fpu_owner)) {
-                       task_thread_info(tcb->core.user_fpu_owner)->used_cp[1] = 1;
-                       task_thread_info(tcb->core.user_fpu_owner)->used_cp[2] = 1;
-               }
-       }
-
-       /* FIXME: We restore FPU "as it was" when Xenomai preempted Linux,
-          whereas we could be much lazier. */
-       if (tcb->core.host_task)
-               do_disable_fpu();
-#endif /* !CONFIG_VFP */
-#endif /* CONFIG_XENO_HW_FPU */
-}
-
-void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
-{
-       if (from == to || 
-               xnarch_fpu_ptr(xnthread_archtcb(from)) == 
-               xnarch_fpu_ptr(xnthread_archtcb(to))) {
-               xnarch_enable_fpu(to);
-               return;
-       }
-       
-       if (from)
-               xnarch_save_fpu(from);
-       
-       xnarch_restore_fpu(to);
-}
-
 int xnarch_escalate(void)
 {
        if (ipipe_root_p) {
@@ -417,10 +311,3 @@ int xnarch_escalate(void)
 
        return 0;
 }
-
-void xnarch_init_shadow_tcb(struct xnthread *thread)
-{
-       struct xnarchtcb *tcb = xnthread_archtcb(thread);
-       tcb->fpup = (struct arm_fpustate *)
-               &task_thread_info(tcb->core.host_task)->used_cp[0];
-}
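
For readers following the change, here is a minimal, hypothetical user-space model of the
lazy hand-off that the new xnarch_switch_fpu()/xnarch_handle_fpu_fault() pair implements:
a real-time thread only gets the XNFPU bit, and a bound VFP context, on its first FPU
fault, and switching back to the root (Linux) thread merely disables the FPU so that the
next access faults and ownership is resolved then. All identifiers below (fpu_ctx,
lazy_switch, on_fpu_fault, ...) are invented for illustration; the real code operates on
union vfp_state, vfp_current_hw_state[] and the FPEXC register.

/*
 * Hypothetical stand-alone sketch of the lazy FPU hand-off; compile with
 * "gcc -Wall lazy_fpu.c".  Not the actual kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fpu_ctx { int regs; };			/* stand-in for union vfp_state */

struct thread {
	const char *name;
	bool is_root;				/* Linux (root) thread vs. real-time */
	bool uses_fpu;				/* stand-in for the lazily set XNFPU bit */
	struct fpu_ctx ctx;
};

static struct fpu_ctx *hw_owner;		/* stand-in for vfp_current_hw_state[cpu] */
static bool fpu_enabled;			/* stand-in for FPEXC_EN */

static void fpu_save(struct fpu_ctx *c)    { printf("  save %p\n", (void *)c); }
static void fpu_restore(struct fpu_ctx *c) { printf("  load %p\n", (void *)c); }

/*
 * Called on every context switch.  A real-time thread that has already
 * faulted once (uses_fpu set) gets its context eagerly; everybody else
 * just has the FPU disabled so that the first access traps.
 */
static void lazy_switch(struct thread *from, struct thread *to)
{
	struct fpu_ctx *from_ctx = (from && from->uses_fpu) ? &from->ctx : NULL;

	if (!to->is_root) {
		if (!to->uses_fpu) {
			fpu_enabled = false;	/* first FPU use will fault */
			return;
		}
		if (from_ctx != &to->ctx) {
			if (from_ctx)
				fpu_save(from_ctx);
			fpu_restore(&to->ctx);
		}
		fpu_enabled = true;
	} else {
		if (from_ctx)
			fpu_save(from_ctx);
		hw_owner = from_ctx;		/* tell "Linux" who owned the unit */
		fpu_enabled = false;		/* force a fault on the next FPU use */
	}
}

/*
 * FPU fault handler: first FPU use by a real-time thread.  Set its XNFPU
 * model bit and bind the hardware to its context; the faulting
 * instruction would then be retried.
 */
static void on_fpu_fault(struct thread *from, struct thread *to)
{
	if (fpu_enabled)
		return;				/* real FPU exception, not a lazy fault */
	to->uses_fpu = true;
	lazy_switch(from, to);
}

int main(void)
{
	struct thread root = { "root", true,  false, { 0 } };
	struct thread rt   = { "rt",   false, false, { 0 } };

	lazy_switch(&root, &rt);	/* rt never used the FPU: no save/restore */
	on_fpu_fault(&root, &rt);	/* first FPU access by rt binds its context */
	lazy_switch(&rt, &root);	/* back to Linux: save rt, disable the FPU */

	printf("hw owner: %p, fpu %s\n", (void *)hw_owner,
	       fpu_enabled ? "enabled" : "disabled");
	return 0;
}

The point of the scheme, as far as the diff shows, is that threads which never touch the
FPU pay no save/restore cost on a context switch, and XNFPU ownership is established by
the fault path rather than up front at thread creation.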

