[tip:x86/fpu] x86/fpu: Split old_fpu & new_fpu handling into separate functions

2016-10-16 Thread tip-bot for Rik van Riel
Commit-ID:  c474e50711aa79b7bd0ea30b44744baca5650375
Gitweb: http://git.kernel.org/tip/c474e50711aa79b7bd0ea30b44744baca5650375
Author: Rik van Riel 
AuthorDate: Fri, 14 Oct 2016 08:15:31 -0400
Committer:  Ingo Molnar 
CommitDate: Sun, 16 Oct 2016 11:38:41 +0200

x86/fpu: Split old_fpu & new_fpu handling into separate functions

By moving all of the new_fpu state handling into switch_fpu_finish(),
the code can be simplified some more.

This gets rid of the prefetch, but given the size of the FPU register
state on modern CPUs, and the amount of work done by __switch_to()
inbetween both functions, the value of a single cache line prefetch
seems somewhat dubious anyway.

Signed-off-by: Rik van Riel 
Acked-by: Dave Hansen 
Cc: Andy Lutomirski 
Cc: Borislav Petkov 
Cc: Brian Gerst 
Cc: Dave Hansen 
Cc: Denys Vlasenko 
Cc: Fenghua Yu 
Cc: H. Peter Anvin 
Cc: Josh Poimboeuf 
Cc: Linus Torvalds 
Cc: Oleg Nesterov 
Cc: Peter Zijlstra 
Cc: Quentin Casasnovas 
Cc: Thomas Gleixner 
Link: http://lkml.kernel.org/r/1476447331-21566-3-git-send-email-r...@redhat.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/fpu/internal.h | 48 -
 arch/x86/kernel/process_32.c|  5 ++--
 arch/x86/kernel/process_64.c|  5 ++--
 3 files changed, 19 insertions(+), 39 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h 
b/arch/x86/include/asm/fpu/internal.h
index 590f274..d4a6849 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -552,27 +552,15 @@ static inline int fpregs_active(void)
  *
  * This is a two-stage process:
  *
- *  - switch_fpu_prepare() saves the old state and
- *sets the new state of the CR0.TS bit. This is
- *done within the context of the old process.
+ *  - switch_fpu_prepare() saves the old state.
+ *This is done within the context of the old process.
  *
  *  - switch_fpu_finish() restores the new state as
  *necessary.
  */
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+static inline void
+switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-   fpu_switch_t fpu;
-
-   /*
-* If the task has used the math, pre-load the FPU on xsave processors
-* or if the past 5 consecutive context-switches used math.
-*/
-   fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active;
-
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
@@ -584,16 +572,6 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu 
*new_fpu, int cpu)
trace_x86_fpu_regs_deactivated(old_fpu);
} else
old_fpu->last_cpu = -1;
-
-   if (fpu.preload) {
-   if (fpregs_state_valid(new_fpu, cpu))
-   fpu.preload = 0;
-   else
-   prefetch(&new_fpu->state);
-   fpregs_activate(new_fpu);
-   }
-
-   return fpu;
 }
 
 /*
@@ -601,15 +579,19 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu 
*new_fpu, int cpu)
  */
 
 /*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
+ * Set up the userspace FPU context for the new task, if the task
+ * has used the FPU.
  */
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t 
fpu_switch)
+static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
-   if (fpu_switch.preload)
-   copy_kernel_to_fpregs(&new_fpu->state);
+   bool preload = static_cpu_has(X86_FEATURE_FPU) &&
+  new_fpu->fpstate_active;
+
+   if (preload) {
+   if (!fpregs_state_valid(new_fpu, cpu))
+   copy_kernel_to_fpregs(&new_fpu->state);
+   fpregs_activate(new_fpu);
+   }
 }
 
 /*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8e..7dc8c9c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -232,11 +232,10 @@ __switch_to(struct task_struct *prev_p, struct 
task_struct *next_p)
struct fpu *next_fpu = &next_p->fpu;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-   fpu_switch_t fpu_switch;
 
/* never put a printk in __switch_to... printk() calls wake_up*() 
indirectly */
 
-   

[tip:x86/fpu] x86/fpu: Split old_fpu & new_fpu handling into separate functions

2016-10-16 Thread tip-bot for Rik van Riel
Commit-ID:  c474e50711aa79b7bd0ea30b44744baca5650375
Gitweb: http://git.kernel.org/tip/c474e50711aa79b7bd0ea30b44744baca5650375
Author: Rik van Riel 
AuthorDate: Fri, 14 Oct 2016 08:15:31 -0400
Committer:  Ingo Molnar 
CommitDate: Sun, 16 Oct 2016 11:38:41 +0200

x86/fpu: Split old_fpu & new_fpu handling into separate functions

By moving all of the new_fpu state handling into switch_fpu_finish(),
the code can be simplified some more.

This gets rid of the prefetch, but given the size of the FPU register
state on modern CPUs, and the amount of work done by __switch_to()
inbetween both functions, the value of a single cache line prefetch
seems somewhat dubious anyway.

Signed-off-by: Rik van Riel 
Acked-by: Dave Hansen 
Cc: Andy Lutomirski 
Cc: Borislav Petkov 
Cc: Brian Gerst 
Cc: Dave Hansen 
Cc: Denys Vlasenko 
Cc: Fenghua Yu 
Cc: H. Peter Anvin 
Cc: Josh Poimboeuf 
Cc: Linus Torvalds 
Cc: Oleg Nesterov 
Cc: Peter Zijlstra 
Cc: Quentin Casasnovas 
Cc: Thomas Gleixner 
Link: http://lkml.kernel.org/r/1476447331-21566-3-git-send-email-r...@redhat.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/include/asm/fpu/internal.h | 48 -
 arch/x86/kernel/process_32.c|  5 ++--
 arch/x86/kernel/process_64.c|  5 ++--
 3 files changed, 19 insertions(+), 39 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h 
b/arch/x86/include/asm/fpu/internal.h
index 590f274..d4a6849 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -552,27 +552,15 @@ static inline int fpregs_active(void)
  *
  * This is a two-stage process:
  *
- *  - switch_fpu_prepare() saves the old state and
- *sets the new state of the CR0.TS bit. This is
- *done within the context of the old process.
+ *  - switch_fpu_prepare() saves the old state.
+ *This is done within the context of the old process.
  *
  *  - switch_fpu_finish() restores the new state as
  *necessary.
  */
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+static inline void
+switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-   fpu_switch_t fpu;
-
-   /*
-* If the task has used the math, pre-load the FPU on xsave processors
-* or if the past 5 consecutive context-switches used math.
-*/
-   fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active;
-
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
@@ -584,16 +572,6 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu 
*new_fpu, int cpu)
trace_x86_fpu_regs_deactivated(old_fpu);
} else
old_fpu->last_cpu = -1;
-
-   if (fpu.preload) {
-   if (fpregs_state_valid(new_fpu, cpu))
-   fpu.preload = 0;
-   else
-   prefetch(&new_fpu->state);
-   fpregs_activate(new_fpu);
-   }
-
-   return fpu;
 }
 
 /*
@@ -601,15 +579,19 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu 
*new_fpu, int cpu)
  */
 
 /*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
+ * Set up the userspace FPU context for the new task, if the task
+ * has used the FPU.
  */
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t 
fpu_switch)
+static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
-   if (fpu_switch.preload)
-   copy_kernel_to_fpregs(&new_fpu->state);
+   bool preload = static_cpu_has(X86_FEATURE_FPU) &&
+  new_fpu->fpstate_active;
+
+   if (preload) {
+   if (!fpregs_state_valid(new_fpu, cpu))
+   copy_kernel_to_fpregs(&new_fpu->state);
+   fpregs_activate(new_fpu);
+   }
 }
 
 /*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8e..7dc8c9c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -232,11 +232,10 @@ __switch_to(struct task_struct *prev_p, struct 
task_struct *next_p)
struct fpu *next_fpu = &next_p->fpu;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-   fpu_switch_t fpu_switch;
 
/* never put a printk in __switch_to... printk() calls wake_up*() 
indirectly */
 
-   fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+   switch_fpu_prepare(prev_fpu, cpu);
 
/*
 * Save away %gs. No need to save %fs, as it was saved on the
@@ -295,7 +294,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct 
*next_p)
if (prev->gs | next->gs)
lazy_load_gs(next->gs);
 
-