Re: [PATCH 2/2] x86/fpu: split old & new fpu handling into separate functions

2016-10-14 Thread Dave Hansen
On 10/14/2016 05:15 AM, r...@redhat.com wrote:
> From: Rik van Riel 
> 
> By moving all of the new fpu state handling into switch_fpu_finish,
> the code can be simplified some more. This does get rid of the
> prefetch, but given the size of the fpu register state on modern
> CPUs, and the amount of work done by __switch_to in-between both
> functions, the value of a single cache line prefetch seems somewhat
> dubious anyway.
...
> -
> -        if (fpu.preload) {
> -                if (fpregs_state_valid(new_fpu, cpu))
> -                        fpu.preload = 0;
> -                else
> -                        prefetch(&new_fpu->state);
> -                fpregs_activate(new_fpu);
> -        }
> -
> -        return fpu;
>  }

Yeah, that prefetch is highly dubious.  XRSTOR might not even be
_reading_ that cacheline if the state isn't present (xstate->xfeatures
bit is 0).  If we had to pick *a* cacheline to prefetch for XRSTOR, it
would be the XSAVE header, *not* the FPU state.
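
Just to illustrate -- not something I'm proposing for this series, and
prefetch_xsave_header() is a made-up name -- if we ever did want to keep
a prefetch on this path, it would look roughly like this, aimed at the
header cacheline instead of the start of the register area:

/* Illustrative only, not an existing kernel helper. */
static inline void prefetch_xsave_header(struct fpu *new_fpu)
{
        /* XRSTOR always examines the xfeatures bitmap in the header. */
        if (use_xsave())
                prefetch(&new_fpu->state.xsave.header);
}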

I actually made some attempts to optimize the PKRU handling by touching
and prefetching the state before calling XRSTOR.  Touching it before
the XRSTOR made things overall _worse_.

It would be ideal to have some data on whether this actually _does_
anything, but I can't imagine it being a real delta in either direction.
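
FWIW, if somebody does want numbers for this, a dumb pipe ping-pong
between two FPU-using tasks exercises switch_fpu_prepare() /
switch_fpu_finish() on every iteration.  Rough userspace sketch only --
untested against this patch, and the repeated multiply is just there to
keep FPU state live in both tasks:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>

#define ITERS 1000000L

int main(void)
{
        int p1[2], p2[2];
        char c = 0;
        long i;
        volatile double x = 1.0;        /* keep FPU state live */
        struct timespec t0, t1;

        if (pipe(p1) || pipe(p2))
                return 1;

        if (fork() == 0) {
                for (i = 0; i < ITERS; i++) {
                        read(p1[0], &c, 1);
                        x *= 1.000001;          /* child uses the FPU */
                        write(p2[1], &c, 1);
                }
                exit(0);
        }

        clock_gettime(CLOCK_MONOTONIC, &t0);
        for (i = 0; i < ITERS; i++) {
                write(p1[1], &c, 1);
                x *= 1.000001;                  /* parent uses the FPU */
                read(p2[0], &c, 1);
        }
        clock_gettime(CLOCK_MONOTONIC, &t1);

        printf("%.1f ns per round trip\n",
               ((t1.tv_sec - t0.tv_sec) * 1e9 +
                (t1.tv_nsec - t0.tv_nsec)) / ITERS);
        return 0;
}

Pin both tasks to one CPU (e.g. taskset -c 0) so the switches happen on
the same core, and compare the numbers with and without the series.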

Acked-by: Dave Hansen 


[PATCH 2/2] x86/fpu: split old & new fpu handling into separate functions

2016-10-14 Thread riel
From: Rik van Riel 

By moving all of the new fpu state handling into switch_fpu_finish,
the code can be simplified some more. This does get rid of the
prefetch, but given the size of the fpu register state on modern
CPUs, and the amount of work done by __switch_to in-between both
functions, the value of a single cache line prefetch seems somewhat
dubious anyway.

Signed-off-by: Rik van Riel 
---
 arch/x86/include/asm/fpu/internal.h | 48 -
 arch/x86/kernel/process_32.c        |  5 ++--
 arch/x86/kernel/process_64.c        |  5 ++--
 3 files changed, 19 insertions(+), 39 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index a75324675311..621ba3bfa2a7 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -552,27 +552,15 @@ static inline int fpregs_active(void)
  *
  * This is a two-stage process:
  *
- *  - switch_fpu_prepare() saves the old state and
- *    sets the new state of the CR0.TS bit. This is
- *    done within the context of the old process.
+ *  - switch_fpu_prepare() saves the old state.
+ *    This is done within the context of the old process.
  *
  *  - switch_fpu_finish() restores the new state as
  *    necessary.
  */
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+static inline void
+switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-        fpu_switch_t fpu;
-
-        /*
-         * If the task has used the math, pre-load the FPU on xsave processors
-         * or if the past 5 consecutive context-switches used math.
-         */
-        fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
-                      new_fpu->fpstate_active;
-
         if (old_fpu->fpregs_active) {
                 if (!copy_fpregs_to_fpstate(old_fpu))
                         old_fpu->last_cpu = -1;
@@ -584,16 +572,6 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
                 trace_x86_fpu_regs_deactivated(old_fpu);
         } else
                 old_fpu->last_cpu = -1;
-
-        if (fpu.preload) {
-                if (fpregs_state_valid(new_fpu, cpu))
-                        fpu.preload = 0;
-                else
-                        prefetch(&new_fpu->state);
-                fpregs_activate(new_fpu);
-        }
-
-        return fpu;
 }
 
 /*
@@ -601,15 +579,19 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
  */
 
 /*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
+ * Set up the userspace FPU context for the new task, if the task
+ * has used the FPU.
  */
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
-        if (fpu_switch.preload)
-                copy_kernel_to_fpregs(&new_fpu->state);
+        bool preload = static_cpu_has(X86_FEATURE_FPU) &&
+                       new_fpu->fpstate_active;
+
+        if (preload) {
+                if (!fpregs_state_valid(new_fpu, cpu))
+                        copy_kernel_to_fpregs(&new_fpu->state);
+                fpregs_activate(new_fpu);
+        }
 }
 
 /*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8efdc4c..7dc8c9c3d801 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -232,11 +232,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         struct fpu *next_fpu = &next->fpu;
         int cpu = smp_processor_id();
         struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-        fpu_switch_t fpu_switch;
 
         /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-        fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+        switch_fpu_prepare(prev_fpu, cpu);
 
         /*
          * Save away %gs. No need to save %fs, as it was saved on the
@@ -295,7 +294,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         if (prev->gs | next->gs)
                 lazy_load_gs(next->gs);
 
-        switch_fpu_finish(next_fpu, fpu_switch);
+        switch_fpu_finish(next_fpu, cpu);
 
         this_cpu_write(current_task, next_p);
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ee944bd2310d..705669efb762 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -264,9 +264,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         int cpu = smp_processor_id();
         struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
         unsigned prev_fsindex, prev_gsindex;
-        fpu_switch_t fpu_switch;
 
-        fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+        switch_fpu_prepare(prev_fpu, cpu);