Re: [PATCH 5/9] x86/fpu: remove fpu.counter

2016-10-04 Thread Andy Lutomirski
On Tue, Oct 4, 2016 at 5:34 PM,   wrote:
> From: Rik van Riel 
>
> With the lazy FPU code gone, we no longer use the counter field
> in struct fpu for anything. Get rid of it.

Reviewed-by: Andy Lutomirski 


[PATCH 5/9] x86/fpu: remove fpu.counter

2016-10-04 Thread riel
From: Rik van Riel 

With the lazy FPU code gone, we no longer use the counter field
in struct fpu for anything. Get rid of it.

Signed-off-by: Rik van Riel 
---
 arch/x86/include/asm/fpu/internal.h |  3 ---
 arch/x86/include/asm/fpu/types.h    | 11 -----------
 arch/x86/include/asm/trace/fpu.h    |  5 +----
 arch/x86/kernel/fpu/core.c          |  3 ---
 4 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 7801d32347a2..499d6ed0e376 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -581,16 +581,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
-   new_fpu->counter++;
__fpregs_activate(new_fpu);
trace_x86_fpu_regs_activated(new_fpu);
prefetch(&new_fpu->state);
}
} else {
-   old_fpu->counter = 0;
old_fpu->last_cpu = -1;
if (fpu.preload) {
-   new_fpu->counter++;
if (fpu_want_lazy_restore(new_fpu, cpu))
fpu.preload = 0;
else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486b02f9..e31332d6f0e8 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -322,17 +322,6 @@ struct fpu {
unsigned char   fpregs_active;
 
/*
-* @counter:
-*
-* This counter contains the number of consecutive context switches
-* during which the FPU stays used. If this is over a threshold, the
-* lazy FPU restore logic becomes eager, to save the trap overhead.
-* This is an unsigned char so that after 256 iterations the counter
-* wraps and the context switch behavior turns lazy again; this is to
-* deal with bursty apps that only use the FPU for a short time:
-*/
-   unsigned char   counter;
-   /*
 * @state:
 *
 * In-memory copy of all FPU registers that we save/restore
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1f5bf6..342e59789fcd 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
__field(struct fpu *, fpu)
__field(bool, fpregs_active)
__field(bool, fpstate_active)
-   __field(int, counter)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
),
@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
__entry->fpu= fpu;
__entry->fpregs_active  = fpu->fpregs_active;
__entry->fpstate_active = fpu->fpstate_active;
-   __entry->counter= fpu->counter;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
}
),
-   TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+   TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
__entry->fpregs_active,
__entry->fpstate_active,
-   __entry->counter,
__entry->xfeatures,
__entry->xcomp_bv
)
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 036e14fe3b77..6a37d525bdbe 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -222,7 +222,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-   dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
 
@@ -430,7 +429,6 @@ void fpu__restore(struct fpu *fpu)
trace_x86_fpu_before_restore(fpu);
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
-   fpu->counter++;
trace_x86_fpu_after_restore(fpu);
kernel_fpu_enable();
 }
@@ -448,7 +446,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
 void fpu__drop(struct fpu *fpu)
 {
preempt_disable();
-   fpu->counter = 0;
 
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
-- 
2.7.4
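
For context, the comment block deleted from struct fpu above is the only
description of what the counter did: it counted consecutive context
switches during which the task kept using the FPU, flipped the restore
path from lazy to eager once past a threshold (to avoid the #NM trap
overhead), and relied on unsigned char wraparound to fall back to lazy
behavior for bursty FPU users. A minimal standalone sketch of that
heuristic, reconstructed from the deleted comment; the threshold value
and all identifiers here are illustrative, not the actual kernel code:

    #include <stdbool.h>

    #define FPU_EAGER_THRESHOLD 5   /* illustrative cutoff, not taken
                                       from this patch */

    struct fpu_sketch {
            /*
             * unsigned char on purpose: after 256 FPU-using switches
             * the counter wraps to 0 and behavior turns lazy again,
             * so bursty tasks don't get stuck in eager mode.
             */
            unsigned char counter;
    };

    /* Called on a context switch where the incoming task used the FPU. */
    static bool want_eager_restore(struct fpu_sketch *fpu)
    {
            fpu->counter++;  /* one more consecutive FPU-using switch */
            /* Past the threshold, restore eagerly and skip the trap. */
            return fpu->counter > FPU_EAGER_THRESHOLD;
    }

With lazy restore removed, every switch is effectively eager, so the
bookkeeping above has no reader left, which is why the patch can drop
the field without any behavioral change.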


