[PATCH 1/2] x86: Delay loading sp0 slightly on task switch

From: Andy Lutomirski
Date: 2015-03-06
The change:

  75182b1632a8 ("x86/asm/entry: Switch all C consumers of kernel_stack to this_cpu_sp0()")

had the unintended side effect of changing the return value of
current_thread_info() during part of the context switch process.
Change it back.

This has no effect as far as I can tell -- it's just for
consistency.
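
For context: after 75182b1632a8, current_thread_info() is derived from
the sp0 field of the per-CPU TSS rather than from the kernel_stack
per-CPU variable, so the point at which __switch_to() calls load_sp0()
is now the point at which current_thread_info() flips from the previous
task to the next one. A rough sketch of the 4.0-era helpers (the
per-CPU TSS was named init_tss/cpu_tss around this time; exact
spellings from memory, not authoritative):

	/* sketch of this_cpu_sp0(), added by 75182b1632a8 */
	static inline unsigned long this_cpu_sp0(void)
	{
		/* sp0 lives in the per-CPU TSS; load_sp0() stores into it */
		return this_cpu_read_stable(init_tss.x86_tss.sp0);
	}

	/* sketch of current_thread_info() after 75182b1632a8 */
	static inline struct thread_info *current_thread_info(void)
	{
		/*
		 * thread_info sits at the bottom of the task's kernel
		 * stack, THREAD_SIZE below the stack top that sp0
		 * points at.
		 */
		return (struct thread_info *)(this_cpu_sp0() - THREAD_SIZE);
	}

Moving load_sp0() down next to the kernel_stack update restores the old
ordering: both values now change together, at the end of the switch.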

Signed-off-by: Andy Lutomirski 
---
 arch/x86/kernel/process_32.c | 10 +++++-----
 arch/x86/kernel/process_64.c |  6 +++---
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index d3460af3d27a..0405cab6634d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -256,11 +256,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
 	/*
-	 * Reload esp0.
-	 */
-	load_sp0(tss, next);
-
-	/*
 	 * Save away %gs. No need to save %fs, as it was saved on the
 	 * stack on entry.  No need to save %es and %ds, as those are
 	 * always kernel segments while inside the kernel.  Doing this
@@ -310,6 +305,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	arch_end_context_switch(next_p);
 
+	/*
+	 * Reload esp0.  This changes current_thread_info().
+	 */
+	load_sp0(tss, next);
+
 	this_cpu_write(kernel_stack,
 		  (unsigned long)task_stack_page(next_p) +
 		  THREAD_SIZE - KERNEL_STACK_OFFSET);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 2cd562f96c1f..1e393d27d701 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -283,9 +283,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
-	/* Reload esp0 and ss1. */
-	load_sp0(tss, next);
-
 	/* We must save %fs and %gs before load_TLS() because
 	 * %fs and %gs may be cleared by load_TLS().
 	 *
@@ -413,6 +410,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
 	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
 
+	/* Reload esp0 and ss1.  This changes current_thread_info(). */
+	load_sp0(tss, next);
+
 	this_cpu_write(kernel_stack,
 		  (unsigned long)task_stack_page(next_p) +
 		  THREAD_SIZE - KERNEL_STACK_OFFSET);
-- 
2.1.0
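
For reference, load_sp0() on native (non-paravirt) kernels is a plain
store into the per-CPU TSS, plus the ss1/SYSENTER_CS update on 32-bit
that the "esp0 and ss1" comment refers to; a sketch of the native
helper of that era (from memory, not authoritative):

	static inline void native_load_sp0(struct tss_struct *tss,
					   struct thread_struct *thread)
	{
		tss->x86_tss.sp0 = thread->sp0;
	#ifdef CONFIG_X86_32
		/* Only matters when SYSENTER is in use */
		if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
			tss->x86_tss.ss1 = thread->sysenter_cs;
			wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
		}
	#endif
	}

Since this is an ordinary memory write (plus at most one MSR write on
32-bit), moving the call past the segment and TLS reloads is cheap,
which fits the author's observation that the reordering has no visible
effect.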
