Today cpu_info->xen_cr3 is either 0 to indicate that %cr3 doesn't need
to be switched on entry to Xen, negative to record the (negated) value
while indicating that %cr3 isn't to be restored, or positive in case
%cr3 is to be restored.
Switch to using a flag byte instead of a negative xen_cr3 value in order
to allow %cr3 values with the high bit set, in case we want to keep TLB
entries when using the PCID feature.
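To illustrate the encoding change, here is a minimal C model of the
entry-path decision (illustration only, not part of the patch; the
struct and function names are invented for the example):

#include <stdbool.h>

/* Stand-in for the two cpu_info fields involved. */
struct cr3_state {
    unsigned long xen_cr3; /* CR3 to load on entry to Xen; 0 = nothing */
    bool use_pv_cr3;       /* new flag: restore pv_cr3 on exit to Xen? */
};

/*
 * Old encoding: the sign of xen_cr3 doubles as the "restore on exit"
 * indicator, so the high bit can't be part of the CR3 value itself.
 */
static unsigned long old_entry_cr3(long xen_cr3)
{
    if ( xen_cr3 > 0 )
        return xen_cr3;   /* load this value; restore pv_cr3 on exit */
    if ( xen_cr3 < 0 )
        return -xen_cr3;  /* load the negated value; don't restore */
    return 0;             /* nothing to do */
}

/*
 * New encoding: xen_cr3 is the plain value (0 = nothing to do); the
 * separate flag byte carries the restore decision, leaving all 64 bits
 * of xen_cr3 usable, e.g. for the PCID no-flush bit.
 */
static unsigned long new_entry_cr3(struct cr3_state *s)
{
    if ( s->xen_cr3 )
        s->use_pv_cr3 = false; /* entry to Xen cancels a pending restore */
    return s->xen_cr3;
}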
This reduces the number of branches in interrupt handling and results
in better performance (e.g. a parallel make of the Xen hypervisor on my
system used about 3% less system time).
Signed-off-by: Juergen Gross <jgr...@suse.com>
Reviewed-by: Jan Beulich <jbeul...@suse.com>
---
V3:
- renamed use_xen_cr3 to the better fitting use_pv_cr3
- corrected comment regarding semantics of use_pv_cr3 (Jan Beulich)
- prefer 32-bit operations over 8- or 16-bit ones (Jan Beulich)
---
xen/arch/x86/domain.c              |  1 +
xen/arch/x86/mm.c                  |  3 +-
xen/arch/x86/smpboot.c             |  2 ++
xen/arch/x86/x86_64/asm-offsets.c  |  1 +
xen/arch/x86/x86_64/compat/entry.S |  5 ++--
xen/arch/x86/x86_64/entry.S        | 59 ++++++++++++++++----------------------
xen/include/asm-x86/current.h      | 12 +++++---
7 files changed, 41 insertions(+), 42 deletions(-)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 9b001a03ec..801ac33810 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1696,6 +1696,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
ASSERT(local_irq_is_enabled());
+ get_cpu_info()->use_pv_cr3 = false;
get_cpu_info()->xen_cr3 = 0;
if ( unlikely(dirty_cpu != cpu) && dirty_cpu != VCPU_CPU_CLEAN )
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 73a38e8715..4997047edf 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -517,7 +517,8 @@ void write_ptbase(struct vcpu *v)
}
else
{
- /* Make sure to clear xen_cr3 before pv_cr3. */
+ /* Make sure to clear use_pv_cr3 and xen_cr3 before pv_cr3. */
+ cpu_info->use_pv_cr3 = false;
cpu_info->xen_cr3 = 0;
/* switch_cr3_cr4() serializes. */
switch_cr3_cr4(v->arch.cr3, new_cr4);
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index b4cfb01fd9..a34d38029e 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -324,6 +324,7 @@ void start_secondary(void *unused)
*/
spin_debug_disable();
+ get_cpu_info()->use_pv_cr3 = false;
get_cpu_info()->xen_cr3 = 0;
get_cpu_info()->pv_cr3 = 0;
@@ -1123,6 +1124,7 @@ void __init smp_prepare_boot_cpu(void)
per_cpu(scratch_cpumask, cpu) = &scratch_cpu0mask;
#endif
+ get_cpu_info()->use_pv_cr3 = false;
get_cpu_info()->xen_cr3 = 0;
get_cpu_info()->pv_cr3 = 0;
}
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index 9e2aefb00f..7ad024cf37 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -144,6 +144,7 @@ void __dummy__(void)
OFFSET(CPUINFO_use_shadow_spec_ctrl, struct cpu_info, use_shadow_spec_ctrl);
OFFSET(CPUINFO_bti_ist_info, struct cpu_info, bti_ist_info);
OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed);
+ OFFSET(CPUINFO_use_pv_cr3, struct cpu_info, use_pv_cr3);
DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
BLANK();
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 6c7fcf95b3..c9ddf35ee5 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -210,10 +210,9 @@ ENTRY(cstar_enter)
GET_STACK_END(bx)
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
- neg %rcx
+ test %rcx, %rcx
jz .Lcstar_cr3_okay
- mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
- neg %rcx
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lcstar_cr3_okay:
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 5f0758d64f..8b7d1c48ad 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -154,6 +154,7 @@ restore_all_guest:
rep movsq
.Lrag_copy_done:
mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
+ movb $1, STACK_CPUINFO_FIELD(use_pv_cr3)(%rdx)
mov %rax, %cr3
.Lrag_keep_cr3:
@@ -202,14 +203,9 @@ restore_all_xen:
* case we return to late PV exit code (from an NMI or #MC).
*/
GET_STACK_END(bx)
- mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rdx
+ cmpb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
+UNLIKELY_START(ne, exit_cr3)
mov STACK_CPUINFO_FIELD(pv_cr3)(%rbx), %rax
- test %rdx, %rdx
- /*
- * Ideally the condition would be "nsz", but such doesn't exist,
- * so "g" will have to do.
- */
-UNLIKELY_START(g, exit_cr3)
mov %rax, %cr3
UNLIKELY_END(exit_cr3)
@@ -251,10 +247,9 @@ ENTRY(lstar_enter)
GET_STACK_END(bx)
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
- neg %rcx
+ test %rcx, %rcx
jz .Llstar_cr3_okay
- mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
- neg %rcx
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Llstar_cr3_okay:
@@ -288,10 +283,9 @@ GLOBAL(sysenter_eflags_saved)
/* PUSHF above has saved EFLAGS.IF clear (the caller had it set). */
orl $X86_EFLAGS_IF, UREGS_eflags(%rsp)
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
- neg %rcx
+ test %rcx, %rcx
jz .Lsyse_cr3_okay
- mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
- neg %rcx
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lsyse_cr3_okay:
@@ -338,10 +332,9 @@ ENTRY(int80_direct_trap)
GET_STACK_END(bx)
mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
- neg %rcx
+ test %rcx, %rcx
jz .Lint80_cr3_okay
- mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
- neg %rcx
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%rbx)
mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
.Lint80_cr3_okay:
@@ -546,24 +539,24 @@ ENTRY(common_interrupt)
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+ mov STACK_CPUINFO_FIELD(use_pv_cr3)(%r14), %bl
mov %rcx, %r15
- neg %rcx
+ test %rcx, %rcx
jz .Lintr_cr3_okay
- jns .Lintr_cr3_load
- mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
- neg %rcx
-.Lintr_cr3_load:
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
mov %rcx, %cr3
xor %ecx, %ecx
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
testb $3, UREGS_cs(%rsp)
cmovnz %rcx, %r15
+ cmovnz %rcx, %rbx
.Lintr_cr3_okay:
CR4_PV32_RESTORE
movq %rsp,%rdi
callq do_IRQ
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+ mov %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
jmp ret_from_intr
ENTRY(page_fault)
@@ -578,18 +571,17 @@ GLOBAL(handle_exception)
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+ mov STACK_CPUINFO_FIELD(use_pv_cr3)(%r14), %r13b
mov %rcx, %r15
- neg %rcx
+ test %rcx, %rcx
jz .Lxcpt_cr3_okay
- jns .Lxcpt_cr3_load
- mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
- neg %rcx
-.Lxcpt_cr3_load:
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
mov %rcx, %cr3
xor %ecx, %ecx
mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
testb $3, UREGS_cs(%rsp)
cmovnz %rcx, %r15
+ cmovnz %rcx, %r13
.Lxcpt_cr3_okay:
handle_exception_saved:
@@ -644,6 +636,7 @@ handle_exception_saved:
mov (%rdx, %rax, 8), %rdx
INDIRECT_CALL %rdx
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+ mov %r13b, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
leaq VCPU_trap_bounce(%rbx),%rdx
@@ -677,6 +670,7 @@ exception_with_ints_disabled:
1: movq UREGS_error_code(%rsp),%rax # ec/ev
movq %rax,UREGS_kernel_sizeof(%rsp)
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+ mov %r13b, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
jmp restore_all_xen # return to fixup code
/* No special register assumptions. */
@@ -764,9 +758,6 @@ ENTRY(double_fault)
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rbx
test %rbx, %rbx
jz .Ldblf_cr3_okay
- jns .Ldblf_cr3_load
- neg %rbx
-.Ldblf_cr3_load:
mov %rbx, %cr3
.Ldblf_cr3_okay:
@@ -795,13 +786,11 @@ handle_ist_exception:
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
+ mov STACK_CPUINFO_FIELD(use_pv_cr3)(%r14), %bl
mov %rcx, %r15
- neg %rcx
+ test %rcx, %rcx
jz .List_cr3_okay
- jns .List_cr3_load
- mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
- neg %rcx
-.List_cr3_load:
+ movb $0, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
mov %rcx, %cr3
movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
.List_cr3_okay:
@@ -814,6 +803,7 @@ handle_ist_exception:
* and copy the context to stack bottom.
*/
xor %r15, %r15
+ xor %ebx, %ebx
GET_CPUINFO_FIELD(guest_cpu_user_regs,di)
movq %rsp,%rsi
movl $UREGS_kernel_sizeof/8,%ecx
@@ -825,6 +815,7 @@ handle_ist_exception:
mov (%rdx, %rax, 8), %rdx
INDIRECT_CALL %rdx
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
+ mov %bl, STACK_CPUINFO_FIELD(use_pv_cr3)(%r14)
cmpb $TRAP_nmi,UREGS_entry_vector(%rsp)
jne ret_from_intr
diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
index b2475783f8..43bdec1f49 100644
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -47,10 +47,7 @@ struct cpu_info {
* context is being entered. A value of zero indicates no setting of CR3
* is to be performed.
* The former is the value to restore when re-entering Xen, if any. IOW
- * its value being zero means there's nothing to restore. However, its
- * value can also be negative, indicating to the exit-to-Xen code that
- * restoring is not necessary, but allowing any nested entry code paths
- * to still know the value to put back into CR3.
+ * its value being zero means there's nothing to restore.
*/
unsigned long xen_cr3;
unsigned long pv_cr3;
@@ -68,6 +65,13 @@ struct cpu_info {
*/
bool root_pgt_changed;
+ /*
+ * use_pv_cr3 is set in case the value of pv_cr3 is to be written into
+ * CR3 when returning from an interrupt. The main use is when returning
+ * from an NMI or MCE to hypervisor code where pv_cr3 was active.
+ */
+ bool use_pv_cr3;
+
unsigned long __pad;
/* get_stack_bottom() must be 16-byte aligned */
};
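As an aside (again illustration only, not part of the patch; the struct
below is a stand-in for the cpu_info fields above), the exit-to-Xen
decision before and after, in C terms:

#include <stdbool.h>

struct exit_state {
    long old_xen_cr3;      /* before: sign-encoded xen_cr3 */
    unsigned long pv_cr3;  /* XPTI shadow page tables for the guest */
    bool use_pv_cr3;       /* after: explicit flag byte */
};

/* Before: restore pv_cr3 iff xen_cr3 is strictly positive; the asm had
 * to use condition "g", as no "nsz" condition exists. */
static bool old_restore_pv_cr3(const struct exit_state *s)
{
    return s->old_xen_cr3 > 0;
}

/* After: a single flag test, set e.g. when an NMI or #MC interrupted
 * code that was running on pv_cr3. */
static bool new_restore_pv_cr3(const struct exit_state *s)
{
    return s->use_pv_cr3;
}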
--
2.13.6