The clobbering of TRAPBOUNCE_flags in .L{compat_}bounce_exception is subsumed
by the logic at the end of pv_create_exception_frame().
This cleanup removes all callers of asm_domain_crash_synchronous(), which is
therefore dropped as well.
Signed-off-by: Andrew Cooper
---
CC: Jan Beulich
CC: Wei Liu
CC: Roger Pau Monné
v2:
* Remove redundant lea's
---
xen/arch/x86/traps.c | 23 --
xen/arch/x86/x86_64/compat/entry.S | 120 ++
xen/arch/x86/x86_64/entry.S| 147 ++---
xen/include/xen/sched.h| 7 --
4 files changed, 10 insertions(+), 287 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 27190e0..a9b53da 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2103,29 +2103,6 @@ long set_debugreg(struct vcpu *v, unsigned int reg,
unsigned long value)
return 0;
}
-void asm_domain_crash_synchronous(unsigned long addr)
-{
-/*
- * We need clear AC bit here because in entry.S AC is set
- * by ASM_STAC to temporarily allow accesses to user pages
- * which is prevented by SMAP by default.
- *
- * For some code paths, where this function is called, clac()
- * is not needed, but adding clac() here instead of each place
- * asm_domain_crash_synchronous() is called can reduce the code
- * redundancy, and it is harmless as well.
- */
-clac();
-
-if ( addr == 0 )
-addr = this_cpu(last_extable_addr);
-
-printk("domain_crash_sync called from entry.S: fault at %p %pS\n",
- _p(addr), _p(addr));
-
-__domain_crash_synchronous();
-}
-
/*
* Local variables:
* mode: C
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 3e8b6c1..4f681bf 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -60,7 +60,7 @@ compat_test_guest_events:
movl VCPU_event_sel(%rbx),%eax
movw %ax,TRAPBOUNCE_cs(%rdx)
movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
-call compat_create_bounce_frame
+call pv_create_exception_frame
jmp compat_test_all_events
ALIGN
@@ -102,8 +102,7 @@ compat_process_nmi:
movb %dl,VCPU_async_exception_mask(%rbx)
/* FALLTHROUGH */
compat_process_trap:
-leaq VCPU_trap_bounce(%rbx),%rdx
-call compat_create_bounce_frame
+call pv_create_exception_frame
jmp compat_test_all_events
/* %rbx: struct vcpu, interrupts disabled */
@@ -196,8 +195,7 @@ ENTRY(compat_post_handle_exception)
testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
jzcompat_test_all_events
.Lcompat_bounce_exception:
-call compat_create_bounce_frame
-movb $0,TRAPBOUNCE_flags(%rdx)
+call pv_create_exception_frame
jmp compat_test_all_events
/* See lstar_enter for entry register state. */
@@ -264,118 +262,10 @@ ENTRY(compat_sysenter)
movl $FLAT_COMPAT_USER_SS,UREGS_ss(%rsp)
cmovzl %ecx,%eax
movw %ax,TRAPBOUNCE_cs(%rdx)
-call compat_create_bounce_frame
+call pv_create_exception_frame
jmp compat_test_all_events
ENTRY(compat_int80_direct_trap)
CR4_PV32_RESTORE
-call compat_create_bounce_frame
+call pv_create_exception_frame
jmp compat_test_all_events
-
-/* compat_create_bounce_frame & helpers don't need to be in .text.entry */
-.text
-
-/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:*/
-/* {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]} */
-/* %rdx: trap_bounce, %rbx: struct vcpu */
-/* On return only %rbx and %rdx are guaranteed non-clobbered.*/
-compat_create_bounce_frame:
-ASSERT_INTERRUPTS_ENABLED
-mov %fs,%edi
-ASM_STAC
-testb $2,UREGS_cs+8(%rsp)
-jz1f
-/* Push new frame at registered guest-OS stack base. */
-movl VCPU_kernel_sp(%rbx),%esi
-.Lft1: mov VCPU_kernel_ss(%rbx),%fs
-subl $2*4,%esi
-movl UREGS_rsp+8(%rsp),%eax
-.Lft2: movl %eax,%fs:(%rsi)
-movl UREGS_ss+8(%rsp),%eax
-.Lft3: movl %eax,%fs:4(%rsi)
-jmp 2f
-1: /* In kernel context already: push new frame at existing %rsp. */
-movl UREGS_rsp+8(%rsp),%esi
-.Lft4: mov UREGS_ss+8(%rsp),%fs
-2:
-movq VCPU_domain(%rbx),%r8
-subl $3*4,%esi
-movq VCPU_vcpu_info(%rbx),%rax
-pushq COMPAT_VCPUINFO_upcall_mask(%rax)
-testb $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
-setnz %ch # TBF_INTERRUPT -> set upcall mask
-orb %ch,COMPAT_VCPUINFO_upcall_mask(%rax)
-popq %rax
-shll $16,%eax # Bits 16-23: saved_upcall_mask
-movw