The original implementation of guest_code_xsave makes a jmp to guest_sev_es_code in inline assembly. When code that uses guest_sev_es_code is removed, guest_sev_es_code will be optimized out, leading to a linking error since guest_code_xsave still tries to jmp to guest_sev_es_code.
Rewrite guest_code_xsave() to instead make a call, in C, to
guest_sev_es_code(), so that usage of guest_sev_es_code() is made known
to the compiler.

This rewriting also gives a name to the xsave inline assembly, improving
readability.

Signed-off-by: Ackerley Tng <[email protected]>
---
 .../selftests/kvm/x86/sev_smoke_test.c        | 24 +++++++++++++------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
index 8bd37a476f159..7e69da01cecf4 100644
--- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
@@ -80,13 +80,23 @@
 	GUEST_DONE();
 }
 
-/* Stash state passed via VMSA before any compiled code runs. */
-extern void guest_code_xsave(void);
-asm("guest_code_xsave:\n"
-    "mov $" __stringify(XFEATURE_MASK_X87_AVX) ", %eax\n"
-    "xor %edx, %edx\n"
-    "xsave (%rdi)\n"
-    "jmp guest_sev_es_code");
+static void xsave_all_registers(void *addr)
+{
+	__asm__ __volatile__(
+		"mov $" __stringify(XFEATURE_MASK_X87_AVX) ", %%eax\n"
+		"xor %%edx, %%edx\n"
+		"xsave (%0)"
+		:
+		: "r"(addr)
+		: "eax", "edx", "memory"
+	);
+}
+
+static void guest_code_xsave(void *vmsa_gva)
+{
+	xsave_all_registers(vmsa_gva);
+	guest_sev_es_code();
+}
 
 static void compare_xsave(u8 *from_host, u8 *from_guest)
 {
-- 
2.53.0.1018.g2bb0e51243-goog
