From: Grygorii Strashko <[email protected]>

Factor out COMPAT HVM code under CONFIG_COMPAT ifdefs in preparation for
making HVM COMPAT support optional.
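All hypercall dispatch sites follow the same shape: the compat branch is
compiled only under CONFIG_COMPAT and the native handler becomes the
unconditional fallback, e.g. (excerpt of the resulting hvm_memory_op()):

  #ifdef CONFIG_COMPAT
      if ( vcpu_is_hcall_compat(current) )
          rc = compat_memory_op(cmd, arg);
      else
  #endif
          rc = do_memory_op(cmd, arg);

With CONFIG_COMPAT disabled, 32-bit HVM hypercalls fail in hvm_hypercall()
with -EOPNOTSUPP, so the compat multicall path becomes unreachable.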
Signed-off-by: Grygorii Strashko <[email protected]>
---
 xen/arch/x86/hvm/hvm.c       | 13 ++++++++++++-
 xen/arch/x86/hvm/hypercall.c | 37 +++++++++++++++++++++++++++---------
 2 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2e47a71714fd..56c0059401d0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -69,7 +69,9 @@
 #include <public/version.h>
 #include <public/vm_event.h>
 
+#ifdef CONFIG_COMPAT
 #include <compat/hvm/hvm_op.h>
+#endif
 
 bool __read_mostly hvm_enabled;
 
@@ -1255,6 +1257,7 @@ static int cf_check hvm_save_cpu_xsave_states(
     return 0;
 }
 
+#ifdef CONFIG_COMPAT
 /*
  * Structure layout conformity checks, documenting correctness of the cast in
  * the invocation of validate_xstate() below.
@@ -1267,6 +1270,7 @@ CHECK_FIELD_(struct, xsave_hdr, xcomp_bv);
 CHECK_FIELD_(struct, xsave_hdr, reserved);
 #undef compat_xsave_hdr
 #undef xen_xsave_hdr
+#endif /* CONFIG_COMPAT */
 
 static int cf_check hvm_load_cpu_xsave_states(
     struct domain *d, hvm_domain_context_t *h)
@@ -3991,7 +3995,7 @@ static void hvm_latch_shinfo_size(struct domain *d)
      */
     if ( current->domain == d )
     {
-        d->arch.has_32bit_shinfo =
+        d->arch.has_32bit_shinfo = IS_ENABLED(CONFIG_COMPAT) &&
             hvm_guest_x86_mode(current) != X86_MODE_64BIT;
 
         /*
@@ -4965,6 +4969,7 @@ static int do_altp2m_op(
 #endif /* CONFIG_ALTP2M */
 }
 
+#ifdef CONFIG_COMPAT
 DEFINE_XEN_GUEST_HANDLE(compat_hvm_altp2m_op_t);
 
 /*
@@ -5064,6 +5069,12 @@ static int compat_altp2m_op(
 
     return rc;
 }
+#else
+static int compat_altp2m_op(XEN_GUEST_HANDLE_PARAM(void) arg)
+{
+    return -EOPNOTSUPP;
+}
+#endif /* CONFIG_COMPAT */
 
 static int hvmop_get_mem_type(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_get_mem_type_t) arg)
diff --git a/xen/arch/x86/hvm/hypercall.c b/xen/arch/x86/hvm/hypercall.c
index 549e25445e67..f8b2c90b7c41 100644
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -29,10 +29,12 @@ long hvm_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return -ENOSYS;
     }
 
-    if ( !vcpu_is_hcall_compat(current) )
-        rc = do_memory_op(cmd, arg);
-    else
+#ifdef CONFIG_COMPAT
+    if ( vcpu_is_hcall_compat(current) )
         rc = compat_memory_op(cmd, arg);
+    else
+#endif
+        rc = do_memory_op(cmd, arg);
 
     return rc;
 }
@@ -57,10 +59,12 @@ long hvm_grant_table_op(
         return -ENOSYS;
     }
 
-    if ( !vcpu_is_hcall_compat(current) )
-        return do_grant_table_op(cmd, uop, count);
-    else
+#ifdef CONFIG_COMPAT
+    if ( vcpu_is_hcall_compat(current) )
         return compat_grant_table_op(cmd, uop, count);
+    else
+#endif
+        return do_grant_table_op(cmd, uop, count);
 }
 #endif
 
@@ -96,10 +100,12 @@ long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         return -ENOSYS;
     }
 
-    if ( !vcpu_is_hcall_compat(curr) )
-        return do_physdev_op(cmd, arg);
-    else
+#ifdef CONFIG_COMPAT
+    if ( vcpu_is_hcall_compat(curr) )
         return compat_physdev_op(cmd, arg);
+    else
+#endif
+        return do_physdev_op(cmd, arg);
 }
 
 int hvm_hypercall(struct cpu_user_regs *regs)
@@ -171,6 +177,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
         HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu(%x, %x, %x, %x, %x)", eax,
                     regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
 
+#ifdef CONFIG_COMPAT
         curr->hcall_compat = true;
         call_handlers_hvm32(eax, regs->eax, regs->ebx, regs->ecx, regs->edx,
                             regs->esi, regs->edi);
@@ -178,6 +185,9 @@ int hvm_hypercall(struct cpu_user_regs *regs)
 
         if ( !curr->hcall_preempted && regs->eax != -ENOSYS )
             clobber_regs(regs, eax, hvm, 32);
+#else
+        regs->eax = -EOPNOTSUPP;
+#endif
     }
 
     hvmemul_cache_restore(curr, token);
@@ -208,10 +218,19 @@ enum mc_disposition hvm_do_multicall_call(struct mc_state *state)
     }
     else
     {
+#ifdef CONFIG_COMPAT
         struct compat_multicall_entry *call = &state->compat_call;
 
         call_handlers_hvm32(call->op, call->result, call->args[0], call->args[1],
                             call->args[2], call->args[3], call->args[4]);
+#else
+        /*
+         * Code should never reach here when !CONFIG_COMPAT, as any 32-bit
+         * hypercall should have bailed out earlier from hvm_hypercall()
+         * with -EOPNOTSUPP.
+         */
+        unreachable();
+#endif
     }
 
     return !hvm_get_cpl(curr) ? mc_continue : mc_preempt;
-- 
2.34.1
