Address size is entirely irrelevant to operand size determination: for VMREAD and VMWRITE outside of 64-bit mode the operand size is 32 bits, while in 64-bit mode it is (naturally) 64 bits. For all other insns it is 64 bits (a physical address) or 128 bits (INVEPT, INVVPID). To limit the amount of change here, keep the latter at reading only 64 bits from guest space.

Fixes: 09fce8016596 ("Nested VMX: Emulation of guest VMXON/OFF instruction")
Signed-off-by: Jan Beulich <jbeul...@suse.com>
---
Beyond the wrong operand handling for INVEPT and INVVPID, the latter also
doesn't even have the part read checked to have bits 16 and above all
clear.

--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -399,13 +399,13 @@ static inline u32 __n2_secondary_exec_co
 
 static int decode_vmx_inst(struct cpu_user_regs *regs,
                            struct vmx_inst_decoded *decode,
-                           unsigned long *poperandS)
+                           unsigned long *poperandS, unsigned int size)
 {
     struct vcpu *v = current;
     union vmx_inst_info info;
     struct segment_register seg;
     unsigned long base, index, seg_base, disp, offset;
-    int scale, size;
+    unsigned int scale;
 
     __vmread(VMX_INSTRUCTION_INFO, &offset);
     info.word = offset;
@@ -437,7 +437,8 @@ static int decode_vmx_inst(struct cpu_us
 
         __vmread(EXIT_QUALIFICATION, &disp);
 
-        size = 1 << (info.fields.addr_size + 1);
+        if ( !size )
+            size = 4 << mode_64bit;
 
         offset = base + index * scale + disp;
         base = !mode_64bit || info.fields.segment >= x86_seg_fs ?
@@ -452,7 +453,9 @@ static int decode_vmx_inst(struct cpu_us
         if ( poperandS != NULL )
         {
             pagefault_info_t pfinfo;
-            int rc = hvm_copy_from_guest_linear(poperandS, base, size,
+            int rc = hvm_copy_from_guest_linear(poperandS, base,
+                                                min_t(unsigned int, size,
+                                                      sizeof(*poperandS)),
                                                 0, &pfinfo);
 
             if ( rc == HVMTRANS_bad_linear_to_gfn )
@@ -1549,7 +1552,7 @@ static int nvmx_handle_vmxon(struct cpu_
     uint32_t nvmcs_revid;
     int rc;
 
-    rc = decode_vmx_inst(regs, &decode, &gpa);
+    rc = decode_vmx_inst(regs, &decode, &gpa, sizeof(gpa));
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1776,7 +1779,7 @@ static int nvmx_handle_vmptrld(struct cp
     unsigned long gpa = 0;
     int rc;
 
-    rc = decode_vmx_inst(regs, &decode, &gpa);
+    rc = decode_vmx_inst(regs, &decode, &gpa, sizeof(gpa));
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1853,7 +1856,7 @@ static int nvmx_handle_vmptrst(struct cp
     unsigned long gpa = 0;
     int rc;
 
-    rc = decode_vmx_inst(regs, &decode, &gpa);
+    rc = decode_vmx_inst(regs, &decode, &gpa, sizeof(gpa));
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1879,7 +1882,7 @@ static int nvmx_handle_vmclear(struct cp
     void *vvmcs;
     int rc;
 
-    rc = decode_vmx_inst(regs, &decode, &gpa);
+    rc = decode_vmx_inst(regs, &decode, &gpa, sizeof(gpa));
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1941,7 +1944,7 @@ static int nvmx_handle_vmread(struct cpu
     u64 value = 0;
     int rc;
 
-    rc = decode_vmx_inst(regs, &decode, NULL);
+    rc = decode_vmx_inst(regs, &decode, NULL, 0);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -1984,7 +1987,7 @@ static int nvmx_handle_vmwrite(struct cp
     enum vmx_insn_errno err;
     int rc;
 
-    rc = decode_vmx_inst(regs, &decode, &operand);
+    rc = decode_vmx_inst(regs, &decode, &operand, 0);
     if ( rc != X86EMUL_OKAY )
         return rc;
 
@@ -2026,7 +2029,7 @@ static int nvmx_handle_invept(struct cpu
     unsigned long eptp;
     int ret;
 
-    if ( (ret = decode_vmx_inst(regs, &decode, &eptp)) != X86EMUL_OKAY )
+    if ( (ret = decode_vmx_inst(regs, &decode, &eptp, 16)) != X86EMUL_OKAY )
         return ret;
 
     switch ( reg_read(regs, decode.reg2) )
@@ -2054,7 +2057,7 @@ static int nvmx_handle_invvpid(struct cp
     unsigned long vpid;
    int ret;
 
-    if ( (ret = decode_vmx_inst(regs, &decode, &vpid)) != X86EMUL_OKAY )
+    if ( (ret = decode_vmx_inst(regs, &decode, &vpid, 16)) != X86EMUL_OKAY )
         return ret;
 
     switch ( reg_read(regs, decode.reg2) )
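
For illustration only, not part of the patch: a minimal sketch of the size
rule the new parameter encodes. The helper name and its boolean parameters
are invented here; in the patch the callers simply pass sizeof(gpa), 0, or
16 to decode_vmx_inst() directly.

#include <stdbool.h>

/*
 * Hypothetical helper mirroring the sizes passed to decode_vmx_inst():
 * VMREAD/VMWRITE use the natural operand size, VMXON/VMPTRLD/VMPTRST/
 * VMCLEAR take a 64-bit physical address, and INVEPT/INVVPID reference
 * a 128-bit in-memory descriptor (of which the patch still reads only
 * the low 64 bits).
 */
static unsigned int vmx_inst_operand_size(bool mode_64bit,
                                          bool vmread_vmwrite,
                                          bool invept_invvpid)
{
    if ( vmread_vmwrite )
        return 4 << mode_64bit;   /* 32 bits, or 64 bits in 64-bit mode */
    if ( invept_invvpid )
        return 16;                /* 128-bit descriptor */
    return 8;                     /* 64-bit physical address */
}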