The functions that are being moved ahead of skip_emulated_instruction()
here do not depend on an updated instruction pointer, and moving
skip_emulated_instruction() to the end of each handler will make it
easier to return skip_emulated_instruction()'s return value in a later
patch.

Signed-off-by: Kyle Huey <kh...@kylehuey.com>
---
 arch/x86/kvm/vmx.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e4af9699..f2f9cf5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5703,18 +5703,18 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                                vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
                                return 0;
                        }
                }
                break;
        case 2: /* clts */
                handle_clts(vcpu);
                trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
-               skip_emulated_instruction(vcpu);
                vmx_fpu_activate(vcpu);
+               skip_emulated_instruction(vcpu);
                return 1;
        case 1: /*mov from cr*/
                switch (cr) {
                case 3:
                        val = kvm_read_cr3(vcpu);
                        kvm_register_write(vcpu, reg, val);
                        trace_kvm_cr_read(cr, val);
                        skip_emulated_instruction(vcpu);
@@ -6128,18 +6128,18 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 
 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
        int ret;
        gpa_t gpa;
 
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
        if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
-               skip_emulated_instruction(vcpu);
                trace_kvm_fast_mmio(gpa);
+               skip_emulated_instruction(vcpu);
                return 1;
        }
 
        ret = handle_mmio_page_fault(vcpu, gpa, true);
        if (likely(ret == RET_MMIO_PF_EMULATE))
                return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
                                              EMULATE_DONE;
 
@@ -6502,18 +6502,18 @@ static __exit void hardware_unsetup(void)
  * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
  * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
  */
 static int handle_pause(struct kvm_vcpu *vcpu)
 {
        if (ple_gap)
                grow_ple_window(vcpu);
 
-       skip_emulated_instruction(vcpu);
        kvm_vcpu_on_spin(vcpu);
+       skip_emulated_instruction(vcpu);
 
        return 1;
 }
 
 static int handle_nop(struct kvm_vcpu *vcpu)
 {
        skip_emulated_instruction(vcpu);
        return 1;
@@ -6957,18 +6957,18 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
        vmx->nested.vmcs02_num = 0;
 
        hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_PINNED);
        vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
 
        vmx->nested.vmxon = true;
 
-       skip_emulated_instruction(vcpu);
        nested_vmx_succeed(vcpu);
+       skip_emulated_instruction(vcpu);
        return 1;
 
 out_shadow_vmcs:
        kfree(vmx->nested.cached_vmcs12);
 
 out_cached_vmcs12:
        free_page((unsigned long)vmx->nested.msr_bitmap);
 
@@ -7078,18 +7078,18 @@ static void free_nested(struct vcpu_vmx *vmx)
 }
 
 /* Emulate the VMXOFF instruction */
 static int handle_vmoff(struct kvm_vcpu *vcpu)
 {
        if (!nested_vmx_check_permission(vcpu))
                return 1;
        free_nested(to_vmx(vcpu));
-       skip_emulated_instruction(vcpu);
        nested_vmx_succeed(vcpu);
+       skip_emulated_instruction(vcpu);
        return 1;
 }
 
 /* Emulate the VMCLEAR instruction */
 static int handle_vmclear(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        gpa_t vmptr;
@@ -7119,18 +7119,18 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        }
        vmcs12 = kmap(page);
        vmcs12->launch_state = 0;
        kunmap(page);
        nested_release_page(page);
 
        nested_free_vmcs02(vmx, vmptr);
 
-       skip_emulated_instruction(vcpu);
        nested_vmx_succeed(vcpu);
+       skip_emulated_instruction(vcpu);
        return 1;
 }
 
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
 
 /* Emulate the VMLAUNCH instruction */
 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
 {
-- 
2.10.2

Reply via email to