In the case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.

Signed-off-by: Andre Przywara <andre.przyw...@amd.com>
---
 arch/x86/include/asm/kvm_host.h |    3 +++
 arch/x86/include/asm/svm.h      |    4 +++-
 arch/x86/kvm/emulate.c          |    1 +
 arch/x86/kvm/svm.c              |   20 ++++++++++++++++++++
 arch/x86/kvm/vmx.c              |    7 +++++++
 5 files changed, 34 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cfbcbfa..3e3a67e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -586,6 +586,9 @@ struct kvm_x86_ops {
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+       int (*prefetch_instruction)(struct kvm_vcpu *vcpu);
+
        const struct trace_print_flags *exit_reasons_str;
 };
 
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 589fc25..6d64b1d 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
        u64 lbr_ctl;
        u64 reserved_5;
        u64 next_rip;
-       u8 reserved_6[816];
+       u8 insn_len;
+       u8 insn_bytes[15];
+       u8 reserved_6[800];
 };
 
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6366735..abff8ff 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -525,6 +525,7 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
        /* x86 instructions are limited to 15 bytes. */
        if (eip + size - ctxt->eip > 15)
                return X86EMUL_UNHANDLEABLE;
+       kvm_x86_ops->prefetch_instruction(ctxt->vcpu);
        while (size--) {
                rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
                if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3cf2cef..ed94e9a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -464,6 +464,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        svm_set_interrupt_shadow(vcpu, 0);
 }
 
+static int prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       uint8_t len;
+       struct fetch_cache *fetch;
+
+       len = svm->vmcb->control.insn_len & 0x0F;
+       if (len == 0)
+               return 1;
+
+       fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
+       fetch->start = kvm_rip_read(&svm->vcpu);
+       fetch->end = fetch->start + len;
+       memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
+
+       return 0;
+}
+
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject)
@@ -3830,6 +3848,8 @@ static struct kvm_x86_ops svm_x86_ops = {
        .adjust_tsc_offset = svm_adjust_tsc_offset,
 
        .set_tdp_cr3 = set_tdp_cr3,
+
+       .prefetch_instruction = prefetch_instruction,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 72cfdb7..4825545 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1009,6 +1009,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static int prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject)
@@ -4362,6 +4367,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .adjust_tsc_offset = vmx_adjust_tsc_offset,
 
        .set_tdp_cr3 = vmx_set_cr3,
+
+       .prefetch_instruction = prefetch_instruction,
 };
 
 static int __init vmx_init(void)
-- 
1.6.4


--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to