From: Fred Griffoul <fgri...@amazon.co.uk>

Optimize L1 MSR bitmap access by replacing per-call map/unmap
operations with a persistent gfn_to_pfn_cache. This reduces overhead
on L2 VM-entry, where nested_vmx_prepare_msr_bitmap() merges L1's MSR
intercepts with L0's requirements.

The current implementation maps and unmaps the bitmap page with
kvm_vcpu_map_readonly() and kvm_vcpu_unmap() on every call, which
incurs a significant performance cost, particularly with unmanaged
guest memory.
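
For reference, this is the per-call pattern being removed (taken from
the diff below; the intercept merge loop is elided):

	struct kvm_host_map map;

	/* Map the L1 bitmap page afresh on every call ... */
	if (kvm_vcpu_map_readonly(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), &map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map.hva;
	/* ... merge L1's intercepts with L0's ... */

	/* ... and tear the mapping down again before returning. */
	kvm_vcpu_unmap(vcpu, &map);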

New implementation (sketched below):
- Initializes the pfn cache when the vCPU enters VMX operation.
- Revalidates and reuses the cached mapping on each L2 VM-entry.
- Deactivates the cache when VMX operation ends.
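
A minimal sketch of the cache lifecycle, stitched together from the
hunks below (the call sites match the patch; the surrounding control
flow is elided):

	/* enter_vmx_operation(): bind the cache to the VM. */
	kvm_gpc_init(&vmx->nested.msr_bitmap_cache, vcpu->kvm);

	/* nested_vmx_prepare_msr_bitmap(): validate the mapping under
	 * the cache read lock, re-activating it if the cached gpa or
	 * pfn is stale, then access the bitmap through the cached hva.
	 */
	gpc = &vmx->nested.msr_bitmap_cache;
	if (nested_gpc_lock(gpc, vmcs12->msr_bitmap))
		return false;
	msr_bitmap_l1 = (unsigned long *)gpc->khva;
	/* ... merge intercepts into vmcs02's bitmap ... */
	nested_gpc_unlock(gpc);

	/* free_nested(): unmap and invalidate the cache. */
	kvm_gpc_deactivate(&vmx->nested.msr_bitmap_cache);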

Signed-off-by: Fred Griffoul <fgri...@amazon.co.uk>
---
 arch/x86/kvm/vmx/nested.c | 42 +++++++++++++++++++++++++++++++++++----
 arch/x86/kvm/vmx/vmx.h    |  2 ++
 2 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b8ea1969113d..aa4fe1fe571d 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -315,6 +315,34 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
        vcpu->arch.regs_dirty = 0;
 }
 
+/*
+ * Map a single guest page starting at @gpa and lock the cache for access.
+ */
+static int nested_gpc_lock(struct gfn_to_pfn_cache *gpc, gpa_t gpa)
+{
+       int err;
+
+       if (WARN_ON_ONCE(!PAGE_ALIGNED(gpa)))
+               return -EINVAL;
+retry:
+       read_lock(&gpc->lock);
+       if (!kvm_gpc_check(gpc, PAGE_SIZE) || (gpc->gpa != gpa)) {
+               read_unlock(&gpc->lock);
+               err = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
+               if (err)
+                       return err;
+
+               goto retry;
+       }
+
+       return 0;
+}
+
+static void nested_gpc_unlock(struct gfn_to_pfn_cache *gpc)
+{
+       read_unlock(&gpc->lock);
+}
+
 static void nested_put_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -344,6 +372,9 @@ static void free_nested(struct kvm_vcpu *vcpu)
        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
        vmx->nested.vmxon_ptr = INVALID_GPA;
+
+       kvm_gpc_deactivate(&vmx->nested.msr_bitmap_cache);
+
        free_vpid(vmx->nested.vpid02);
        vmx->nested.posted_intr_nv = -1;
        vmx->nested.current_vmptr = INVALID_GPA;
@@ -625,7 +656,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
        int msr;
        unsigned long *msr_bitmap_l1;
        unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
-       struct kvm_host_map map;
+       struct gfn_to_pfn_cache *gpc;
 
        /* Nothing to do if the MSR bitmap is not in use.  */
        if (!cpu_has_vmx_msr_bitmap() ||
@@ -648,10 +679,11 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                        return true;
        }
 
-       if (kvm_vcpu_map_readonly(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), &map))
+       gpc = &vmx->nested.msr_bitmap_cache;
+       if (nested_gpc_lock(gpc, vmcs12->msr_bitmap))
                return false;
 
-       msr_bitmap_l1 = (unsigned long *)map.hva;
+       msr_bitmap_l1 = (unsigned long *)gpc->khva;
 
        /*
         * To keep the control flow simple, pay eight 8-byte writes (sixteen
@@ -721,7 +753,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
        nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
                                         MSR_IA32_MPERF, MSR_TYPE_R);
 
-       kvm_vcpu_unmap(vcpu, &map);
+       nested_gpc_unlock(gpc);
 
        vmx->nested.force_msr_bitmap_recalc = false;
 
@@ -5352,6 +5384,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 
        vmx->nested.vpid02 = allocate_vpid();
 
+       kvm_gpc_init(&vmx->nested.msr_bitmap_cache, vcpu->kvm);
+
        vmx->nested.vmcs02_initialized = false;
        vmx->nested.vmxon = true;
 
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index d3389baf3ab3..3a6983222841 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -152,6 +152,8 @@ struct nested_vmx {
 
        struct loaded_vmcs vmcs02;
 
+       struct gfn_to_pfn_cache msr_bitmap_cache;
+
        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
-- 
2.51.0