... which also avoids using kvm_vcpu_gpa_to_page(..), which assumes that
guest memory is backed by a "struct page".

Signed-off-by: KarimAllah Ahmed <[email protected]>
---
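Note (illustration only, not part of the patch): the PML log is a single
guest page holding 512 u64 GPA entries, and vmcs12->guest_pml_index counts
down from the top, so the entry being updated lives at guest physical
address pml_address + guest_pml_index * sizeof(u64). The sketch below just
mirrors the hunk further down; "entry" is a name introduced here purely
for illustration:

        /* Guest-physical address of the current PML slot in L1's page. */
        gpa_t entry = vmcs12->pml_address +
                      vmcs12->guest_pml_index * sizeof(u64);

        /* Write the logged GPA straight into guest memory; no kmap(). */
        if (kvm_write_guest(vcpu->kvm, entry, &gpa, sizeof(gpa)))
                return 0;       /* write failed: drop the entry, as before */

        vmcs12->guest_pml_index--;
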
 arch/x86/kvm/vmx.c | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e5653d2..0a98d1a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -12111,9 +12111,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
 {
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       gpa_t gpa;
-       struct page *page = NULL;
-       u64 *pml_address;
+       gpa_t gpa, dst;
 
        if (is_guest_mode(vcpu)) {
                WARN_ON_ONCE(vmx->nested.pml_full);
@@ -12133,15 +12131,12 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
                }
 
                gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+               dst = vmcs12->pml_address + vmcs12->guest_pml_index * sizeof(u64);
 
-               page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
-               if (is_error_page(page))
+               if (kvm_write_guest(vcpu->kvm, dst, &gpa, sizeof(gpa)))
                        return 0;
 
-               pml_address = kmap(page);
-               pml_address[vmcs12->guest_pml_index--] = gpa;
-               kunmap(page);
-               kvm_release_page_clean(page);
+               vmcs12->guest_pml_index--;
        }
 
        return 0;
-- 
2.7.4
