Currently, the APF mechanism relies on #PF abuse, with the token being
passed through CR2. If we switch to using interrupts to deliver page-ready
notifications, we need a different way to pass the data. Extend the
existing 'struct kvm_vcpu_pv_apf_data' with token information for
page-ready notifications, carving the new field out of the existing
padding so the rest of the structure layout stays unchanged.
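
For reference, a minimal layout sketch (illustration only, not part of the
patch; plain C99 types stand in for the kernel's __u32/__u8) showing that
the new field is carved out of the padding, so 'enabled' stays at offset
64 and the overall size stays at 68 bytes:

  #include <assert.h>
  #include <stddef.h>
  #include <stdint.h>

  struct kvm_vcpu_pv_apf_data {
          uint32_t reason;
          uint32_t pageready_token;  /* new: token for page-ready */
          uint8_t  pad[56];          /* was pad[60] */
          uint32_t enabled;
  };

  _Static_assert(offsetof(struct kvm_vcpu_pv_apf_data, enabled) == 64,
                 "'enabled' offset must not change");
  _Static_assert(sizeof(struct kvm_vcpu_pv_apf_data) == 68,
                 "struct size must not change");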

The newly introduced apf_put_user_ready() temporarily puts both reason
and token information; this will be changed to put the token only once we
switch to interrupt-based notifications.
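
A sketch (illustration only, not part of the patch) of how the combined
64-bit value maps onto the structure on a little-endian x86 guest: the
reason lands in the low 32 bits ('reason') and the token in the high 32
bits ('pageready_token'); the unpack helper below is hypothetical:

  #include <stdint.h>

  #define KVM_PV_REASON_PAGE_READY 2  /* value from asm/kvm_para.h */

  /* mirrors the value apf_put_user_ready() writes to the shared struct */
  static uint64_t pack_ready(uint32_t token)
  {
          return (uint64_t)token << 32 | KVM_PV_REASON_PAGE_READY;
  }

  /* hypothetical guest-side helper recovering the token */
  static uint32_t unpack_token(uint64_t val)
  {
          return (uint32_t)(val >> 32);
  }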

Signed-off-by: Vitaly Kuznetsov <[email protected]>
---
 arch/x86/include/uapi/asm/kvm_para.h |  3 ++-
 arch/x86/kvm/x86.c                   | 17 +++++++++++++----
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 2a8e0b6b9805..e3602a1de136 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -113,7 +113,8 @@ struct kvm_mmu_op_release_pt {
 
 struct kvm_vcpu_pv_apf_data {
        __u32 reason;
-       __u8 pad[60];
+       __u32 pageready_token;
+       __u8 pad[56];
        __u32 enabled;
 };
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index edd4a6415b92..28868cc16e4d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2662,7 +2662,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
        }
 
        if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
-                                       sizeof(u32)))
+                                       sizeof(u64)))
                return 1;
 
        vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -10352,8 +10352,17 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
        }
 }
 
-static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
 {
+       u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
+
+       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
+                                     sizeof(reason));
+}
+
+static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
+{
+       u64 val = (u64)token << 32 | KVM_PV_REASON_PAGE_READY;
 
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
                                      sizeof(val));
@@ -10398,7 +10407,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
        kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
 
        if (kvm_can_deliver_async_pf(vcpu) &&
-           !apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+           !apf_put_user_notpresent(vcpu)) {
                fault.vector = PF_VECTOR;
                fault.error_code_valid = true;
                fault.error_code = 0;
@@ -10431,7 +10440,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
        trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
 
        if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
-           !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+           !apf_put_user_ready(vcpu, work->arch.token)) {
                        fault.vector = PF_VECTOR;
                        fault.error_code_valid = true;
                        fault.error_code = 0;
-- 
2.25.4
