Introduce a new step in __kvm_faultin_pfn() that validates the fault
against the vCPU's VTL protections and generates a user-space memory
fault exit when the access is invalid.
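
For illustration, a minimal (and hypothetical) user-space handler for
the new exit could look as follows; the field names follow the standard
KVM_EXIT_MEMORY_FAULT layout that kvm_mmu_prepare_memory_fault_exit()
fills in, while handle_vtl_memory_intercept() is a made-up VMM helper:

  /* KVM_RUN fails with -EFAULT and describes the faulting range. */
  if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno == EFAULT &&
      run->exit_reason == KVM_EXIT_MEMORY_FAULT) {
          __u64 gpa = run->memory_fault.gpa;
          __u64 size = run->memory_fault.size;

          /* e.g. forward a memory intercept to the secure VTL */
          handle_vtl_memory_intercept(vcpu_fd, gpa, size);
  }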

Note that kvm_hv_faultin_pfn() has to run after resolving the fault
against the memslots, since that operation overwrites
'fault->map_writable'.
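
In code terms, the resulting ordering in __kvm_faultin_pfn() is roughly
(condensed from the mmu.c hunk below):

  /* __gfn_to_pfn_memslot() writes fault->map_writable... */
  fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL,
                                    fault->write, &fault->map_writable,
                                    &fault->hva);
  /* ...so only now can VSM restrict it from the VTL's protections. */
  if (kvm_hv_vsm_enabled(vcpu->kvm) && kvm_hv_faultin_pfn(vcpu, fault)) {
          kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
          return -EFAULT;
  }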

Non-VSM users shouldn't see any behaviour change: the new check is
gated on kvm_hv_vsm_enabled().

Signed-off-by: Nicolas Saenz Julienne <nsa...@amazon.com>
---
 arch/x86/kvm/hyperv.c  | 66 ++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/hyperv.h  |  1 +
 arch/x86/kvm/mmu/mmu.c |  9 +++++-
 3 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index bcace0258af1..eb6a4848e306 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -42,6 +42,8 @@
 #include "irq.h"
 #include "fpu.h"
 
+#include "mmu/mmu_internal.h"
+
 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)
 
 /*
@@ -3032,6 +3034,55 @@ struct kvm_hv_vtl_dev {
        struct xarray mem_attrs;
 };
 
+static struct xarray *kvm_hv_vsm_get_memprots(struct kvm_vcpu *vcpu);
+
+bool kvm_hv_vsm_access_valid(struct kvm_page_fault *fault, unsigned long attrs)
+{
+       if (attrs == KVM_MEMORY_ATTRIBUTE_NO_ACCESS)
+               return false;
+
+       /* We should never get here without read permissions, force a fault. */
+       if (WARN_ON_ONCE(!(attrs & KVM_MEMORY_ATTRIBUTE_READ)))
+               return false;
+
+       if (fault->write && !(attrs & KVM_MEMORY_ATTRIBUTE_WRITE))
+               return false;
+
+       if (fault->exec && !(attrs & KVM_MEMORY_ATTRIBUTE_EXECUTE))
+               return false;
+
+       return true;
+}
+
+static unsigned long kvm_hv_vsm_get_memory_attributes(struct kvm_vcpu *vcpu,
+                                                     gfn_t gfn)
+{
+       struct xarray *prots = kvm_hv_vsm_get_memprots(vcpu);
+
+       if (!prots)
+               return 0;
+
+       return xa_to_value(xa_load(prots, gfn));
+}
+
+int kvm_hv_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+{
+       unsigned long attrs;
+
+       attrs = kvm_hv_vsm_get_memory_attributes(vcpu, fault->gfn);
+       if (!attrs)
+               return RET_PF_CONTINUE;
+
+       if (kvm_hv_vsm_access_valid(fault, attrs)) {
+               fault->map_executable =
+                       !!(attrs & KVM_MEMORY_ATTRIBUTE_EXECUTE);
+               fault->map_writable = !!(attrs & KVM_MEMORY_ATTRIBUTE_WRITE);
+               return RET_PF_CONTINUE;
+       }
+
+       return -EFAULT;
+}
+
 static int kvm_hv_vtl_get_attr(struct kvm_device *dev,
                               struct kvm_device_attr *attr)
 {
@@ -3120,6 +3171,21 @@ static struct kvm_device_ops kvm_hv_vtl_ops = {
        .get_attr = kvm_hv_vtl_get_attr,
 };
 
+static struct xarray *kvm_hv_vsm_get_memprots(struct kvm_vcpu *vcpu)
+{
+       struct kvm_hv_vtl_dev *vtl_dev;
+       struct kvm_device *tmp;
+
+       list_for_each_entry(tmp, &vcpu->kvm->devices, vm_node)
+               if (tmp->ops == &kvm_hv_vtl_ops) {
+                       vtl_dev = tmp->private;
+                       if (vtl_dev->vtl == kvm_hv_get_active_vtl(vcpu))
+                               return &vtl_dev->mem_attrs;
+               }
+
+       return NULL;
+}
+
 static int kvm_hv_vtl_create(struct kvm_device *dev, u32 type)
 {
        struct kvm_hv_vtl_dev *vtl_dev;
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 3cc664e144d8..ae781b4d4669 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -271,5 +271,6 @@ static inline void kvm_mmu_role_set_hv_bits(struct kvm_vcpu *vcpu,
 
 int kvm_hv_vtl_dev_register(void);
 void kvm_hv_vtl_dev_unregister(void);
+int kvm_hv_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 
 #endif
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a76028aa8fb3..ba454c7277dc 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4374,7 +4374,7 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
                                          fault->write, &fault->map_writable,
                                          &fault->hva);
        if (!async)
-               return RET_PF_CONTINUE; /* *pfn has correct page already */
+               goto pf_continue; /* *pfn has correct page already */
 
        if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
                trace_kvm_try_async_get_page(fault->addr, fault->gfn);
@@ -4395,6 +4395,13 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL,
                                          fault->write, &fault->map_writable,
                                          &fault->hva);
+pf_continue:
+       if (kvm_hv_vsm_enabled(vcpu->kvm)) {
+               if (kvm_hv_faultin_pfn(vcpu, fault)) {
+                       kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
+                       return -EFAULT;
+               }
+       }
        return RET_PF_CONTINUE;
 }
 
-- 
2.40.1

