When mapping a page in a shadow stage-2 page table, special care
must be taken not to be more permissive than the guest's own
stage-2 is: the shadow mapping must not make a page writable or
readable when the guest hasn't set the corresponding permission.

Signed-off-by: Marc Zyngier <[email protected]>
---
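Notes:

As an illustration (not part of the patch), the net effect is that the
shadow stage-2 permissions become the intersection of what the host
would grant and what the guest's own stage-2 grants. BIT(54) in the
upper attributes of a stage-2 descriptor is the XN (execute-never)
bit, hence kvm_s2_trans_executable() returning its negation. Below is
a minimal, hypothetical sketch of the clamping rule; the struct and
function are made up for illustration only, while the real code
operates on struct kvm_s2_trans and the prot bits in user_mem_abort():

	/*
	 * Hypothetical illustration only: the shadow S2 entry must
	 * never be more permissive than the guest's own S2 translation.
	 */
	struct s2_perms {
		bool r, w, x;
	};

	static struct s2_perms clamp_shadow_perms(struct s2_perms host,
						  struct s2_perms guest)
	{
		struct s2_perms shadow;

		shadow.r = host.r && guest.r;	/* readable only if both allow it */
		shadow.w = host.w && guest.w;	/* writable only if both allow it */
		shadow.x = host.x && guest.x;	/* executable only if both allow it */

		return shadow;
	}
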
 arch/arm64/include/asm/kvm_nested.h | 15 +++++++++++++++
 arch/arm64/kvm/mmu.c                | 14 +++++++++++++-
 arch/arm64/kvm/nested.c             |  2 +-
 3 files changed, 29 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 4f93a5dab183..3f3d8e10bd99 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -93,6 +93,21 @@ static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
        return trans->esr;
 }
 
+static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
+{
+       return trans->readable;
+}
+
+static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
+{
+       return trans->writable;
+}
+
+static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
+{
+       return !(trans->upper_attr & BIT(54));
+}
+
 extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
                              struct kvm_s2_trans *result);
 
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 6b3753460293..6db8fa8bc5a3 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -991,6 +991,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (exec_fault && device)
                return -ENOEXEC;
 
+       /*
+        * Potentially reduce shadow S2 permissions to match the guest's own
+        * S2. For exec faults, we'd only reach this point if the guest
+        * actually allowed it (see kvm_s2_handle_perm_fault).
+        */
+       if (kvm_is_shadow_s2_fault(vcpu)) {
+               writable &= kvm_s2_trans_writable(nested);
+               if (!kvm_s2_trans_readable(nested))
+                       prot &= ~KVM_PGTABLE_PROT_R;
+       }
+
        spin_lock(&kvm->mmu_lock);
        pgt = vcpu->arch.hw_mmu->pgt;
        if (mmu_notifier_retry(kvm, mmu_seq))
@@ -1016,7 +1027,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        if (device)
                prot |= KVM_PGTABLE_PROT_DEVICE;
-       else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
+       else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC) &&
+                kvm_s2_trans_executable(nested))
                prot |= KVM_PGTABLE_PROT_X;
 
        /*
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 57f32768d04d..2e6a97e43396 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -481,7 +481,7 @@ int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
                return 0;
 
        if (kvm_vcpu_trap_is_iabt(vcpu)) {
-               forward_fault = (trans->upper_attr & BIT(54));
+               forward_fault = !kvm_s2_trans_executable(trans);
        } else {
                bool write_fault = kvm_is_write_fault(vcpu);
 
-- 
2.29.2
