When mapping a page in a shadow stage-2 page table, special care must
be taken not to be more permissive than the guest's own stage-2: a
page must not be made writable or readable in the shadow stage-2 if
the guest hasn't granted that permission in its own stage-2 mapping.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm/include/asm/kvm_mmu.h      | 18 ++++++++++++++++++
 arch/arm64/include/asm/kvm_mmu.h    | 18 ++++++++++++++++++
 arch/arm64/include/asm/kvm_nested.h | 10 ++++++++++
 virt/kvm/arm/mmu.c                  | 21 ++++++++++++++++++++-
 4 files changed, 66 insertions(+), 1 deletion(-)
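
Not part of the patch, just an illustration of the invariant being
enforced: the shadow stage-2 permission is the intersection of what
the host would grant and what the guest's own stage-2 grants. The
struct and function names below are made up for the example:

	#include <stdbool.h>

	struct s2_perms {
		bool readable;
		bool writable;
	};

	/*
	 * Clamp the shadow stage-2 permissions so they never exceed
	 * the guest's own stage-2 permissions.
	 */
	static struct s2_perms clamp_shadow_perms(struct s2_perms host,
						  struct s2_perms guest)
	{
		return (struct s2_perms) {
			.readable = host.readable && guest.readable,
			.writable = host.writable && guest.writable,
		};
	}

This mirrors the "writable &=" / "readable &=" lines added to
user_mem_abort() below.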

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 1d0d5f00f0af..be7be6583e54 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -113,6 +113,12 @@ static inline pud_t kvm_s2pud_mkexec(pud_t pud)
        return pud;
 }
 
+static inline pud_t kvm_s2pud_revoke_read(pud_t pud)
+{
+       WARN_ON(1);
+       return pud;
+}
+
 static inline bool kvm_s2pud_exec(pud_t *pud)
 {
        WARN_ON(1);
@@ -155,6 +161,18 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
        return pmd;
 }
 
+static inline pte_t kvm_s2pte_revoke_read(pte_t pte)
+{
+       pte_val(pte) &= ~L_PTE_S2_RDONLY;
+       return pte;
+}
+
+static inline pmd_t kvm_s2pmd_revoke_read(pmd_t pmd)
+{
+       pmd_val(pmd) &= ~L_PMD_S2_RDONLY;
+       return pmd;
+}
+
 static inline void kvm_set_s2pte_readonly(pte_t *pte)
 {
        pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 9c0bf878fb3b..b390bba466e9 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -228,6 +228,24 @@ static inline pud_t kvm_s2pud_mkexec(pud_t pud)
        return pud;
 }
 
+static inline pte_t kvm_s2pte_revoke_read(pte_t pte)
+{
+       pte_val(pte) &= ~PTE_S2_RDONLY;
+       return pte;
+}
+
+static inline pmd_t kvm_s2pmd_revoke_read(pmd_t pmd)
+{
+       pmd_val(pmd) &= ~PMD_S2_RDONLY;
+       return pmd;
+}
+
+static inline pud_t kvm_s2pud_revoke_read(pud_t pud)
+{
+       pud_val(pud) &= ~PUD_S2_RDONLY;
+       return pud;
+}
+
 static inline void kvm_set_s2pte_readonly(pte_t *ptep)
 {
        pteval_t old_pteval, pteval;
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 45c013c0c856..7cd0c5b0fec9 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -43,6 +43,16 @@ static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
        return trans->esr;
 }
 
+static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
+{
+       return trans->readable;
+}
+
+static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
+{
+       return trans->writable;
+}
+
 extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
                              struct kvm_s2_trans *result);
 
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 0086b42237af..7da72c2b7f0f 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1721,7 +1721,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          unsigned long hva, unsigned long fault_status)
 {
        int ret;
-       bool write_fault, writable;
+       bool write_fault, writable, readable = true;
        bool exec_fault, needs_exec;
        unsigned long mmu_seq;
        phys_addr_t ipa = fault_ipa;
@@ -1841,6 +1841,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (exec_fault && is_iomap(flags))
                return -ENOEXEC;
 
+       /*
+        * Potentially reduce shadow S2 permissions to match the guest's own
+        * S2. For exec faults, we'd only reach this point if the guest
+        * actually allowed it (see kvm_s2_handle_perm_fault).
+        */
+       if (kvm_is_shadow_s2_fault(vcpu)) {
+               writable &= kvm_s2_trans_writable(nested);
+               readable &= kvm_s2_trans_readable(nested);
+       }
+
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;
@@ -1886,6 +1896,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                if (writable)
                        new_pud = kvm_s2pud_mkwrite(new_pud);
 
+               if (!readable)
+                       new_pud = kvm_s2pud_revoke_read(new_pud);
+
                if (needs_exec)
                        new_pud = kvm_s2pud_mkexec(new_pud);
 
@@ -1898,6 +1911,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                if (writable)
                        new_pmd = kvm_s2pmd_mkwrite(new_pmd);
 
+               if (!readable)
+                       new_pmd = kvm_s2pmd_revoke_read(new_pmd);
+
                if (needs_exec)
                        new_pmd = kvm_s2pmd_mkexec(new_pmd);
 
@@ -1910,6 +1926,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        mark_page_dirty(kvm, gfn);
                }
 
+               if (!readable)
+                       new_pte = kvm_s2pte_revoke_read(new_pte);
+
                if (needs_exec)
                        new_pte = kvm_s2pte_mkexec(new_pte);
 
-- 
2.20.1
