Marking PTs young (setting the AF bit) should be done atomically, to
avoid overwriting the dirty status that may be set concurrently by
hardware.
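
The race being closed is the usual lost update on a non-atomic
read-modify-write: if hardware updates the dirty state of an entry
between the software read and the write-back of the young value, a
plain store discards the hardware update. As an illustration only (not
part of this patch), below is a minimal user-space sketch of the same
compare-and-swap retry pattern; the bit positions, types and helper
names are invented for the example and are not the kernel definitions:

  #include <inttypes.h>
  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative bit positions only; not the arm64 stage-2 PTE layout. */
  #define EX_PTE_AF    (UINT64_C(1) << 10)
  #define EX_PTE_DIRTY (UINT64_C(1) << 51)

  /*
   * Set the AF bit with a compare-and-swap retry loop so that a
   * concurrent update (e.g. hardware marking the entry dirty) is
   * never overwritten by the write-back.
   */
  static void ex_set_young_atomic(_Atomic uint64_t *pte)
  {
          uint64_t old = atomic_load_explicit(pte, memory_order_relaxed);

          /* On CAS failure, 'old' is refreshed with the current value. */
          while (!atomic_compare_exchange_weak_explicit(pte, &old,
                                                        old | EX_PTE_AF,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed))
                  ;
  }

  int main(void)
  {
          _Atomic uint64_t pte = EX_PTE_DIRTY;    /* pretend hardware set dirty */

          ex_set_young_atomic(&pte);
          /* Both the dirty and AF bits remain set. */
          printf("pte = 0x%" PRIx64 "\n", atomic_load(&pte));
          return 0;
  }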

Signed-off-by: Keqian Zhu <[email protected]>
---
 arch/arm64/include/asm/kvm_mmu.h | 32 ++++++++++++++++++++++----------
 arch/arm64/kvm/mmu.c             | 15 ++++++++-------
 2 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e0ee6e23d626..51af71505fbc 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -215,6 +215,18 @@ static inline void kvm_set_s2pte_readonly(pte_t *ptep)
        } while (pteval != old_pteval);
 }
 
+static inline void kvm_set_s2pte_young(pte_t *ptep)
+{
+       pteval_t old_pteval, pteval;
+
+       pteval = READ_ONCE(pte_val(*ptep));
+       do {
+               old_pteval = pteval;
+               pteval |= PTE_AF;
+               pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
+       } while (pteval != old_pteval);
+}
+
 static inline bool kvm_s2pte_readonly(pte_t *ptep)
 {
        return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
@@ -230,6 +242,11 @@ static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
        kvm_set_s2pte_readonly((pte_t *)pmdp);
 }
 
+static inline void kvm_set_s2pmd_young(pmd_t *pmdp)
+{
+       kvm_set_s2pte_young((pte_t *)pmdp);
+}
+
 static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
 {
        return kvm_s2pte_readonly((pte_t *)pmdp);
@@ -245,6 +262,11 @@ static inline void kvm_set_s2pud_readonly(pud_t *pudp)
        kvm_set_s2pte_readonly((pte_t *)pudp);
 }
 
+static inline void kvm_set_s2pud_young(pud_t *pudp)
+{
+       kvm_set_s2pte_young((pte_t *)pudp);
+}
+
 static inline bool kvm_s2pud_readonly(pud_t *pudp)
 {
        return kvm_s2pte_readonly((pte_t *)pudp);
@@ -255,16 +277,6 @@ static inline bool kvm_s2pud_exec(pud_t *pudp)
        return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
 }
 
-static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
-{
-       return pud_mkyoung(pud);
-}
-
-static inline bool kvm_s2pud_young(pud_t pud)
-{
-       return pud_young(pud);
-}
-
 #ifdef CONFIG_ARM64_HW_AFDBM
 static inline bool kvm_hw_dbm_enabled(void)
 {
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8c0035cab6b6..5ad87bce23c0 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -2008,8 +2008,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
  * Resolve the access fault by making the page young again.
  * Note that because the faulting entry is guaranteed not to be
  * cached in the TLB, we don't need to invalidate anything.
- * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
- * so there is no need for atomic (pte|pmd)_mkyoung operations.
+ *
+ * Note: Both DBM and HW AF updates are supported for Stage 2, so
+ * marking an entry young must be done atomically.
  */
 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
@@ -2027,15 +2028,15 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
                goto out;
 
        if (pud) {              /* HugeTLB */
-               *pud = kvm_s2pud_mkyoung(*pud);
+               kvm_set_s2pud_young(pud);
                pfn = kvm_pud_pfn(*pud);
                pfn_valid = true;
        } else  if (pmd) {      /* THP, HugeTLB */
-               *pmd = pmd_mkyoung(*pmd);
+               kvm_set_s2pmd_young(pmd);
                pfn = pmd_pfn(*pmd);
                pfn_valid = true;
-       } else {
-               *pte = pte_mkyoung(*pte);       /* Just a page... */
+       } else {                /* Just a page... */
+               kvm_set_s2pte_young(pte);
                pfn = pte_pfn(*pte);
                pfn_valid = true;
        }
@@ -2280,7 +2281,7 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
                return 0;
 
        if (pud)
-               return kvm_s2pud_young(*pud);
+               return pud_young(*pud);
        else if (pmd)
                return pmd_young(*pmd);
        else
-- 
2.19.1
