The concept of mmu idx differs between the QEMU TLB and LoongArch TLB
emulation:
  mmu idx in the QEMU TLB comes from the current working mode of the vCPU
  mmu idx in the LoongArch TLB is the page privilege level of the PTE
When copy_from_user() is executed under system emulation, the vCPU is in
kernel mode, yet the PLV of the PTE is user mode.
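
To illustrate the mismatch (a sketch only, not code from this patch;
it assumes QEMU's cpu_mmu_index() helper and the TLBENTRY PLV field):

    /* the vCPU executes at CRMD.PLV 0, so QEMU sees kernel mode */
    int qemu_mmu_idx = cpu_mmu_index(env_cpu(env), false); /* MMU_KERNEL_IDX */

    /* ...while the matched PTE records the privilege level of the page */
    int page_plv = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, PLV); /* 3 == user */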

Here a field KM is added to the LoongArch TLB entry to track whether the
lo0/lo1 pte entry has been accessed in kernel mode. If it is set, the
QEMU TLB needs to be flushed with mmu idx MMU_KERNEL_IDX when the
LoongArch TLB entry is flushed.
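
The flush side would then look roughly like this (an illustrative
sketch only; the invalidate path itself is not part of this patch,
though tlb_flush_range_by_mmuidx() is an existing QEMU helper):

    /* widen the mmu idx map when either pte was touched from kernel mode */
    uint16_t idxmap = BIT(tlb_plv);      /* PLV recorded in the TLB entry */

    if (tlb->tlb_misc & (TLB_MISC_KM_PTE_LOW0 | TLB_MISC_KM_PTE_LOW1)) {
        idxmap |= BIT(MMU_KERNEL_IDX);   /* also drop the kernel-mode view */
    }
    tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, idxmap,
                              TARGET_LONG_BITS);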

Signed-off-by: Bibo Mao <maob...@loongson.cn>
---
 target/loongarch/cpu-mmu.h        |  2 ++
 target/loongarch/cpu.h            | 14 ++++++++++++++
 target/loongarch/cpu_helper.c     |  3 +++
 target/loongarch/tcg/tlb_helper.c | 26 ++++++++++++++++++++++++++
 4 files changed, 45 insertions(+)

diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h
index 0068d22efc..311bf7b2d7 100644
--- a/target/loongarch/cpu-mmu.h
+++ b/target/loongarch/cpu-mmu.h
@@ -25,6 +25,8 @@ typedef struct MMUContext {
     hwaddr        physical;
     int           ps;  /* page size shift */
     int           prot;
+    int           tlb_index;
+    int           mmu_index;
 } MMUContext;
 
 bool check_ps(CPULoongArchState *ent, uint8_t ps);
diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
index 7731f6acdc..3c5fcaa154 100644
--- a/target/loongarch/cpu.h
+++ b/target/loongarch/cpu.h
@@ -253,6 +253,20 @@ FIELD(TLB_MISC, E, 0, 1)
 FIELD(TLB_MISC, ASID, 1, 10)
 FIELD(TLB_MISC, VPPN, 13, 35)
 FIELD(TLB_MISC, PS, 48, 6)
+/*
+ * Used by QEMU software; the concept of mmu idx differs between QEMU TLB
+ * and LoongArch TLB emulation:
+ *   mmu idx in QEMU TLB is the current working mode of the vCPU
+ *   mmu idx in LoongArch TLB is the PLV access level of the page
+ * When copy_from_user() is executed under system emulation, the vCPU is
+ * in kernel mode but the accessed address is in user memory space.
+ *
+ * These bits mark a lo0/lo1 pte entry of user PLV accessed in kernel mode.
+ */
+FIELD(TLB_MISC, KM, 54, 2)
+#define TLB_MISC_KM_PTE(n)          BIT_ULL(R_TLB_MISC_KM_SHIFT + (n))
+#define TLB_MISC_KM_PTE_LOW0        TLB_MISC_KM_PTE(0)
+#define TLB_MISC_KM_PTE_LOW1        TLB_MISC_KM_PTE(1)
 
 #define LSX_LEN    (128)
 #define LASX_LEN   (256)
diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c
index ec84fb5425..8831c95549 100644
--- a/target/loongarch/cpu_helper.c
+++ b/target/loongarch/cpu_helper.c
@@ -96,6 +96,7 @@ TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context,
     context->physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                         (context->addr & MAKE_64BIT_MASK(0, tlb_ps));
     context->prot = PAGE_READ;
+    context->mmu_index = tlb_plv;
     if (tlb_d) {
         context->prot |= PAGE_WRITE;
     }
@@ -217,6 +218,7 @@ TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context,
     if (da & !pg) {
         context->physical = address & TARGET_PHYS_MASK;
         context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        context->mmu_index = MMU_DA_IDX;
         return TLBRET_MATCH;
     }
 
@@ -236,6 +238,7 @@ TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context,
         if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
             context->physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
             context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+            context->mmu_index = MMU_DA_IDX;
             return TLBRET_MATCH;
         }
     }
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index 7d3f98633d..d86a189239 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -172,6 +172,7 @@ static void fill_tlb_entry(CPULoongArchState *env, int index)
     }
 
     /* Store page size in field PS */
+    tlb->tlb_misc = 0;
     tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
     tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
     tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
@@ -510,6 +511,24 @@ void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
     tlb_flush(env_cpu(env));
 }
 
+/*
+ * Record that a tlb entry mapping a user mode virtual address has been
+ * accessed from vCPU kernel mode.
+ *
+ * If the KM bit is set, the QEMU TLB must also be flushed with mmu idx
+ * MMU_KERNEL_IDX when the LoongArch TLB entry is flushed.
+ */
+static inline void tlb_set_accessed(CPULoongArchState *env, vaddr address,
+                                    int index)
+{
+    LoongArchTLB *tlb = &env->tlb[index];
+    uint8_t tlb_ps, n;
+
+    tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
+    n = (address >> tlb_ps) & 0x1;/* Odd or even */
+    tlb->tlb_misc |= TLB_MISC_KM_PTE(n);
+}
+
 bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
@@ -529,6 +548,12 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
         tlb_set_page(cs, address & TARGET_PAGE_MASK,
                      physical & TARGET_PAGE_MASK, prot,
                      mmu_idx, TARGET_PAGE_SIZE);
+
+        /* a user mode address is being accessed from vCPU kernel mode */
+        if (mmu_idx == MMU_KERNEL_IDX && context.mmu_index == MMU_USER_IDX) {
+            tlb_set_accessed(env, address, context.tlb_index);
+        }
+
         qemu_log_mask(CPU_LOG_MMU,
                       "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                       " prot %d\n", __func__, address, physical, prot);
@@ -662,6 +687,7 @@ static TLBRet loongarch_map_tlb_entry(CPULoongArchState *env,
     n = (context->addr >> tlb_ps) & 0x1;/* Odd or even */
     context->pte = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
     context->ps = tlb_ps;
+    context->tlb_index = index;
     return loongarch_check_pte(env, context, access_type, mmu_idx);
 }
 
-- 
2.39.3

