The tlb_flush_range_by_mmuidx() API takes a bitmap of MMU indexes rather than a single index, and the MMU index should come from the TLB entry information rather than from the current running mode.
Also, the KM field in TLB misc records a bitmap marking which pages of a TLB entry (even/odd) were accessed in kernel mode. If the bit is set, MMU_KERNEL_IDX should be added to the bitmap used for the TLB flush.

Signed-off-by: Bibo Mao <maob...@loongson.cn>
---
 target/loongarch/tcg/tlb_helper.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
index cf661aaeff..da2618ec62 100644
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -101,8 +101,7 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index)
     target_ulong addr, mask, pagesize;
     uint8_t tlb_ps;
     LoongArchTLB *tlb = &env->tlb[index];
-
-    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
+    int mmu_idx;
     uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
     uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
     uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
@@ -119,11 +118,21 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index)
     addr = sextract64(addr, 0, TARGET_VIRT_ADDR_SPACE_BITS);
 
     if (tlb_v0) {
+        mmu_idx = BIT(FIELD_EX64(tlb->tlb_entry0, TLBENTRY, PLV));
+        /* Even page is accessed in kernel mode */
+        if (tlb->tlb_misc & TLB_MISC_KM_PTE_LOW0) {
+            mmu_idx |= BIT(MMU_KERNEL_IDX);
+        }
         tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, mmu_idx,
                                   TARGET_LONG_BITS);
     }
 
     if (tlb_v1) {
+        mmu_idx = BIT(FIELD_EX64(tlb->tlb_entry1, TLBENTRY, PLV));
+        /* Odd page is accessed in kernel mode */
+        if (tlb->tlb_misc & TLB_MISC_KM_PTE_LOW1) {
+            mmu_idx |= BIT(MMU_KERNEL_IDX);
+        }
         tlb_flush_range_by_mmuidx(env_cpu(env), addr + pagesize, pagesize,
                                   mmu_idx, TARGET_LONG_BITS);
     }
--
2.39.3
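
For reference, the mask construction in the patch can be illustrated with a minimal standalone C sketch. This is an illustration only, not QEMU code: BIT() is redefined locally, mmuidx_bitmap() is a hypothetical helper, and MMU_KERNEL_IDX is assumed to be 0 (the kernel PLV).

#include <stdio.h>

/* Local stand-ins for QEMU definitions; MMU_KERNEL_IDX = 0 is an assumption. */
#define BIT(n)         (1u << (n))
#define MMU_KERNEL_IDX 0

/*
 * Build the MMU-index bitmap for one page of a TLB entry: start from
 * the privilege level (PLV) stored in the entry, then OR in the kernel
 * index when the entry's KM bit says the page was accessed in kernel mode.
 */
static unsigned mmuidx_bitmap(unsigned plv, int km_bit_set)
{
    unsigned idxmap = BIT(plv);

    if (km_bit_set) {
        idxmap |= BIT(MMU_KERNEL_IDX);
    }
    return idxmap;
}

int main(void)
{
    /* A PLV3 (user) page also touched in kernel mode flushes idx 3 and idx 0. */
    printf("idxmap = 0x%x\n", mmuidx_bitmap(3, 1));  /* prints idxmap = 0x9 */
    return 0;
}

Passing a bitmap rather than a raw index matters because tlb_flush_range_by_mmuidx() interprets its idxmap argument as a set of MMU indexes: a plain index value of, say, 3 would be misread as the bitmap 0b011, i.e. indexes 0 and 1, leaving the intended index 3 unflushed.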