Use the radix__flush_tlb_lpid_page() helper rather than open-coding the
tlbie sequence in kvmppc_radix_tlbie_page(). This has the advantage of
consolidating the TLB flush code in fewer places, and the common helper
also implements the powerpc:tlbie trace events.

1GB pages should be handled without further modification.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
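Note (not for the changelog): below is a standalone userspace sketch of
the new psize derivation, showing that 4K, 2M and 1G mappings all fall
out of pshift with no per-size special casing, which is why 1GB pages
need no further modification. The 64K PAGE_SIZE stand-in and the sample
address are assumptions made for the demo only.

#include <stdio.h>

#define PAGE_SIZE (64UL * 1024)		/* assumed 64K base page size */

int main(void)
{
	const unsigned long addr = 0x40012345678UL;	/* arbitrary address */
	const unsigned int shifts[] = { 0, 12, 21, 30 };	/* base, 4K, 2M, 1G */

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int pshift = shifts[i];
		unsigned long psize = PAGE_SIZE;

		/* same derivation as the patched kvmppc_radix_tlbie_page() */
		if (pshift)
			psize = 1UL << pshift;

		printf("pshift=%2u psize=%10lu aligned=0x%lx\n",
		       pshift, psize, addr & ~(psize - 1));
	}
	return 0;
}

Once the flush goes through the common helper, the resulting tlbies can
be observed from the powerpc:tlbie tracepoint (e.g. under tracefs in
events/powerpc/tlbie), which the open-coded asm sequence never reported.
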
 arch/powerpc/kvm/book3s_64_mmu_radix.c | 26 +++++++-------------------
 1 file changed, 7 insertions(+), 19 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 81d5ad26f9a1..dab6b622011c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -139,28 +139,16 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        return 0;
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define MMU_BASE_PSIZE MMU_PAGE_64K
-#else
-#define MMU_BASE_PSIZE MMU_PAGE_4K
-#endif
-
 static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                                    unsigned int pshift)
 {
-       int psize = MMU_BASE_PSIZE;
-
-       if (pshift >= PMD_SHIFT)
-               psize = MMU_PAGE_2M;
-       addr &= ~0xfffUL;
-       addr |= mmu_psize_defs[psize].ap << 5;
-       asm volatile("ptesync": : :"memory");
-       asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
-                    : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
-       if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
-               asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
-                            : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
-       asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
+       unsigned long psize = PAGE_SIZE;
+
+       if (pshift)
+               psize = 1UL << pshift;
+
+       addr &= ~(psize - 1);
+       radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
 }
 
 unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
-- 
2.17.0
