From: Alex Bennée <alex.ben...@linaro.org>

This introduces a new flush_tlb_page function which invalidates the TLB
entry (or entries) associated with a single virtual address, for both
arm and arm64. It's going to be useful for the future TLB torture test.

Signed-off-by: Alex Bennée <alex.ben...@linaro.org>
Reviewed-by: Andrew Jones <drjo...@redhat.com>
---
 lib/arm/asm/mmu.h   | 11 +++++++++++
 lib/arm64/asm/mmu.h |  8 ++++++++
 2 files changed, 19 insertions(+)

diff --git a/lib/arm/asm/mmu.h b/lib/arm/asm/mmu.h
index c1bd01c9ee1b9..2bb0cde820f8a 100644
--- a/lib/arm/asm/mmu.h
+++ b/lib/arm/asm/mmu.h
@@ -14,8 +14,11 @@
 #define PTE_AF                 PTE_EXT_AF
 #define PTE_WBWA               L_PTE_MT_WRITEALLOC
 
+/* See B3.18.7 TLB maintenance operations */
+
 static inline void local_flush_tlb_all(void)
 {
+       /* TLBIALL */
        asm volatile("mcr p15, 0, %0, c8, c7, 0" :: "r" (0));
        dsb();
        isb();
@@ -27,6 +30,14 @@ static inline void flush_tlb_all(void)
        local_flush_tlb_all();
 }
 
+static inline void flush_tlb_page(unsigned long vaddr)
+{
+       /* TLBIMVAA */
+       asm volatile("mcr p15, 0, %0, c8, c7, 3" :: "r" (vaddr));
+       dsb();
+       isb();
+}
+
 #include <asm/mmu-api.h>
 
 #endif /* __ASMARM_MMU_H_ */
diff --git a/lib/arm64/asm/mmu.h b/lib/arm64/asm/mmu.h
index 18b4d6be18fae..3bc31c91c36f8 100644
--- a/lib/arm64/asm/mmu.h
+++ b/lib/arm64/asm/mmu.h
@@ -19,6 +19,14 @@ static inline void flush_tlb_all(void)
        isb();
 }
 
+static inline void flush_tlb_page(unsigned long vaddr)
+{
+       unsigned long page = vaddr >> 12;
+       dsb(ishst);
+       asm("tlbi       vaae1is, %0" :: "r" (page));
+       dsb(ish);
+}
+
 #include <asm/mmu-api.h>
 
 #endif /* __ASMARM64_MMU_H_ */
-- 
2.4.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to