From: Ulrich Wulff <[email protected]>

Set the FB bit in HCR_EL2 so that local invalidations are forced to be
broadcast within the inner shareable domain, allowing Linux to issue
only local invalidation instructions.

The idea is to properly invalidate entries on an SMP system. The FB bit
is later cleared by the hypervisor and Linux switches to UP mode. The
memory between the two systems is not shared, so invalidations on the
second CPU are not needed and Linux continues to invalidate only locally.

Signed-off-by: Ulrich Wulff <[email protected]>
Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
---
 arch/arm64/include/asm/cacheflush.h |  2 +-
 arch/arm64/include/asm/tlbflush.h   | 54 ++++++++++++++---------------
 arch/arm64/kernel/head.S            |  2 ++
 3 files changed, 30 insertions(+), 28 deletions(-)

diff --git a/arch/arm64/include/asm/cacheflush.h 
b/arch/arm64/include/asm/cacheflush.h
index 9384fd8fc13cc..5a0b126d0164b 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -132,7 +132,7 @@ static __always_inline void __flush_icache_all(void)
        if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
                return;
 
-       asm("ic ialluis");
+       asm("ic iallu"); /*  promoted to ic ialluis by HCR_EL2.HCR_FB */
        dsb(ish);
 }
 
diff --git a/arch/arm64/include/asm/tlbflush.h 
b/arch/arm64/include/asm/tlbflush.h
index cc3f5a33ff9c5..3f1b8f7970e7a 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -237,9 +237,9 @@ static inline void local_flush_tlb_all(void)
 
 static inline void flush_tlb_all(void)
 {
-       dsb(ishst);
-       __tlbi(vmalle1is);
-       dsb(ish);
+       dsb(nshst);
+       __tlbi(vmalle1);
+       dsb(nsh);
        isb();
 }
 
@@ -247,10 +247,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 {
        unsigned long asid = __TLBI_VADDR(0, ASID(mm));
 
-       dsb(ishst);
-       __tlbi(aside1is, asid);
-       __tlbi_user(aside1is, asid);
-       dsb(ish);
+       dsb(nshst);
+       __tlbi(aside1, asid);
+       __tlbi_user(aside1, asid);
+       dsb(nsh);
 }
 
 static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
@@ -258,16 +258,16 @@ static inline void flush_tlb_page_nosync(struct 
vm_area_struct *vma,
 {
        unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 
-       dsb(ishst);
-       __tlbi(vale1is, addr);
-       __tlbi_user(vale1is, addr);
+       dsb(nshst);
+       __tlbi(vale1, addr);
+       __tlbi_user(vale1, addr);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long uaddr)
 {
        flush_tlb_page_nosync(vma, uaddr);
-       dsb(ish);
+       dsb(nsh);
 }
 
 /*
@@ -304,7 +304,7 @@ static inline void __flush_tlb_range(struct vm_area_struct 
*vma,
                return;
        }
 
-       dsb(ishst);
+       dsb(nshst);
 
        /*
         * When the CPU does not support TLB range operations, flush the TLB
@@ -329,11 +329,11 @@ static inline void __flush_tlb_range(struct 
vm_area_struct *vma,
                    pages % 2 == 1) {
                        addr = __TLBI_VADDR(start, asid);
                        if (last_level) {
-                               __tlbi_level(vale1is, addr, tlb_level);
-                               __tlbi_user_level(vale1is, addr, tlb_level);
+                               __tlbi_level(vale1, addr, tlb_level);
+                               __tlbi_user_level(vale1, addr, tlb_level);
                        } else {
-                               __tlbi_level(vae1is, addr, tlb_level);
-                               __tlbi_user_level(vae1is, addr, tlb_level);
+                               __tlbi_level(vae1, addr, tlb_level);
+                               __tlbi_user_level(vae1, addr, tlb_level);
                        }
                        start += stride;
                        pages -= stride >> PAGE_SHIFT;
@@ -345,18 +345,18 @@ static inline void __flush_tlb_range(struct 
vm_area_struct *vma,
                        addr = __TLBI_VADDR_RANGE(start, asid, scale,
                                                  num, tlb_level);
                        if (last_level) {
-                               __tlbi(rvale1is, addr);
-                               __tlbi_user(rvale1is, addr);
+                               __tlbi(rvale1, addr);
+                               __tlbi_user(rvale1, addr);
                        } else {
-                               __tlbi(rvae1is, addr);
-                               __tlbi_user(rvae1is, addr);
+                               __tlbi(rvae1, addr);
+                               __tlbi_user(rvae1, addr);
                        }
                        start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
                        pages -= __TLBI_RANGE_PAGES(num, scale);
                }
                scale++;
        }
-       dsb(ish);
+       dsb(nsh);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
@@ -382,10 +382,10 @@ static inline void flush_tlb_kernel_range(unsigned long 
start, unsigned long end
        start = __TLBI_VADDR(start, 0);
        end = __TLBI_VADDR(end, 0);
 
-       dsb(ishst);
+       dsb(nshst);
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-               __tlbi(vaale1is, addr);
-       dsb(ish);
+               __tlbi(vaale1, addr);
+       dsb(nsh);
        isb();
 }
 
@@ -397,9 +397,9 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long 
kaddr)
 {
        unsigned long addr = __TLBI_VADDR(kaddr, 0);
 
-       dsb(ishst);
-       __tlbi(vaae1is, addr);
-       dsb(ish);
+       dsb(nshst);
+       __tlbi(vaae1, addr);
+       dsb(nsh);
        isb();
 }
 #endif
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index d8d9caf02834e..493c204eab39c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -519,6 +519,8 @@ SYM_FUNC_START(el2_setup)
        cbz     x2, set_hcr
        mov_q   x0, HCR_HOST_VHE_FLAGS
 set_hcr:
+       /*  set HCR_EL2.FB to promote IC IALLU to IC IALLUIS */
+       orr     x0, x0, HCR_FB
        msr     hcr_el2, x0
        isb
 
-- 
2.40.1

-- 
You received this message because you are subscribed to the Google Groups 
"Jailhouse" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To view this discussion on the web visit 
https://groups.google.com/d/msgid/jailhouse-dev/20230602074808.1383333-5-bigeasy%40linutronix.de.

Reply via email to