It only makes sense to rename these functions so that they better reflect what they are supposed to do. For e.g. the name __tlbie_va_pid_lpid better reflects that it issues a tlbie to invalidate TLB entries using VA, PID and LPID.
No functional change in this patch. Signed-off-by: Ritesh Harjani (IBM) <[email protected]> --- arch/powerpc/mm/book3s64/radix_tlb.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index 339bd276840b..1adf20798ca6 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -1411,7 +1411,7 @@ static __always_inline void __tlbie_pid_lpid(unsigned long pid, trace_tlbie(0, 0, rb, rs, ric, prs, r); } -static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid, +static __always_inline void __tlbie_va_pid_lpid(unsigned long va, unsigned long pid, unsigned long lpid, unsigned long ap, unsigned long ric) { @@ -1443,7 +1443,7 @@ static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid) if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync" : : : "memory"); - __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K), + __tlbie_va_pid_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB); } } @@ -1474,7 +1474,7 @@ static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid, asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } -static inline void fixup_tlbie_va_range_lpid(unsigned long va, +static inline void fixup_tlbie_va_range_pid_lpid(unsigned long va, unsigned long pid, unsigned long lpid, unsigned long ap) @@ -1486,11 +1486,11 @@ static inline void fixup_tlbie_va_range_lpid(unsigned long va, if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync" : : : "memory"); - __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB); + __tlbie_va_pid_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB); } } -static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end, +static inline void __tlbie_va_range_pid_lpid(unsigned long start, unsigned long end, unsigned long pid, unsigned long lpid, unsigned long page_size, unsigned long psize) @@ 
-1499,12 +1499,12 @@ static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end, unsigned long ap = mmu_get_ap(psize); for (addr = start; addr < end; addr += page_size) - __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB); + __tlbie_va_pid_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB); - fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap); + fixup_tlbie_va_range_pid_lpid(addr - page_size, pid, lpid, ap); } -static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end, +static inline void _tlbie_va_range_pid_lpid(unsigned long start, unsigned long end, unsigned long pid, unsigned long lpid, unsigned long page_size, unsigned long psize, bool also_pwc) @@ -1512,7 +1512,7 @@ static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end, asm volatile("ptesync" : : : "memory"); if (also_pwc) __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC); - __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize); + __tlbie_va_range_pid_lpid(start, end, pid, lpid, page_size, psize); asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } @@ -1563,7 +1563,7 @@ void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid, _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB); return; } - _tlbie_va_range_lpid(start, end, pid, lpid, + _tlbie_va_range_pid_lpid(start, end, pid, lpid, (1UL << def->shift), psize, false); } } -- 2.53.0
