On 15.10.25 10:27, Kevin Brodsky wrote:
Upcoming changes to the lazy_mmu API will cause
arch_flush_lazy_mmu_mode() to be called when leaving a nested
lazy_mmu section.

Move the relevant logic from arch_leave_lazy_mmu_mode() to
arch_flush_lazy_mmu_mode() and have the former call the latter.

Signed-off-by: Kevin Brodsky <[email protected]>
---
  arch/sparc/include/asm/tlbflush_64.h | 2 +-
  arch/sparc/mm/tlb.c                  | 9 ++++++++-
  2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 8b8cdaa69272..925bb5d7a4e1 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -43,8 +43,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end);
void flush_tlb_pending(void);
  void arch_enter_lazy_mmu_mode(void);
+void arch_flush_lazy_mmu_mode(void);
  void arch_leave_lazy_mmu_mode(void);
-#define arch_flush_lazy_mmu_mode()      do {} while (0)
/* Local cpu only. */
  void __flush_tlb_all(void);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index a35ddcca5e76..7b5dfcdb1243 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -59,12 +59,19 @@ void arch_enter_lazy_mmu_mode(void)
        tb->active = 1;
  }
-void arch_leave_lazy_mmu_mode(void)
+void arch_flush_lazy_mmu_mode(void)
  {
        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
if (tb->tlb_nr)
                flush_tlb_pending();
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+       struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+

Just like for ppc, this now results in a double this_cpu_ptr(). I'd similarly just replicate the two statements instead of calling arch_flush_lazy_mmu_mode().

+       arch_flush_lazy_mmu_mode();
        tb->active = 0;
        preempt_enable();
  }


--
Cheers

David / dhildenb


Reply via email to