The slice state of an mm gets zeroed then initialised upon exec.
This is the only caller of slice_set_user_psize now, so that can be
removed and instead implement a faster and simplified approach that
requires no locking or checking existing state.

This speeds up vfork+exec+exit performance on POWER8 by 3%.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/slice.h       |  8 ++--
 arch/powerpc/mm/mmu_context_book3s64.c |  9 +----
 arch/powerpc/mm/mmu_context_nohash.c   |  5 +--
 arch/powerpc/mm/slice.c                | 72 +++++++++-------------------------
 4 files changed, 23 insertions(+), 71 deletions(-)

diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
index 172711fadb1c..e40406cf5628 100644
--- a/arch/powerpc/include/asm/slice.h
+++ b/arch/powerpc/include/asm/slice.h
@@ -28,15 +28,13 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 
 unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
 
-void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
 void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                           unsigned long len, unsigned int psize);
-#endif /* __ASSEMBLY__ */
 
-#else /* CONFIG_PPC_MM_SLICES */
+void slice_init_new_context_exec(struct mm_struct *mm);
+
+#endif /* __ASSEMBLY__ */
 
-#define slice_set_range_psize(mm, start, len, psize)   \
-       slice_set_user_psize((mm), (psize))
 #endif /* CONFIG_PPC_MM_SLICES */
 
 #endif /* _ASM_POWERPC_SLICE_H */
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 929d9ef7083f..80acad52b006 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -93,13 +93,6 @@ static int hash__init_new_context(struct mm_struct *mm)
        if (index < 0)
                return index;
 
-       /*
-        * In the case of exec, use the default limit,
-        * otherwise inherit it from the mm we are duplicating.
-        */
-       if (!mm->context.slb_addr_limit)
-               mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
-
        /*
         * The old code would re-promote on fork, we don't do that when using
         * slices as it could cause problem promoting slices that have been
@@ -115,7 +108,7 @@ static int hash__init_new_context(struct mm_struct *mm)
         * check against 0 is OK.
         */
        if (mm->context.id == 0)
-               slice_set_user_psize(mm, mmu_virtual_psize);
+               slice_init_new_context_exec(mm);
 
        subpage_prot_init_new_context(mm);
 
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index d98f7e5c141b..be8f5c9d4d08 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -332,9 +332,6 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
        pr_hard("initing context for mm @%p\n", mm);
 
 #ifdef CONFIG_PPC_MM_SLICES
-       if (!mm->context.slb_addr_limit)
-               mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
-
        /*
         * We have MMU_NO_CONTEXT set to be ~0. Hence check
         * explicitly against context.id == 0. This ensures that we properly
@@ -343,7 +340,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
         * will have id != 0).
         */
        if (mm->context.id == 0)
-               slice_set_user_psize(mm, mmu_virtual_psize);
+               slice_init_new_context_exec(mm);
 #endif
        mm->context.id = MMU_NO_CONTEXT;
        mm->context.active = 0;
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 5e9e1e57d580..7b51f962ce0c 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -671,70 +671,34 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(get_slice_psize);
 
-/*
- * This is called by hash_page when it needs to do a lazy conversion of
- * an address space from real 64K pages to combo 4K pages (typically
- * when hitting a non cacheable mapping on a processor or hypervisor
- * that won't allow them for 64K pages).
- *
- * This is also called in init_new_context() to change back the user
- * psize from whatever the parent context had it set to
- * N.B. This may be called before mm->context.id has been set.
- *
- * This function will only change the content of the {low,high)_slice_psize
- * masks, it will not flush SLBs as this shall be handled lazily by the
- * caller.
- */
-void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
+void slice_init_new_context_exec(struct mm_struct *mm)
 {
-       int index, mask_index;
        unsigned char *hpsizes, *lpsizes;
-       unsigned long flags;
-       unsigned int old_psize;
-       int i;
+       unsigned int psize = mmu_virtual_psize;
 
-       slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);
+       slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);
 
-       VM_BUG_ON(radix_enabled());
-       spin_lock_irqsave(&slice_convert_lock, flags);
-
-       old_psize = mm->context.user_psize;
-       slice_dbg(" old_psize=%d\n", old_psize);
-       if (old_psize == psize)
-               goto bail;
+       /*
+        * In the case of exec, use the default limit. In the
+        * case of fork it is just inherited from the mm being
+        * duplicated.
+        */
+#ifdef CONFIG_PPC64
+       mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
+#else
+       mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
+#endif
 
        mm->context.user_psize = psize;
-       wmb();
 
+       /*
+        * Set all slice psizes to the default.
+        */
        lpsizes = mm->context.low_slices_psize;
-       for (i = 0; i < SLICE_NUM_LOW; i++) {
-               mask_index = i & 0x1;
-               index = i >> 1;
-               if (((lpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
-                       lpsizes[index] = (lpsizes[index] &
-                                         ~(0xf << (mask_index * 4))) |
-                               (((unsigned long)psize) << (mask_index * 4));
-       }
+       memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
 
        hpsizes = mm->context.high_slices_psize;
-       for (i = 0; i < SLICE_NUM_HIGH; i++) {
-               mask_index = i & 0x1;
-               index = i >> 1;
-               if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
-                       hpsizes[index] = (hpsizes[index] &
-                                         ~(0xf << (mask_index * 4))) |
-                               (((unsigned long)psize) << (mask_index * 4));
-       }
-
-
-
-
-       slice_dbg(" lsps=%lx, hsps=%lx\n",
-                 (unsigned long)mm->context.low_slices_psize,
-                 (unsigned long)mm->context.high_slices_psize);
-
- bail:
-       spin_unlock_irqrestore(&slice_convert_lock, flags);
+       memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
 }
 
 void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
-- 
2.16.1

Reply via email to