Re: [PATCH V2 26/31] powerpc/mm: Remove the dependency on pte bit position in asm code

2015-09-30 Thread Anshuman Khandual
On 09/30/2015 07:57 AM, Aneesh Kumar K.V wrote:
> We should not depend on pte bit positions in asm code. Simplify
> this by moving that part of the logic to C.

I guess this does not depend on the new code changes and can
simply be part of the preparatory patches at the beginning. 
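
For reference, the asm block the patch removes builds the access mask
roughly like this (a C sketch of what the rlwinm/rlwimi sequence
computes, loosely matching the new __hash_page(); build_access() is
just an illustrative name, not something in the tree):

    /* Illustrative sketch only, not part of the patch. */
    static unsigned long build_access(unsigned long ea, unsigned long msr,
                                      unsigned long trap, unsigned long dsisr)
    {
            unsigned long access = _PAGE_PRESENT;   /* ori r4,r4,1 */

            if (dsisr & DSISR_ISSTORE)              /* DSISR_STORE -> _PAGE_RW */
                    access |= _PAGE_RW;
            /* _PAGE_USER if MSR_PR is set or the high (kernel) bit is clear */
            if ((msr & MSR_PR) || !(ea & (1UL << 63)))
                    access |= _PAGE_USER;
            if (trap == 0x400)                      /* instruction access fault */
                    access |= _PAGE_EXEC;

            return access;
    }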


[PATCH V2 26/31] powerpc/mm: Remove the dependency on pte bit position in asm code

2015-09-29 Thread Aneesh Kumar K.V
We should not depend on pte bit positions in asm code. Simplify
this by moving that part of the logic to C.

Signed-off-by: Aneesh Kumar K.V 
---
 arch/powerpc/kernel/exceptions-64s.S | 16 +++-------------
 arch/powerpc/mm/hash_utils_64.c      | 29 +++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0a0399c2af11..34920f11dbdd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1556,28 +1556,18 @@ do_hash_page:
lwz r0,TI_PREEMPT(r11)  /* If we're in an "NMI" */
andis.  r0,r0,NMI_MASK@h/* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
-   /*
-* We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-* accessing a userspace segment (even from the kernel). We assume
-* kernel addresses always have the high bit set.
-*/
-   rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
-   rotldi  r0,r3,15/* Move high bit into MSR_PR posn */
-   orc r0,r12,r0   /* MSR_PR | ~high_bit */
-   rlwimi  r4,r0,32-13,30,30   /* becomes _PAGE_USER access bit */
-   ori r4,r4,1 /* add _PAGE_PRESENT */
-   rlwimi  r4,r5,22+2,31-2,31-2/* Set _PAGE_EXEC if trap is 0x400 */
 
/*
 * r3 contains the faulting address
-* r4 contains the required access permissions
+* r4 msr
 * r5 contains the trap number
 * r6 contains dsisr
 *
 * at return r3 = 0 for success, 1 for page fault, negative for error
 */
+   mr  r4,r12
ld  r6,_DSISR(r1)
-   bl  hash_page   /* build HPTE if possible */
+   bl  __hash_page /* build HPTE if possible */
cmpdi   r3,0/* see if hash_page succeeded */
 
/* Success */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2b90850bdaf8..6cd9e40aae01 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1161,6 +1161,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 }
 EXPORT_SYMBOL_GPL(hash_page);
 
+int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
+   unsigned long dsisr)
+{
+   unsigned long access = _PAGE_PRESENT;
+   unsigned long flags = 0;
+   struct mm_struct *mm = current->mm;
+
+   if (REGION_ID(ea) == VMALLOC_REGION_ID)
+   mm = &init_mm;
+
+   if (dsisr & DSISR_NOHPTE)
+   flags |= HPTE_NOHPTE_UPDATE;
+
+   if (dsisr & DSISR_ISSTORE)
+   access |= _PAGE_RW;
+   /*
+* We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+* accessing a userspace segment (even from the kernel). We assume
+* kernel addresses always have the high bit set.
+*/
+   if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
+   access |= _PAGE_USER;
+
+   if (trap == 0x400)
+   access |= _PAGE_EXEC;
+
+   return hash_page_mm(mm, ea, access, trap, flags);
+}
+
 void hash_preload(struct mm_struct *mm, unsigned long ea,
  unsigned long access, unsigned long trap)
 {
-- 
2.5.0
