Re: [PATCH 02/15] powerpc/cell: Move data segment faulting code out of cell platform

2014-09-29 Thread Michael Neuling
On Mon, 2014-09-29 at 14:00 +0530, Aneesh Kumar K.V wrote:
> Michael Neuling  writes:
> 
> > From: Ian Munsie 
> >
> > __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> > required for a particular EA and mm struct.
> >
> > This code is generically useful for other co-processors.  This moves the 
> > code
> > out of the cell platform so it can be used by other powerpc code.
> >
> > Signed-off-by: Ian Munsie 
> > Signed-off-by: Michael Neuling 
> > ---
> >  arch/powerpc/include/asm/mmu-hash64.h  |  2 ++
> >  arch/powerpc/mm/copro_fault.c  | 48 
> > ++
> >  arch/powerpc/mm/slb.c  |  3 ---
> >  arch/powerpc/platforms/cell/spu_base.c | 41 +++--
> >  4 files changed, 54 insertions(+), 40 deletions(-)
> >
> > diff --git a/arch/powerpc/include/asm/mmu-hash64.h 
> > b/arch/powerpc/include/asm/mmu-hash64.h
> > index d765144..fd19a53 100644
> > --- a/arch/powerpc/include/asm/mmu-hash64.h
> > +++ b/arch/powerpc/include/asm/mmu-hash64.h
> > @@ -180,6 +180,8 @@ static inline unsigned int mmu_psize_to_shift(unsigned 
> > int mmu_psize)
> >   * we work in all cases including 4k page size.
> >   */
> >  #define VPN_SHIFT  12
> > +#define slb_vsid_shift(ssize)  \
> > +   ((ssize) == MMU_SEGSIZE_256M ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)
> 
> can it be static inline similar to segment_shift() ?

Yep.

> 
> >  
> >  /*
> >   * HPTE Large Page (LP) details
> > diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
> > index ba7df14..4105a63 100644
> > --- a/arch/powerpc/mm/copro_fault.c
> > +++ b/arch/powerpc/mm/copro_fault.c
> > @@ -90,3 +90,51 @@ out_unlock:
> > return ret;
> >  }
> >  EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
> > +
> > +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> > +{
> > +   int psize, ssize;
> > +
> > +   *esid = (ea & ESID_MASK) | SLB_ESID_V;
> > +
> > +   switch (REGION_ID(ea)) {
> > +   case USER_REGION_ID:
> > +   pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> > +#ifdef CONFIG_PPC_MM_SLICES
> > +   psize = get_slice_psize(mm, ea);
> > +#else
> > +   psize = mm->context.user_psize;
> > +#endif
> 
> We don't need that.
> 
> #ifdef CONFIG_PPC_STD_MMU_64
> #define get_slice_psize(mm, addr) ((mm)->context.user_psize)

OK

> 
> 
> > +   ssize = user_segment_size(ea);
> > +   *vsid = (get_vsid(mm->context.id, ea, ssize)
> > +   << slb_vsid_shift(ssize)) | SLB_VSID_USER
> > +   | (ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0);
> > +   break;
> > +   case VMALLOC_REGION_ID:
> > +   pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", 
> > ea);
> > +   if (ea < VMALLOC_END)
> > +   psize = mmu_vmalloc_psize;
> > +   else
> > +   psize = mmu_io_psize;
> > +   *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> > +   << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
> > +   | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
> > 0);
> > +   break;
> > +   case KERNEL_REGION_ID:
> > +   pr_devel("copro_data_segment: 0x%llx -- KERNEL_REGION_ID\n", 
> > ea);
> > +   psize = mmu_linear_psize;
> > +   *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> > +   << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
> > +   | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
> > 0);
> > +   break;
> > +   default:
> > +   /* Future: support kernel segments so that drivers can use the
> > +* CoProcessors */
> > +   pr_debug("invalid region access at %016llx\n", ea);
> > +   return 1;
> > +   }
> > +   *vsid |= mmu_psize_defs[psize].sllp;
> > +
> > +   return 0;
> > +}
> 
> large part of this is same as what we do in hash_page. And we are not
> really updating vsid here, it is vsid slb encoding. So why not abstract
> the vsid part and use that in hash_page also ? That would have also taken
> care of the above #ifdef.

Ok, I've merged these two variants.

Going to repost this whole series again soon.  It'll be in there.

Thanks for the comments.

Mikey

> 
> > +EXPORT_SYMBOL_GPL(copro_data_segment);
> > diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> > index 0399a67..6e450ca 100644
> > --- a/arch/powerpc/mm/slb.c
> > +++ b/arch/powerpc/mm/slb.c
> > @@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long 
> > ea, int ssize,
> > return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
> >  }
> >  
> > -#define slb_vsid_shift(ssize)  \
> > -   ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
> > -
> >  static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
> >  unsigned long flags)
> >  {
> > diff --git a/arch/powerpc/platforms/cell/spu_base.c 
> > b/arch/powerpc/platforms/cell/spu_

Re: [PATCH 02/15] powerpc/cell: Move data segment faulting code out of cell platform

2014-09-29 Thread Aneesh Kumar K.V
Michael Neuling  writes:

> From: Ian Munsie 
>
> __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> required for a particular EA and mm struct.
>
> This code is generically useful for other co-processors.  This moves the code
> out of the cell platform so it can be used by other powerpc code.
>
> Signed-off-by: Ian Munsie 
> Signed-off-by: Michael Neuling 
> ---
>  arch/powerpc/include/asm/mmu-hash64.h  |  2 ++
>  arch/powerpc/mm/copro_fault.c  | 48 
> ++
>  arch/powerpc/mm/slb.c  |  3 ---
>  arch/powerpc/platforms/cell/spu_base.c | 41 +++--
>  4 files changed, 54 insertions(+), 40 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/mmu-hash64.h 
> b/arch/powerpc/include/asm/mmu-hash64.h
> index d765144..fd19a53 100644
> --- a/arch/powerpc/include/asm/mmu-hash64.h
> +++ b/arch/powerpc/include/asm/mmu-hash64.h
> @@ -180,6 +180,8 @@ static inline unsigned int mmu_psize_to_shift(unsigned 
> int mmu_psize)
>   * we work in all cases including 4k page size.
>   */
>  #define VPN_SHIFT12
> +#define slb_vsid_shift(ssize)\
> + ((ssize) == MMU_SEGSIZE_256M ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)

can it be static inline similar to segment_shift() ?

>  
>  /*
>   * HPTE Large Page (LP) details
> diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
> index ba7df14..4105a63 100644
> --- a/arch/powerpc/mm/copro_fault.c
> +++ b/arch/powerpc/mm/copro_fault.c
> @@ -90,3 +90,51 @@ out_unlock:
>   return ret;
>  }
>  EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
> +
> +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> +{
> + int psize, ssize;
> +
> + *esid = (ea & ESID_MASK) | SLB_ESID_V;
> +
> + switch (REGION_ID(ea)) {
> + case USER_REGION_ID:
> + pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> +#ifdef CONFIG_PPC_MM_SLICES
> + psize = get_slice_psize(mm, ea);
> +#else
> + psize = mm->context.user_psize;
> +#endif

We don't need that.

#ifdef CONFIG_PPC_STD_MMU_64
#define get_slice_psize(mm, addr)   ((mm)->context.user_psize)



> + ssize = user_segment_size(ea);
> + *vsid = (get_vsid(mm->context.id, ea, ssize)
> + << slb_vsid_shift(ssize)) | SLB_VSID_USER
> + | (ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0);
> + break;
> + case VMALLOC_REGION_ID:
> + pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", 
> ea);
> + if (ea < VMALLOC_END)
> + psize = mmu_vmalloc_psize;
> + else
> + psize = mmu_io_psize;
> + *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> + << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
> + | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
> 0);
> + break;
> + case KERNEL_REGION_ID:
> + pr_devel("copro_data_segment: 0x%llx -- KERNEL_REGION_ID\n", 
> ea);
> + psize = mmu_linear_psize;
> + *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> + << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
> + | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
> 0);
> + break;
> + default:
> + /* Future: support kernel segments so that drivers can use the
> +  * CoProcessors */
> + pr_debug("invalid region access at %016llx\n", ea);
> + return 1;
> + }
> + *vsid |= mmu_psize_defs[psize].sllp;
> +
> + return 0;
> +}

large part of this is same as what we do in hash_page. And we are not
really updating vsid here, it is vsid slb encoding. So why not abstract
the vsid part and use that in hash_page also ? That would have also taken
care of the above #ifdef.

> +EXPORT_SYMBOL_GPL(copro_data_segment);
> diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
> index 0399a67..6e450ca 100644
> --- a/arch/powerpc/mm/slb.c
> +++ b/arch/powerpc/mm/slb.c
> @@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, 
> int ssize,
>   return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
>  }
>  
> -#define slb_vsid_shift(ssize)\
> - ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
> -
>  static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
>unsigned long flags)
>  {
> diff --git a/arch/powerpc/platforms/cell/spu_base.c 
> b/arch/powerpc/platforms/cell/spu_base.c
> index 2930d1e..fe004b1 100644
> --- a/arch/powerpc/platforms/cell/spu_base.c
> +++ b/arch/powerpc/platforms/cell/spu_base.c
> @@ -167,45 +167,12 @@ static inline void spu_load_slb(struct spu *spu, int 
> slbe, struct spu_slb *slb)
>  
>  static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
>  {
> - struct mm_struct *mm = spu->mm;
>   struct sp

Re: [PATCH 02/15] powerpc/cell: Move data segment faulting code out of cell platform

2014-09-26 Thread Michael Neuling
On Fri, 2014-09-26 at 14:05 +1000, Anton Blanchard wrote:
> > From: Ian Munsie 
> > 
> > __spu_trap_data_seg() currently contains code to determine the VSID
> > and ESID required for a particular EA and mm struct.
> > 
> > This code is generically useful for other co-processors.  This moves
> > the code out of the cell platform so it can be used by other powerpc code.
> 
> Could we also mention:
> 
> and adds 1TB segment support.

Good point.  I'll add.


> > Signed-off-by: Ian Munsie 
> > Signed-off-by: Michael Neuling 
> 
> Reviewed-by: Anton Blanchard 

Thanks,
Mikey
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 02/15] powerpc/cell: Move data segment faulting code out of cell platform

2014-09-25 Thread Anton Blanchard

> From: Ian Munsie 
> 
> __spu_trap_data_seg() currently contains code to determine the VSID
> and ESID required for a particular EA and mm struct.
> 
> This code is generically useful for other co-processors.  This moves
> the code out of the cell platform so it can be used by other powerpc code.

Could we also mention:

and adds 1TB segment support.

> Signed-off-by: Ian Munsie 
> Signed-off-by: Michael Neuling 

Reviewed-by: Anton Blanchard 

Anton
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 02/15] powerpc/cell: Move data segment faulting code out of cell platform

2014-09-18 Thread Michael Neuling
> > +
> > +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> > +{
> > +   int psize, ssize;
> > +
> > +   *esid = (ea & ESID_MASK) | SLB_ESID_V;
> > +
> > +   switch (REGION_ID(ea)) {
> > +   case USER_REGION_ID:
> > +   pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> > +#ifdef CONFIG_PPC_MM_SLICES
> > +   psize = get_slice_psize(mm, ea);
> > +#else
> > +   psize = mm->context.user_psize;
> > +#endif
> > +   ssize = user_segment_size(ea);
> > +   *vsid = (get_vsid(mm->context.id, ea, ssize)
> > +   << slb_vsid_shift(ssize)) | SLB_VSID_USER
> > +   | (ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0);
> > +   break;
> > +   case VMALLOC_REGION_ID:
> > +   pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", 
> > ea);
> > +   if (ea < VMALLOC_END)
> > +   psize = mmu_vmalloc_psize;
> > +   else
> > +   psize = mmu_io_psize;
> > +   *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> > +   << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
> > +   | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
> > 0);
> > +   break;
> > +   case KERNEL_REGION_ID:
> > +   pr_devel("copro_data_segment: 0x%llx -- KERNEL_REGION_ID\n", 
> > ea);
> > +   psize = mmu_linear_psize;
> > +   *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> > +   << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
> > +   | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
> > 0);
> > +   break;
> > +   default:
> > +   /* Future: support kernel segments so that drivers can use the
> > +* CoProcessors */
> > +   pr_debug("invalid region access at %016llx\n", ea);
> > +   return 1;
> > +   }
> > +   *vsid |= mmu_psize_defs[psize].sllp;
> 
> A bit of a nitpick, but how about you remove the repeated:
> 
>   | (<ssize> == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0)
> 
> then set ssize in each of the switch cases (like we do with psize), and
> or-in the VSID_B_1T bit at the end:
>   
>   *vsid |= mmu_psize_defs[psize].sllp
>   | (ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0);

Nice.  I think below is what you mean.

I'll fold this into the existing patch and repost in a few days.

Thanks,
Mikey

diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 4105a63..939caf6 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -107,8 +107,7 @@ int copro_data_segment(struct mm_struct *mm, u64 ea, u64 
*esid, u64 *vsid)
 #endif
ssize = user_segment_size(ea);
*vsid = (get_vsid(mm->context.id, ea, ssize)
-   << slb_vsid_shift(ssize)) | SLB_VSID_USER
-   | (ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0);
+<< slb_vsid_shift(ssize)) | SLB_VSID_USER;
break;
case VMALLOC_REGION_ID:
pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", 
ea);
@@ -116,16 +115,16 @@ int copro_data_segment(struct mm_struct *mm, u64 ea, u64 
*esid, u64 *vsid)
psize = mmu_vmalloc_psize;
else
psize = mmu_io_psize;
+   ssize = mmu_kernel_ssize;
*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
-   << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
-   | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
0);
+<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
break;
case KERNEL_REGION_ID:
pr_devel("copro_data_segment: 0x%llx -- KERNEL_REGION_ID\n", 
ea);
psize = mmu_linear_psize;
+   ssize = mmu_kernel_ssize;
*vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
-   << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
-   | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
0);
+<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
break;
default:
/* Future: support kernel segments so that drivers can use the
@@ -133,7 +132,8 @@ int copro_data_segment(struct mm_struct *mm, u64 ea, u64 
*esid, u64 *vsid)
pr_debug("invalid region access at %016llx\n", ea);
return 1;
}
-   *vsid |= mmu_psize_defs[psize].sllp;
+   *vsid |= mmu_psize_defs[psize].sllp |
+   ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
 
return 0;
 }

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 02/15] powerpc/cell: Move data segment faulting code out of cell platform

2014-09-18 Thread Jeremy Kerr
Hi Mikey & Ian,

> __spu_trap_data_seg() currently contains code to determine the VSID and ESID
> required for a particular EA and mm struct.
> 
> This code is generically useful for other co-processors.  This moves the code
> out of the cell platform so it can be used by other powerpc code.

OK, nice.

> +
> +int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
> +{
> + int psize, ssize;
> +
> + *esid = (ea & ESID_MASK) | SLB_ESID_V;
> +
> + switch (REGION_ID(ea)) {
> + case USER_REGION_ID:
> + pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
> +#ifdef CONFIG_PPC_MM_SLICES
> + psize = get_slice_psize(mm, ea);
> +#else
> + psize = mm->context.user_psize;
> +#endif
> + ssize = user_segment_size(ea);
> + *vsid = (get_vsid(mm->context.id, ea, ssize)
> + << slb_vsid_shift(ssize)) | SLB_VSID_USER
> + | (ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0);
> + break;
> + case VMALLOC_REGION_ID:
> + pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", 
> ea);
> + if (ea < VMALLOC_END)
> + psize = mmu_vmalloc_psize;
> + else
> + psize = mmu_io_psize;
> + *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> + << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
> + | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
> 0);
> + break;
> + case KERNEL_REGION_ID:
> + pr_devel("copro_data_segment: 0x%llx -- KERNEL_REGION_ID\n", 
> ea);
> + psize = mmu_linear_psize;
> + *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
> + << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
> + | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
> 0);
> + break;
> + default:
> + /* Future: support kernel segments so that drivers can use the
> +  * CoProcessors */
> + pr_debug("invalid region access at %016llx\n", ea);
> + return 1;
> + }
> + *vsid |= mmu_psize_defs[psize].sllp;

A bit of a nitpick, but how about you remove the repeated:

| (<ssize> == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0)

then set ssize in each of the switch cases (like we do with psize), and
or-in the VSID_B_1T bit at the end:

*vsid |= mmu_psize_defs[psize].sllp
| (ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0);

Otherwise, looks good to me.

Cheers,


Jeremy

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 02/15] powerpc/cell: Move data segment faulting code out of cell platform

2014-09-18 Thread Michael Neuling
From: Ian Munsie 

__spu_trap_data_seg() currently contains code to determine the VSID and ESID
required for a particular EA and mm struct.

This code is generically useful for other co-processors.  This moves the code
out of the cell platform so it can be used by other powerpc code.

Signed-off-by: Ian Munsie 
Signed-off-by: Michael Neuling 
---
 arch/powerpc/include/asm/mmu-hash64.h  |  2 ++
 arch/powerpc/mm/copro_fault.c  | 48 ++
 arch/powerpc/mm/slb.c  |  3 ---
 arch/powerpc/platforms/cell/spu_base.c | 41 +++--
 4 files changed, 54 insertions(+), 40 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-hash64.h 
b/arch/powerpc/include/asm/mmu-hash64.h
index d765144..fd19a53 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -180,6 +180,8 @@ static inline unsigned int mmu_psize_to_shift(unsigned int 
mmu_psize)
  * we work in all cases including 4k page size.
  */
 #define VPN_SHIFT  12
+#define slb_vsid_shift(ssize)  \
+   ((ssize) == MMU_SEGSIZE_256M ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)
 
 /*
  * HPTE Large Page (LP) details
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index ba7df14..4105a63 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -90,3 +90,51 @@ out_unlock:
return ret;
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
+
+int copro_data_segment(struct mm_struct *mm, u64 ea, u64 *esid, u64 *vsid)
+{
+   int psize, ssize;
+
+   *esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+   switch (REGION_ID(ea)) {
+   case USER_REGION_ID:
+   pr_devel("copro_data_segment: 0x%llx -- USER_REGION_ID\n", ea);
+#ifdef CONFIG_PPC_MM_SLICES
+   psize = get_slice_psize(mm, ea);
+#else
+   psize = mm->context.user_psize;
+#endif
+   ssize = user_segment_size(ea);
+   *vsid = (get_vsid(mm->context.id, ea, ssize)
+   << slb_vsid_shift(ssize)) | SLB_VSID_USER
+   | (ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 0);
+   break;
+   case VMALLOC_REGION_ID:
+   pr_devel("copro_data_segment: 0x%llx -- VMALLOC_REGION_ID\n", 
ea);
+   if (ea < VMALLOC_END)
+   psize = mmu_vmalloc_psize;
+   else
+   psize = mmu_io_psize;
+   *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
+   << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
+   | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
0);
+   break;
+   case KERNEL_REGION_ID:
+   pr_devel("copro_data_segment: 0x%llx -- KERNEL_REGION_ID\n", 
ea);
+   psize = mmu_linear_psize;
+   *vsid = (get_kernel_vsid(ea, mmu_kernel_ssize)
+   << SLB_VSID_SHIFT) | SLB_VSID_KERNEL
+   | (mmu_kernel_ssize == MMU_SEGSIZE_1T ? SLB_VSID_B_1T : 
0);
+   break;
+   default:
+   /* Future: support kernel segments so that drivers can use the
+* CoProcessors */
+   pr_debug("invalid region access at %016llx\n", ea);
+   return 1;
+   }
+   *vsid |= mmu_psize_defs[psize].sllp;
+
+   return 0;
+}
+EXPORT_SYMBOL_GPL(copro_data_segment);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 0399a67..6e450ca 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, 
int ssize,
return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
-#define slb_vsid_shift(ssize)  \
-   ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
-
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 unsigned long flags)
 {
diff --git a/arch/powerpc/platforms/cell/spu_base.c 
b/arch/powerpc/platforms/cell/spu_base.c
index 2930d1e..fe004b1 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -167,45 +167,12 @@ static inline void spu_load_slb(struct spu *spu, int 
slbe, struct spu_slb *slb)
 
 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
-   struct mm_struct *mm = spu->mm;
struct spu_slb slb;
-   int psize;
-
-   pr_debug("%s\n", __func__);
-
-   slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
+   int ret;
 
-   switch(REGION_ID(ea)) {
-   case USER_REGION_ID:
-#ifdef CONFIG_PPC_MM_SLICES
-   psize = get_slice_psize(mm, ea);
-#else
-   psize = mm->context.user_psize;
-#endif
-   slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
-   << SLB_VSID_SHIFT) | SLB_VSID_USER;
-   break;
-   case VMALLOC_REGION_ID:
-   if (ea < VMALLOC_END)
-   psiz