Re: [PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-06-06 Thread Segher Boessenkool
On Wed, Jun 06, 2018 at 02:42:27PM +0800, Simon Guo wrote:
> I now feel uncomfortable using mcrf like:
> mcrf    7,0
> 
> since I cannot be 100% confident that the compiler will not use CR7 or
> another CR field in exit_vmx_ops().

It wasn't clear to me this macro boils down to a function call.

You can use CR2, CR3, or CR4, but then you'll need to save and restore
those at the start and end of the function, which is just as nasty.

Better is to restructure some code so you don't need that CR field
there anymore.

> Can we switch back to mfocrf/mtocrf with correct CR0 value?
>mfocrf  r5,128
> ...
>mtocrf  128,r5

Sure, I'm not your boss ;-)  It just seems a shame to me to have this
12-or-whatever-cycle delay here, since the whole point of the patch is
to make things faster, that's all (but it is still faster, right? You
tested it).


Segher


Re: [PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-06-06 Thread Simon Guo
Hi Segher,
On Wed, May 30, 2018 at 05:03:21PM +0800, Simon Guo wrote:
> On Wed, May 30, 2018 at 03:35:40AM -0500, Segher Boessenkool wrote:
> > On Wed, May 30, 2018 at 04:14:02PM +0800, Simon Guo wrote:
> > > Hi Segher,
> > > On Mon, May 28, 2018 at 06:05:59AM -0500, Segher Boessenkool wrote:
> > > > On Fri, May 25, 2018 at 12:07:34PM +0800, wei.guo.si...@gmail.com wrote:
> > > > > + /* save and restore cr0 */
> > > > > + mfocrf  r5,64
> > > > > + EXIT_VMX_OPS
> > > > > + mtocrf  64,r5
> > > > > + b   .LcmpAB_lightweight
> > > > 
> > > > That's cr1, not cr0.  You can use mcrf instead, it is cheaper (esp. if
> > > > you have it in a non-volatile CR field before so you need only one, if 
> > > > any).
> > > > 
> > > You are right :) How about using mtcr/mfcr instead? I think they are
> > > fast as well and more readable.
> > 
> > Those are much worse than m[ft]ocrf.
> > 
> > You probably should just shuffle things around so that EXIT_VMX_OPS
> > does not clobber the CR field you need to keep.
> Let me use mcrf then :)

I now feel uncomfortable using mcrf like:
mcrf    7,0

since I cannot be 100% confident that the compiler will not use CR7 or
another CR field in exit_vmx_ops().

Can we switch back to mfocrf/mtocrf with correct CR0 value?
   mfocrf  r5,128
...
   mtocrf  128,r5

Thanks,
- Simon


Re: [PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-05-30 Thread Simon Guo
On Wed, May 30, 2018 at 03:35:40AM -0500, Segher Boessenkool wrote:
> On Wed, May 30, 2018 at 04:14:02PM +0800, Simon Guo wrote:
> > Hi Segher,
> > On Mon, May 28, 2018 at 06:05:59AM -0500, Segher Boessenkool wrote:
> > > On Fri, May 25, 2018 at 12:07:34PM +0800, wei.guo.si...@gmail.com wrote:
> > > > +   /* save and restore cr0 */
> > > > +   mfocrf  r5,64
> > > > +   EXIT_VMX_OPS
> > > > +   mtocrf  64,r5
> > > > +   b   .LcmpAB_lightweight
> > > 
> > > That's cr1, not cr0.  You can use mcrf instead, it is cheaper (esp. if
> > > you have it in a non-volatile CR field before so you need only one, if 
> > > any).
> > > 
> > You are right :) How about using mtcr/mfcr instead? I think they are
> > fast as well and more readable.
> 
> Those are much worse than m[ft]ocrf.
> 
> You probably should just shuffle things around so that EXIT_VMX_OPS
> does not clobber the CR field you need to keep.
Let me use mcrf then :)

Thanks,
- Simon


Re: [PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-05-30 Thread Segher Boessenkool
On Wed, May 30, 2018 at 04:14:02PM +0800, Simon Guo wrote:
> Hi Segher,
> On Mon, May 28, 2018 at 06:05:59AM -0500, Segher Boessenkool wrote:
> > On Fri, May 25, 2018 at 12:07:34PM +0800, wei.guo.si...@gmail.com wrote:
> > > + /* save and restore cr0 */
> > > + mfocrf  r5,64
> > > + EXIT_VMX_OPS
> > > + mtocrf  64,r5
> > > + b   .LcmpAB_lightweight
> > 
> > That's cr1, not cr0.  You can use mcrf instead, it is cheaper (esp. if
> > you have it in a non-volatile CR field before so you need only one, if any).
> > 
> You are right :) How about using mtcr/mfcr instead? I think they are
> fast as well and more readable.

Those are much worse than m[ft]ocrf.

You probably should just shuffle things around so that EXIT_VMX_OPS
does not clobber the CR field you need to keep.


Segher


Re: [PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-05-30 Thread Simon Guo
Hi Michael,
On Mon, May 28, 2018 at 09:59:29PM +1000, Michael Ellerman wrote:
> Hi Simon,
> 
> wei.guo.si...@gmail.com writes:
> > diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
> > index f20e883..4ba7bb6 100644
> > --- a/arch/powerpc/lib/memcmp_64.S
> > +++ b/arch/powerpc/lib/memcmp_64.S
> > @@ -174,6 +235,13 @@ _GLOBAL(memcmp)
> > blr
> >  
> >  .Llong:
> > +#ifdef CONFIG_ALTIVEC
> > +   /* Try to use vmx loop if length is equal or greater than 4K */
> > +   cmpldi  cr6,r5,VMX_THRESH
> > +   bge cr6,.Lsameoffset_vmx_cmp
> > +
> 
> Here we decide to use vmx, but we don't do any CPU feature checks.
> 
> 
> > @@ -332,7 +400,94 @@ _GLOBAL(memcmp)
> >  8:
> > blr
> >  
> > +#ifdef CONFIG_ALTIVEC
> > +.Lsameoffset_vmx_cmp:
> > +   /* Enter with src/dst addrs has the same offset with 8 bytes
> > +* align boundary
> > +*/
> > +   ENTER_VMX_OPS
> > +   beq cr1,.Llong_novmx_cmp
> > +
> > +3:
> > +   /* need to check whether r4 has the same offset with r3
> > +* for 16 bytes boundary.
> > +*/
> > +   xor r0,r3,r4
> > +   andi.   r0,r0,0xf
> > +   bne .Ldiffoffset_vmx_cmp_start
> > +
> > +   /* len is no less than 4KB. Need to align with 16 bytes further.
> > +*/
> > +   andi.   rA,r3,8
> > +   LD  rA,0,r3
> > +   beq 4f
> > +   LD  rB,0,r4
> > +   cmpld   cr0,rA,rB
> > +   addi    r3,r3,8
> > +   addi    r4,r4,8
> > +   addi    r5,r5,-8
> > +
> > +   beq cr0,4f
> > +   /* save and restore cr0 */
> > +   mfocrf  r5,64
> > +   EXIT_VMX_OPS
> > +   mtocrf  64,r5
> > +   b   .LcmpAB_lightweight
> > +
> > +4:
> > +   /* compare 32 bytes for each loop */
> > +   srdi    r0,r5,5
> > +   mtctr   r0
> > +   clrldi  r5,r5,59
> > +   li  off16,16
> > +
> > +.balign 16
> > +5:
> > +   lvx v0,0,r3
> > +   lvx v1,0,r4
> > +   vcmpequd. v0,v0,v1
> 
> vcmpequd is only available on Power8 and later CPUs.
> 
> Which means this will crash on Power7 or earlier.
> 
> Something like this should fix it I think.
> 
> diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
> index 96eb08b2be2e..0a11ff14dcd9 100644
> --- a/arch/powerpc/lib/memcmp_64.S
> +++ b/arch/powerpc/lib/memcmp_64.S
> @@ -236,9 +236,11 @@ _GLOBAL(memcmp)
>  
>  .Llong:
>  #ifdef CONFIG_ALTIVEC
> +BEGIN_FTR_SECTION
>   /* Try to use vmx loop if length is equal or greater than 4K */
>   cmpldi  cr6,r5,VMX_THRESH
>   bge cr6,.Lsameoffset_vmx_cmp
> +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
>  
>  .Llong_novmx_cmp:
>  #endif
Thanks for the good catch! I will update that.
> 
> 
> There's another problem which is that old toolchains don't know about
> vcmpequd. To fix that we'll need to add a macro that uses .long to
> construct the instruction.
Right. I will add the corresponding macros.

Thanks for your review.

BR,
 - Simon


Re: [PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-05-30 Thread Simon Guo
Hi Segher,
On Mon, May 28, 2018 at 06:05:59AM -0500, Segher Boessenkool wrote:
> On Fri, May 25, 2018 at 12:07:34PM +0800, wei.guo.si...@gmail.com wrote:
> > +   /* save and restore cr0 */
> > +   mfocrf  r5,64
> > +   EXIT_VMX_OPS
> > +   mtocrf  64,r5
> > +   b   .LcmpAB_lightweight
> 
> That's cr1, not cr0.  You can use mcrf instead, it is cheaper (esp. if
> you have it in a non-volatile CR field before so you need only one, if any).
> 
You are right :) How about using mtcr/mfcr instead? I think they are
fast as well and more readable.

> > +   vcmpequb.  v7,v9,v10
> > +   bnl cr6,.Ldiffoffset_vmx_diff_found
> 
> In other places you say  bf 24,...  Dunno which is more readable, but
> please pick one?
I will update to bnl for other cases.
> 
> 
> Segher

Thanks for your review.

BR,
- Simon


Re: [PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-05-28 Thread Michael Ellerman
Hi Simon,

wei.guo.si...@gmail.com writes:
> diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
> index f20e883..4ba7bb6 100644
> --- a/arch/powerpc/lib/memcmp_64.S
> +++ b/arch/powerpc/lib/memcmp_64.S
> @@ -174,6 +235,13 @@ _GLOBAL(memcmp)
>   blr
>  
>  .Llong:
> +#ifdef CONFIG_ALTIVEC
> + /* Try to use vmx loop if length is equal or greater than 4K */
> + cmpldi  cr6,r5,VMX_THRESH
> + bge cr6,.Lsameoffset_vmx_cmp
> +

Here we decide to use vmx, but we don't do any CPU feature checks.


> @@ -332,7 +400,94 @@ _GLOBAL(memcmp)
>  8:
>   blr
>  
> +#ifdef CONFIG_ALTIVEC
> +.Lsameoffset_vmx_cmp:
> + /* Enter with src/dst addrs has the same offset with 8 bytes
> +  * align boundary
> +  */
> + ENTER_VMX_OPS
> + beq cr1,.Llong_novmx_cmp
> +
> +3:
> + /* need to check whether r4 has the same offset with r3
> +  * for 16 bytes boundary.
> +  */
> + xor r0,r3,r4
> + andi.   r0,r0,0xf
> + bne .Ldiffoffset_vmx_cmp_start
> +
> + /* len is no less than 4KB. Need to align with 16 bytes further.
> +  */
> + andi.   rA,r3,8
> + LD  rA,0,r3
> + beq 4f
> + LD  rB,0,r4
> + cmpld   cr0,rA,rB
> + addi    r3,r3,8
> + addi    r4,r4,8
> + addi    r5,r5,-8
> +
> + beq cr0,4f
> + /* save and restore cr0 */
> + mfocrf  r5,64
> + EXIT_VMX_OPS
> + mtocrf  64,r5
> + b   .LcmpAB_lightweight
> +
> +4:
> + /* compare 32 bytes for each loop */
> + srdi    r0,r5,5
> + mtctr   r0
> + clrldi  r5,r5,59
> + li  off16,16
> +
> +.balign 16
> +5:
> + lvx v0,0,r3
> + lvx v1,0,r4
> + vcmpequd. v0,v0,v1

vcmpequd is only available on Power8 and later CPUs.

Which means this will crash on Power7 or earlier.

Something like this should fix it I think.

diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 96eb08b2be2e..0a11ff14dcd9 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -236,9 +236,11 @@ _GLOBAL(memcmp)
 
 .Llong:
 #ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
/* Try to use vmx loop if length is equal or greater than 4K */
cmpldi  cr6,r5,VMX_THRESH
bge cr6,.Lsameoffset_vmx_cmp
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 .Llong_novmx_cmp:
 #endif


There's another problem which is that old toolchains don't know about
vcmpequd. To fix that we'll need to add a macro that uses .long to
construct the instruction.
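
A minimal sketch of what such a macro could look like; the encoding below
assumes vcmpequd. is major opcode 4 / extended opcode 199 with the record
bit at 0x400, and the name is only illustrative, so check it against the
ISA rather than taking it as the final macro:

/* emit vcmpequd. VRT,VRA,VRB as a raw word so old assemblers still build */
#define VCMPEQUD_RC(vrt, vra, vrb) \
	.long (0x100000c7 | ((vrt) << 21) | ((vra) << 16) | ((vrb) << 11) | 0x400)

	/* usage with numeric VR numbers, equivalent to "vcmpequd. v0,v0,v1" */
	VCMPEQUD_RC(0, 0, 1)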

cheers


Re: [PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-05-28 Thread Segher Boessenkool
On Fri, May 25, 2018 at 12:07:34PM +0800, wei.guo.si...@gmail.com wrote:
> + /* save and restore cr0 */
> + mfocrf  r5,64
> + EXIT_VMX_OPS
> + mtocrf  64,r5
> + b   .LcmpAB_lightweight

That's cr1, not cr0.  You can use mcrf instead, it is cheaper (esp. if
you have it in a non-volatile CR field before so you need only one, if any).
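
As a sketch of both points (not code from the patch): with mfocrf/mtocrf the
FXM mask 64 (0x40) selects CR field 1 while 128 (0x80) selects CR field 0,
and an mcrf-based save only helps if the chosen field survives the call made
by EXIT_VMX_OPS, e.g. a non-volatile field such as cr2, which then needs its
own save/restore in the function prologue/epilogue:

	mcrf	2,0		/* park the cmpld result (cr0) in cr2 */
	EXIT_VMX_OPS
	mcrf	0,2		/* bring it back into cr0 */
	b	.LcmpAB_lightweight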

> + vcmpequb.  v7,v9,v10
> + bnl cr6,.Ldiffoffset_vmx_diff_found

In other places you say  bf 24,...  Dunno which is more readable, but
please pick one?
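
For reference, the two spellings encode the same branch: CR6 occupies CR
bits 24-27, and "bnl cr6" means "branch if the LT bit of cr6 is clear",
i.e. bf 4*cr6+lt, which is bf 24.  Either one of these lines (not both)
does the job:

	bnl	cr6,.Ldiffoffset_vmx_diff_found	/* extended mnemonic */
	bf	24,.Ldiffoffset_vmx_diff_found	/* same test, raw CR bit number */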


Segher


[PATCH v6 2/4] powerpc/64: enhance memcmp() with VMX instruction for long bytes comparison

2018-05-24 Thread wei . guo . simon
From: Simon Guo 

This patch adds VMX primitives to do memcmp() when the compare size
is equal to or greater than 4K bytes. The KSM feature can benefit from this.

Test result with the following test program (replace the leading "^>" with ""):
--
># cat tools/testing/selftests/powerpc/stringloops/memcmp.c
>#include 
>#include 
>#include 
>#include 
>#include "utils.h"
>#define SIZE (1024 * 1024 * 900)
>#define ITERATIONS 40

int test_memcmp(const void *s1, const void *s2, size_t n);

static int testcase(void)
{
	char *s1;
	char *s2;
	unsigned long i;

	s1 = memalign(128, SIZE);
	if (!s1) {
		perror("memalign");
		exit(1);
	}

	s2 = memalign(128, SIZE);
	if (!s2) {
		perror("memalign");
		exit(1);
	}

	for (i = 0; i < SIZE; i++) {
		s1[i] = i & 0xff;
		s2[i] = i & 0xff;
	}

	for (i = 0; i < ITERATIONS; i++) {
		int ret = test_memcmp(s1, s2, SIZE);

		if (ret) {
			printf("return %d at[%ld]! should have returned zero\n",
			       ret, i);
			abort();
		}
	}

	return 0;
}

int main(void)
{
	return test_harness(testcase, "memcmp");
}
--
Without this patch (but with the first patch "powerpc/64: Align bytes
before fall back to .Lshort in powerpc64 memcmp()." in the series):
    4.726728762 seconds time elapsed   ( +-  3.54% )
With the VMX patch:
    4.234335473 seconds time elapsed   ( +-  2.63% )
There is a ~10% improvement.

Testing with an unaligned and different-offset version (s1 and s2 shifted
by random offsets within 16 bytes) achieves an improvement higher than 10%.

Signed-off-by: Simon Guo 
---
 arch/powerpc/include/asm/asm-prototypes.h |   4 +-
 arch/powerpc/lib/copypage_power7.S        |   4 +-
 arch/powerpc/lib/memcmp_64.S              | 233 +-
 arch/powerpc/lib/memcpy_power7.S          |   6 +-
 arch/powerpc/lib/vmx-helper.c             |   4 +-
 5 files changed, 241 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index d9713ad..31fdcee 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -49,8 +49,8 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
 /* VMX copying */
 int enter_vmx_usercopy(void);
 int exit_vmx_usercopy(void);
-int enter_vmx_copy(void);
-void * exit_vmx_copy(void *dest);
+int enter_vmx_ops(void);
+void *exit_vmx_ops(void *dest);
 
 /* Traps */
 long machine_check_early(struct pt_regs *regs);
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 8fa73b7..e38f956 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -57,7 +57,7 @@ _GLOBAL(copypage_power7)
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r0,16(r1)
stdu    r1,-STACKFRAMESIZE(r1)
-   bl  enter_vmx_copy
+   bl  enter_vmx_ops
cmpwi   r3,0
ld  r0,STACKFRAMESIZE+16(r1)
ld  r3,STK_REG(R31)(r1)
@@ -100,7 +100,7 @@ _GLOBAL(copypage_power7)
addi    r3,r3,128
bdnz    1b
 
-   b   exit_vmx_copy   /* tail call optimise */
+   b   exit_vmx_ops    /* tail call optimise */
 
 #else
li  r0,(PAGE_SIZE/128)
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index f20e883..4ba7bb6 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -27,12 +27,73 @@
 #define LH lhbrx
 #define LW lwbrx
 #define LD ldbrx
+#define LVS lvsr
+#define VPERM(_VRT,_VRA,_VRB,_VRC) \
+   vperm _VRT,_VRB,_VRA,_VRC
 #else
 #define LH lhzx
 #define LW lwzx
 #define LD ldx
+#define LVS lvsl
+#define VPERM(_VRT,_VRA,_VRB,_VRC) \
+   vperm _VRT,_VRA,_VRB,_VRC
 #endif
 
+#define VMX_THRESH 4096
+#define ENTER_VMX_OPS  \
+   mflr    r0; \
+   std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
+   std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
+   std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
+   std r0,16(r1); \
+   stdu    r1,-STACKFRAMESIZE(r1); \
+   bl  enter_vmx_ops; \
+   cmpwi   cr1,r3,0; \
+   ld  r0,STACKFRAMESIZE+16(r1); \
+   ld  r3,STK_REG(R31)(r1); \
+   ld  r4,STK_REG(R30)(r1); \
+   ld  r5,STK_REG(R29)(r1); \
+   addir1,r1,STACKFRAMESIZE; \
+   mtlr    r0
+
+#define EXIT_VMX_OPS \
+   mflr    r0; \
+   std r3,-STACKFRAMESIZE+STK_REG(R31)(r1); \
+   std r4,-STACKFRAMESIZE+STK_REG(R30)(r1); \
+   std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
+   std r0,16(r1); \
+   stdu