SSE3 sad_x3[ 8x16] 21.74x 458.93 9978.52 AVX2 sad_x3[ 8x16] 16.15x 619.32 10002.79 btw: please indent RET with the code; don't put it in the first column, which is reserved for labels
At 2015-03-09 17:40:38,[email protected] wrote: ># HG changeset patch ># User Sumalatha Polureddy<[email protected]> ># Date 1425894028 -19800 ># Node ID 7f8b41ba71cb518597202c5da7c16e2ad3e4d6f9 ># Parent b52c021d220ec4a88d798a794b57cbfc72678025 >asm: avx2 code for sad_x3[8x16] for 8bpp - 63x > >sad_x3[ 8x16] 63.75x 699.79 44612.27 > >diff -r b52c021d220e -r 7f8b41ba71cb source/common/x86/asm-primitives.cpp >--- a/source/common/x86/asm-primitives.cpp Mon Mar 09 13:50:37 2015 +0530 >+++ b/source/common/x86/asm-primitives.cpp Mon Mar 09 15:10:28 2015 +0530 >@@ -1443,6 +1443,7 @@ > p.pu[LUMA_8x8].satd = x265_pixel_satd_8x8_avx2; > > p.pu[LUMA_8x8].sad_x3 = x265_pixel_sad_x3_8x8_avx2; >+ p.pu[LUMA_8x16].sad_x3 = x265_pixel_sad_x3_8x16_avx2; > > p.pu[LUMA_16x8].sad_x4 = x265_pixel_sad_x4_16x8_avx2; > p.pu[LUMA_16x12].sad_x4 = x265_pixel_sad_x4_16x12_avx2; >diff -r b52c021d220e -r 7f8b41ba71cb source/common/x86/sad-a.asm >--- a/source/common/x86/sad-a.asm Mon Mar 09 13:50:37 2015 +0530 >+++ b/source/common/x86/sad-a.asm Mon Mar 09 15:10:28 2015 +0530 >@@ -3807,4 +3807,74 @@ > movd [r5 + 8], xm2 > > RET >+ >+INIT_YMM avx2 >+cglobal pixel_sad_x3_8x16, 6, 7, 7, pix1, pix2, pix3, pix4, frefstride, res >+ pxor xm0, xm0 >+ mova xm1, xm0 >+ mova xm2, xm0 >+ mov r6d, 4 >+ >+.loop >+ movq xm3, [r0] >+ movhps xm3, [r0 + FENC_STRIDE] >+ movq xm4, [r0 + 2 * FENC_STRIDE] >+ movhps xm4, [r0 + FENC_STRIDE + 2 * FENC_STRIDE] >+ vinserti128 m3, m3, xm4, 1 ; pix1 >+ >+ movq xm4, [r1] >+ movhps xm4, [r1 + r4] >+ lea r1, [r1 + 2 * r4] >+ movq xm5, [r1] >+ movhps xm5, [r1 + r4] >+ vinserti128 m4, m4, xm5, 1 ; pix2 >+ psadbw m6, m3, m4 >+ paddq m0, m6 ; res[0] >+ >+ movq xm4, [r2] >+ movhps xm4, [r2 + r4] >+ lea r2, [r2 + 2 * r4] >+ movq xm5, [r2] >+ movhps xm5, [r2 + r4] >+ vinserti128 m4, m4, xm5, 1 ; pix3 >+ psadbw m6, m3, m4 >+ paddq m1, m6 ; res[1] >+ >+ movq xm4, [r3] >+ movhps xm4, [r3 + r4] >+ lea r3, [r3 + 2 * r4] >+ movq xm5, [r3] >+ movhps xm5, [r3 + r4] >+ vinserti128 m4, m4, xm5, 
1 ; pix2 >+ psadbw m3, m4 >+ paddq m2, m3 ; res[2] >+ >+ lea r0, [r0 + 4 * FENC_STRIDE] >+ lea r1, [r1 + 2* r4] >+ lea r2, [r2 + 2 * r4] >+ lea r3, [r3 + 2 * r4] >+ >+ dec r6d >+ jnz .loop >+ >+ vextracti128 xm4, m0, 1 >+ paddd xm0, xm4 >+ pshufd xm4, xm0, 2 >+ paddd xm0,xm4 >+ movd [r5], xm0 >+ >+ vextracti128 xm4, m1, 1 >+ paddd xm1, xm4 >+ pshufd xm4, xm1, 2 >+ paddd xm1,xm4 >+ movd [r5 + 4], xm1 >+ >+ vextracti128 xm4, m2, 1 >+ paddd xm2, xm4 >+ pshufd xm4, xm2, 2 >+ paddd xm2,xm4 >+ movd [r5 + 8], xm2 >+ >+RET >+ > %endif >_______________________________________________ >x265-devel mailing list >[email protected] >https://mailman.videolan.org/listinfo/x265-devel
_______________________________________________ x265-devel mailing list [email protected] https://mailman.videolan.org/listinfo/x265-devel
