# HG changeset patch
# User Gopi Satykrishna Akisetty <gopi.satykris...@multicorewareinc.com>
# Date 1501837071 -19800
#      Fri Aug 04 14:27:51 2017 +0530
# Node ID c5b5b7cb9bbef4365692bfaf05a2a83796d5f1b0
# Parent  c3a2abd8e46f8db3ba7c276f39fe41ed002ce295
[x265-avx512] x86: AVX512 pixel_sad_x4_32xN for high bit depth

Size    | AVX2 performance | AVX512 performance
------------------------------------------------
32x8    |     16.73x       |      25.16x
32x16   |     18.36x       |      29.04x
32x24   |     19.52x       |      31.03x
32x32   |     18.78x       |      31.95x
32x64   |     19.01x       |      34.20x
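
For context, the x4 primitive computes four SADs in one call: a single 32xN block from the encoder's fixed-stride fenc buffer is compared against four candidate reference blocks that share one stride, and the four sums land in res[0..3]. The figures above are presumably the usual x265 testbench speedups over the C reference primitive. A minimal scalar sketch of the behaviour the new kernels implement (the helper name and the explicit N parameter are illustrative only, assuming 16-bit pixels at high bit depth and x265's FENC_STRIDE of 64 pixels):

#include <stdint.h>
#include <stdlib.h>

#define FENC_STRIDE 64   /* assumption: x265 fenc stride, in pixels */

static void sad_x4_32xN_ref(const uint16_t *pix1, const uint16_t *pix2,
                            const uint16_t *pix3, const uint16_t *pix4,
                            const uint16_t *pix5, intptr_t frefstride,
                            int32_t *res, int N)
{
    const uint16_t *ref[4] = { pix2, pix3, pix4, pix5 };
    res[0] = res[1] = res[2] = res[3] = 0;
    for (int y = 0; y < N; y++)
    {
        for (int i = 0; i < 4; i++)
            for (int x = 0; x < 32; x++)
                res[i] += abs((int)pix1[x] - (int)ref[i][x]);
        pix1 += FENC_STRIDE;                 /* encoder block rows */
        for (int i = 0; i < 4; i++)
            ref[i] += frefstride;            /* reference rows, stride in pixels */
    }
}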

diff -r c3a2abd8e46f -r c5b5b7cb9bbe source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp      Fri Aug 04 14:27:51 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp      Fri Aug 04 14:27:51 2017 +0530
@@ -2325,6 +2325,10 @@
         p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx512);
         p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx512);
 
+        p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx512);
+        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx512);
+        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx512);
+
     }
 }
 #else // if HIGH_BIT_DEPTH
diff -r c3a2abd8e46f -r c5b5b7cb9bbe source/common/x86/sad16-a.asm
--- a/source/common/x86/sad16-a.asm     Fri Aug 04 14:27:51 2017 +0530
+++ b/source/common/x86/sad16-a.asm     Fri Aug 04 14:27:51 2017 +0530
@@ -3286,6 +3286,160 @@
 ; SAD x3/x4 avx512 code start
 ;============================
 
+%macro PROCESS_SAD_X4_32x4_AVX512 0
+    movu    m8, [r0]
+    movu    m4, [r1]
+    movu    m5, [r2]
+    movu    m6, [r3]
+    movu    m7, [r4]
+
+
+    psubw   m4, m8
+    psubw   m5, m8
+    psubw   m6, m8
+    psubw   m7, m8
+    pabsw   m4, m4
+    pabsw   m5, m5
+    pabsw   m6, m6
+    pabsw   m7, m7
+
+    pmaddwd m4, m9
+    paddd   m0, m4
+    pmaddwd m5, m9
+    paddd   m1, m5
+    pmaddwd m6, m9
+    paddd   m2, m6
+    pmaddwd m7, m9
+    paddd   m3, m7
+
+
+    movu    m8, [r0 + 2 * FENC_STRIDE]
+    movu    m4, [r1 + r5]
+    movu    m5, [r2 + r5]
+    movu    m6, [r3 + r5]
+    movu    m7, [r4 + r5]
+
+
+    psubw   m4, m8
+    psubw   m5, m8
+    psubw   m6, m8
+    psubw   m7, m8
+    pabsw   m4, m4
+    pabsw   m5, m5
+    pabsw   m6, m6
+    pabsw   m7, m7
+
+    pmaddwd m4, m9
+    paddd   m0, m4
+    pmaddwd m5, m9
+    paddd   m1, m5
+    pmaddwd m6, m9
+    paddd   m2, m6
+    pmaddwd m7, m9
+    paddd   m3, m7
+
+    movu    m8, [r0 + 4 * FENC_STRIDE]
+    movu    m4, [r1 + 2 * r5]
+    movu    m5, [r2 + 2 * r5]
+    movu    m6, [r3 + 2 * r5]
+    movu    m7, [r4 + 2 * r5]
+
+
+    psubw   m4, m8
+    psubw   m5, m8
+    psubw   m6, m8
+    psubw   m7, m8
+    pabsw   m4, m4
+    pabsw   m5, m5
+    pabsw   m6, m6
+    pabsw   m7, m7
+
+    pmaddwd m4, m9
+    paddd   m0, m4
+    pmaddwd m5, m9
+    paddd   m1, m5
+    pmaddwd m6, m9
+    paddd   m2, m6
+    pmaddwd m7, m9
+    paddd   m3, m7
+
+    movu    m8, [r0 + 6 * FENC_STRIDE]
+    movu    m4, [r1 + r7]
+    movu    m5, [r2 + r7]
+    movu    m6, [r3 + r7]
+    movu    m7, [r4 + r7]
+
+
+    psubw   m4, m8
+    psubw   m5, m8
+    psubw   m6, m8
+    psubw   m7, m8
+    pabsw   m4, m4
+    pabsw   m5, m5
+    pabsw   m6, m6
+    pabsw   m7, m7
+
+    pmaddwd m4, m9
+    paddd   m0, m4
+    pmaddwd m5, m9
+    paddd   m1, m5
+    pmaddwd m6, m9
+    paddd   m2, m6
+    pmaddwd m7, m9
+    paddd   m3, m7
+%endmacro
+
+
+%macro PROCESS_SAD_X4_END_AVX512 0
+    vextracti32x8  ym4, m0, 1
+    vextracti32x8  ym5, m1, 1
+    vextracti32x8  ym6, m2, 1
+    vextracti32x8  ym7, m3, 1
+
+    paddd          ym0, ym4
+    paddd          ym1, ym5
+    paddd          ym2, ym6
+    paddd          ym3, ym7
+
+    vextracti64x2  xm4, m0, 1
+    vextracti64x2  xm5, m1, 1
+    vextracti64x2  xm6, m2, 1
+    vextracti64x2  xm7, m3, 1
+
+    paddd          xm0, xm4
+    paddd          xm1, xm5
+    paddd          xm2, xm6
+    paddd          xm3, xm7
+
+    pshufd         xm4, xm0, 00001110b
+    pshufd         xm5, xm1, 00001110b
+    pshufd         xm6, xm2, 00001110b
+    pshufd         xm7, xm3, 00001110b
+
+    paddd          xm0, xm4
+    paddd          xm1, xm5
+    paddd          xm2, xm6
+    paddd          xm3, xm7
+
+    pshufd         xm4, xm0, 00000001b
+    pshufd         xm5, xm1, 00000001b
+    pshufd         xm6, xm2, 00000001b
+    pshufd         xm7, xm3, 00000001b
+
+    paddd          xm0, xm4
+    paddd          xm1, xm5
+    paddd          xm2, xm6
+    paddd          xm3, xm7
+
+    mov                  r0,  r6mp
+    movd           [r0 + 0],  xm0
+    movd           [r0 + 4],  xm1
+    movd           [r0 + 8],  xm2
+    movd           [r0 + 12], xm3
+%endmacro
+
+
+
 %macro PROCESS_SAD_X3_32x4_AVX512 0
     movu    m6, [r0]
     movu    m3, [r1]
@@ -3641,3 +3795,275 @@
     PROCESS_SAD_X3_END_AVX512
     RET
 
+
+;------------------------------------------------------------------------------------------------------------------------------------------------------------
+; void pixel_sad_x4_32x%1( const pixel* pix1, const pixel* pix2, const pixel* pix3, const pixel* pix4, const pixel* pix5, intptr_t frefstride, int32_t* res )
+;------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x8, 6,8,10
+    pxor    m0,  m0
+    pxor    m1,  m1
+    pxor    m2,  m2
+    pxor    m3,  m3
+
+    vbroadcasti32x8 m9, [pw_1]
+
+    add     r5d, r5d
+    lea     r7d, [r5 * 3]
+
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    PROCESS_SAD_X4_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x16, 6,8,10
+    pxor    m0,  m0
+    pxor    m1,  m1
+    pxor    m2,  m2
+    pxor    m3,  m3
+
+    vbroadcasti32x8 m9, [pw_1]
+
+    add     r5d, r5d
+    lea     r7d, [r5 * 3]
+
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    PROCESS_SAD_X4_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x24, 6,8,10
+    pxor    m0,  m0
+    pxor    m1,  m1
+    pxor    m2,  m2
+    pxor    m3,  m3
+
+    vbroadcasti32x8 m9, [pw_1]
+
+    add     r5d, r5d
+    lea     r7d, [r5 * 3]
+
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    PROCESS_SAD_X4_END_AVX512
+    RET
+
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x32, 6,8,10
+    pxor    m0,  m0
+    pxor    m1,  m1
+    pxor    m2,  m2
+    pxor    m3,  m3
+
+    vbroadcasti32x8 m9, [pw_1]
+
+    add     r5d, r5d
+    lea     r7d, [r5 * 3]
+
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    PROCESS_SAD_X4_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x64, 6,8,10
+    pxor    m0,  m0
+    pxor    m1,  m1
+    pxor    m2,  m2
+    pxor    m3,  m3
+
+    vbroadcasti32x8 m9, [pw_1]
+
+    add     r5d, r5d
+    lea     r7d, [r5 * 3]
+
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    add             r0, FENC_STRIDE * 8
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    PROCESS_SAD_X4_32x4_AVX512
+    PROCESS_SAD_X4_END_AVX512
+    RET
+
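
A note on the approach: each PROCESS_SAD_X4_32x4_AVX512 round loads one full 32-pixel (64-byte) row of the fenc block and of each of the four references, forms absolute word differences with psubw + pabsw, and then uses pmaddwd against pw_1 so adjacent 16-bit sums are widened into 32-bit lanes before being accumulated into m0..m3, one accumulator per reference; this keeps the running sums from overflowing 16 bits at high bit depth. PROCESS_SAD_X4_END_AVX512 then folds each 512-bit accumulator horizontally and stores the four int32 results. A hedged intrinsics sketch of those two steps (helper names are illustrative, not x265 code; _mm512_reduce_add_epi32 stands in for the explicit vextracti32x8/vextracti64x2/pshufd sequence; requires AVX-512F and AVX-512BW):

#include <immintrin.h>
#include <stdint.h>

/* One 32-pixel row for one reference pointer, mirroring the
 * psubw/pabsw/pmaddwd/paddd pattern of the macro above (sketch only). */
static inline __m512i sad_row32_acc(__m512i acc, const uint16_t *enc,
                                    const uint16_t *ref)
{
    __m512i ones = _mm512_set1_epi16(1);                       /* pw_1 */
    __m512i e = _mm512_loadu_si512((const void *)enc);
    __m512i r = _mm512_loadu_si512((const void *)ref);
    __m512i d = _mm512_abs_epi16(_mm512_sub_epi16(r, e));      /* |ref - enc| per word */
    return _mm512_add_epi32(acc, _mm512_madd_epi16(d, ones));  /* widen pairs, accumulate */
}

/* Final horizontal reduction of one accumulator to its int32 SAD. */
static inline int32_t sad_x4_finish(__m512i acc)
{
    return _mm512_reduce_add_epi32(acc);
}

With one accumulator per reference block, the four reduced values map directly onto res[0], res[1], res[2] and res[3], exactly as the END macro stores them through r6mp.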