# HG changeset patch
# User Vignesh Vijayakumar
# Date 1500011514 -19800
#      Fri Jul 14 11:21:54 2017 +0530
# Node ID 3183189cf8a0f1b95c31ecc39dd07b220ec53cea
# Parent  77b61125a20591cb5bad2a15a30cb9114a1d8d30
x86: AVX512 pixel_sad_x4_W32

Size  | AVX2 performance | AVX512 performance
---------------------------------------------
32x8  |      46.21x      |       57.48x
32x16 |      50.19x      |       72.69x
32x24 |      53.83x      |       77.17x
32x32 |      56.39x      |       82.85x
32x64 |      58.53x      |       88.15x
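For reviewers unfamiliar with the primitive: sad_x4 compares one encoder block (fenc, kept at the fixed FENC_STRIDE of 64) against four motion-candidate reference blocks in a single call and returns all four SADs at once, which is why the kernels below accumulate into four registers m0..m3. A minimal scalar sketch of the W32 case follows; the helper name and the `rows` parameter are illustrative (modeled on x265's pixelcmp_x4_t signature, not copied from the tree). A note on the ZMM row-packing trick the asm uses appears after the patch.

```cpp
// Illustrative scalar reference for sad_x4 on 32-wide blocks (hypothetical
// helper; x265's real C reference lives in source/common/pixel.cpp).
#include <cstdint>
#include <cstdlib>

#define FENC_STRIDE 64  // x265 stores the fenc block at this fixed stride

static void sad_x4_32xN_ref(const uint8_t* fenc,
                            const uint8_t* pix1, const uint8_t* pix2,
                            const uint8_t* pix3, const uint8_t* pix4,
                            intptr_t frefstride, int32_t res[4], int rows)
{
    res[0] = res[1] = res[2] = res[3] = 0;
    for (int y = 0; y < rows; y++)
    {
        for (int x = 0; x < 32; x++)
        {
            res[0] += std::abs(fenc[x] - pix1[x]);  // accumulator m0 in the asm
            res[1] += std::abs(fenc[x] - pix2[x]);  // m1
            res[2] += std::abs(fenc[x] - pix3[x]);  // m2
            res[3] += std::abs(fenc[x] - pix4[x]);  // m3
        }
        fenc += FENC_STRIDE;  // r0 advances by FENC_STRIDE
        pix1 += frefstride;   // r1..r4 advance by the reference stride (r5)
        pix2 += frefstride;
        pix3 += frefstride;
        pix4 += frefstride;
    }
}
```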
diff -r 77b61125a205 -r 3183189cf8a0 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Jul 12 15:42:46 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp	Fri Jul 14 11:21:54 2017 +0530
@@ -3736,6 +3736,11 @@
         p.pu[LUMA_64x48].sad = PFX(pixel_sad_64x48_avx512);
         p.pu[LUMA_64x64].sad = PFX(pixel_sad_64x64_avx512);
 
+        p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx512);
+        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx512);
+        p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx512);
+        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx512);
+        p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx512);
         p.pu[LUMA_64x16].sad_x4 = PFX(pixel_sad_x4_64x16_avx512);
         p.pu[LUMA_64x32].sad_x4 = PFX(pixel_sad_x4_64x32_avx512);
         p.pu[LUMA_64x48].sad_x4 = PFX(pixel_sad_x4_64x48_avx512);
diff -r 77b61125a205 -r 3183189cf8a0 source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Wed Jul 12 15:42:46 2017 +0530
+++ b/source/common/x86/sad-a.asm	Fri Jul 14 11:21:54 2017 +0530
@@ -4260,6 +4260,94 @@
     paddd           m3, m4
 %endmacro
 
+%macro SAD_X4_32x8_AVX512 0
+    movu            ym4, [r0]
+    vinserti32x8    m4, [r0 + FENC_STRIDE], 1
+    movu            ym5, [r1]
+    vinserti32x8    m5, [r1 + r5], 1
+    movu            ym6, [r2]
+    vinserti32x8    m6, [r2 + r5], 1
+    movu            ym7, [r3]
+    vinserti32x8    m7, [r3 + r5], 1
+    movu            ym8, [r4]
+    vinserti32x8    m8, [r4 + r5], 1
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            ym4, [r0 + FENC_STRIDE * 2]
+    vinserti32x8    m4, [r0 + FENC_STRIDE * 3], 1
+    movu            ym5, [r1 + r5 * 2]
+    vinserti32x8    m5, [r1 + r7], 1
+    movu            ym6, [r2 + r5 * 2]
+    vinserti32x8    m6, [r2 + r7], 1
+    movu            ym7, [r3 + r5 * 2]
+    vinserti32x8    m7, [r3 + r7], 1
+    movu            ym8, [r4 + r5 * 2]
+    vinserti32x8    m8, [r4 + r7], 1
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    movu            ym4, [r0]
+    vinserti32x8    m4, [r0 + FENC_STRIDE], 1
+    movu            ym5, [r1]
+    vinserti32x8    m5, [r1 + r5], 1
+    movu            ym6, [r2]
+    vinserti32x8    m6, [r2 + r5], 1
+    movu            ym7, [r3]
+    vinserti32x8    m7, [r3 + r5], 1
+    movu            ym8, [r4]
+    vinserti32x8    m8, [r4 + r5], 1
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            ym4, [r0 + FENC_STRIDE * 2]
+    vinserti32x8    m4, [r0 + FENC_STRIDE * 3], 1
+    movu            ym5, [r1 + r5 * 2]
+    vinserti32x8    m5, [r1 + r7], 1
+    movu            ym6, [r2 + r5 * 2]
+    vinserti32x8    m6, [r2 + r7], 1
+    movu            ym7, [r3 + r5 * 2]
+    vinserti32x8    m7, [r3 + r7], 1
+    movu            ym8, [r4 + r5 * 2]
+    vinserti32x8    m8, [r4 + r7], 1
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+%endmacro
+
 %macro PIXEL_SAD_X4_END_AVX512 0
     vextracti32x8   ym4, m0, 1
     vextracti32x8   ym5, m1, 1
@@ -4434,6 +4522,144 @@
     SAD_X4_64x8_AVX512
     PIXEL_SAD_X4_END_AVX512
     RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x8, 7,8,10
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    pxor            m3, m3
+    lea             r7, [r5 * 3]
+
+    SAD_X4_32x8_AVX512
+    PIXEL_SAD_X4_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x16, 7,8,10
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    pxor            m3, m3
+    lea             r7, [r5 * 3]
+
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    PIXEL_SAD_X4_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x24, 7,8,10
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    pxor            m3, m3
+    lea             r7, [r5 * 3]
+
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    PIXEL_SAD_X4_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x32, 7,8,10
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    pxor            m3, m3
+    lea             r7, [r5 * 3]
+
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    PIXEL_SAD_X4_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x4_32x64, 7,8,10
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    pxor            m3, m3
+    lea             r7, [r5 * 3]
+
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+    SAD_X4_32x8_AVX512
+    PIXEL_SAD_X4_END_AVX512
+    RET
 ;------------------------------------------------------------
 ;sad_x4 avx512 code end
 ;------------------------------------------------------------
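Note on the kernel structure: a 32-byte row fills only half a ZMM register, so SAD_X4_32x8_AVX512 packs two consecutive rows into one register with vinserti32x8 before each psadbw, letting every psadbw/paddd pair cover two rows; r7 = r5 * 3 precomputes the three-row reference offset. A rough intrinsics rendering of that load-and-accumulate pattern is sketched below (assumes AVX512F/BW/DQ and <immintrin.h>; the helper names are illustrative, the patch itself is hand-written asm):

```cpp
#include <immintrin.h>
#include <cstdint>

// Pack rows y and y+1 of a 32-byte-wide block into one 512-bit register,
// mirroring "movu ym4, [p]" + "vinserti32x8 m4, [p + stride], 1".
static inline __m512i load_two_rows(const uint8_t* p, intptr_t stride)
{
    __m256i lo = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(p));
    __m256i hi = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(p + stride));
    return _mm512_inserti32x8(_mm512_castsi256_si512(lo), hi, 1);
}

// One compare step: psadbw sums |a - b| over each 8-byte group (eight
// partial sums per ZMM register, upper bits zero), and paddd accumulates
// them, as the asm does into m0..m3.  The horizontal reduction of those
// partial sums into the four final SADs is PIXEL_SAD_X4_END_AVX512's job.
static inline __m512i sad_step(__m512i acc, __m512i fenc, __m512i ref)
{
    return _mm512_add_epi32(acc, _mm512_sad_epu8(fenc, ref));
}
```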