# HG changeset patch # User Jayashree # Date 1516018946 -19800 # Mon Jan 15 17:52:26 2018 +0530 # Node ID a2b347ed81f90ac82f59d891deba7fa876df7f62 # Parent 1107c2def5f9dbee9947a2c9c41f50961fa31bc6 x86: AVX512 intra_pred_ang32 modes 4 and 32, high bit depth Mode | AVX2 performance | AVX512 performance --------------------------------------------------- 4 | 9.1x | 14.6x 32 | 11.35x | 20.85x
diff -r 1107c2def5f9 -r a2b347ed81f9 source/common/x86/asm-primitives.cpp --- a/source/common/x86/asm-primitives.cpp Mon Jan 15 12:22:40 2018 +0530 +++ b/source/common/x86/asm-primitives.cpp Mon Jan 15 17:52:26 2018 +0530 @@ -3107,6 +3107,9 @@ p.cu[BLOCK_32x32].intra_pred[27] = PFX(intra_pred_ang32_27_avx512); p.cu[BLOCK_32x32].intra_pred[5] = PFX(intra_pred_ang32_5_avx512); p.cu[BLOCK_32x32].intra_pred[31] = PFX(intra_pred_ang32_31_avx512); + p.cu[BLOCK_32x32].intra_pred[32] = PFX(intra_pred_ang32_32_avx512); + p.cu[BLOCK_32x32].intra_pred[4] = PFX(intra_pred_ang32_4_avx512); + p.cu[BLOCK_16x16].intra_pred[9] = PFX(intra_pred_ang16_9_avx512); p.cu[BLOCK_16x16].intra_pred[11] = PFX(intra_pred_ang16_11_avx512); p.cu[BLOCK_16x16].intra_pred[25] = PFX(intra_pred_ang16_25_avx512); diff -r 1107c2def5f9 -r a2b347ed81f9 source/common/x86/intrapred16.asm --- a/source/common/x86/intrapred16.asm Mon Jan 15 12:22:40 2018 +0530 +++ b/source/common/x86/intrapred16.asm Mon Jan 15 17:52:26 2018 +0530 @@ -19303,9 +19303,363 @@ lea r3, [ang_table_avx2 + 16 * 32] add r1d, r1d lea r4, [r1 * 3] - call ang16_mode_5_31 RET +;; angle 16, modes 4 and 32 +cglobal ang16_mode_4_32 + test r6d, r6d + + vbroadcasti32x8 m0, [r2 + 2] ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1] + vbroadcasti32x8 m1, [r2 + 4] ; [17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2] + + punpcklwd m3, m0, m1 ; [13 12 12 11 11 10 10 9 5 4 4 3 3 2 2 1] + punpckhwd m0, m1 ; [17 16 16 15 15 14 14 13 9 8 8 7 7 6 6 5] + + vbroadcasti32x8 m1, [r2 + 18] ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9] + vbroadcasti32x8 m4, [r2 + 20] ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10] + punpcklwd m2, m1, m4 ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10 9] + punpckhwd m1, m4 ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13] + + pmaddwd m4, m3, [r3 + 3 * 32] ; [21] + paddd m4, m15 + psrld m4, 5 + pmaddwd m5, m0, [r3 + 3 * 32] + paddd m5, m15 + psrld m5, 5 + packusdw m4, m5 + + palignr m6, m0, m3, 4 ; [14 13 13 12 12 11 11 10 6 5 5 4 4 
3 3 2] + palignr m7, m2, m0, 4 ; [18 17 17 16 16 15 15 14 10 9 9 8 8 7 7 6] + movu ym16,[r3 - 8 * 32] ; [10] + vinserti32x8 m16, [r3 + 13 * 32] ,1 ; [31] + pmaddwd m5, m6, m16 + paddd m5, m15 + psrld m5, 5 + pmaddwd m8, m7,m16 + paddd m8, m15 + psrld m8, 5 + packusdw m5, m8 + vextracti32x8 ym6, m5, 1 + + + palignr m7, m0, m3, 8 ; [15 14 14 13 13 12 12 11 7 6 6 5 5 4 4 3] + pmaddwd m7, [r3 + 2 * 32] ; [20] + paddd m7, m15 + psrld m7, 5 + palignr m8, m2, m0, 8 ; [19 18 18 17 17 16 16 15 11 10 10 9 9 8 8 7] + pmaddwd m8, [r3 + 2 * 32] + paddd m8, m15 + psrld m8, 5 + packusdw m7, m8 + + palignr m9, m0, m3, 12 + palignr m3, m2, m0, 12 + movu ym16,[r3 - 9 * 32] ; [9] + vinserti32x8 m16, [r3 + 12 * 32] ,1 ; [30] + pmaddwd m8, m9, m16 + paddd m8, m15 + psrld m8, 5 + pmaddwd m10, m3,m16 + paddd m10,m15 + psrld m10, 5 + packusdw m8, m10 + vextracti32x8 ym9, m8, 1 + + + pmaddwd m10, m0, [r3 + 1 * 32] ; [19] + paddd m10,m15 + psrld m10, 5 + pmaddwd m3, m2, [r3 + 1 * 32] + paddd m3, m15 + psrld m3, 5 + packusdw m10, m3 + + palignr m11, m2, m0, 4 + pmaddwd m11, [r3 - 10 * 32] ; [8] + paddd m11, m15 + psrld m11, 5 + palignr m3, m1, m2, 4 + pmaddwd m3, [r3 - 10 * 32] + paddd m3, m15 + psrld m3, 5 + packusdw m11, m3 + + TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 3, 0 + + palignr m4, m2, m0, 4 + pmaddwd m4, [r3 + 11 * 32] ; [29] + paddd m4, m15 + psrld m4, 5 + palignr m5, m1, m2, 4 + pmaddwd m5, [r3 + 11 * 32] + paddd m5, m15 + psrld m5, 5 + packusdw m4, m5 + + palignr m5, m2, m0, 8 + pmaddwd m5, [r3] ; [18] + paddd m5, m15 + psrld m5, 5 + palignr m6, m1, m2, 8 + pmaddwd m6, [r3] + paddd m6, m15 + psrld m6, 5 + packusdw m5, m6 + + palignr m7, m2, m0, 12 + palignr m8, m1, m2, 12 + movu ym16,[r3 - 11 * 32] ; [7] + vinserti32x8 m16, [r3 + 10 * 32],1 ; [28] + pmaddwd m6, m7, m16 + paddd m6, m15 + psrld m6, 5 + palignr m8, m1, m2, 12 + pmaddwd m3, m8, m16 + paddd m3,m15 + psrld m3, 5 + packusdw m6, m3 + vextracti32x8 ym7, m6, 1 + + movu m0, [r2 + 34] ; [32 31 30 29 28 27 26 25 24 
23 22 21 20 19 18 17] + pmaddwd m8, m2, [r3 - 1 * 32] ; [17] + paddd m8, m15 + psrld m8, 5 + pmaddwd m9, m1, [r3 - 1 * 32] + paddd m9, m15 + psrld m9, 5 + packusdw m8, m9 + + palignr m3, m0, m0, 2 ; [ x 32 31 30 29 28 27 26 x 24 23 22 21 20 19 18] + punpcklwd m0, m3 ; [29 29 28 28 27 27 26 22 21 20 20 19 19 18 18 17] + + palignr m10, m1, m2, 4 + pmaddwd m9, m10, [r3 - 12 * 32] ; [6] + paddd m9, m15 + psrld m9, 5 + palignr m11, m0, m1, 4 + pmaddwd m3, m11, [r3 - 12 * 32] + paddd m3, m15 + psrld m3, 5 + packusdw m9, m3 + + pmaddwd m10, [r3 + 9 * 32] ; [27] + paddd m10,m15 + psrld m10, 5 + pmaddwd m11, [r3 + 9 * 32] + paddd m11, m15 + psrld m11, 5 + packusdw m10, m11 + + palignr m3, m1, m2, 8 + pmaddwd m3, [r3 - 2 * 32] ; [16] + paddd m3, m15 + psrld m3, 5 + palignr m0, m1, 8 + pmaddwd m0, [r3 - 2 * 32] + paddd m0,m15 + psrld m0, 5 + packusdw m3, m0 + TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 3, 0, 1, 16 + ret +;; angle 32, modes 4 and 32 +cglobal ang32_mode_4_32 + test r6d, r6d + + vbroadcasti32x8 m0, [r2 + 2] ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1] + vbroadcasti32x8 m1, [r2 + 4] ; [17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2] + + punpcklwd m3, m0, m1 ; [13 12 12 11 11 10 10 9 5 4 4 3 3 2 2 1] + punpckhwd m0, m1 ; [17 16 16 15 15 14 14 13 9 8 8 7 7 6 6 5] + + vbroadcasti32x8 m1, [r2 + 18] ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9] + vbroadcasti32x8 m4, [r2 + 20] ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10] + punpcklwd m2, m1, m4 ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10 9] + punpckhwd m1, m4 ; [25 24 24 23 23 22 22 21 17 16 16 15 15 14 14 13] + + movu ym16, [r3 - 13 * 32] ; [5] + vinserti32x8 m16, [r3 + 8 * 32],1 ; [26] + pmaddwd m4, m3, m16 + paddd m4, m15 + psrld m4, 5 + pmaddwd m5, m0,m16 + paddd m5, m15 + psrld m5, 5 + packusdw m4, m5 + vextracti32x8 ym5, m4, 1 + + palignr m6, m0, m3, 4 ; [14 13 13 12 12 11 11 10 6 5 5 4 4 3 3 2] + pmaddwd m6, [r3 - 3 * 32] ; [15] + paddd m6, m15 + psrld m6, 5 + palignr m7, m2, m0, 4 ; [18 17 17 16 16 15 15 
14 10 9 9 8 8 7 7 6] + pmaddwd m7, [r3 - 3 * 32] + paddd m7, m15 + psrld m7, 5 + packusdw m6, m7 + + palignr m8, m0, m3, 8 ; [15 14 14 13 13 12 12 11 7 6 6 5 5 4 4 3] + palignr m9, m2, m0, 8 ; [19 18 18 17 17 16 16 15 11 10 10 9 9 8 8 7] + movu ym16, [r3 - 14 * 32] ; [4] + vinserti32x8 m16, [r3 + 7 * 32] ,1 ; [25] + pmaddwd m7, m8, m16 + paddd m7, m15 + psrld m7, 5 + pmaddwd m10, m9, m16 + paddd m10, m15 + psrld m10, 5 + packusdw m7, m10 + vextracti32x8 ym8, m7, 1 + + palignr m9, m0, m3, 12 + pmaddwd m9, [r3 - 4 * 32] ; [14] + paddd m9, m15 + psrld m9, 5 + palignr m3, m2, m0, 12 + pmaddwd m3, [r3 - 4 * 32] + paddd m3,m15 + psrld m3, 5 + packusdw m9, m3 + + movu ym16, [r3 - 15 * 32] ; [3] + vinserti32x8 m16, [r3 + 6 * 32] ,1 ; [24] + pmaddwd m10, m0, m16 + paddd m10, m15 + psrld m10, 5 + pmaddwd m3, m2, m16 + paddd m3,m15 + psrld m3, 5 + packusdw m10, m3 + vextracti32x8 ym11, m10, 1 + + TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 3, 0 + + palignr m4, m2, m0, 4 + pmaddwd m4, [r3 - 5* 32] ; [13] + paddd m4, m15 + psrld m4, 5 + palignr m5, m1, m2, 4 + pmaddwd m5, [r3 - 5 * 32] + paddd m5, m15 + psrld m5, 5 + packusdw m4, m5 + + palignr m6, m2, m0, 8 + palignr m7, m1, m2, 8 + movu ym16, [r3 - 16 * 32] ; [2] + vinserti32x8 m16, [r3 + 5 * 32] ,1 ; [23] + pmaddwd m5, m6, m16 + paddd m5, m15 + psrld m5, 5 + palignr m7, m1, m2, 8 + pmaddwd m8, m7,m16 + paddd m8, m15 + psrld m8, 5 + packusdw m5, m8 + vextracti32x8 ym6, m5, 1 + + + palignr m7, m2, m0, 12 + pmaddwd m7, [r3 - 6 * 32] ; [12] + paddd m7, m15 + psrld m7, 5 + palignr m8, m1, m2, 12 + pmaddwd m8, [r3 - 6 * 32] + paddd m8, m15 + psrld m8, 5 + packusdw m7, m8 + + movu m0, [r2 + 34] ; [32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17] + pmaddwd m8, m2, [r3 - 17 * 32] ; [1] + paddd m8, m15 + psrld m8, 5 + pmaddwd m9, m1, [r3 - 17 * 32] + paddd m9, m15 + psrld m9, 5 + packusdw m8, m9 + + palignr m3, m0, m0, 2 ; [ x 32 31 30 29 28 27 26 x 24 23 22 21 20 19 18] + punpcklwd m0, m3 ; [29 29 28 28 27 27 26 22 21 20 20 19 
19 18 18 17] + + pmaddwd m9, m2, [r3 + 4 * 32] ; [22] + paddd m9, m15 + psrld m9, 5 + pmaddwd m3, m1, [r3 + 4 * 32] + paddd m3, m15 + psrld m3, 5 + packusdw m9, m3 + + palignr m10, m1, m2, 4 + pmaddwd m10, [r3 - 7 * 32] ; [11] + paddd m10, m15 + psrld m10, 5 + palignr m11, m0, m1, 4 + pmaddwd m11, [r3 - 7 * 32] + paddd m11, m15 + psrld m11, 5 + packusdw m10, m11 + + palignr m3, m1, m2, 8 + pmaddwd m3, [r3 - 18 * 32] ; [0] + paddd m3, m15 + psrld m3, 5 + palignr m0, m1, 8 + pmaddwd m0, [r3 - 18 * 32] + paddd m0, m15 + psrld m0, 5 + packusdw m3, m0 + TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 3, 0, 1, 16 + ret + +cglobal intra_pred_ang32_4, 3,8,13 + add r2, 128 + xor r6d, r6d + lea r3, [ang_table_avx2 + 18 * 32] + add r1d, r1d + lea r4, [r1 * 3] + lea r7, [r0 + 8 * r1] + vbroadcasti32x8 m15, [pd_16] + call ang16_mode_4_32 + + add r2, 22 + lea r0, [r0 + 32] + + call ang32_mode_4_32 + + add r2, 10 + lea r0, [r7 + 8 * r1] + + call ang16_mode_4_32 + + add r2, 22 + lea r0, [r0 + 32] + + call ang32_mode_4_32 + RET + +cglobal intra_pred_ang32_32, 3,7,13 + xor r6d, r6d + inc r6d + lea r3, [ang_table_avx2 + 18 * 32] + add r1d, r1d + lea r4, [r1 * 3] + lea r5, [r0 + 32] + vbroadcasti32x8 m15, [pd_16] + call ang16_mode_4_32 + + add r2, 22 + + call ang32_mode_4_32 + + add r2, 10 + mov r0, r5 + + call ang16_mode_4_32 + + add r2, 22 + + call ang32_mode_4_32 + RET + ;------------------------------------------------------------------------------------------------------- ; avx512 code for intra_pred_ang32 mode 2 to 34 end ;------------------------------------------------------------------------------------------------------- _______________________________________________ x265-devel mailing list x265-devel@videolan.org https://mailman.videolan.org/listinfo/x265-devel