right
At 2015-03-12 19:26:18, "Divya Manivannan" <[email protected]> wrote:
># HG changeset patch
># User Divya Manivannan <[email protected]>
># Date 1426159503 -19800
>#      Thu Mar 12 16:55:03 2015 +0530
># Node ID 45d498a1a59263fd758bf71c910e7d4289e771d3
># Parent  698ee043fd4f294b15c4e8d1bbf94ab44cd806eb
>asm: filter_vsp[16x16, 32x16], filter_vss[16x16, 32x16]: 3042c->1875c, 5844c->3724c, 2646c->1988c, 4655c->4040c
>
>diff -r 698ee043fd4f -r 45d498a1a592 source/common/x86/asm-primitives.cpp
>--- a/source/common/x86/asm-primitives.cpp	Thu Mar 12 14:02:18 2015 +0530
>+++ b/source/common/x86/asm-primitives.cpp	Thu Mar 12 16:55:03 2015 +0530
>@@ -1624,9 +1624,13 @@
>
>         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vsp = x265_interp_4tap_vert_sp_4x4_avx2;
>         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vsp = x265_interp_4tap_vert_sp_8x8_avx2;
>+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vsp = x265_interp_4tap_vert_sp_16x16_avx2;
>+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vsp = x265_interp_4tap_vert_sp_32x16_avx2;
>
>         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vss = x265_interp_4tap_vert_ss_4x4_avx2;
>         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].filter_vss = x265_interp_4tap_vert_ss_8x8_avx2;
>+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vss = x265_interp_4tap_vert_ss_16x16_avx2;
>+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vss = x265_interp_4tap_vert_ss_32x16_avx2;
>     }
> #endif
> }
>diff -r 698ee043fd4f -r 45d498a1a592 source/common/x86/ipfilter8.asm
>--- a/source/common/x86/ipfilter8.asm	Thu Mar 12 14:02:18 2015 +0530
>+++ b/source/common/x86/ipfilter8.asm	Thu Mar 12 16:55:03 2015 +0530
>@@ -11902,6 +11902,324 @@
> FILTER_VER_CHROMA_S_AVX2_8x8 sp
> FILTER_VER_CHROMA_S_AVX2_8x8 ss
>
>+%macro PROCESS_CHROMA_S_AVX2_W8_16R 1
>+    movu            xm0, [r0]                       ; m0 = row 0
>+    movu            xm1, [r0 + r1]                  ; m1 = row 1
>+    punpckhwd       xm2, xm0, xm1
>+    punpcklwd       xm0, xm1
>+    vinserti128     m0, m0, xm2, 1
>+    pmaddwd         m0, [r5]
>+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
>+    punpckhwd       xm3, xm1, xm2
>+    punpcklwd       xm1, xm2
>+    vinserti128     m1, m1, xm3, 1
>+    pmaddwd         m1, [r5]
>+    movu            xm3, [r0 + r4]                  ; m3 = row 3
>+    punpckhwd       xm4, xm2, xm3
>+    punpcklwd       xm2, xm3
>+    vinserti128     m2, m2, xm4, 1
>+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
>+    paddd           m0, m4
>+    pmaddwd         m2, [r5]
>+    lea             r7, [r0 + r1 * 4]
>+    movu            xm4, [r7]                       ; m4 = row 4
>+    punpckhwd       xm5, xm3, xm4
>+    punpcklwd       xm3, xm4
>+    vinserti128     m3, m3, xm5, 1
>+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
>+    paddd           m1, m5
>+    pmaddwd         m3, [r5]
>+    movu            xm5, [r7 + r1]                  ; m5 = row 5
>+    punpckhwd       xm6, xm4, xm5
>+    punpcklwd       xm4, xm5
>+    vinserti128     m4, m4, xm6, 1
>+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
>+    paddd           m2, m6
>+    pmaddwd         m4, [r5]
>+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 6
>+    punpckhwd       xm7, xm5, xm6
>+    punpcklwd       xm5, xm6
>+    vinserti128     m5, m5, xm7, 1
>+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
>+    paddd           m3, m7
>+    pmaddwd         m5, [r5]
>+%ifidn %1,sp
>+    paddd           m0, m9
>+    paddd           m1, m9
>+    paddd           m2, m9
>+    paddd           m3, m9
>+    psrad           m0, 12
>+    psrad           m1, 12
>+    psrad           m2, 12
>+    psrad           m3, 12
>+%else
>+    psrad           m0, 6
>+    psrad           m1, 6
>+    psrad           m2, 6
>+    psrad           m3, 6
>+%endif
>+    packssdw        m0, m1
>+    packssdw        m2, m3
>+%ifidn %1,sp
>+    packuswb        m0, m2
>+    mova            m3, [interp8_hps_shuf]
>+    vpermd          m0, m3, m0
>+    vextracti128    xm2, m0, 1
>+    movq            [r2], xm0
>+    movhps          [r2 + r3], xm0
>+    movq            [r2 + r3 * 2], xm2
>+    movhps          [r2 + r6], xm2
>+%else
>+    vpermq          m0, m0, 11011000b
>+    vpermq          m2, m2, 11011000b
>+    vextracti128    xm1, m0, 1
>+    vextracti128    xm3, m2, 1
>+    movu            [r2], xm0
>+    movu            [r2 + r3], xm1
>+    movu            [r2 + r3 * 2], xm2
>+    movu            [r2 + r6], xm3
>+%endif
>+
>+    movu            xm7, [r7 + r4]                  ; m7 = row 7
>+    punpckhwd       xm8, xm6, xm7
>+    punpcklwd       xm6, xm7
>+    vinserti128     m6, m6, xm8, 1
>+    pmaddwd         m8, m6, [r5 + 1 * mmsize]
>+    paddd           m4, m8
>+    pmaddwd         m6, [r5]
>+    lea             r7, [r7 + r1 * 4]
>+    movu            xm8, [r7]                       ; m8 = row 8
>+    punpckhwd       xm0, xm7, xm8
>+    punpcklwd       xm7, xm8
>+    vinserti128     m7, m7, xm0, 1
>+    pmaddwd         m0, m7, [r5 + 1 * mmsize]
>+    paddd           m5, m0
>+    pmaddwd         m7, [r5]
>+    movu            xm0, [r7 + r1]                  ; m0 = row 9
>+    punpckhwd       xm1, xm8, xm0
>+    punpcklwd       xm8, xm0
>+    vinserti128     m8, m8, xm1, 1
>+    pmaddwd         m1, m8, [r5 + 1 * mmsize]
>+    paddd           m6, m1
>+    pmaddwd         m8, [r5]
>+    movu            xm1, [r7 + r1 * 2]              ; m1 = row 10
>+    punpckhwd       xm2, xm0, xm1
>+    punpcklwd       xm0, xm1
>+    vinserti128     m0, m0, xm2, 1
>+    pmaddwd         m2, m0, [r5 + 1 * mmsize]
>+    paddd           m7, m2
>+    pmaddwd         m0, [r5]
>+%ifidn %1,sp
>+    paddd           m4, m9
>+    paddd           m5, m9
>+    psrad           m4, 12
>+    psrad           m5, 12
>+    paddd           m6, m9
>+    paddd           m7, m9
>+    psrad           m6, 12
>+    psrad           m7, 12
>+%else
>+    psrad           m4, 6
>+    psrad           m5, 6
>+    psrad           m6, 6
>+    psrad           m7, 6
>+%endif
>+    packssdw        m4, m5
>+    packssdw        m6, m7
>+    lea             r8, [r2 + r3 * 4]
>+%ifidn %1,sp
>+    packuswb        m4, m6
>+    vpermd          m4, m3, m4
>+    vextracti128    xm6, m4, 1
>+    movq            [r8], xm4
>+    movhps          [r8 + r3], xm4
>+    movq            [r8 + r3 * 2], xm6
>+    movhps          [r8 + r6], xm6
>+%else
>+    vpermq          m4, m4, 11011000b
>+    vpermq          m6, m6, 11011000b
>+    vextracti128    xm5, m4, 1
>+    vextracti128    xm7, m6, 1
>+    movu            [r8], xm4
>+    movu            [r8 + r3], xm5
>+    movu            [r8 + r3 * 2], xm6
>+    movu            [r8 + r6], xm7
>+%endif
>+
>+    movu            xm2, [r7 + r4]                  ; m2 = row 11
>+    punpckhwd       xm4, xm1, xm2
>+    punpcklwd       xm1, xm2
>+    vinserti128     m1, m1, xm4, 1
>+    pmaddwd         m4, m1, [r5 + 1 * mmsize]
>+    paddd           m8, m4
>+    pmaddwd         m1, [r5]
>+    lea             r7, [r7 + r1 * 4]
>+    movu            xm4, [r7]                       ; m4 = row 12
>+    punpckhwd       xm5, xm2, xm4
>+    punpcklwd       xm2, xm4
>+    vinserti128     m2, m2, xm5, 1
>+    pmaddwd         m5, m2, [r5 + 1 * mmsize]
>+    paddd           m0, m5
>+    pmaddwd         m2, [r5]
>+    movu            xm5, [r7 + r1]                  ; m5 = row 13
>+    punpckhwd       xm6, xm4, xm5
>+    punpcklwd       xm4, xm5
>+    vinserti128     m4, m4, xm6, 1
>+    pmaddwd         m6, m4, [r5 + 1 * mmsize]
>+    paddd           m1, m6
>+    pmaddwd         m4, [r5]
>+    movu            xm6, [r7 + r1 * 2]              ; m6 = row 14
>+    punpckhwd       xm7, xm5, xm6
>+    punpcklwd       xm5, xm6
>+    vinserti128     m5, m5, xm7, 1
>+    pmaddwd         m7, m5, [r5 + 1 * mmsize]
>+    paddd           m2, m7
>+    pmaddwd         m5, [r5]
>+%ifidn %1,sp
>+    paddd           m8, m9
>+    paddd           m0, m9
>+    paddd           m1, m9
>+    paddd           m2, m9
>+    psrad           m8, 12
>+    psrad           m0, 12
>+    psrad           m1, 12
>+    psrad           m2, 12
>+%else
>+    psrad           m8, 6
>+    psrad           m0, 6
>+    psrad           m1, 6
>+    psrad           m2, 6
>+%endif
>+    packssdw        m8, m0
>+    packssdw        m1, m2
>+    lea             r8, [r8 + r3 * 4]
>+%ifidn %1,sp
>+    packuswb        m8, m1
>+    vpermd          m8, m3, m8
>+    vextracti128    xm1, m8, 1
>+    movq            [r8], xm8
>+    movhps          [r8 + r3], xm8
>+    movq            [r8 + r3 * 2], xm1
>+    movhps          [r8 + r6], xm1
>+%else
>+    vpermq          m8, m8, 11011000b
>+    vpermq          m1, m1, 11011000b
>+    vextracti128    xm0, m8, 1
>+    vextracti128    xm2, m1, 1
>+    movu            [r8], xm8
>+    movu            [r8 + r3], xm0
>+    movu            [r8 + r3 * 2], xm1
>+    movu            [r8 + r6], xm2
>+%endif
>+    lea             r8, [r8 + r3 * 4]
>+
>+    movu            xm7, [r7 + r4]                  ; m7 = row 15
>+    punpckhwd       xm2, xm6, xm7
>+    punpcklwd       xm6, xm7
>+    vinserti128     m6, m6, xm2, 1
>+    pmaddwd         m2, m6, [r5 + 1 * mmsize]
>+    paddd           m4, m2
>+    pmaddwd         m6, [r5]
>+    lea             r7, [r7 + r1 * 4]
>+    movu            xm2, [r7]                       ; m2 = row 16
>+    punpckhwd       xm1, xm7, xm2
>+    punpcklwd       xm7, xm2
>+    vinserti128     m7, m7, xm1, 1
>+    pmaddwd         m1, m7, [r5 + 1 * mmsize]
>+    paddd           m5, m1
>+    pmaddwd         m7, [r5]
>+    movu            xm1, [r7 + r1]                  ; m1 = row 17
>+    punpckhwd       xm0, xm2, xm1
>+    punpcklwd       xm2, xm1
>+    vinserti128     m2, m2, xm0, 1
>+    pmaddwd         m2, [r5 + 1 * mmsize]
>+    paddd           m6, m2
>+    movu            xm0, [r7 + r1 * 2]              ; m0 = row 18
>+    punpckhwd       xm2, xm1, xm0
>+    punpcklwd       xm1, xm0
>+    vinserti128     m1, m1, xm2, 1
>+    pmaddwd         m1, [r5 + 1 * mmsize]
>+    paddd           m7, m1
>+
>+%ifidn %1,sp
>+    paddd           m4, m9
>+    paddd           m5, m9
>+    paddd           m6, m9
>+    paddd           m7, m9
>+    psrad           m4, 12
>+    psrad           m5, 12
>+    psrad           m6, 12
>+    psrad           m7, 12
>+%else
>+    psrad           m4, 6
>+    psrad           m5, 6
>+    psrad           m6, 6
>+    psrad           m7, 6
>+%endif
>+    packssdw        m4, m5
>+    packssdw        m6, m7
>+%ifidn %1,sp
>+    packuswb        m4, m6
>+    vpermd          m4, m3, m4
>+    vextracti128    xm6, m4, 1
>+    movq            [r8], xm4
>+    movhps          [r8 + r3], xm4
>+    movq            [r8 + r3 * 2], xm6
>+    movhps          [r8 + r6], xm6
>+%else
>+    vpermq          m4, m4, 11011000b
>+    vpermq          m6, m6, 11011000b
>+    vextracti128    xm5, m4, 1
>+    vextracti128    xm7, m6, 1
>+    movu            [r8], xm4
>+    movu            [r8 + r3], xm5
>+    movu            [r8 + r3 * 2], xm6
>+    movu            [r8 + r6], xm7
>+%endif
>+%endmacro
>+
>+%macro FILTER_VER_CHROMA_S_AVX2_Nx16 2
>+INIT_YMM avx2
>+%if ARCH_X86_64 == 1
>+cglobal interp_4tap_vert_%1_%2x16, 4, 10, 10
>+    mov             r4d, r4m
>+    shl             r4d, 6
>+    add             r1d, r1d
>+
>+%ifdef PIC
>+    lea             r5, [pw_ChromaCoeffV]
>+    add             r5, r4
>+%else
>+    lea             r5, [pw_ChromaCoeffV + r4]
>+%endif
>+    lea             r4, [r1 * 3]
>+    sub             r0, r1
>+%ifidn %1,sp
>+    mova            m9, [pd_526336]
>+%else
>+    add             r3d, r3d
>+%endif
>+    lea             r6, [r3 * 3]
>+    mov             r9d, %2 / 8
>+.loopW:
>+    PROCESS_CHROMA_S_AVX2_W8_16R %1
>+%ifidn %1,sp
>+    add             r2, 8
>+%else
>+    add             r2, 16
>+%endif
>+    add             r0, 16
>+    dec             r9d
>+    jnz             .loopW
>+    RET
>+%endif
>+%endmacro
>+
>+FILTER_VER_CHROMA_S_AVX2_Nx16 sp, 16
>+FILTER_VER_CHROMA_S_AVX2_Nx16 sp, 32
>+FILTER_VER_CHROMA_S_AVX2_Nx16 ss, 16
>+FILTER_VER_CHROMA_S_AVX2_Nx16 ss, 32
>+
> ;---------------------------------------------------------------------------------------------------------------------
> ; void interp_4tap_vertical_ss_%1x%2(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
> ;---------------------------------------------------------------------------------------------------------------------
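
For reference while reviewing the arithmetic, here is a scalar sketch (not code from the patch; function and parameter names are illustrative) of what the two flavours compute, based on the constants visible in the macro: the ss path filters int16_t intermediates vertically with 4 taps over rows y-1..y+2 (hence the "sub r0, r1") and keeps 16-bit output after a 6-bit shift ("psrad 6"); the sp path adds pd_526336, which is (8192 << 6) + 2048, i.e. the intermediate-offset correction plus rounding, shifts by 12 ("psrad 12"), and saturates to 8-bit pixels ("packuswb"):

#include <cstdint>
#include <algorithm>

// "ss" model: int16_t in, int16_t out, shift by 6 only.
static void vertSSRef(const int16_t* src, intptr_t srcStride,
                      int16_t* dst, intptr_t dstStride,
                      const int16_t c[4], int width, int height)
{
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
        {
            int sum = src[x + (y - 1) * srcStride] * c[0]
                    + src[x + (y + 0) * srcStride] * c[1]
                    + src[x + (y + 1) * srcStride] * c[2]
                    + src[x + (y + 2) * srcStride] * c[3];
            dst[x + y * dstStride] = (int16_t)(sum >> 6);
        }
}

// "sp" model: int16_t in, uint8_t out; 526336 == (8192 << 6) + 2048
// cancels the intermediate offset and rounds before the 12-bit shift,
// and the clamp models the packuswb saturation.
static void vertSPRef(const int16_t* src, intptr_t srcStride,
                      uint8_t* dst, intptr_t dstStride,
                      const int16_t c[4], int width, int height)
{
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
        {
            int sum = src[x + (y - 1) * srcStride] * c[0]
                    + src[x + (y + 0) * srcStride] * c[1]
                    + src[x + (y + 1) * srcStride] * c[2]
                    + src[x + (y + 2) * srcStride] * c[3];
            int v = (sum + 526336) >> 12;
            dst[x + y * dstStride] = (uint8_t)std::min(std::max(v, 0), 255);
        }
}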
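The wrapper then only blocks over the width: PROCESS_CHROMA_S_AVX2_W8_16R covers an 8-column, 16-row strip, and FILTER_VER_CHROMA_S_AVX2_Nx16 runs it %2 / 8 times, stepping the source by 16 bytes (8 int16_t columns, "add r0, 16") and the destination by 8 bytes for sp output versus 16 for ss. A hypothetical scalar equivalent of the "ss, 32" instantiation, reusing vertSSRef from the sketch above:

// Four 8-wide strips make up a 32x16 block ("mov r9d, %2 / 8" with %2 = 32).
static void vertSS32x16Ref(const int16_t* src, intptr_t srcStride,
                           int16_t* dst, intptr_t dstStride,
                           const int16_t coeff[4])
{
    for (int strip = 0; strip < 32 / 8; strip++)
        vertSSRef(src + 8 * strip, srcStride, dst + 8 * strip, dstStride,
                  coeff, 8, 16);
}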
_______________________________________________
x265-devel mailing list
[email protected]
https://mailman.videolan.org/listinfo/x265-devel
