# HG changeset patch
# User Vignesh Vijayakumar<vign...@multicorewareinc.com>
# Date 1509529084 -19800
#      Wed Nov 01 15:08:04 2017 +0530
# Node ID 94a80faedde6b0510c58d80c812170c120d8d918
# Parent  c8804484d98e358e475303ac85e2e9fa13bc43d3
x86: AVX512 interp_4tap_vert_pp_64xN for high bit depth
i444
Size  |  AVX2 performance | AVX512 performance
----------------------------------------------
64x16 |      26.16x       |      76.80x
64x32 |      26.19x       |      77.98x
64x48 |      26.14x       |      78.28x
64x64 |      26.17x       |      78.43x

diff -r c8804484d98e -r 94a80faedde6 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp      Thu Apr 05 15:07:25 2018 -0700
+++ b/source/common/x86/asm-primitives.cpp      Wed Nov 01 15:08:04 2017 +0530
@@ -2635,6 +2635,10 @@
         p.pu[LUMA_64x64].luma_hpp = PFX(interp_8tap_horiz_pp_64x64_avx512);
         p.pu[LUMA_48x64].luma_hpp = PFX(interp_8tap_horiz_pp_48x64_avx512);
 
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vpp = PFX(interp_4tap_vert_pp_64x16_avx512);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vpp = PFX(interp_4tap_vert_pp_64x32_avx512);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vpp = PFX(interp_4tap_vert_pp_64x48_avx512);
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vpp = PFX(interp_4tap_vert_pp_64x64_avx512);
         p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vpp = PFX(interp_4tap_vert_pp_32x8_avx512);
         p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vpp = PFX(interp_4tap_vert_pp_32x16_avx512);
         p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vpp = PFX(interp_4tap_vert_pp_32x24_avx512);
diff -r c8804484d98e -r 94a80faedde6 source/common/x86/ipfilter16.asm
--- a/source/common/x86/ipfilter16.asm  Thu Apr 05 15:07:25 2018 -0700
+++ b/source/common/x86/ipfilter16.asm  Wed Nov 01 15:08:04 2017 +0530
@@ -5948,7 +5948,7 @@
     packssdw              m0,                 m1
     packssdw              m2,                 m3
     pxor                  m5,                 m5
-    CLIPW2                m0,                 m2,                m5,       m8
+    CLIPW2                m0,                 m2,                  m5,                m8
     movu                  [r2],               m0
     movu                  [r2 + r3],          m2
 %endmacro
@@ -5989,6 +5989,132 @@
 FILTER_VER_PP_CHROMA_32xN_AVX512 48
 FILTER_VER_PP_CHROMA_32xN_AVX512 64
 %endif
+
+%macro PROCESS_CHROMA_VERT_PP_64x2_AVX512 0
+    movu                 m1,                  [r0]
+    movu                 m3,                  [r0 + r1]
+    punpcklwd            m0,                  m1,                     m3
+    pmaddwd              m0,                  [r5]
+    punpckhwd            m1,                  m3
+    pmaddwd              m1,                  [r5]
+
+    movu                 m9,                  [r0 + mmsize]
+    movu                 m11,                 [r0 + r1 + mmsize]
+    punpcklwd            m8,                  m9,                     m11
+    pmaddwd              m8,                  [r5]
+    punpckhwd            m9,                  m11
+    pmaddwd              m9,                  [r5]
+
+    movu                 m4,                  [r0 + 2 * r1]
+    punpcklwd            m2,                  m3,                     m4
+    pmaddwd              m2,                  [r5]
+    punpckhwd            m3,                  m4
+    pmaddwd              m3,                  [r5]
+
+    movu                 m12,                 [r0 + 2 * r1 + mmsize]
+    punpcklwd            m10,                 m11,                    m12
+    pmaddwd              m10,                 [r5]
+    punpckhwd            m11,                 m12
+    pmaddwd              m11,                 [r5]
+
+    lea                  r0,                  [r0 + 2 * r1]
+    movu                 m5,                  [r0 + r1]
+    punpcklwd            m6,                  m4,                     m5
+    pmaddwd              m6,                  [r5 + 1 * mmsize]
+    paddd                m0,                  m6
+    punpckhwd            m4,                  m5
+    pmaddwd              m4,                  [r5 + 1 * mmsize]
+    paddd                m1,                  m4
+
+    movu                 m13,                 [r0 + r1 + mmsize]
+    punpcklwd            m14,                 m12,                    m13
+    pmaddwd              m14,                 [r5 + 1 * mmsize]
+    paddd                m8,                  m14
+    punpckhwd            m12,                 m13
+    pmaddwd              m12,                 [r5 + 1 * mmsize]
+    paddd                m9,                  m12
+
+    movu                 m4,                  [r0 + 2 * r1]
+    punpcklwd            m6,                  m5,                     m4
+    pmaddwd              m6,                  [r5 + 1 * mmsize]
+    paddd                m2,                  m6
+    punpckhwd            m5,                  m4
+    pmaddwd              m5,                  [r5 + 1 * mmsize]
+    paddd                m3,                  m5
+
+    movu                 m12,                 [r0 + 2 * r1 + mmsize]
+    punpcklwd            m14,                 m13,                    m12
+    pmaddwd              m14,                 [r5 + 1 * mmsize]
+    paddd                m10,                 m14
+    punpckhwd            m13,                 m12
+    pmaddwd              m13,                 [r5 + 1 * mmsize]
+    paddd                m11,                 m13
+
+    paddd                m0,                  m7
+    paddd                m1,                  m7
+    paddd                m2,                  m7
+    paddd                m3,                  m7
+    paddd                m8,                  m7
+    paddd                m9,                  m7
+    paddd                m10,                 m7
+    paddd                m11,                 m7
+
+    psrad                m0,                  INTERP_SHIFT_PP
+    psrad                m1,                  INTERP_SHIFT_PP
+    psrad                m2,                  INTERP_SHIFT_PP
+    psrad                m3,                  INTERP_SHIFT_PP
+    psrad                m8,                  INTERP_SHIFT_PP
+    psrad                m9,                  INTERP_SHIFT_PP
+    psrad                m10,                 INTERP_SHIFT_PP
+    psrad                m11,                 INTERP_SHIFT_PP
+
+    packssdw             m0,                  m1
+    packssdw             m2,                  m3
+    packssdw             m8,                  m9
+    packssdw             m10,                 m11
+    pxor                 m5,                  m5
+    CLIPW2               m0,                  m2,                  m5,                  m16
+    CLIPW2               m8,                  m10,                 m5,                  m16
+    movu                 [r2],                m0
+    movu                 [r2 + r3],           m2
+    movu                 [r2 + mmsize],       m8
+    movu                 [r2 + r3 + mmsize],  m10
+%endmacro
+
+;-----------------------------------------------------------------------------------------------------------------
+; void interp_4tap_vert(int16_t *src, intptr_t srcStride, int16_t *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------------------------------------------
+%macro FILTER_VER_PP_CHROMA_64xN_AVX512 1
+INIT_ZMM avx512
+cglobal interp_4tap_vert_pp_64x%1, 5, 7, 16
+    add                   r1d,                r1d
+    add                   r3d,                r3d
+    sub                   r0,                 r1
+    shl                   r4d,                7
+
+%ifdef PIC
+    lea                   r5,                 [tab_ChromaCoeffV_avx512]
+    lea                   r5,                 [r5 + r4]
+%else
+    lea                   r5,                 [tab_ChromaCoeffV_avx512 + r4]
+%endif
+    vbroadcasti32x8       m7,                 [INTERP_OFFSET_PP]
+    vbroadcasti32x8       m16,                [pw_pixel_max]
+
+%rep %1/2 - 1
+    PROCESS_CHROMA_VERT_PP_64x2_AVX512
+    lea                   r2,                 [r2 + 2 * r3]
+%endrep
+    PROCESS_CHROMA_VERT_PP_64x2_AVX512
+    RET
+%endmacro
+
+%if ARCH_X86_64
+FILTER_VER_PP_CHROMA_64xN_AVX512 16
+FILTER_VER_PP_CHROMA_64xN_AVX512 32
+FILTER_VER_PP_CHROMA_64xN_AVX512 48
+FILTER_VER_PP_CHROMA_64xN_AVX512 64
+%endif
 
;-------------------------------------------------------------------------------------------------------------
 ;ipfilter_chroma_avx512 code end
 
;-------------------------------------------------------------------------------------------------------------
_______________________________________________
x265-devel mailing list
x265-devel@videolan.org
https://mailman.videolan.org/listinfo/x265-devel

Reply via email to