# HG changeset patch
# User Vignesh Vijayakumar
# Date 1501586151 -19800
#      Tue Aug 01 16:45:51 2017 +0530
# Node ID 73ee464e136910a95d7b3070a1c736dedeaa6278
# Parent  55ed1898de6bd2b8688aa8f1f7b29ae35f674ab4
x86: AVX512 addAvg_W32 for high bit depth

Size  |  AVX2 performance | AVX512 performance
----------------------------------------------
32x8  |      9.83x        |      18.11x
32x16 |      9.65x        |      17.72x
32x24 |      9.50x        |      18.41x
32x32 |      9.28x        |      19.29x
32x64 |      9.23x        |      18.71x

diff -r 55ed1898de6b -r 73ee464e1369 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp      Wed Aug 02 17:16:48 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp      Tue Aug 01 16:45:51 2017 +0530
@@ -2278,6 +2278,20 @@
         p.pu[LUMA_64x48].sad = PFX(pixel_sad_64x48_avx512);
         p.pu[LUMA_64x64].sad = PFX(pixel_sad_64x64_avx512);
 
+        p.pu[LUMA_32x8].addAvg = PFX(addAvg_32x8_avx512);
+        p.pu[LUMA_32x16].addAvg = PFX(addAvg_32x16_avx512);
+        p.pu[LUMA_32x24].addAvg = PFX(addAvg_32x24_avx512);
+        p.pu[LUMA_32x32].addAvg = PFX(addAvg_32x32_avx512);
+        p.pu[LUMA_32x64].addAvg = PFX(addAvg_32x64_avx512);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg = PFX(addAvg_32x8_avx512);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg = PFX(addAvg_32x16_avx512);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_avx512);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_avx512);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg = PFX(addAvg_32x16_avx512);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg = PFX(addAvg_32x32_avx512);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg = PFX(addAvg_32x48_avx512);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg = PFX(addAvg_32x64_avx512);
+
     }
 }
 #else // if HIGH_BIT_DEPTH
diff -r 55ed1898de6b -r 73ee464e1369 source/common/x86/mc-a.asm
--- a/source/common/x86/mc-a.asm        Wed Aug 02 17:16:48 2017 +0530
+++ b/source/common/x86/mc-a.asm        Tue Aug 01 16:45:51 2017 +0530
@@ -1656,6 +1656,124 @@
 ADDAVG_W64_H1_AVX2 32
 ADDAVG_W64_H1_AVX2 48
 ADDAVG_W64_H1_AVX2 64
+
+;-----------------------------------------------------------------------------
+;addAvg avx512 high bit depth code start
+;-----------------------------------------------------------------------------
+%macro PROCESS_ADDAVG_32x8_HBD_AVX512 0
+    movu        m0,              [r0]
+    movu        m1,              [r1]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + r3]
+    movu        m1,              [r1 + r4]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5],       m0
+
+    movu        m0,              [r0 + 2 * r3]
+    movu        m1,              [r1 + 2 * r4]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + 2 * r5],   m0
+
+    movu        m0,              [r0 + r6]
+    movu        m1,              [r1 + r7]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r8],       m0
+
+    lea         r2,              [r2 + 4 * r5]
+    lea         r0,              [r0 + 4 * r3]
+    lea         r1,              [r1 + 4 * r4]
+
+    movu        m0,              [r0]
+    movu        m1,              [r1]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2],            m0
+
+    movu        m0,              [r0 + r3]
+    movu        m1,              [r1 + r4]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r5],       m0
+
+    movu        m0,              [r0 + 2 * r3]
+    movu        m1,              [r1 + 2 * r4]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + 2 * r5],   m0
+
+    movu        m0,              [r0 + r6]
+    movu        m1,              [r1 + r7]
+    paddw       m0,              m1
+    pmulhrsw    m0,              m3
+    paddw       m0,              m4
+    pmaxsw      m0,              m2
+    pminsw      m0,              m5
+    movu        [r2 + r8],       m0
+%endmacro
+
+;-----------------------------------------------------------------------------
+;void addAvg (int16_t* src0, int16_t* src1, pixel* dst, intptr_t src0Stride, intptr_t src1Stride, intptr_t dstStride)
+;-----------------------------------------------------------------------------
+%macro ADDAVG_W32_HBD_AVX512 1
+INIT_ZMM avx512
+cglobal addAvg_32x%1, 6,9,6
+    vbroadcasti32x8        m4,              [pw_ %+ ADDAVG_ROUND]
+    vbroadcasti32x8        m5,              [pw_pixel_max]
+    vbroadcasti32x8        m3,              [pw_ %+ ADDAVG_FACTOR]
+    pxor        m2,              m2
+    add         r3,              r3
+    add         r4,              r4
+    add         r5,              r5
+    lea         r6,              [3 * r3]
+    lea         r7,              [3 * r4]
+    lea         r8,              [3 * r5]
+
+%rep %1/8 - 1
+    PROCESS_ADDAVG_32x8_HBD_AVX512
+    lea         r2,              [r2 + 4 * r5]
+    lea         r0,              [r0 + 4 * r3]
+    lea         r1,              [r1 + 4 * r4]
+%endrep
+    PROCESS_ADDAVG_32x8_HBD_AVX512
+    RET
+%endmacro
+
+ADDAVG_W32_HBD_AVX512 8
+ADDAVG_W32_HBD_AVX512 16
+ADDAVG_W32_HBD_AVX512 24
+ADDAVG_W32_HBD_AVX512 32
+ADDAVG_W32_HBD_AVX512 48
+ADDAVG_W32_HBD_AVX512 64
+;-----------------------------------------------------------------------------
+;addAvg avx512 high bit depth code end
+;-----------------------------------------------------------------------------
 ;-----------------------------------------------------------------------------
 %else ; !HIGH_BIT_DEPTH
 ;-----------------------------------------------------------------------------
_______________________________________________
x265-devel mailing list
x265-devel@videolan.org
https://mailman.videolan.org/listinfo/x265-devel

Reply via email to