From: Pierre Edouard Lepere <pierre-edouard.lep...@insa-rennes.fr>

Initially written by Pierre Edouard Lepere 
<pierre-edouard.lep...@insa-rennes.fr>,
extended by James Almer <jamr...@gmail.com>.

Signed-off-by: Alexandra Hájková <alexan...@khirnov.net>
---
 libavcodec/x86/Makefile         |   3 +-
 libavcodec/x86/hevc_res_add.asm | 391 ++++++++++++++++++++++++++++++++++++++++
 libavcodec/x86/hevcdsp_init.c   |  40 ++++
 3 files changed, 433 insertions(+), 1 deletion(-)
 create mode 100644 libavcodec/x86/hevc_res_add.asm
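
A note for reviewers: the operation implemented here is the last step of
block reconstruction, adding the inverse-transformed residual onto the
predicted pixels with clipping to the valid pixel range. A scalar sketch
of the 8-bit case, for reference only (the name, the loop structure and
the explicit "size" parameter are illustrative; the real entry points
take (uint8_t *dst, int16_t *coeffs, ptrdiff_t stride) with the block
size baked into the function name):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative scalar reference, 8-bit depth. The 10-bit variants
     * do the same on uint16_t pixels, clipping to [0, 1023]. */
    static void add_residual_ref(uint8_t *dst, const int16_t *res,
                                 ptrdiff_t stride, int size)
    {
        int x, y, v;
        for (y = 0; y < size; y++) {
            for (x = 0; x < size; x++) {
                v      = dst[x] + res[x];
                dst[x] = v < 0 ? 0 : (v > 255 ? 255 : v);
            }
            dst += stride; /* next picture row */
            res += size;   /* coeffs are a packed size x size block */
        }
    }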

diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index a38535b..aa93e67 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -117,7 +117,8 @@ YASM-OBJS-$(CONFIG_DCA_DECODER)        += x86/dcadsp.o
 YASM-OBJS-$(CONFIG_DNXHD_ENCODER)      += x86/dnxhdenc.o
 YASM-OBJS-$(CONFIG_HEVC_DECODER)       += x86/hevc_deblock.o            \
                                           x86/hevc_mc.o                 \
-                                          x86/hevc_idct.o
+                                          x86/hevc_idct.o               \
+                                          x86/hevc_res_add.o
 YASM-OBJS-$(CONFIG_PNG_DECODER)        += x86/pngdsp.o
 YASM-OBJS-$(CONFIG_PRORES_DECODER)     += x86/proresdsp.o
 YASM-OBJS-$(CONFIG_RV40_DECODER)       += x86/rv40dsp.o
diff --git a/libavcodec/x86/hevc_res_add.asm b/libavcodec/x86/hevc_res_add.asm
new file mode 100644
index 0000000..f8d9fd7
--- /dev/null
+++ b/libavcodec/x86/hevc_res_add.asm
@@ -0,0 +1,391 @@
+; *****************************************************************************
+; * SIMD-optimized add_residual functions for HEVC decoding
+; * Copyright (c) 2014 Pierre-Edouard LEPERE
+; *
+; * This file is part of Libav.
+; *
+; * Libav is free software; you can redistribute it and/or
+; * modify it under the terms of the GNU Lesser General Public
+; * License as published by the Free Software Foundation; either
+; * version 2.1 of the License, or (at your option) any later version.
+; *
+; * Libav is distributed in the hope that it will be useful,
+; * but WITHOUT ANY WARRANTY; without even the implied warranty of
+; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+; * Lesser General Public License for more details.
+; *
+; * You should have received a copy of the GNU Lesser General Public
+; * License along with Libav; if not, write to the Free Software
+; * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+; ******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION_RODATA 32
+max_pixels_10:          times 16  dw ((1 << 10)-1)
+
+SECTION .text
+
+; the add_res macros and functions were largely inspired by the x264
+; project's code in h264_idct.asm
+%macro ADD_RES_MMX_4_8 0
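+    ; packuswb saturates signed words to unsigned bytes, clamping
+    ; negatives to zero. Splitting each residual row into its positive
+    ; part (m2/m4) and the magnitude of its negative part (m3/m5)
+    ; therefore survives the pack; the destination is then updated
+    ; with saturating unsigned adds and subtracts.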
+    mova              m2, [r1]
+    mova              m4, [r1+8]
+    pxor              m3, m3
+    psubw             m3, m2
+    packuswb          m2, m2
+    packuswb          m3, m3
+    pxor              m5, m5
+    psubw             m5, m4
+    packuswb          m4, m4
+    packuswb          m5, m5
+
+    movh              m0, [r0     ]
+    movh              m1, [r0+r2  ]
+    paddusb           m0, m2
+    paddusb           m1, m4
+    psubusb           m0, m3
+    psubusb           m1, m5
+    movh       [r0     ], m0
+    movh       [r0+r2  ], m1
+%endmacro
+
+
+INIT_MMX mmxext
+; void ff_hevc_add_residual_4_8_mmxext(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
+cglobal hevc_add_residual_4_8, 3, 4, 6
+    ADD_RES_MMX_4_8
+    add               r1, 16
+    lea               r0, [r0+r2*2]
+    ADD_RES_MMX_4_8
+    RET
+
+%macro ADD_RES_SSE_8_8 0
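+    ; Same positive/negative split as the 4x4 case, four 8-pixel rows
+    ; at a time: after packing, rows 0/2 share one register and rows
+    ; 1/3 another, matching the movq/movhps destination accesses.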
+    pxor              m3, m3
+    mova              m4, [r1]
+    mova              m6, [r1+16]
+    mova              m0, [r1+32]
+    mova              m2, [r1+48]
+    psubw             m5, m3, m4
+    psubw             m7, m3, m6
+    psubw             m1, m3, m0
+    packuswb          m4, m0
+    packuswb          m5, m1
+    psubw             m3, m2
+    packuswb          m6, m2
+    packuswb          m7, m3
+
+    movq                m0, [r0     ]
+    movq                m1, [r0+r2  ]
+    movhps              m0, [r0+r2*2]
+    movhps              m1, [r0+r3  ]
+    paddusb             m0, m4
+    paddusb             m1, m6
+    psubusb             m0, m5
+    psubusb             m1, m7
+    movq         [r0     ], m0
+    movq         [r0+r2  ], m1
+    movhps       [r0+2*r2], m0
+    movhps       [r0+r3  ], m1
+%endmacro
+
+%macro ADD_RES_SSE_16_32_8 3
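+    ; %1 is the byte offset into the coefficient buffer; %2 and %3 are
+    ; two 16-byte (xmm) or 32-byte (ymm) destinations (two rows, or two
+    ; halves of a 32-pixel row). The caller must have zeroed m0. With
+    ; AVX2 the second half of each row is loaded into the high lane
+    ; via vinserti128.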
+    mova             xm2, [r1+%1   ]
+    mova             xm6, [r1+%1+16]
+%if cpuflag(avx2)
+    vinserti128       m2, m2, [r1+%1+32], 1
+    vinserti128       m6, m6, [r1+%1+48], 1
+%endif
+%if cpuflag(avx)
+    psubw             m1, m0, m2
+    psubw             m5, m0, m6
+%else
+    mova              m1, m0
+    mova              m5, m0
+    psubw             m1, m2
+    psubw             m5, m6
+%endif
+    packuswb          m2, m6
+    packuswb          m1, m5
+
+    mova             xm4, [r1+%1+mmsize*2   ]
+    mova             xm6, [r1+%1+mmsize*2+16]
+%if cpuflag(avx2)
+    vinserti128       m4, m4, [r1+%1+96 ], 1
+    vinserti128       m6, m6, [r1+%1+112], 1
+%endif
+%if cpuflag(avx)
+    psubw             m3, m0, m4
+    psubw             m5, m0, m6
+%else
+    mova              m3, m0
+    mova              m5, m0
+    psubw             m3, m4
+    psubw             m5, m6
+%endif
+    packuswb          m4, m6
+    packuswb          m3, m5
+
+    paddusb           m2, [%2]
+    paddusb           m4, [%3]
+    psubusb           m2, m1
+    psubusb           m4, m3
+    mova            [%2], m2
+    mova            [%3], m4
+%endmacro
+
+
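+; The 8-bit 8/16/32 functions below are instantiated for both SSE2 and
+; AVX; cpuflag(avx) inside the macros selects the three-operand psubw
+; forms where available.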
+%macro TRANSFORM_ADD_8 0
+; void ff_hevc_add_residual_8_8_<opt>(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
+cglobal hevc_add_residual_8_8, 3, 4, 8
+    lea               r3, [r2*3]
+    ADD_RES_SSE_8_8
+    add               r1, 64
+    lea               r0, [r0+r2*4]
+    ADD_RES_SSE_8_8
+    RET
+
+; void ff_hevc_add_residual_16_8_<opt>(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
+cglobal hevc_add_residual_16_8, 3, 5, 7
+    pxor              m0, m0
+    lea               r3, [r2*3]
+    mov              r4d, 4
+.loop:
+    ADD_RES_SSE_16_32_8  0, r0,      r0+r2
+    ADD_RES_SSE_16_32_8 64, r0+r2*2, r0+r3
+    add                r1, 128
+    lea                r0, [r0+r2*4]
+    dec              r4d
+    jnz .loop
+    RET
+
+; void ff_hevc_add_residual_32_8_<opt>(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
+cglobal hevc_add_residual_32_8, 3, 5, 7
+    pxor               m0, m0
+    mov              r4d, 16
+.loop:
+    ADD_RES_SSE_16_32_8  0, r0,    r0+16
+    ADD_RES_SSE_16_32_8 64, r0+r2, r0+r2+16
+    add                r1, 128
+    lea                r0, [r0+r2*2]
+    dec              r4d
+    jnz .loop
+    RET
+%endmacro
+
+INIT_XMM sse2
+TRANSFORM_ADD_8
+INIT_XMM avx
+TRANSFORM_ADD_8
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
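+; With ymm registers the same ADD_RES_SSE_16_32_8 macro packs a full
+; 32-pixel row per register, so each loop iteration below covers four
+; rows and the 32x32 block completes in 8 iterations.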
+; void ff_hevc_add_residual_32_8_avx2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
+cglobal hevc_add_residual_32_8, 3, 5, 7
+    pxor              m0, m0
+    lea               r3, [r2*3]
+    mov              r4d, 8
+.loop:
+    ADD_RES_SSE_16_32_8   0, r0,      r0+r2
+    ADD_RES_SSE_16_32_8 128, r0+r2*2, r0+r3
+    add                r1, 256
+    lea                r0, [r0+r2*4]
+    dec              r4d
+    jnz .loop
+    RET
+%endif
+
+%macro ADD_RES_SSE_8_10 4
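+    ; 10-bit pixels are stored as uint16_t, so the residual is added
+    ; with a plain paddw and the result clamped by CLIPW to the
+    ; [m4, m5] = [0, 1023] range set up by the caller.
+    ; %1 = dst, %2 = stride, %3 = 3*stride, %4 = coeffs; 4 rows of 8.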
+    mova              m0, [%4]
+    mova              m1, [%4+16]
+    mova              m2, [%4+32]
+    mova              m3, [%4+48]
+    paddw             m0, [%1+0   ]
+    paddw             m1, [%1+%2  ]
+    paddw             m2, [%1+%2*2]
+    paddw             m3, [%1+%3  ]
+    CLIPW             m0, m4, m5
+    CLIPW             m1, m4, m5
+    CLIPW             m2, m4, m5
+    CLIPW             m3, m4, m5
+    mova       [%1+0   ], m0
+    mova       [%1+%2  ], m1
+    mova       [%1+%2*2], m2
+    mova       [%1+%3  ], m3
+%endmacro
+
+%macro ADD_RES_MMX4_10 3
+    mova              m0, [%1+0   ]
+    mova              m1, [%1+%2  ]
+    paddw             m0, [%3]
+    paddw             m1, [%3+8]
+    CLIPW             m0, m2, m3
+    CLIPW             m1, m2, m3
+    mova       [%1+0   ], m0
+    mova       [%1+%2  ], m1
+%endmacro
+
+%macro ADD_RESIDUAL_SSE_16_10 3
+    mova              m0, [%3]
+    mova              m1, [%3+16]
+    mova              m2, [%3+32]
+    mova              m3, [%3+48]
+    paddw             m0, [%1      ]
+    paddw             m1, [%1+16   ]
+    paddw             m2, [%1+%2   ]
+    paddw             m3, [%1+%2+16]
+    CLIPW             m0, m4, m5
+    CLIPW             m1, m4, m5
+    CLIPW             m2, m4, m5
+    CLIPW             m3, m4, m5
+    mova      [%1      ], m0
+    mova      [%1+16   ], m1
+    mova      [%1+%2   ], m2
+    mova      [%1+%2+16], m3
+%endmacro
+
+%macro ADD_RESIDUAL_SSE_32_10 2
+    mova              m0, [%2]
+    mova              m1, [%2+16]
+    mova              m2, [%2+32]
+    mova              m3, [%2+48]
+
+    paddw             m0, [%1   ]
+    paddw             m1, [%1+16]
+    paddw             m2, [%1+32]
+    paddw             m3, [%1+48]
+    CLIPW             m0, m4, m5
+    CLIPW             m1, m4, m5
+    CLIPW             m2, m4, m5
+    CLIPW             m3, m4, m5
+    mova         [%1   ], m0
+    mova         [%1+16], m1
+    mova         [%1+32], m2
+    mova         [%1+48], m3
+%endmacro
+
+%macro ADD_RESIDUAL_16_AVX2 4
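+    ; At 10 bits one ymm register holds a full 16-pixel row, so each
+    ; call covers four rows; clip bounds come from the caller in m4/m5.
+    ; %1 = dst, %2 = stride, %3 = 3*stride, %4 = coeffs.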
+    mova              m0, [%4]
+    mova              m1, [%4+32]
+    mova              m2, [%4+64]
+    mova              m3, [%4+96]
+
+    paddw             m0, [%1+0   ]
+    paddw             m1, [%1+%2  ]
+    paddw             m2, [%1+%2*2]
+    paddw             m3, [%1+%3  ]
+
+    CLIPW             m0, m4, m5
+    CLIPW             m1, m4, m5
+    CLIPW             m2, m4, m5
+    CLIPW             m3, m4, m5
+    mova       [%1+0   ], m0
+    mova       [%1+%2  ], m1
+    mova       [%1+%2*2], m2
+    mova       [%1+%3  ], m3
+%endmacro
+
+%macro ADD_RESIDUAL_32_AVX2 3
+    mova              m0, [%3]
+    mova              m1, [%3+32]
+    mova              m2, [%3+64]
+    mova              m3, [%3+96]
+
+    paddw             m0, [%1      ]
+    paddw             m1, [%1+32   ]
+    paddw             m2, [%1+%2   ]
+    paddw             m3, [%1+%2+32]
+
+    CLIPW             m0, m4, m5
+    CLIPW             m1, m4, m5
+    CLIPW             m2, m4, m5
+    CLIPW             m3, m4, m5
+    mova      [%1      ], m0
+    mova      [%1+32   ], m1
+    mova      [%1+%2   ], m2
+    mova      [%1+%2+32], m3
+%endmacro
+
+;-----------------------------------------------------------------------------
+; void ff_hevc_add_residual_4_10(pixel *dst, int16_t *block, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_MMX mmxext
+cglobal hevc_add_residual_4_10, 3, 4, 6
+    pxor              m2, m2
+    mova              m3, [max_pixels_10]
+    ADD_RES_MMX4_10     r0, r2, r1
+    add               r1, 16
+    lea               r0, [r0+2*r2]
+    ADD_RES_MMX4_10     r0, r2, r1
+    RET
+
+;-----------------------------------------------------------------------------
+; void ff_hevc_add_residual_8_10(pixel *dst, int16_t *block, ptrdiff_t stride)
+;-----------------------------------------------------------------------------
+INIT_XMM sse2
+cglobal hevc_add_residual_8_10, 3, 5, 6
+    pxor              m4, m4
+    mova              m5, [max_pixels_10]
+    lea               r3, [r2*3]
+
+    ADD_RES_SSE_8_10      r0, r2, r3, r1
+    lea               r0, [r0+r2*4]
+    add               r1, 64
+    ADD_RES_SSE_8_10      r0, r2, r3, r1
+    RET
+
+cglobal hevc_add_residual_16_10, 3, 5, 6
+    pxor              m4, m4
+    mova              m5, [max_pixels_10]
+
+    mov              r4d, 8
+.loop:
+    ADD_RESIDUAL_SSE_16_10 r0, r2, r1
+    lea                 r0, [r0+r2*2]
+    add                 r1, 64
+    dec              r4d
+    jnz .loop
+    RET
+
+cglobal hevc_add_residual_32_10, 3, 5, 6
+    pxor              m4, m4
+    mova              m5, [max_pixels_10]
+
+    mov              r4d, 32
+.loop:
+    ADD_RESIDUAL_SSE_32_10 r0, r1
+    lea                 r0, [r0+r2]
+    add                 r1, 64
+    dec              r4d
+    jnz .loop
+    RET
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+
+cglobal hevc_add_residual_16_10, 3, 5, 6
+    pxor              m4, m4
+    mova              m5, [max_pixels_10]
+    lea               r3, [r2*3]
+
+    mov              r4d, 4
+.loop:
+    ADD_RESIDUAL_16_AVX2  r0, r2, r3, r1
+    lea               r0, [r0+r2*4]
+    add               r1, 128
+    dec              r4d
+    jnz .loop
+    RET
+
+cglobal hevc_add_residual_32_10, 3, 5, 6
+    pxor              m4, m4
+    mova              m5, [max_pixels_10]
+
+    mov              r4d, 16
+.loop:
+    ADD_RESIDUAL_32_AVX2  r0, r2, r1
+    lea               r0, [r0+r2*2]
+    add               r1, 128
+    dec              r4d
+    jnz .loop
+    RET
+%endif ;HAVE_AVX2_EXTERNAL
diff --git a/libavcodec/x86/hevcdsp_init.c b/libavcodec/x86/hevcdsp_init.c
index 0a06347..73279c2 100644
--- a/libavcodec/x86/hevcdsp_init.c
+++ b/libavcodec/x86/hevcdsp_init.c
@@ -91,6 +91,25 @@ void ff_hevc_idct_32x32_10_ ## opt(int16_t *coeffs, int col_limit);
 IDCT_FUNCS(sse2)
 IDCT_FUNCS(avx)
 
+void ff_hevc_add_residual_4_8_mmxext(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_8_8_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_16_8_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_32_8_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+
+void ff_hevc_add_residual_8_8_avx(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_16_8_avx(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_32_8_avx(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+
+void ff_hevc_add_residual_32_8_avx2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+
+void ff_hevc_add_residual_4_10_mmxext(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_8_10_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_16_10_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_32_10_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+
+void ff_hevc_add_residual_16_10_avx2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+void ff_hevc_add_residual_32_10_avx2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
+
 #define GET_PIXELS(width, depth, cf)                                                                      \
 void ff_hevc_get_pixels_ ## width ## _ ## depth ## _ ## cf(int16_t *dst, ptrdiff_t dststride,             \
                                                            uint8_t *src, ptrdiff_t srcstride,             \
@@ -278,17 +297,24 @@ void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth)
         if (EXTERNAL_MMXEXT(cpu_flags)) {
             c->idct_dc[0] = ff_hevc_idct_4x4_dc_8_mmxext;
             c->idct_dc[1] = ff_hevc_idct_8x8_dc_8_mmxext;
+
+            c->add_residual[0] = ff_hevc_add_residual_4_8_mmxext;
         }
         if (EXTERNAL_SSE2(cpu_flags)) {
             c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_8_sse2;
             c->hevc_h_loop_filter_chroma = ff_hevc_h_loop_filter_chroma_8_sse2;
 
+            c->add_residual[1] = ff_hevc_add_residual_8_8_sse2;
+            c->add_residual[2] = ff_hevc_add_residual_16_8_sse2;
+            c->add_residual[3] = ff_hevc_add_residual_32_8_sse2;
+
             c->idct_dc[1] = ff_hevc_idct_8x8_dc_8_sse2;
             c->idct_dc[2] = ff_hevc_idct_16x16_dc_8_sse2;
             c->idct_dc[3] = ff_hevc_idct_32x32_dc_8_sse2;
 
             c->idct[0]    = ff_hevc_idct_4x4_8_sse2;
             c->idct[1]    = ff_hevc_idct_8x8_8_sse2;
+
             SET_QPEL_FUNCS(0, 0, 8, sse2, ff_hevc_get_pixels);
             SET_EPEL_FUNCS(0, 0, 8, sse2, ff_hevc_get_pixels);
 
@@ -307,11 +333,16 @@ void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth)
         if (EXTERNAL_AVX(cpu_flags)) {
             c->idct[0] = ff_hevc_idct_4x4_8_avx;
             c->idct[1] = ff_hevc_idct_8x8_8_avx;
+            c->add_residual[1] = ff_hevc_add_residual_8_8_avx;
+            c->add_residual[2] = ff_hevc_add_residual_16_8_avx;
+            c->add_residual[3] = ff_hevc_add_residual_32_8_avx;
         }
     } else if (bit_depth == 10) {
         if (EXTERNAL_MMXEXT(cpu_flags)) {
             c->idct_dc[0] = ff_hevc_idct_4x4_dc_10_mmxext;
             c->idct_dc[1] = ff_hevc_idct_8x8_dc_10_mmxext;
+
+            c->add_residual[0] = ff_hevc_add_residual_4_10_mmxext;
         }
         if (EXTERNAL_SSE2(cpu_flags)) {
             c->hevc_v_loop_filter_chroma = ff_hevc_v_loop_filter_chroma_10_sse2;
@@ -330,6 +361,10 @@ void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth)
             SET_LUMA_FUNCS(put_unweighted_pred_avg,          ff_hevc_put_unweighted_pred_avg, 10, sse2);
             SET_CHROMA_FUNCS(put_unweighted_pred_chroma,     ff_hevc_put_unweighted_pred,     10, sse2);
             SET_CHROMA_FUNCS(put_unweighted_pred_avg_chroma, ff_hevc_put_unweighted_pred_avg, 10, sse2);
+
+            c->add_residual[1] = ff_hevc_add_residual_8_10_sse2;
+            c->add_residual[2] = ff_hevc_add_residual_16_10_sse2;
+            c->add_residual[3] = ff_hevc_add_residual_32_10_sse2;
         }
         if (EXTERNAL_AVX(cpu_flags)) {
             c->idct[0] = ff_hevc_idct_4x4_10_avx;
@@ -366,6 +401,8 @@ void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth)
         if (EXTERNAL_AVX2(cpu_flags)) {
             c->idct_dc[2] = ff_hevc_idct_16x16_dc_8_avx2;
             c->idct_dc[3] = ff_hevc_idct_32x32_dc_8_avx2;
+
+            c->add_residual[3] = ff_hevc_add_residual_32_8_avx2;
         }
     } else if (bit_depth == 10) {
         if (EXTERNAL_SSE2(cpu_flags)) {
@@ -397,6 +434,9 @@ void ff_hevc_dsp_init_x86(HEVCDSPContext *c, const int bit_depth)
         if (EXTERNAL_AVX2(cpu_flags)) {
             c->idct_dc[2] = ff_hevc_idct_16x16_dc_10_avx2;
             c->idct_dc[3] = ff_hevc_idct_32x32_dc_10_avx2;
+
+            c->add_residual[2] = ff_hevc_add_residual_16_10_avx2;
+            c->add_residual[3] = ff_hevc_add_residual_32_10_avx2;
         }
     }
 #endif /* ARCH_X86_64 */
-- 
2.1.4

_______________________________________________
libav-devel mailing list
libav-devel@libav.org
https://lists.libav.org/mailman/listinfo/libav-devel