---
 libavcodec/x86/h264_intrapred_10bit.asm |  576 +++++++++++++------------------
 libavcodec/x86/h264_intrapred_init.c    |   29 ++
 2 files changed, 277 insertions(+), 328 deletions(-)

diff --git a/libavcodec/x86/h264_intrapred_10bit.asm b/libavcodec/x86/h264_intrapred_10bit.asm
index 24a7bfa..d12b0db 100644
--- a/libavcodec/x86/h264_intrapred_10bit.asm
+++ b/libavcodec/x86/h264_intrapred_10bit.asm
@@ -27,8 +27,6 @@
 
 SECTION_RODATA
 
-SECTION .text
-
 cextern pw_16
 cextern pw_8
 cextern pw_4
@@ -42,6 +40,8 @@ pw_512:       times 8 dw 512
 pd_17:        times 4 dd 17
 pd_16:        times 4 dd 16
 
+SECTION .text
+
 ; dest, left, right, src
 ; output: %1 = (t[n-1] + t[n]*2 + t[n+1] + 2) >> 2
 %macro PRED4x4_LOWPASS 4
@@ -333,7 +333,7 @@ cglobal pred8x8_vertical_10_sse2, 2,2
 ;-----------------------------------------------------------------------------
 INIT_XMM
 cglobal pred8x8_horizontal_10_sse2, 2,3
-    mov          r2, 4
+    mov         r2d, 4
 .loop:
     movq         m0, [r0+r1*0-8]
     movq         m1, [r0+r1*1-8]
@@ -344,7 +344,7 @@ cglobal pred8x8_horizontal_10_sse2, 2,3
     mova  [r0+r1*0], m0
     mova  [r0+r1*1], m1
     lea          r0, [r0+r1*2]
-    dec          r2
+    dec          r2d
     jg .loop
     REP_RET
 
@@ -402,7 +402,7 @@ cglobal pred8x8_dc_10_%1, 2,4
     punpcklwd   m2, m3
     punpckldq   m0, m2            ; s0, s1, s2, s3
     %2          m3, m0, 11110110b ; s2, s1, s3, s3
-    lea         r2, [r1+r1*2]
+    lea         r2, [r1*3]
     %2          m0, m0, 01110100b ; s0, s1, s3, s1
     paddw       m0, m3
     lea         r3, [r0+r1*4]
@@ -445,7 +445,7 @@ cglobal pred8x8_top_dc_10_%1, 2,4
     movq        m1, [r0+8]
     HADDW       m0, m2
     HADDW       m1, m3
-    lea         r2, [r1+r1*2]
+    lea         r2, [r1*3]
     paddw       m0, [pw_2]
     paddw       m1, [pw_2]
     lea         r3, [r0+r1*4]
@@ -478,7 +478,7 @@ PRED8x8_TOP_DC sse2  , pshuflw
 INIT_XMM
 cglobal pred8x8_plane_10_sse2, 2,7,7
     sub       r0, r1
-    lea       r2, [r1+r1*2]
+    lea       r2, [r1*3]
     lea       r3, [r0+r1*4]
     mova      m2, [r0]
     pmaddwd   m2, [pw_m32101234]
@@ -500,7 +500,7 @@ cglobal pred8x8_plane_10_sse2, 2,7,7
     movzx    r5d, word [r3+r2*1-2] ; src[6*stride-1]
     movzx    r6d, word [r0+r1*1-2] ; src[0*stride-1]
     sub      r5d, r6d
-    lea      r5d, [r5+r5*2]
+    lea      r5d, [r5*3]
     add      r4d, r5d
     movzx    r6d, word [r3+r1*4-2] ; src[7*stride-1]
     movzx    r5d, word [r0+r1*0-2] ; src[ -stride-1]
@@ -541,7 +541,7 @@ cglobal pred8x8_plane_10_sse2, 2,7,7
 %macro PRED8x8L_128_DC 1
 cglobal pred8x8l_128_dc_10_%1, 4,4
     mova      m0, [pw_512]
-    lea       r1, [r3+r3*2]
+    lea       r1, [r3*3]
     lea       r2, [r0+r3*4]
     MOV8 r0+r3*0, m0, m0
     MOV8 r0+r3*1, m0, m0
@@ -562,6 +562,48 @@ PRED8x8L_128_DC sse2
 ;-----------------------------------------------------------------------------
 ; void pred8x8l_top_dc(pixel *src, int has_topleft, int has_topright, int stride)
 ;-----------------------------------------------------------------------------
+%macro FIX_LT_2 3-4
+%ifidn %1,sse4
+    pblendw  %2, %3, 00000001b
+%elifidn %1,avx
+    pblendw  %2, %3, 00000001b
+%else
+    mova     %4, %3
+    pxor     %4, %2
+    pslldq   %4, 14
+    psrldq   %4, 14
+    pxor     %2, %4
+%endif
+%endmacro
+
+%macro FIX_TR_1 3-4
+%ifidn %1,sse4
+    pblendw  %2, %3, 10000000b
+%elifidn %1,avx
+    pblendw  %2, %3, 10000000b
+%else
+    mova     %4, %3
+    pxor     %4, %2
+    psrldq   %4, 14
+    pslldq   %4, 14
+    pxor     %2, %4
+%endif
+%endmacro
+
+%macro FIX_LT_1 3-5
+%ifidn %1,sse4
+    pblendw  %2, %3, 01000000b
+%elifidn %1,avx
+    pblendw  %2, %3, 01000000b
+%else
+    mova     %5, %3
+    pxor     %5, %4
+    psrldq   %5, 14
+    pslldq   %5, 12
+    pxor     %2, %5
+%endif
+%endmacro
+
 %macro PRED8x8L_TOP_DC 1
 cglobal pred8x8l_top_dc_10_%1, 4,4,6
     sub         r0, r3
@@ -569,31 +611,21 @@ cglobal pred8x8l_top_dc_10_%1, 4,4,6
     mova        m0, [r0-16]
     mova        m3, [r0]
     mova        m1, [r0+16]
-    mova        m2, m3
-    mova        m4, m3
-    PALIGNR     m2, m0, 14, m0
-    PALIGNR     m1, m4,  2, m4
-    test        r1, r1 ; top_left
+    PALIGNR     m2, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m4
+    test       r1d, r1d ; top_left
     jz .fix_lt_2
-    test        r2, r2 ; top_right
+    test       r2d, r2d ; top_right
     jz .fix_tr_1
     jmp .body
 .fix_lt_2:
-    mova        m5, m3
-    pxor        m5, m2
-    pslldq      m5, 14
-    psrldq      m5, 14
-    pxor        m2, m5
-    test        r2, r2 ; top_right
+    FIX_LT_2    %1, m2, m3, m5
+    test       r2d, r2d ; top_right
     jnz .body
 .fix_tr_1:
-    mova        m5, m3
-    pxor        m5, m1
-    psrldq      m5, 14
-    pslldq      m5, 14
-    pxor        m1, m5
+    FIX_TR_1    %1, m1, m3, m5
 .body
-    lea         r1, [r3+r3*2]
+    lea         r1, [r3*3]
     lea         r2, [r0+r3*4]
     PRED4x4_LOWPASS m0, m2, m1, m3
     HADDW       m0, m1
@@ -616,6 +648,7 @@ INIT_XMM
 PRED8x8L_TOP_DC sse2
 %define PALIGNR PALIGNR_SSSE3
 PRED8x8L_TOP_DC ssse3
+PRED8x8L_TOP_DC sse4
 
 ;-----------------------------------------------------------------------------
 ;void pred8x8l_dc(pixel *src, int has_topleft, int has_topright, int stride)
@@ -643,71 +676,51 @@ cglobal pred8x8l_dc_10_%1, 4,5,8
     mova        m0, [r0+r3*0-16]
     mova        m1, [r4]
     mov         r0, r4
-    mova        m4, m3
-    mova        m2, m3
-    PALIGNR     m4, m0, 14, m0
-    PALIGNR     m1, m2,  2, m2
-    test        r1, r1
+    PALIGNR     m4, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m2
+    test       r1d, r1d
     jnz .do_left
 .fix_lt_1:
-    mova        m5, m3
-    pxor        m5, m4
-    psrldq      m5, 14
-    pslldq      m5, 12
-    pxor        m1, m5
+    FIX_LT_1    %1, m1, m3, m4, m5
     jmp .do_left
 .fix_lt_2:
-    mova        m5, m3
-    pxor        m5, m2
-    pslldq      m5, 14
-    psrldq      m5, 14
-    pxor        m2, m5
-    test        r2, r2
+    FIX_LT_2    %1, m2, m3, m5
+    test       r2d, r2d
     jnz .body
 .fix_tr_1:
-    mova        m5, m3
-    pxor        m5, m1
-    psrldq      m5, 14
-    pslldq      m5, 14
-    pxor        m1, m5
+    FIX_TR_1    %1, m1, m3, m5
     jmp .body
 .do_left:
-    mova        m0, m4
     PRED4x4_LOWPASS m2, m1, m4, m3
-    mova        m4, m0
-    mova        m7, m2
-    PRED4x4_LOWPASS m1, m3, m0, m4
+    PRED4x4_LOWPASS m1, m3, m4, m4
     pslldq      m1, 14
-    PALIGNR     m7, m1, 14, m3
+    PALIGNR     m2, m1, 14, m3
     mova        m0, [r0-16]
     mova        m3, [r0]
     mova        m1, [r0+16]
-    mova        m2, m3
-    mova        m4, m3
-    PALIGNR     m2, m0, 14, m0
-    PALIGNR     m1, m4,  2, m4
-    test        r1, r1
+    PALIGNR     m7, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m4
+    test       r1d, r1d
     jz .fix_lt_2
-    test        r2, r2
+    test       r2d, r2d
     jz .fix_tr_1
 .body
-    lea         r1, [r3+r3*2]
-    PRED4x4_LOWPASS m6, m2, m1, m3
-    HADDW       m7, m0
-    HADDW       m6, m0
+    lea         r1, [r3*3]
+    PRED4x4_LOWPASS m6, m7, m1, m3
+    paddw       m2, m6
+    HADDW       m2, m0
     lea         r2, [r0+r3*4]
-    paddw       m7, [pw_8]
-    paddw       m7, m6
-    psrlw       m7, 4
-    SPLATW      m7, m7
-    mova [r0+r3*1], m7
-    mova [r0+r3*2], m7
-    mova [r0+r1*1], m7
-    mova [r0+r3*4], m7
-    mova [r2+r3*1], m7
-    mova [r2+r3*2], m7
-    mova [r2+r1*1], m7
-    mova [r2+r3*4], m7
+    paddw       m2, [pw_8]
+    psrlw       m2, 4
+    SPLATW      m2, m2
+    mova [r0+r3*1], m2
+    mova [r0+r3*2], m2
+    mova [r0+r1*1], m2
+    mova [r0+r3*4], m2
+    mova [r2+r3*1], m2
+    mova [r2+r3*2], m2
+    mova [r2+r1*1], m2
+    mova [r2+r3*4], m2
     RET
 %endmacro
 
@@ -716,6 +729,11 @@ INIT_XMM
 PRED8x8L_DC sse2
 %define PALIGNR PALIGNR_SSSE3
 PRED8x8L_DC ssse3
+PRED8x8L_DC sse4
+%ifdef HAVE_AVX
+INIT_AVX
+PRED8x8L_DC avx
+%endif
 
 ;-----------------------------------------------------------------------------
 ; void pred8x8l_vertical(pixel *src, int has_topleft, int has_topright, int stride)
@@ -726,31 +744,21 @@ cglobal pred8x8l_vertical_10_%1, 4,4,6
     mova        m0, [r0-16]
     mova        m3, [r0]
     mova        m1, [r0+16]
-    mova        m2, m3
-    mova        m4, m3
-    PALIGNR     m2, m0, 14, m0
-    PALIGNR     m1, m4,  2, m4
-    test        r1, r1 ; top_left
+    PALIGNR     m2, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m4
+    test       r1d, r1d ; top_left
     jz .fix_lt_2
-    test        r2, r2 ; top_right
+    test       r2d, r2d ; top_right
     jz .fix_tr_1
     jmp .body
 .fix_lt_2:
-    mova        m5, m3
-    pxor        m5, m2
-    pslldq      m5, 14
-    psrldq      m5, 14
-    pxor        m2, m5
-    test        r2, r2 ; top_right
+    FIX_LT_2    %1, m2, m3, m5
+    test       r2d, r2d ; top_right
     jnz .body
 .fix_tr_1:
-    mova        m5, m3
-    pxor        m5, m1
-    psrldq      m5, 14
-    pslldq      m5, 14
-    pxor        m1, m5
+    FIX_TR_1    %1, m1, m3, m5
 .body
-    lea         r1, [r3+r3*2]
+    lea         r1, [r3*3]
     lea         r2, [r0+r3*4]
     PRED4x4_LOWPASS m0, m2, m1, m3
     mova [r0+r3*1], m0
@@ -769,6 +777,11 @@ INIT_XMM
 PRED8x8L_VERTICAL sse2
 %define PALIGNR PALIGNR_SSSE3
 PRED8x8L_VERTICAL ssse3
+PRED8x8L_VERTICAL sse4
+%ifdef HAVE_AVX
+INIT_AVX
+PRED8x8L_VERTICAL avx
+%endif
 
 ;-----------------------------------------------------------------------------
 ; void pred8x8l_horizontal(uint8_t *src, int has_topleft, int has_topright, int stride)
@@ -778,7 +791,7 @@ cglobal pred8x8l_horizontal_10_%1, 4,4,8
     sub         r0, r3
     lea         r2, [r0+r3*2]
     mova        m0, [r0+r3*1-16]
-    test        r1, r1
+    test       r1d, r1d
     lea         r1, [r0+r3]
     cmovnz      r1, r0
     punpckhwd   m0, [r1+r3*0-16]
@@ -798,18 +811,13 @@ cglobal pred8x8l_horizontal_10_%1, 4,4,8
     mova        m0, [r0+r3*0-16]
     mova        m1, [r1+r3*0-16]
     mov         r0, r2
-    mova        m4, m3
-    mova        m2, m3
-    PALIGNR     m4, m0, 14, m0
-    PALIGNR     m1, m2,  2, m2
-    mova        m0, m4
+    PALIGNR     m4, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m2
     PRED4x4_LOWPASS m2, m1, m4, m3
-    mova        m4, m0
-    mova        m7, m2
-    PRED4x4_LOWPASS m1, m3, m0, m4
+    PRED4x4_LOWPASS m1, m3, m4, m4
     pslldq      m1, 14
-    PALIGNR     m7, m1, 14, m3
-    lea         r1, [r3+r3*2]
+    PALIGNR     m7, m2, m1, 14, m3
+    lea         r1, [r3*3]
     punpckhwd   m3, m7, m7
     punpcklwd   m7, m7
     pshufd      m0, m3, 0xff
@@ -837,6 +845,10 @@ INIT_XMM
 PRED8x8L_HORIZONTAL sse2
 %define PALIGNR PALIGNR_SSSE3
 PRED8x8L_HORIZONTAL ssse3
+%ifdef HAVE_AVX
+INIT_AVX
+PRED8x8L_HORIZONTAL avx
+%endif
 
 ;-----------------------------------------------------------------------------
 ;void pred8x8l_down_left(pixel *src, int has_topleft, int has_topright, int stride)
@@ -847,29 +859,19 @@ cglobal pred8x8l_down_left_10_%1, 4,4,8
     mova        m0, [r0-16]
     mova        m3, [r0]
     mova        m1, [r0+16]
-    mova        m2, m3
-    mova        m4, m3
-    PALIGNR     m2, m0, 14, m0
-    PALIGNR     m1, m4,  2, m4
-    test        r1, r1
+    PALIGNR     m2, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m4
+    test       r1d, r1d
     jz .fix_lt_2
-    test        r2, r2
+    test       r2d, r2d
     jz .fix_tr_1
     jmp .do_top
 .fix_lt_2:
-    mova        m5, m3
-    pxor        m5, m2
-    pslldq      m5, 14
-    psrldq      m5, 14
-    pxor        m2, m5
-    test        r2, r2
+    FIX_LT_2    %1, m2, m3, m5
+    test       r2d, r2d
     jnz .do_top
 .fix_tr_1:
-    mova        m5, m3
-    pxor        m5, m1
-    psrldq      m5, 14
-    pslldq      m5, 14
-    pxor        m1, m5
+    FIX_TR_1    %1, m1, m3, m5
     jmp .do_top
 .fix_tr_2:
     punpckhwd   m3, m3
@@ -878,73 +880,45 @@ cglobal pred8x8l_down_left_10_%1, 4,4,8
 .do_top:
     PRED4x4_LOWPASS m4, m2, m1, m3
     mova        m7, m4
-    test        r2, r2
+    test       r2d, r2d
     jz .fix_tr_2
     mova        m0, [r0+16]
-    mova        m5, m0
-    mova        m2, m0
-    mova        m4, m0
-    psrldq      m5, 14
-    PALIGNR     m2, m3, 14, m3
-    PALIGNR     m5, m4,  2, m4
+    psrldq      m5, m0, 14
+    PALIGNR     m2, m0, m3, 14, m3
+    PALIGNR     m5, m0,  2, m4
     PRED4x4_LOWPASS m1, m2, m5, m0
 .do_topright:
-    lea         r1, [r3+r3*2]
+    lea         r1, [r3*3]
     mova        m6, m1
     psrldq      m1, 14
     mova        m4, m1
     lea         r2, [r0+r3*4]
-    mova        m2, m6
-    PALIGNR     m2, m7,  2, m0
-    mova        m3, m6
-    PALIGNR     m3, m7, 14, m0
+    PALIGNR     m2, m6, m7,  2, m0
+    PALIGNR     m3, m6, m7, 14, m0
     PALIGNR     m4, m6,  2, m0
-    mova        m5, m7
-    mova        m1, m7
-    mova        m7, m6
-    pslldq      m1, 2
-    PRED4x4_LOWPASS m0, m1, m2, m5
-    PRED4x4_LOWPASS m1, m3, m4, m7
+    pslldq      m1, m7, 2
+    PRED4x4_LOWPASS m0, m1, m2, m7
+    PRED4x4_LOWPASS m1, m3, m4, m6
     mova [r2+r3*4], m1
-    mova        m2, m0
-    pslldq      m1, 2
-    psrldq      m2, 14
+    PALIGNR     m1, m0, 14, m2
     pslldq      m0, 2
-    por         m1, m2
     mova [r2+r1*1], m1
-    mova        m2, m0
-    pslldq      m1, 2
-    psrldq      m2, 14
+    PALIGNR     m1, m0, 14, m2
     pslldq      m0, 2
-    por         m1, m2
     mova [r2+r3*2], m1
-    mova        m2, m0
-    pslldq      m1, 2
-    psrldq      m2, 14
+    PALIGNR     m1, m0, 14, m2
     pslldq      m0, 2
-    por         m1, m2
     mova [r2+r3*1], m1
-    mova        m2, m0
-    pslldq      m1, 2
-    psrldq      m2, 14
+    PALIGNR     m1, m0, 14, m2
     pslldq      m0, 2
-    por         m1, m2
     mova [r0+r3*4], m1
-    mova        m2, m0
-    pslldq      m1, 2
-    psrldq      m2, 14
+    PALIGNR     m1, m0, 14, m2
     pslldq      m0, 2
-    por         m1, m2
     mova [r0+r1*1], m1
-    mova        m2, m0
-    pslldq      m1, 2
-    psrldq      m2, 14
+    PALIGNR     m1, m0, 14, m2
     pslldq      m0, 2
-    por         m1, m2
     mova [r0+r3*2], m1
-    pslldq      m1, 2
-    psrldq      m0, 14
-    por         m1, m0
+    PALIGNR     m1, m0, 14, m0
     mova [r0+r3*1], m1
     RET
 %endmacro
@@ -954,9 +928,14 @@ INIT_XMM
 PRED8x8L_DOWN_LEFT sse2
 %define PALIGNR PALIGNR_SSSE3
 PRED8x8L_DOWN_LEFT ssse3
+PRED8x8L_DOWN_LEFT sse4
+%ifdef HAVE_AVX
+INIT_AVX
+PRED8x8L_DOWN_LEFT avx
+%endif
 
 ;-----------------------------------------------------------------------------
-;void pred8x8l_down_right_mxext(pixel *src, int has_topleft, int has_topright, int stride)
+;void pred8x8l_down_right(pixel *src, int has_topleft, int has_topright, int stride)
 ;-----------------------------------------------------------------------------
 %macro PRED8x8L_DOWN_RIGHT 1
 cglobal pred8x8l_down_right_10_%1, 4,5,8
@@ -980,113 +959,71 @@ cglobal pred8x8l_down_right_10_%1, 4,5,8
     mova        m0, [r0+r3*0-16]
     mova        m1, [r4]
     mov         r0, r4
-    mova        m4, m3
-    mova        m2, m3
-    PALIGNR     m4, m0, 14, m0
-    PALIGNR     m1, m2,  2, m2
-    test        r1, r1 ; top_left
+    PALIGNR     m4, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m2
+    test       r1d, r1d ; top_left
     jz .fix_lt_1
 .do_left:
-    mova        m0, m4
     PRED4x4_LOWPASS m2, m1, m4, m3
-    mova        m4, m0
-    mova        m7, m2
     mova        m6, m2
-    PRED4x4_LOWPASS m1, m3, m0, m4
+    PRED4x4_LOWPASS m1, m3, m4, m4
     pslldq      m1, 14
-    PALIGNR     m7, m1, 14, m3
+    PALIGNR     m7, m2, m1, 14, m1
     mova        m0, [r0-16]
     mova        m3, [r0]
     mova        m1, [r0+16]
-    mova        m2, m3
-    mova        m4, m3
-    PALIGNR     m2, m0, 14, m0
-    PALIGNR     m1, m4,  2, m4
-    test        r1, r1 ; top_left
+    PALIGNR     m2, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m4
+    test       r1d, r1d ; top_left
     jz .fix_lt_2
-    test        r2, r2 ; top_right
+    test       r2d, r2d ; top_right
     jz .fix_tr_1
 .do_top:
     PRED4x4_LOWPASS m4, m2, m1, m3
     mova        m5, m4
     jmp .body
 .fix_lt_1:
-    mova        m5, m3
-    pxor        m5, m4
-    psrldq      m5, 14
-    pslldq      m5, 12
-    pxor        m1, m5
+    FIX_LT_1    %1, m1, m3, m4, m5
     jmp .do_left
 .fix_lt_2:
-    mova        m5, m3
-    pxor        m5, m2
-    pslldq      m5, 14
-    psrldq      m5, 14
-    pxor        m2, m5
-    test        r2, r2 ; top_right
+    FIX_LT_2    %1, m2, m3, m5
+    test       r2d, r2d ; top_right
     jnz .do_top
 .fix_tr_1:
-    mova        m5, m3
-    pxor        m5, m1
-    psrldq      m5, 14
-    pslldq      m5, 14
-    pxor        m1, m5
+    FIX_TR_1    %1, m1, m3, m5
     jmp .do_top
 .body
-    lea         r1, [r3+r3*2]
+    lea         r1, [r3*3]
     mova        m1, m7
-    mova        m7, m5
-    mova        m5, m6
-    mova        m2, m7
     lea         r2, [r0+r3*4]
-    PALIGNR     m2, m6,  2, m0
-    mova        m3, m7
-    PALIGNR     m3, m6, 14, m0
-    mova        m4, m7
-    psrldq      m4, 2
-    PRED4x4_LOWPASS m0, m1, m2, m5
-    PRED4x4_LOWPASS m1, m3, m4, m7
-    mova [r2+r3*4], m0
-    mova        m2, m1
-    psrldq      m0, 2
-    pslldq      m2, 14
-    psrldq      m1, 2
-    por         m0, m2
-    mova [r2+r1*1], m0
-    mova        m2, m1
-    psrldq      m0, 2
-    pslldq      m2, 14
-    psrldq      m1, 2
-    por         m0, m2
-    mova [r2+r3*2], m0
-    mova        m2, m1
-    psrldq      m0, 2
-    pslldq      m2, 14
-    psrldq      m1, 2
-    por         m0, m2
-    mova [r2+r3*1], m0
-    mova        m2, m1
-    psrldq      m0, 2
-    pslldq      m2, 14
-    psrldq      m1, 2
-    por         m0, m2
-    mova [r0+r3*4], m0
-    mova        m2, m1
-    psrldq      m0, 2
-    pslldq      m2, 14
-    psrldq      m1, 2
-    por         m0, m2
-    mova [r0+r1*1], m0
-    mova        m2, m1
-    psrldq      m0, 2
-    pslldq      m2, 14
-    psrldq      m1, 2
-    por         m0, m2
-    mova [r0+r3*2], m0
-    psrldq      m0, 2
-    pslldq      m1, 14
-    por         m0, m1
-    mova [r0+r3*1], m0
+    PALIGNR     m2, m5, m6,  2, m0
+    PALIGNR     m3, m5, m6, 14, m0
+    psrldq      m4, m5, 2
+    PRED4x4_LOWPASS m0, m1, m2, m6
+    PRED4x4_LOWPASS m1, m3, m4, m5
+    PALIGNR     m1, m0, 14, m2
+    pslldq      m0, 2
+    mova [r0+r3*1], m1
+    PALIGNR     m1, m0, 14, m2
+    pslldq      m0, 2
+    mova [r0+r3*2], m1
+    PALIGNR     m1, m0, 14, m2
+    pslldq      m0, 2
+    mova [r0+r1*1], m1
+    PALIGNR     m1, m0, 14, m2
+    pslldq      m0, 2
+    mova [r0+r3*4], m1
+    PALIGNR     m1, m0, 14, m2
+    pslldq      m0, 2
+    mova [r2+r3*1], m1
+    PALIGNR     m1, m0, 14, m2
+    pslldq      m0, 2
+    mova [r2+r3*2], m1
+    PALIGNR     m1, m0, 14, m2
+    pslldq      m0, 2
+    mova [r2+r1*1], m1
+    PALIGNR     m1, m0, 14, m0
+    mova [r2+r3*4], m1
     RET
 %endmacro
 
@@ -1095,6 +1032,11 @@ INIT_XMM
 PRED8x8L_DOWN_RIGHT sse2
 %define PALIGNR PALIGNR_SSSE3
 PRED8x8L_DOWN_RIGHT ssse3
+PRED8x8L_DOWN_RIGHT sse4
+%ifdef HAVE_AVX
+INIT_AVX
+PRED8x8L_DOWN_RIGHT avx
+%endif
 
 ;-----------------------------------------------------------------------------
 ; void pred8x8l_vertical_right(pixel *src, int has_topleft, int has_topright, int stride)
@@ -1121,56 +1063,38 @@ cglobal pred8x8l_vertical_right_10_%1, 4,5,8
     mova        m0, [r0+r3*0-16]
     mova        m1, [r4]
     mov         r0, r4
-    mova        m4, m3
-    mova        m2, m3
-    PALIGNR     m4, m0, 14, m0
-    PALIGNR     m1, m2,  2, m2
-    test        r1, r1
+    PALIGNR     m4, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m2
+    test       r1d, r1d
     jz .fix_lt_1
     jmp .do_left
 .fix_lt_1:
-    mova        m5, m3
-    pxor        m5, m4
-    psrldq      m5, 14
-    pslldq      m5, 12
-    pxor        m1, m5
+    FIX_LT_1    %1, m1, m3, m4, m5
     jmp .do_left
 .fix_lt_2:
-    mova        m5, m3
-    pxor        m5, m2
-    pslldq      m5, 14
-    psrldq      m5, 14
-    pxor        m2, m5
-    test        r2, r2
+    FIX_LT_2    %1, m2, m3, m5
+    test       r2d, r2d
     jnz .do_top
 .fix_tr_1:
-    mova        m5, m3
-    pxor        m5, m1
-    psrldq      m5, 14
-    pslldq      m5, 14
-    pxor        m1, m5
+    FIX_TR_1    %1, m1, m3, m5
     jmp .do_top
 .do_left:
-    mova        m0, m4
     PRED4x4_LOWPASS m2, m1, m4, m3
     mova        m7, m2
     mova        m0, [r0-16]
     mova        m3, [r0]
     mova        m1, [r0+16]
-    mova        m2, m3
-    mova        m4, m3
-    PALIGNR     m2, m0, 14, m0
-    PALIGNR     m1, m4,  2, m4
-    test        r1, r1
+    PALIGNR     m2, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m4
+    test       r1d, r1d
     jz .fix_lt_2
-    test        r2, r2
+    test       r2d, r2d
     jz .fix_tr_1
 .do_top
     PRED4x4_LOWPASS m6, m2, m1, m3
-    lea         r1, [r3+r3*2]
+    lea         r1, [r3*3]
     mova        m2, m6
-    mova        m3, m6
-    PALIGNR     m3, m7, 14, m0
+    PALIGNR     m3, m6, m7, 14, m0
     PALIGNR     m6, m7, 12, m1
     mova        m4, m3
     pavgw       m3, m2
@@ -1180,28 +1104,25 @@ cglobal pred8x8l_vertical_right_10_%1, 4,5,8
     mova [r0+r3*2], m0
     mova        m5, m0
     mova        m6, m3
-    mova        m1, m7
-    mova        m2, m1
-    pslldq      m2, 2
-    mova        m3, m1
-    pslldq      m3, 4
-    PRED4x4_LOWPASS m0, m1, m3, m2
+    pslldq      m2, m7, 2
+    pslldq      m3, m7, 4
+    PRED4x4_LOWPASS m0, m7, m3, m2
     PALIGNR     m6, m0, 14, m2
     mova [r0+r1*1], m6
     pslldq      m0, 2
-    PALIGNR     m5, m0, 14, m1
+    PALIGNR     m5, m0, 14, m7
     mova [r0+r3*4], m5
     pslldq      m0, 2
     PALIGNR     m6, m0, 14, m2
     mova [r2+r3*1], m6
     pslldq      m0, 2
-    PALIGNR     m5, m0, 14, m1
+    PALIGNR     m5, m0, 14, m7
     mova [r2+r3*2], m5
     pslldq      m0, 2
     PALIGNR     m6, m0, 14, m2
     mova [r2+r1*1], m6
     pslldq      m0, 2
-    PALIGNR     m5, m0, 14, m1
+    PALIGNR     m5, m0, 14, m0
     mova [r2+r3*4], m5
     RET
 %endmacro
@@ -1211,6 +1132,11 @@ INIT_XMM
 PRED8x8L_VERTICAL_RIGHT sse2
 %define PALIGNR PALIGNR_SSSE3
 PRED8x8L_VERTICAL_RIGHT ssse3
+PRED8x8L_VERTICAL_RIGHT sse4
+%ifdef HAVE_AVX
+INIT_AVX
+PRED8x8L_VERTICAL_RIGHT avx
+%endif
 
 ;-----------------------------------------------------------------------------
 ; void pred8x8l_horizontal_up(pixel *src, int has_topleft, int has_topright, int stride)
@@ -1220,7 +1146,7 @@ cglobal pred8x8l_horizontal_up_10_%1, 4,4,8
     sub         r0, r3
     lea         r2, [r0+r3*2]
     mova        m0, [r0+r3*1-16]
-    test        r1, r1
+    test       r1d, r1d
     lea         r1, [r0+r3]
     cmovnz      r1, r0
     punpckhwd   m0, [r1+r3*0-16]
@@ -1240,38 +1166,30 @@ cglobal pred8x8l_horizontal_up_10_%1, 4,4,8
     mova        m0, [r0+r3*0-16]
     mova        m1, [r1+r3*0-16]
     mov         r0, r2
-    mova        m4, m3
-    mova        m2, m3
-    PALIGNR     m4, m0, 14, m0
-    PALIGNR     m1, m2,  2, m2
-    mova        m0, m4
+    PALIGNR     m4, m3, m0, 14, m0
+    PALIGNR     m1, m3,  2, m2
     PRED4x4_LOWPASS m2, m1, m4, m3
-    mova        m4, m0
-    mova        m7, m2
-    PRED4x4_LOWPASS m1, m3, m0, m4
+    PRED4x4_LOWPASS m1, m3, m4, m4
     pslldq      m1, 14
-    PALIGNR     m7, m1, 14, m3
-    lea         r1, [r3+r3*2]
+    PALIGNR     m7, m2, m1, 14, m3
+    lea         r1, [r3*3]
     pshufd      m0, m7, 00011011b ; l6 l7 l4 l5 l2 l3 l0 l1
-    pslldq      m7, 14             ; l7 .. .. .. .. .. .. ..
-    mova        m2, m0
+    pslldq      m7, 14            ; l7 .. .. .. .. .. .. ..
+    psrld       m2, m0, 16
     pslld       m0, 16
-    psrld       m2, 16
     por         m2, m0            ; l7 l6 l5 l4 l3 l2 l1 l0
-    mova        m3, m2
     mova        m4, m2
     mova        m5, m2
+    psrldq      m3, m2, 4
     psrldq      m2, 2
-    psrldq      m3, 4
     lea         r2, [r0+r3*4]
     por         m2, m7            ; l7 l7 l6 l5 l4 l3 l2 l1
     punpckhwd   m7, m7
     por         m3, m7            ; l7 l7 l7 l6 l5 l4 l3 l2
     pavgw       m4, m2
     PRED4x4_LOWPASS m1, m3, m5, m2
-    mova        m5, m4
+    punpckhwd   m5, m4, m1        ; p8 p7 p6 p5
     punpcklwd   m4, m1            ; p4 p3 p2 p1
-    punpckhwd   m5, m1            ; p8 p7 p6 p5
     mova        m6, m5
     mova        m7, m5
     mova        m0, m5
@@ -1279,9 +1197,9 @@ cglobal pred8x8l_horizontal_up_10_%1, 4,4,8
     pshufd      m1, m6, 11111001b
     PALIGNR     m6, m4, 8, m2
     pshufd      m2, m7, 11111110b
-    PALIGNR     m7, m4, 12, m3
-    pshufd      m3, m0, 11111111b
     mova [r0+r3*1], m4
+    PALIGNR     m7, m4, 12, m4
+    pshufd      m3, m0, 11111111b
     mova [r0+r3*2], m5
     mova [r0+r1*1], m6
     mova [r0+r3*4], m7
@@ -1297,7 +1215,10 @@ INIT_XMM
 PRED8x8L_HORIZONTAL_UP sse2
 %define PALIGNR PALIGNR_SSSE3
 PRED8x8L_HORIZONTAL_UP ssse3
-
+%ifdef HAVE_AVX
+INIT_AVX
+PRED8x8L_HORIZONTAL_UP avx
+%endif
 
 
 ;-----------------------------------------------------------------------------
@@ -1315,7 +1235,7 @@ PRED8x8L_HORIZONTAL_UP ssse3
 %macro PRED16x16_VERTICAL 1
 cglobal pred16x16_vertical_10_%1, 2,3
     sub   r0, r1
-    mov   r2, 8
+    mov  r2d, 8
     mova  m0, [r0+ 0]
     mova  m1, [r0+mmsize]
 %if mmsize==8
@@ -1326,7 +1246,7 @@ cglobal pred16x16_vertical_10_%1, 2,3
     MOV16 r0+r1*1, m0, m1, m2, m3
     MOV16 r0+r1*2, m0, m1, m2, m3
     lea   r0, [r0+r1*2]
-    dec   r2
+    dec   r2d
     jg .loop
     REP_RET
 %endmacro
@@ -1341,7 +1261,7 @@ PRED16x16_VERTICAL sse2
 ;-----------------------------------------------------------------------------
 %macro PRED16x16_HORIZONTAL 1
 cglobal pred16x16_horizontal_10_%1, 2,3
-    mov    r2, 8
+    mov   r2d, 8
 .vloop:
     movd   m0, [r0+r1*0-4]
     movd   m1, [r0+r1*1-4]
@@ -1350,7 +1270,7 @@ cglobal pred16x16_horizontal_10_%1, 2,3
     MOV16  r0+r1*0, m0, m0, m0, m0
     MOV16  r0+r1*1, m1, m1, m1, m1
     lea    r0, [r0+r1*2]
-    dec    r2
+    dec    r2d
     jg .vloop
     REP_RET
 %endmacro
@@ -1364,8 +1284,8 @@ PRED16x16_HORIZONTAL sse2
 ; void pred16x16_dc(pixel *src, int stride)
 ;-----------------------------------------------------------------------------
 %macro PRED16x16_DC 1
-cglobal pred16x16_dc_10_%1, 2,7
-    mov        r4, r0
+cglobal pred16x16_dc_10_%1, 2,6
+    mov        r5, r0
     sub        r0, r1
     mova       m0, [r0+0]
     paddw      m0, [r0+mmsize]
@@ -1375,17 +1295,17 @@ cglobal pred16x16_dc_10_%1, 2,7
 %endif
     HADDW      m0, m2
 
-    sub        r0, 2
-    movzx     r3d, word [r0+r1*1]
-    movzx     r5d, word [r0+r1*2]
+    lea        r0, [r0+r1-2]
+    movzx     r3d, word [r0]
+    movzx     r4d, word [r0+r1]
 %rep 7
     lea        r0, [r0+r1*2]
-    movzx     r2d, word [r0+r1*1]
+    movzx     r2d, word [r0]
     add       r3d, r2d
-    movzx     r2d, word [r0+r1*2]
-    add       r5d, r2d
+    movzx     r2d, word [r0+r1]
+    add       r4d, r2d
 %endrep
-    lea       r3d, [r3+r5+16]
+    lea       r3d, [r3+r4+16]
 
     movd       m1, r3d
     paddw      m0, m1
@@ -1393,9 +1313,9 @@ cglobal pred16x16_dc_10_%1, 2,7
     SPLATW     m0, m0
     mov       r3d, 8
 .loop:
-    MOV16 r4+r1*0, m0, m0, m0, m0
-    MOV16 r4+r1*1, m0, m0, m0, m0
-    lea        r4, [r4+r1*2]
+    MOV16 r5+r1*0, m0, m0, m0, m0
+    MOV16 r5+r1*1, m0, m0, m0, m0
+    lea        r5, [r5+r1*2]
     dec       r3d
     jg .loop
     REP_RET
diff --git a/libavcodec/x86/h264_intrapred_init.c b/libavcodec/x86/h264_intrapred_init.c
index 62e4c87..e676d05 100644
--- a/libavcodec/x86/h264_intrapred_init.c
+++ b/libavcodec/x86/h264_intrapred_init.c
@@ -56,22 +56,35 @@ void ff_pred8x8l_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, int has_tople
 
 PRED8x8L(dc, 10, sse2)
 PRED8x8L(dc, 10, ssse3)
+PRED8x8L(dc, 10, sse4)
+PRED8x8L(dc, 10, avx)
 PRED8x8L(128_dc, 10, mmxext)
 PRED8x8L(128_dc, 10, sse2)
 PRED8x8L(top_dc, 10, sse2)
 PRED8x8L(top_dc, 10, ssse3)
+PRED8x8L(top_dc, 10, sse4)
 PRED8x8L(vertical, 10, sse2)
 PRED8x8L(vertical, 10, ssse3)
+PRED8x8L(vertical, 10, sse4)
+PRED8x8L(vertical, 10, avx)
 PRED8x8L(horizontal, 10, sse2)
 PRED8x8L(horizontal, 10, ssse3)
+PRED8x8L(horizontal, 10, avx)
 PRED8x8L(down_left, 10, sse2)
 PRED8x8L(down_left, 10, ssse3)
+PRED8x8L(down_left, 10, sse4)
+PRED8x8L(down_left, 10, avx)
 PRED8x8L(down_right, 10, sse2)
 PRED8x8L(down_right, 10, ssse3)
+PRED8x8L(down_right, 10, sse4)
+PRED8x8L(down_right, 10, avx)
 PRED8x8L(vertical_right, 10, sse2)
 PRED8x8L(vertical_right, 10, ssse3)
+PRED8x8L(vertical_right, 10, sse4)
+PRED8x8L(vertical_right, 10, avx)
 PRED8x8L(horizontal_up, 10, sse2)
 PRED8x8L(horizontal_up, 10, ssse3)
+PRED8x8L(horizontal_up, 10, avx)
 
 #define PRED16x16(TYPE, DEPTH, OPT)\
 void ff_pred16x16_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, int stride);
@@ -350,12 +363,28 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth
             h->pred8x8l[TOP_DC_PRED         ] = ff_pred8x8l_top_dc_10_ssse3;
             h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_ssse3;
         }
+        if (mm_flags & AV_CPU_FLAG_SSE4) {
+            h->pred8x8l[VERT_PRED           ] = ff_pred8x8l_vertical_10_sse4;
+            h->pred8x8l[DC_PRED             ] = ff_pred8x8l_dc_10_sse4;
+            h->pred8x8l[TOP_DC_PRED         ] = ff_pred8x8l_top_dc_10_sse4;
+            h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_sse4;
+            h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_sse4;
+            h->pred8x8l[VERT_RIGHT_PRED     ] = ff_pred8x8l_vertical_right_10_sse4;
+        }
 #if HAVE_AVX
         if (mm_flags & AV_CPU_FLAG_AVX) {
             h->pred4x4[DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_10_avx;
             h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_avx;
             h->pred4x4[VERT_RIGHT_PRED     ] = ff_pred4x4_vertical_right_10_avx;
             h->pred4x4[HOR_DOWN_PRED       ] = ff_pred4x4_horizontal_down_10_avx;
+
+            h->pred8x8l[VERT_PRED           ] = ff_pred8x8l_vertical_10_avx;
+            h->pred8x8l[HOR_PRED            ] = ff_pred8x8l_horizontal_10_avx;
+            h->pred8x8l[DC_PRED             ] = ff_pred8x8l_dc_10_avx;
+            h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_avx;
+            h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_avx;
+            h->pred8x8l[VERT_RIGHT_PRED     ] = ff_pred8x8l_vertical_right_10_avx;
+            h->pred8x8l[HOR_UP_PRED         ] = ff_pred8x8l_horizontal_up_10_avx;
         }
 #endif /* HAVE_AVX */
     }
-- 
1.7.5.1

_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel

Reply via email to