---
 libavcodec/x86/dsputil_yasm.asm |   17 +++++++++--------
 1 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm
index c07c1f3..570182a 100644
--- a/libavcodec/x86/dsputil_yasm.asm
+++ b/libavcodec/x86/dsputil_yasm.asm
@@ -33,9 +33,9 @@ pd_16384: times 4 dd 16384
 
 SECTION_TEXT
 
-%macro SCALARPRODUCT 1
+%macro SCALARPRODUCT 0
 ; int scalarproduct_int16(int16_t *v1, int16_t *v2, int order, int shift)
-cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift
+cglobal scalarproduct_int16, 3,3,4, v1, v2, order, shift
     shl orderq, 1
     add v1q, orderq
     add v2q, orderq
@@ -65,7 +65,7 @@ cglobal scalarproduct_int16_%1, 3,3,4, v1, v2, order, shift
     RET
 
 ; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
-cglobal scalarproduct_and_madd_int16_%1, 4,4,8, v1, v2, v3, order, mul
+cglobal scalarproduct_and_madd_int16, 4,4,8, v1, v2, v3, order, mul
     shl orderq, 1
     movd    m7, mulm
 %if mmsize == 16
@@ -110,10 +110,10 @@ cglobal scalarproduct_and_madd_int16_%1, 4,4,8, v1, v2, v3, order, mul
     RET
 %endmacro
 
-INIT_MMX
-SCALARPRODUCT mmx2
-INIT_XMM
-SCALARPRODUCT sse2
+INIT_MMX mmx2
+SCALARPRODUCT
+INIT_XMM sse2
+SCALARPRODUCT
 
 %macro SCALARPRODUCT_LOOP 1
 align 16
@@ -161,7 +161,8 @@ align 16
 %endmacro
 
 ; int scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
-cglobal scalarproduct_and_madd_int16_ssse3, 4,5,10, v1, v2, v3, order, mul
+INIT_XMM ssse3
+cglobal scalarproduct_and_madd_int16, 4,5,10, v1, v2, v3, order, mul
     shl orderq, 1
     movd    m7, mulm
     pshuflw m7, m7, 0
-- 
1.7.1
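
For context, the rename works because the newer x86inc.asm cglobal convention
derives the function-name suffix from the cpu flags passed to INIT_MMX/INIT_XMM,
so the explicit %1 suffix inside the macro is no longer needed. Below is a
minimal sketch of that usage, assuming the post-cpuflags x86inc.asm this patch
targets; the function and argument names are made up for illustration only:

%include "x86inc.asm"

SECTION .text

; INIT_XMM sse2 selects 16-byte registers and makes cglobal emit the symbol
; copy_block16_sse2; the same body under "INIT_MMX mmx2" would emit
; copy_block16_mmx2 instead.
INIT_XMM sse2
cglobal copy_block16, 2,2,1, dst, src
    mova    m0, [srcq]      ; load 16 bytes from *src (aligned)
    mova    [dstq], m0      ; store them to *dst
    RET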
