---
libavcodec/x86/dsputil_mmx.c | 6 ++--
libavcodec/x86/dsputil_yasm.asm | 66 +++++++++++++++++++--------------------
libavutil/x86/x86util.asm | 50 ++++++++++++++++++++++--------
libswscale/x86/scale.asm | 22 +++++--------
4 files changed, 80 insertions(+), 64 deletions(-)
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 827705c..aca0827 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -2513,8 +2513,8 @@ void ff_vector_clip_int32_mmx (int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2 (int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
-void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
- int32_t min, int32_t max, unsigned int len);
+void ff_vector_clip_int32_sse2_atom(int32_t *dst, const int32_t *src,
+ int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4 (int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
@@ -2910,7 +2910,7 @@ static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
if (mm_flags & AV_CPU_FLAG_ATOM) {
- c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
+ c->vector_clip_int32 = ff_vector_clip_int32_sse2_atom;
} else {
c->vector_clip_int32 = ff_vector_clip_int32_sse2;
}
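
For reference, the contract that every ff_vector_clip_int32 variant dispatched above implements is a per-element clamp of src into [min, max]; a minimal scalar C sketch (illustrative only, not part of the patch, name is made up):

    static void vector_clip_int32_ref(int32_t *dst, const int32_t *src,
                                      int32_t min, int32_t max, unsigned int len)
    {
        unsigned int i;
        for (i = 0; i < len; i++) {
            int32_t v = src[i];
            if (v < min) v = min;
            if (v > max) v = max;
            dst[i] = v;   /* the SIMD versions do this for several elements per iteration */
        }
    }
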
diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm
index 70a0aa1..746ba69 100644
--- a/libavcodec/x86/dsputil_yasm.asm
+++ b/libavcodec/x86/dsputil_yasm.asm
@@ -1054,50 +1054,50 @@ emu_edge mmx
; int32_t max, unsigned int len)
;-----------------------------------------------------------------------------
-; %1 = number of xmm registers used
-; %2 = number of inline load/process/store loops per asm loop
-; %3 = process 4*mmsize (%3=0) or 8*mmsize (%3=1) bytes per loop
-; %4 = CLIPD function takes min/max as float instead of int (CLIPD_SSE2)
-; %5 = suffix
-%macro VECTOR_CLIP_INT32 4-5
-cglobal vector_clip_int32%5, 5,5,%1, dst, src, min, max, len
-%if %4
- cvtsi2ss m4, minm
- cvtsi2ss m5, maxm
+; %1 = number of inline load/process/store loops per asm loop
+; %2 = process 4*mmsize (%2=0) or 8*mmsize (%2=1) bytes per loop
+
+%macro VECTOR_CLIP_INT32 2
+cglobal vector_clip_int32, 5,5,11, dst, src, min, max, len
+%if notcpuflag(sse4) && cpuflag(sse2) && notcpuflag(atom)
+ cvtsi2ss m4, minm
+ cvtsi2ss m5, maxm
+ %assign is_float 1
%else
movd m4, minm
movd m5, maxm
+ %assign is_float 0
%endif
SPLATD m4
SPLATD m5
.loop:
%assign %%i 1
-%rep %2
+%rep %1
mova m0, [srcq+mmsize*0*%%i]
mova m1, [srcq+mmsize*1*%%i]
mova m2, [srcq+mmsize*2*%%i]
mova m3, [srcq+mmsize*3*%%i]
-%if %3
+%if %2
mova m7, [srcq+mmsize*4*%%i]
mova m8, [srcq+mmsize*5*%%i]
mova m9, [srcq+mmsize*6*%%i]
mova m10, [srcq+mmsize*7*%%i]
%endif
- CLIPD m0, m4, m5, m6
- CLIPD m1, m4, m5, m6
- CLIPD m2, m4, m5, m6
- CLIPD m3, m4, m5, m6
-%if %3
- CLIPD m7, m4, m5, m6
- CLIPD m8, m4, m5, m6
- CLIPD m9, m4, m5, m6
- CLIPD m10, m4, m5, m6
+ CLIPD m0, m4, m5, m6, is_float
+ CLIPD m1, m4, m5, m6, is_float
+ CLIPD m2, m4, m5, m6, is_float
+ CLIPD m3, m4, m5, m6, is_float
+%if %2
+ CLIPD m7, m4, m5, m6, is_float
+ CLIPD m8, m4, m5, m6, is_float
+ CLIPD m9, m4, m5, m6, is_float
+ CLIPD m10, m4, m5, m6, is_float
%endif
mova [dstq+mmsize*0*%%i], m0
mova [dstq+mmsize*1*%%i], m1
mova [dstq+mmsize*2*%%i], m2
mova [dstq+mmsize*3*%%i], m3
-%if %3
+%if %2
mova [dstq+mmsize*4*%%i], m7
mova [dstq+mmsize*5*%%i], m8
mova [dstq+mmsize*6*%%i], m9
@@ -1105,28 +1105,26 @@ cglobal vector_clip_int32%5, 5,5,%1, dst, src, min, max, len
%endif
%assign %%i %%i+1
%endrep
- add srcq, mmsize*4*(%2+%3)
- add dstq, mmsize*4*(%2+%3)
- sub lend, mmsize*(%2+%3)
+ add srcq, mmsize*4*(%1+%2)
+ add dstq, mmsize*4*(%1+%2)
+ sub lend, mmsize*(%1+%2)
jg .loop
REP_RET
%endmacro
INIT_MMX mmx
%define SPLATD SPLATD_MMX
-%define CLIPD CLIPD_MMX
-VECTOR_CLIP_INT32 0, 1, 0, 0
-INIT_XMM sse2
+VECTOR_CLIP_INT32 1, 0
+INIT_XMM sse2,atom
%define SPLATD SPLATD_SSE2
-VECTOR_CLIP_INT32 6, 1, 0, 0, _int
-%define CLIPD CLIPD_SSE2
-VECTOR_CLIP_INT32 6, 2, 0, 1
+VECTOR_CLIP_INT32 1, 0
+INIT_XMM sse2
+VECTOR_CLIP_INT32 2, 0
INIT_XMM sse4
-%define CLIPD CLIPD_SSE41
%ifdef m8
-VECTOR_CLIP_INT32 11, 1, 1, 0
+VECTOR_CLIP_INT32 1, 1
%else
-VECTOR_CLIP_INT32 6, 1, 0, 0
+VECTOR_CLIP_INT32 1, 0
%endif
;-----------------------------------------------------------------------------
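
Plain SSE2 (the branch that is neither SSE4 nor the Atom flavour) has no packed signed dword min/max, which is why the bounds are loaded with cvtsi2ss and the clip runs through floats. A one-lane C model of that path (sketch only, not part of the patch):

    /* C model of the SSE2 float clip: cvtdq2ps / minps / maxps / cvtps2dq.
     * Exact only while v, min and max fit in a float's 24-bit mantissa,
     * which the callers of vector_clip_int32 are understood to guarantee.
     * Values are exact integers here, so truncation vs. rounding on the
     * way back does not matter. */
    static int32_t clip_via_float(int32_t v, int32_t min, int32_t max)
    {
        float f = (float)v;                  /* cvtdq2ps       */
        if (f > (float)max) f = (float)max;  /* minps with max */
        if (f < (float)min) f = (float)min;  /* maxps with min */
        return (int32_t)f;                   /* cvtps2dq       */
    }
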
diff --git a/libavutil/x86/x86util.asm b/libavutil/x86/x86util.asm
index 8450acd..79e1b17 100644
--- a/libavutil/x86/x86util.asm
+++ b/libavutil/x86/x86util.asm
@@ -584,37 +584,61 @@
pminsw %1, %3
%endmacro
-%macro PMINSD_MMX 3 ; dst, src, tmp
+; %1 = dst
+; %2 = src
+; %3 = tmp
+; %4 = src format: 0=dwords, 1=floats (requires SSE2)
+%macro PMINSD 2-4 0, 0
+%if %4 == 1
+ cvtdq2ps %1, %1
+ minps %1, %2
+ cvtps2dq %1, %1
+%elif cpuflag(sse4) && mmsize >= 16
+ pminsd %1, %2
+%else
mova %3, %2
pcmpgtd %3, %1
pxor %1, %2
pand %1, %3
pxor %1, %2
+%endif
%endmacro
-%macro PMAXSD_MMX 3 ; dst, src, tmp
+; %1 = dst
+; %2 = src
+; %3 = tmp
+; %4 = src format: 0=dwords, 1=floats (requires SSE2)
+%macro PMAXSD 2-4 0, 0
+%if %4 == 1
+ cvtdq2ps %1, %1
+ maxps %1, %2
+ cvtps2dq %1, %1
+%elif cpuflag(sse4) && mmsize >= 16
+ pmaxsd %1, %2
+%else
mova %3, %1
pcmpgtd %3, %2
pand %1, %3
pandn %3, %2
por %1, %3
+%endif
%endmacro
-%macro CLIPD_MMX 3-4 ; src/dst, min, max, tmp
- PMINSD_MMX %1, %3, %4
- PMAXSD_MMX %1, %2, %4
-%endmacro
-
-%macro CLIPD_SSE2 3-4 ; src/dst, min (float), max (float), unused
+; %1 = src/dst
+; %2 = min
+; %3 = max
+; %4 = tmp
+; %5 = min/max format: 0=dwords, 1=floats (requires SSE2)
+%macro CLIPD 3-5 0, 0
+%if %5 == 1
cvtdq2ps %1, %1
minps %1, %3
maxps %1, %2
cvtps2dq %1, %1
-%endmacro
-
-%macro CLIPD_SSE41 3-4 ; src/dst, min, max, unused
- pminsd %1, %3
- pmaxsd %1, %2
+%else
+ PMINSD %1, %3, %4, 0
+ PMAXSD %1, %2, %4, 0
+%endif
%endmacro
%macro VBROADCASTSS 2 ; dst xmm/ymm, src m32
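
The pre-SSE4 PMINSD fallback above is the usual compare-mask xor/and select; PMAXSD does the same job with pand/pandn/por. Per 32-bit lane, the PMINSD branch is equivalent to the C below (sketch only, not part of the patch):

    /* pcmpgtd builds an all-ones mask where src > dst; the pxor/pand/pxor
     * sequence then selects dst there (the smaller value) and src otherwise. */
    static int32_t pminsd_lane(int32_t dst, int32_t src)
    {
        int32_t mask = -(src > dst);        /* pcmpgtd          */
        return src ^ ((dst ^ src) & mask);  /* pxor, pand, pxor */
    }
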
diff --git a/libswscale/x86/scale.asm b/libswscale/x86/scale.asm
index d56e253..05ee06d 100644
--- a/libswscale/x86/scale.asm
+++ b/libswscale/x86/scale.asm
@@ -62,13 +62,11 @@ cglobal hscale%1to%2_%4, %5, 10, %6, pos0, dst, w, srcmem, filter, fltpos, fltsi
%define mov32 mov
%endif ; x86-64
%if %2 == 19
-%if mmsize == 8 ; mmx
- mova m2, [max_19bit_int]
-%elif cpuflag(sse4)
+%if cpuflag(sse4) || mmsize == 8
mova m2, [max_19bit_int]
-%else ; ssse3/sse2
+%else ; sse2
mova m2, [max_19bit_flt]
-%endif ; mmx/sse2/ssse3/sse4
+%endif
%endif ; %2 == 19
%if %1 == 16
mova m6, [minshort]
@@ -365,15 +363,11 @@ cglobal hscale%1to%2_%4, %5, 10, %6, pos0, dst, w, srcmem, filter, fltpos, fltsi
movd [dstq+wq*2], m0
%endif ; %3 ==/!= X
%else ; %2 == 19
-%if mmsize == 8
- PMINSD_MMX m0, m2, m4
-%elif cpuflag(sse4)
- pminsd m0, m2
-%else ; sse2/ssse3
- cvtdq2ps m0, m0
- minps m0, m2
- cvtps2dq m0, m0
-%endif ; mmx/sse2/ssse3/sse4
+%if cpuflag(sse4) || mmsize == 8
+ PMINSD m0, m2, m4, 0
+%else ; sse2
+ PMINSD m0, m2, m4, 1
+%endif
%ifnidn %3, X
mova [dstq+wq*(4>>wshr)], m0
%else ; %3 == X
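
On the scale.asm side the only functional change is that the clamp to the 19-bit maximum now goes through the shared PMINSD macro, with the last argument selecting the integer or float flavour. Two constants are still needed because the SSE2 path compares in float; 2^19 - 1 is far below 2^24, so the float round-trip loses nothing there. A small C illustration (not part of the patch):

    #define MAX_19BIT ((1 << 19) - 1)   /* 0x7ffff, matches max_19bit_int */

    /* The two clamps agree for inputs within float's exactly representable
     * integer range (|v| < 2^24), which covers the values hscale produces. */
    static int32_t clamp19_int(int32_t v)
    {
        return v > MAX_19BIT ? MAX_19BIT : v;
    }

    static int32_t clamp19_flt(int32_t v)
    {
        float f = (float)v;
        if (f > (float)MAX_19BIT)
            f = (float)MAX_19BIT;
        return (int32_t)f;
    }
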
--
1.7.1