From: "Ronald S. Bultje" <[email protected]>
This makes add_hfyu_left_prediction_sse4() handle sources that are not
16-byte aligned in its own function rather than by proxying the call to
add_hfyu_left_prediction_ssse3(). This fixes a crash on Win64, since the
sse4 version clobbers xmm6, but the ssse3 version (which uses MMX regs)
does not restore it, thus leading to XMM clobbering and RSP being off.
Fixes bug 342.
---
The previous patch assumed that dst would be aligned if src was not,
causing another crash. Instead, assume both are not aligned.
libavcodec/x86/dsputil_yasm.asm | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/libavcodec/x86/dsputil_yasm.asm b/libavcodec/x86/dsputil_yasm.asm
index 70a0aa1..58e4ca0 100644
--- a/libavcodec/x86/dsputil_yasm.asm
+++ b/libavcodec/x86/dsputil_yasm.asm
@@ -388,12 +388,16 @@ cglobal add_hfyu_median_prediction_mmx2, 6,6,0, dst, top, diff, w, left, left_to
RET
-%macro ADD_HFYU_LEFT_LOOP 1 ; %1 = is_aligned
+%macro ADD_HFYU_LEFT_LOOP 1-2 ; %1 = is_aligned, %2 = src_is_unaligned
add srcq, wq
add dstq, wq
neg wq
%%.loop:
+%if %0 == 2
+ movu m1, [srcq+wq]
+%else
mova m1, [srcq+wq]
+%endif
mova m2, m1
psllw m1, 8
paddb m1, m2
@@ -446,12 +450,14 @@ cglobal add_hfyu_left_prediction_sse4, 3,3,7, dst, src, w, left
movd m0, leftm
pslldq m0, 15
test srcq, 15
- jnz add_hfyu_left_prediction_ssse3.skip_prologue
+ jnz .src_unaligned
test dstq, 15
- jnz .unaligned
+ jnz .dst_unaligned
ADD_HFYU_LEFT_LOOP 1
-.unaligned:
+.dst_unaligned:
ADD_HFYU_LEFT_LOOP 0
+.src_unaligned:
+ ADD_HFYU_LEFT_LOOP 0, 1
; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
--
1.7.10.msysgit.1
_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel