x64 always has MMX, MMXEXT, SSE and SSE2, which means that some of the MMX, MMXEXT, SSE and 3dnow functions are always overridden by other functions (unless one explicitly disables SSE2, for example). This commit therefore disables the affected CAVS-dsp functions at compile time.
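To illustrate why these functions are dead code on x64: the init code installs function pointers in order of increasing capability, so whenever SSE2 is reported, the pointer stored by the earlier MMX init is simply overwritten. Below is a minimal self-contained sketch of that pattern; all names in it (init_qpel, FLAG_SSE2 and friends) are illustrative placeholders, not FFmpeg's actual API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*qpel_fn)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);

/* Illustrative stand-ins for the real SIMD routines. */
static void qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    (void)dst; (void)src; (void)stride;
    puts("mmx version");
}

static void qpel8_mc00_sse2(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    (void)dst; (void)src; (void)stride;
    puts("sse2 version");
}

enum { FLAG_MMX = 1 << 0, FLAG_SSE2 = 1 << 1 };   /* hypothetical CPU flags */

static void init_qpel(qpel_fn *tab, int cpu_flags)
{
    if (cpu_flags & FLAG_MMX)
        *tab = qpel8_mc00_mmx;   /* installed first ... */
    if (cpu_flags & FLAG_SSE2)
        *tab = qpel8_mc00_sse2;  /* ... and overwritten whenever SSE2 is
                                  * reported, which on x64 is always */
}

int main(void)
{
    qpel_fn fn = NULL;
    /* x64 guarantees both flags, so the MMX pointer can never survive
     * initialization; compiling the MMX routine in is wasted code. */
    init_qpel(&fn, FLAG_MMX | FLAG_SSE2);
    fn(NULL, NULL, 0);           /* prints "sse2 version" */
    return 0;
}

On 32-bit x86, by contrast, SSE2 may genuinely be absent at runtime, so the MMX fallbacks must stay compiled in; hence the ARCH_X86_32 guards in the patch below.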
Signed-off-by: Andreas Rheinhardt <andreas.rheinha...@outlook.com>
---
 libavcodec/x86/cavsdsp.c    | 20 +++++++++++++++-----
 libavcodec/x86/cavsidct.asm |  2 ++
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/libavcodec/x86/cavsdsp.c b/libavcodec/x86/cavsdsp.c
index f974f93fc0..fea9daa0ff 100644
--- a/libavcodec/x86/cavsdsp.c
+++ b/libavcodec/x86/cavsdsp.c
@@ -38,6 +38,7 @@
 
 #if HAVE_MMX_EXTERNAL
 
+#if ARCH_X86_32
 void ff_cavs_idct8_mmx(int16_t *out, const int16_t *in);
 
 static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, ptrdiff_t stride)
@@ -46,6 +47,7 @@ static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, ptrdiff_t stride)
     ff_cavs_idct8_mmx(b2, block);
     ff_add_pixels_clamped_mmx(b2, dst, stride);
 }
+#endif /* ARCH_X86_32 */
 
 void ff_cavs_idct8_sse2(int16_t *out, const int16_t *in);
 
@@ -335,11 +337,13 @@ static void put_cavs_qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src,
     ff_put_pixels8_mmx(dst, src, stride, 8);
 }
 
+#if ARCH_X86_32
 static void avg_cavs_qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                     ptrdiff_t stride)
 {
     ff_avg_pixels8_mmx(dst, src, stride, 8);
 }
+#endif
 
 static void avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, const uint8_t *src,
                                        ptrdiff_t stride)
@@ -347,6 +351,7 @@ static void avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, const uint8_t *src,
     ff_avg_pixels8_mmxext(dst, src, stride, 8);
 }
 
+#if ARCH_X86_32
 static void put_cavs_qpel16_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                      ptrdiff_t stride)
 {
@@ -364,6 +369,7 @@ static void avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, const uint8_t *src,
 {
     ff_avg_pixels16_mmxext(dst, src, stride, 16);
 }
+#endif
 
 static void put_cavs_qpel16_mc00_sse2(uint8_t *dst, const uint8_t *src,
                                       ptrdiff_t stride)
@@ -382,13 +388,15 @@ static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c, AVCodecContext *avctx)
 {
 #if HAVE_MMX_EXTERNAL
-    c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
     c->put_cavs_qpel_pixels_tab[1][0] = put_cavs_qpel8_mc00_mmx;
+#if ARCH_X86_32
+    c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
     c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmx;
     c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmx;
 
     c->cavs_idct8_add = cavs_idct8_add_mmx;
     c->idct_perm      = FF_IDCT_PERM_TRANSPOSE;
+#endif /* ARCH_X86_32 */
 #endif /* HAVE_MMX_EXTERNAL */
 }
 
@@ -408,7 +416,7 @@ CAVS_MC(avg_, 8, mmxext)
 CAVS_MC(avg_, 16, mmxext)
 #endif /* HAVE_MMXEXT_INLINE */
 
-#if HAVE_AMD3DNOW_INLINE
+#if ARCH_X86_32 && HAVE_AMD3DNOW_INLINE
 
 QPEL_CAVS(put_, PUT_OP, 3dnow)
 QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)
@@ -425,7 +433,7 @@ static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
     DSPFUNC(avg, 0, 16, 3dnow);
     DSPFUNC(avg, 1, 8, 3dnow);
 }
-#endif /* HAVE_AMD3DNOW_INLINE */
+#endif /* ARCH_X86_32 && HAVE_AMD3DNOW_INLINE */
 
 av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
 {
@@ -434,10 +442,10 @@ av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
 
     if (X86_MMX(cpu_flags))
         cavsdsp_init_mmx(c, avctx);
-#if HAVE_AMD3DNOW_INLINE
+#if ARCH_X86_32 && HAVE_AMD3DNOW_INLINE
     if (INLINE_AMD3DNOW(cpu_flags))
         cavsdsp_init_3dnow(c, avctx);
-#endif /* HAVE_AMD3DNOW_INLINE */
+#endif /* ARCH_X86_32 && HAVE_AMD3DNOW_INLINE */
 #if HAVE_MMXEXT_INLINE
     if (INLINE_MMXEXT(cpu_flags)) {
         DSPFUNC(put, 0, 16, mmxext);
@@ -448,7 +456,9 @@ av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
 #endif
 #if HAVE_MMX_EXTERNAL
     if (EXTERNAL_MMXEXT(cpu_flags)) {
+#if ARCH_X86_32
         c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmxext;
+#endif
         c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmxext;
     }
 #endif
diff --git a/libavcodec/x86/cavsidct.asm b/libavcodec/x86/cavsidct.asm
index 6c768c2646..070b46a6cc 100644
--- a/libavcodec/x86/cavsidct.asm
+++ b/libavcodec/x86/cavsidct.asm
@@ -107,6 +107,7 @@ SECTION .text
     SUMSUB_BA         w, 1, 0           ; m1 = dst3, m0 = dst4
 %endmacro
 
+%if ARCH_X86_32
 INIT_MMX mmx
 cglobal cavs_idct8, 2, 4, 8, 8 * 16, out, in, cnt, tmp
     mov            cntd, 2
@@ -168,6 +169,7 @@ cglobal cavs_idct8, 2, 4, 8, 8 * 16, out, in, cnt, tmp
     jg .loop_2
 
     RET
+%endif
 
 INIT_XMM sse2
 cglobal cavs_idct8, 2, 2, 8 + ARCH_X86_64, 0 - 8 * 16, out, in
--
2.34.1