It seems like something overflowed; I'll take a look at it...
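
For reference while I dig into it, here is a rough scalar sketch of the bilinear interpolation that the reverted RVV chroma MC code has to match, modelled loosely on the generic C path in libavcodec/rv40dsp.c. The function name, the w parameter and the fixed bias argument are placeholders for illustration only (the real code takes the bias from a small table indexed by x and y), so treat it as a reference for checking value ranges, not as the actual implementation:

/* Hypothetical scalar reference for the RV40 4-tap bilinear chroma MC,
 * loosely following the generic C code in libavcodec/rv40dsp.c.
 * Names and the fixed bias are illustrative only. */
#include <stddef.h>
#include <stdint.h>

static void rv40_chroma_mc_ref(uint8_t *dst, const uint8_t *src,
                               ptrdiff_t stride, int w, int h,
                               int x, int y, int bias)
{
    /* The four weights always sum to 64 (x and y are in 0..7), so each
     * output pixel is a 6-bit fixed-point weighted average of a 2x2
     * neighbourhood. */
    const int A = (8 - x) * (8 - y);
    const int B = (    x) * (8 - y);
    const int C = (8 - x) * (    y);
    const int D = (    x) * (    y);

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            /* Worst case before the shift is 64 * 255 + bias, which still
             * fits a 16-bit lane; this is why the vector version widens
             * to e16 accumulators before the narrowing clip (vnclipu). */
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] +
                      bias) >> 6;
        }
        dst += stride;
        src += stride;
    }
}

Whatever the real cause turns out to be, `make fate-rv40` (the test the revert below refers to) is the quickest way to check it.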

Rémi Denis-Courmont <r...@remlab.net> wrote on Mon, Nov 18, 2024 at 00:51:
> This reverts commit 5bc3b7f51308b8027e5468ef60d8336a960193e2.
>
> put_chroma_mc4, put_chroma_mc8 and avg_chroma_mc8 are confirmed to
> break `fate-rv40`. It is probably just luck that avg_chroma_mc4 does
> not also fail.
>
> Fixes #11306.
> ---
>  libavcodec/riscv/Makefile       |   2 -
>  libavcodec/riscv/rv40dsp_init.c |  51 -----
>  libavcodec/riscv/rv40dsp_rvv.S  | 375 --------------------------------
>  libavcodec/rv34dsp.h            |   1 -
>  libavcodec/rv40dsp.c            |   2 -
>  5 files changed, 431 deletions(-)
>  delete mode 100644 libavcodec/riscv/rv40dsp_init.c
>  delete mode 100644 libavcodec/riscv/rv40dsp_rvv.S
>
> diff --git a/libavcodec/riscv/Makefile b/libavcodec/riscv/Makefile
> index 1f1fa03329..58e14bb972 100644
> --- a/libavcodec/riscv/Makefile
> +++ b/libavcodec/riscv/Makefile
> @@ -58,8 +58,6 @@ RV-OBJS-$(CONFIG_PIXBLOCKDSP) += riscv/pixblockdsp_rvi.o
>  RVV-OBJS-$(CONFIG_PIXBLOCKDSP) += riscv/pixblockdsp_rvv.o
>  OBJS-$(CONFIG_RV34DSP) += riscv/rv34dsp_init.o
>  RVV-OBJS-$(CONFIG_RV34DSP) += riscv/rv34dsp_rvv.o
> -OBJS-$(CONFIG_RV40_DECODER) += riscv/rv40dsp_init.o
> -RVV-OBJS-$(CONFIG_RV40_DECODER) += riscv/rv40dsp_rvv.o
>  RV-OBJS-$(CONFIG_STARTCODE) += riscv/startcode_rvb.o
>  RVV-OBJS-$(CONFIG_STARTCODE) += riscv/startcode_rvv.o
>  OBJS-$(CONFIG_SVQ1_ENCODER) += riscv/svqenc_init.o
> diff --git a/libavcodec/riscv/rv40dsp_init.c b/libavcodec/riscv/rv40dsp_init.c
> deleted file mode 100644
> index 6aba571794..0000000000
> --- a/libavcodec/riscv/rv40dsp_init.c
> +++ /dev/null
> @@ -1,51 +0,0 @@
> -/*
> - * Copyright (c) 2024 Institue of Software Chinese Academy of Sciences (ISCAS).
> - *
> - * This file is part of FFmpeg.
> - *
> - * FFmpeg is free software; you can redistribute it and/or
> - * modify it under the terms of the GNU Lesser General Public
> - * License as published by the Free Software Foundation; either
> - * version 2.1 of the License, or (at your option) any later version.
> - *
> - * FFmpeg is distributed in the hope that it will be useful,
> - * but WITHOUT ANY WARRANTY; without even the implied warranty of
> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> - * Lesser General Public License for more details.
> - *
> - * You should have received a copy of the GNU Lesser General Public
> - * License along with FFmpeg; if not, write to the Free Software
> - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> - */
> -
> -#include "config.h"
> -
> -#include "libavutil/attributes.h"
> -#include "libavutil/cpu.h"
> -#include "libavutil/riscv/cpu.h"
> -#include "libavcodec/rv34dsp.h"
> -
> -void ff_put_rv40_chroma_mc8_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
> -                                int h, int x, int y);
> -void ff_put_rv40_chroma_mc4_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
> -                                int h, int x, int y);
> -
> -void ff_avg_rv40_chroma_mc8_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
> -                                int h, int x, int y);
> -void ff_avg_rv40_chroma_mc4_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
> -                                int h, int x, int y);
> -
> -av_cold void ff_rv40dsp_init_riscv(RV34DSPContext *c)
> -{
> -#if HAVE_RVV
> -    int flags = av_get_cpu_flags();
> -
> -    if ((flags & AV_CPU_FLAG_RVV_I32) && ff_rv_vlen_least(128) &&
> -        (flags & AV_CPU_FLAG_RVB)) {
> -        c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_rvv;
> -        c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_rvv;
> -        c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_rvv;
> -        c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_rvv;
> -    }
> -#endif
> -}
> diff --git a/libavcodec/riscv/rv40dsp_rvv.S b/libavcodec/riscv/rv40dsp_rvv.S
> deleted file mode 100644
> index ca431eb8ab..0000000000
> --- a/libavcodec/riscv/rv40dsp_rvv.S
> +++ /dev/null
> @@ -1,375 +0,0 @@
> -/*
> - * Copyright (c) 2024 Institue of Software Chinese Academy of Sciences (ISCAS).
> - *
> - * This file is part of FFmpeg.
> - *
> - * FFmpeg is free software; you can redistribute it and/or
> - * modify it under the terms of the GNU Lesser General Public
> - * License as published by the Free Software Foundation; either
> - * version 2.1 of the License, or (at your option) any later version.
> - *
> - * FFmpeg is distributed in the hope that it will be useful,
> - * but WITHOUT ANY WARRANTY; without even the implied warranty of
> - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> - * Lesser General Public License for more details.
> - *
> - * You should have received a copy of the GNU Lesser General Public
> - * License along with FFmpeg; if not, write to the Free Software
> - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> - */
> -
> -#include "libavutil/riscv/asm.S"
> -
> -.macro manual_avg dst src1 src2
> -        vadd.vv \dst, \src1, \src2
> -        vadd.vi \dst, \dst, 1
> -        vsrl.vi \dst, \dst, 1
> -.endm
> -
> -.macro do_chroma_mc type unroll
> -        csrwi vxrm, 2
> -        slli t2, a5, 3
> -        mul t1, a5, a4
> -        sh3add a5, a4, t2
> -        slli a4, a4, 3
> -        sub a5, t1, a5
> -        sub a7, a4, t1
> -        addi a6, a5, 64
> -        sub t0, t2, t1
> -        vsetvli t3, t6, e8, m1, ta, mu
> -        beqz t1, 2f
> -        blez a3, 8f
> -        li t4, 0
> -        li t2, 0
> -        li t5, 1
> -        addi a5, t3, 1
> -        slli t3, a2, (1 + \unroll)
> -1:      # if (xy != 0)
> -        add a4, a1, t4
> -        vsetvli zero, a5, e8, m1, ta, ma
> -  .ifc \unroll,1
> -        addi t2, t2, 4
> -  .else
> -        addi t2, t2, 2
> -  .endif
> -        vle8.v v10, (a4)
> -        add a4, a4, a2
> -        vslide1down.vx v11, v10, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmulu.vx v8, v10, a6
> -        vwmaccu.vx v8, a7, v11
> -        vsetvli zero, a5, e8, m1, ta, ma
> -        vle8.v v12, (a4)
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        add a4, a4, a2
> -        vwmaccu.vx v8, t0, v12
> -        vsetvli zero, a5, e8, m1, ta, ma
> -        vslide1down.vx v13, v12, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmulu.vx v10, v12, a6
> -        vwmaccu.vx v8, t1, v13
> -        vwmaccu.vx v10, a7, v13
> -        vsetvli zero, a5, e8, m1, ta, ma
> -        vle8.v v14, (a4)
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        add a4, a4, a2
> -        vwmaccu.vx v10, t0, v14
> -        vsetvli zero, a5, e8, m1, ta, ma
> -        vslide1down.vx v15, v14, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmulu.vx v12, v14, a6
> -        vwmaccu.vx v10, t1, v15
> -        vwmaccu.vx v12, a7, v15
> -        vnclipu.wi v15, v8, 6
> -  .ifc \type,avg
> -        vle8.v v9, (a0)
> -        manual_avg v15, v15, v9
> -  .endif
> -        vse8.v v15, (a0)
> -        add a0, a0, a2
> -        vnclipu.wi v8, v10, 6
> -  .ifc \type,avg
> -        vle8.v v9, (a0)
> -        manual_avg v8, v8, v9
> -  .endif
> -        add t4, t4, t3
> -        vse8.v v8, (a0)
> -        add a0, a0, a2
> -  .ifc \unroll,1
> -        vsetvli zero, a5, e8, m1, ta, ma
> -        vle8.v v14, (a4)
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        add a4, a4, a2
> -        vwmaccu.vx v12, t0, v14
> -        vsetvli zero, a5, e8, m1, ta, ma
> -        vslide1down.vx v15, v14, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmulu.vx v16, v14, a6
> -        vwmaccu.vx v12, t1, v15
> -        vwmaccu.vx v16, a7, v15
> -        vsetvli zero, a5, e8, m1, ta, ma
> -        vle8.v v14, (a4)
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmaccu.vx v16, t0, v14
> -        vsetvli zero, a5, e8, m1, ta, ma
> -        vslide1down.vx v14, v14, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmaccu.vx v16, t1, v14
> -        vnclipu.wi v8, v12, 6
> -  .ifc \type,avg
> -        vle8.v v9, (a0)
> -        manual_avg v8, v8, v9
> -  .endif
> -        vse8.v v8, (a0)
> -        add a0, a0, a2
> -        vnclipu.wi v8, v16, 6
> -  .ifc \type,avg
> -        vle8.v v9, (a0)
> -        manual_avg v8, v8, v9
> -  .endif
> -        vse8.v v8, (a0)
> -        add a0, a0, a2
> -  .endif
> -        blt t2, a3, 1b
> -        j 8f
> -2:
> -        bnez a4, 4f
> -        beqz t2, 4f
> -        blez a3, 8f
> -        li a4, 0
> -        li t1, 0
> -        slli a7, a2, (1 + \unroll)
> -3:      # if ((x8 - xy) == 0 && (y8 -xy) != 0)
> -        add a5, a1, a4
> -        vsetvli zero, zero, e8, m1, ta, ma
> -  .ifc \unroll,1
> -        addi t1, t1, 4
> -  .else
> -        addi t1, t1, 2
> -  .endif
> -        vle8.v v8, (a5)
> -        add a5, a5, a2
> -        add t2, a5, a2
> -        vwmulu.vx v10, v8, a6
> -        vle8.v v8, (a5)
> -        vwmulu.vx v12, v8, a6
> -        vle8.v v9, (t2)
> -        add t2, t2, a2
> -        add a5, t2, a2
> -        vwmaccu.vx v10, t0, v8
> -        add a4, a4, a7
> -        vwmaccu.vx v12, t0, v9
> -        vnclipu.wi v15, v10, 6
> -        vwmulu.vx v10, v9, a6
> -        vnclipu.wi v9, v12, 6
> -  .ifc \type,avg
> -        vle8.v v16, (a0)
> -        manual_avg v15, v15, v16
> -  .endif
> -        vse8.v v15, (a0)
> -        add a0, a0, a2
> -  .ifc \type,avg
> -        vle8.v v16, (a0)
> -        manual_avg v9, v9, v16
> -  .endif
> -        vse8.v v9, (a0)
> -        add a0, a0, a2
> -  .ifc \unroll,1
> -        vle8.v v8, (t2)
> -        vle8.v v14, (a5)
> -        vwmaccu.vx v10, t0, v8
> -        vwmulu.vx v12, v8, a6
> -        vnclipu.wi v8, v10, 6
> -        vwmaccu.vx v12, t0, v14
> -  .ifc \type,avg
> -        vle8.v v16, (a0)
> -        manual_avg v8, v8, v16
> -  .endif
> -        vse8.v v8, (a0)
> -        add a0, a0, a2
> -        vnclipu.wi v8, v12, 6
> -  .ifc \type,avg
> -        vle8.v v16, (a0)
> -        manual_avg v8, v8, v16
> -  .endif
> -        vse8.v v8, (a0)
> -        add a0, a0, a2
> -  .endif
> -        blt t1, a3, 3b
> -        j 8f
> -4:
> -        beqz a4, 6f
> -        bnez t2, 6f
> -        blez a3, 8f
> -        li a4, 0
> -        li t2, 0
> -        addi t0, t3, 1
> -        slli t1, a2, (1 + \unroll)
> -5:      # if ((x8 - xy) != 0 && (y8 -xy) == 0)
> -        add a5, a1, a4
> -        vsetvli zero, t0, e8, m1, ta, ma
> -  .ifc \unroll,1
> -        addi t2, t2, 4
> -  .else
> -        addi t2, t2, 2
> -  .endif
> -        vle8.v v8, (a5)
> -        add a5, a5, a2
> -        vslide1down.vx v9, v8, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmulu.vx v10, v8, a6
> -        vwmaccu.vx v10, a7, v9
> -        vsetvli zero, t0, e8, m1, ta, ma
> -        vle8.v v8, (a5)
> -        add a5, a5, a2
> -        vslide1down.vx v9, v8, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmulu.vx v12, v8, a6
> -        vwmaccu.vx v12, a7, v9
> -        vnclipu.wi v16, v10, 6
> -  .ifc \type,avg
> -        vle8.v v18, (a0)
> -        manual_avg v16, v16, v18
> -  .endif
> -        vse8.v v16, (a0)
> -        add a0, a0, a2
> -        vnclipu.wi v10, v12, 6
> -  .ifc \type,avg
> -        vle8.v v18, (a0)
> -        manual_avg v10, v10, v18
> -  .endif
> -        add a4, a4, t1
> -        vse8.v v10, (a0)
> -        add a0, a0, a2
> -  .ifc \unroll,1
> -        vsetvli zero, t0, e8, m1, ta, ma
> -        vle8.v v8, (a5)
> -        add a5, a5, a2
> -        vslide1down.vx v9, v8, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmulu.vx v14, v8, a6
> -        vwmaccu.vx v14, a7, v9
> -        vsetvli zero, t0, e8, m1, ta, ma
> -        vle8.v v8, (a5)
> -        vslide1down.vx v9, v8, t5
> -        vsetvli zero, t6, e8, m1, ta, ma
> -        vwmulu.vx v12, v8, a6
> -        vnclipu.wi v8, v14, 6
> -        vwmaccu.vx v12, a7, v9
> -  .ifc \type,avg
> -        vle8.v v18, (a0)
> -        manual_avg v8, v8, v18
> -  .endif
> -        vse8.v v8, (a0)
> -        add a0, a0, a2
> -        vnclipu.wi v8, v12, 6
> -  .ifc \type,avg
> -        vle8.v v18, (a0)
> -        manual_avg v8, v8, v18
> -  .endif
> -        vse8.v v8, (a0)
> -        add a0, a0, a2
> -  .endif
> -        blt t2, a3, 5b
> -        j 8f
> -6:
> -        blez a3, 8f
> -        li a4, 0
> -        li t2, 0
> -        slli a7, a2, (1 + \unroll)
> -7:      # the final else, none of the above conditions are met
> -        add t0, a1, a4
> -        vsetvli zero, zero, e8, m1, ta, ma
> -        add a5, a0, a4
> -        add a4, a4, a7
> -  .ifc \unroll,1
> -        addi t2, t2, 4
> -  .else
> -        addi t2, t2, 2
> -  .endif
> -        vle8.v v8, (t0)
> -        add t0, t0, a2
> -        add t1, t0, a2
> -        vwmulu.vx v10, v8, a6
> -        vle8.v v8, (t0)
> -        add t0, t1, a2
> -        vnclipu.wi v13, v10, 6
> -        vwmulu.vx v10, v8, a6
> -  .ifc \type,avg
> -        vle8.v v18, (a5)
> -        manual_avg v13, v13, v18
> -  .endif
> -        vse8.v v13, (a5)
> -        add a5, a5, a2
> -        vnclipu.wi v8, v10, 6
> -  .ifc \type,avg
> -        vle8.v v18, (a5)
> -        manual_avg v8, v8, v18
> -  .endif
> -        vse8.v v8, (a5)
> -        add a5, a5, a2
> -  .ifc \unroll,1
> -        vle8.v v9, (t1)
> -        vle8.v v12, (t0)
> -        vwmulu.vx v10, v9, a6
> -        vnclipu.wi v8, v10, 6
> -        vwmulu.vx v10, v12, a6
> -  .ifc \type,avg
> -        vle8.v v18, (a5)
> -        manual_avg v8, v8, v18
> -  .endif
> -        vse8.v v8, (a5)
> -        add a5, a5, a2
> -        vnclipu.wi v8, v10, 6
> -  .ifc \type,avg
> -        vle8.v v18, (a5)
> -        manual_avg v8, v8, v18
> -  .endif
> -        vse8.v v8, (a5)
> -  .endif
> -        blt t2, a3, 7b
> -8:
> -        ret
> -.endm
> -
> -func ff_put_rv40_chroma_mc_rvv, zve32x, zba
> -11:
> -        li a7, 3
> -        blt a3, a7, 12f
> -        do_chroma_mc put 1
> -12:
> -        do_chroma_mc put 0
> -endfunc
> -
> -func ff_avg_rv40_chroma_mc_rvv, zve32x, zba
> -21:
> -        li a7, 3
> -        blt a3, a7, 22f
> -        do_chroma_mc avg 1
> -22:
> -        do_chroma_mc avg 0
> -endfunc
> -
> -func ff_put_rv40_chroma_mc8_rvv, zve32x
> -        lpad 0
> -        li t6, 8
> -        j 11b
> -endfunc
> -
> -func ff_put_rv40_chroma_mc4_rvv, zve32x
> -        lpad 0
> -        li t6, 4
> -        j 11b
> -endfunc
> -
> -func ff_avg_rv40_chroma_mc8_rvv, zve32x
> -        lpad 0
> -        li t6, 8
> -        j 21b
> -endfunc
> -
> -func ff_avg_rv40_chroma_mc4_rvv, zve32x
> -        lpad 0
> -        li t6, 4
> -        j 21b
> -endfunc
> diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h
> index d59b3c2732..b15424d4ae 100644
> --- a/libavcodec/rv34dsp.h
> +++ b/libavcodec/rv34dsp.h
> @@ -83,7 +83,6 @@ void ff_rv34dsp_init_riscv(RV34DSPContext *c);
>  void ff_rv34dsp_init_x86(RV34DSPContext *c);
>
>  void ff_rv40dsp_init_aarch64(RV34DSPContext *c);
> -void ff_rv40dsp_init_riscv(RV34DSPContext *c);
>  void ff_rv40dsp_init_x86(RV34DSPContext *c);
>  void ff_rv40dsp_init_arm(RV34DSPContext *c);
>
> diff --git a/libavcodec/rv40dsp.c b/libavcodec/rv40dsp.c
> index 970faec5de..f0208b16ea 100644
> --- a/libavcodec/rv40dsp.c
> +++ b/libavcodec/rv40dsp.c
> @@ -709,8 +709,6 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c)
>          ff_rv40dsp_init_aarch64(c);
>  #elif ARCH_ARM
>          ff_rv40dsp_init_arm(c);
> -#elif ARCH_RISCV
> -        ff_rv40dsp_init_riscv(c);
>  #elif ARCH_X86
>          ff_rv40dsp_init_x86(c);
>  #endif
> --
> 2.45.2

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-requ...@ffmpeg.org with subject "unsubscribe".