> -----Original Message-----
> From: "Shiyou Yin" <yinshiyou...@loongson.cn>
> Date: 2025-04-18 09:37:08 (Friday)
> To: ffmpeg-devel@ffmpeg.org
> Subject: [FFmpeg-devel] [PATCH v3] avcodec: Fix fate-checkasm-hevc_pel failed on LA.
>
> Some loop counters were initialized incorrectly.
> This patch corrects the handling of the main loop iterations and of the residual rows.
> ---
> libavcodec/loongarch/hevc_mc_bi_lsx.c | 299 ++++++----------
> libavcodec/loongarch/hevc_mc_uni_lsx.c | 325 +++++-------------
> libavcodec/loongarch/hevcdsp_init_loongarch.c | 1 -
> libavcodec/loongarch/hevcdsp_lsx.c | 233 +++++--------
> libavcodec/loongarch/hevcdsp_lsx.h | 1 -
> 5 files changed, 276 insertions(+), 583 deletions(-)
>
> diff --git a/libavcodec/loongarch/hevc_mc_bi_lsx.c b/libavcodec/loongarch/hevc_mc_bi_lsx.c
> index d7ddd1c246..feb85956ac 100644
> --- a/libavcodec/loongarch/hevc_mc_bi_lsx.c
> +++ b/libavcodec/loongarch/hevc_mc_bi_lsx.c
> @@ -227,7 +227,7 @@ void hevc_bi_copy_8w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> const int16_t *src1_ptr, int32_t src2_stride,
> uint8_t *dst, int32_t dst_stride, int32_t height)
> {
> - int32_t loop_cnt = height >> 3;
> + uint32_t loop_cnt;
> int32_t res = (height & 7) >> 1;
> int32_t src_stride_2x = (src_stride << 1);
> int32_t dst_stride_2x = (dst_stride << 1);
> @@ -312,7 +312,8 @@ void hevc_bi_copy_12w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> const int16_t *src1_ptr, int32_t src2_stride,
> uint8_t *dst, int32_t dst_stride, int32_t height)
> {
> - uint32_t loop_cnt;
> + uint32_t loop_cnt = height >> 2;
> + uint32_t res = (height & 3) >> 1;
> int32_t src_stride_2x = (src_stride << 1);
> int32_t dst_stride_2x = (dst_stride << 1);
> int32_t src_stride_4x = (src_stride << 2);
> @@ -328,10 +329,9 @@ void hevc_bi_copy_12w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> __m128i in0, in1, in2, in3, in4, in5, in6, in7;
> __m128i dst0, dst1, dst2, dst3, dst4, dst5;
>
> - for (loop_cnt = 4; loop_cnt--;) {
> + for (; loop_cnt--;) {
> src0 = __lsx_vld(src0_ptr, 0);
> - DUP2_ARG2(__lsx_vldx, src0_ptr, src_stride, src0_ptr, src_stride_2x,
> - src1, src2);
> + DUP2_ARG2(__lsx_vldx, src0_ptr, src_stride, src0_ptr, src_stride_2x,
> src1, src2);
> src3 = __lsx_vldx(src0_ptr, src_stride_3x);
> src0_ptr += src_stride_4x;
> in0 = __lsx_vld(src1_ptr, 0);
> @@ -340,8 +340,7 @@ void hevc_bi_copy_12w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> in3 = __lsx_vldx(src1_ptr, src2_stride_3x);
> src1_ptr += src2_stride_2x;
> in4 = __lsx_vld(_src1, 0);
> - DUP2_ARG2(__lsx_vldx, _src1, src2_stride_x, _src1, src2_stride_2x,
> - in5, in6);
> + DUP2_ARG2(__lsx_vldx, _src1, src2_stride_x, _src1, src2_stride_2x,
> in5, in6);
> in7 = __lsx_vldx(_src1, src2_stride_3x);
> _src1 += src2_stride_2x;
>
> @@ -363,6 +362,31 @@ void hevc_bi_copy_12w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> __lsx_vstelm_w(out2, dst + dst_stride_3x, 8, 3);
> dst += dst_stride_4x;
> }
> + for (;res--;) {
> + src0 = __lsx_vld(src0_ptr, 0);
> + src1 = __lsx_vld(src0_ptr + src_stride, 0);
> + in0 = __lsx_vld(src1_ptr, 0);
> + in1 = __lsx_vldx(src1_ptr, src2_stride_x);
> + dst0 = __lsx_vsllwil_hu_bu(src0, 6);
> + dst1 = __lsx_vsllwil_hu_bu(src1, 6);
> + out0 = hevc_bi_rnd_clip(in0, dst0, in1, dst1);
> + __lsx_vstelm_d(out0, dst, 0, 0);
> + __lsx_vstelm_d(out0, dst + dst_stride, 0, 1);
> +
> + in0 = __lsx_vldrepl_d(_src1, 0);
> + in1 = __lsx_vldrepl_d(_src1 + src2_stride, 0);
> + src0 = __lsx_vilvh_w(src1, src0);
> + in0 = __lsx_vilvl_d(in1, in0);
> + dst0 = __lsx_vsllwil_hu_bu(src0, 6);
> + dst0 = __lsx_vsadd_h(dst0, in0);
> + dst0 = __lsx_vssrarni_bu_h(dst0, dst0, 7);
> + __lsx_vstelm_w(dst0, dst, 8, 0);
> + __lsx_vstelm_w(dst0, dst + dst_stride, 8, 1);
> + src0_ptr += src_stride_2x;
> + _src1 += src2_stride_x;
> + src1_ptr += src2_stride_x;
> + dst += dst_stride_2x;
> + }
> }
>
> static
> @@ -370,7 +394,8 @@ void hevc_bi_copy_16w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> const int16_t *src1_ptr, int32_t src2_stride,
> uint8_t *dst, int32_t dst_stride, int32_t height)
> {
> - uint32_t loop_cnt;
> + uint32_t loop_cnt = height >> 2;
> + uint32_t res = (height & 3) >> 1;
> int32_t src_stride_2x = (src_stride << 1);
> int32_t dst_stride_2x = (dst_stride << 1);
> int32_t src_stride_4x = (src_stride << 2);
> @@ -387,7 +412,7 @@ void hevc_bi_copy_16w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> __m128i dst0_r, dst1_r, dst2_r, dst3_r, dst0_l, dst1_l, dst2_l, dst3_l;
> __m128i zero = {0};
>
> - for (loop_cnt = (height >> 2); loop_cnt--;) {
> + for (; loop_cnt--;) {
> src0 = __lsx_vld(src0_ptr, 0);
> DUP2_ARG2(__lsx_vldx, src0_ptr, src_stride, src0_ptr, src_stride_2x,
> src1, src2);
> @@ -420,6 +445,27 @@ void hevc_bi_copy_16w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> __lsx_vstx(out3, dst, dst_stride_3x);
> dst += dst_stride_4x;
> }
> + for (;res--;) {
> + src0 = __lsx_vld(src0_ptr, 0);
> + src1 = __lsx_vldx(src0_ptr, src_stride);
> + in0 = __lsx_vld(src1_ptr, 0);
> + in1 = __lsx_vldx(src1_ptr, src2_stride_x);
> + in4 = __lsx_vld(_src1, 0);
> + in5 = __lsx_vldx(_src1, src2_stride_x);
> +
> + DUP2_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, dst0_r, dst1_r);
> + DUP2_ARG2(__lsx_vilvh_b, zero, src0, zero, src1, dst0_l, dst1_l);
> + DUP2_ARG2(__lsx_vslli_h, dst0_l, 6, dst1_l, 6, dst0_l, dst1_l);
> + out0 = hevc_bi_rnd_clip(in0, dst0_r, in4, dst0_l);
> + out1 = hevc_bi_rnd_clip(in1, dst1_r, in5, dst1_l);
> + __lsx_vst(out0, dst, 0);
> + __lsx_vstx(out1, dst, dst_stride);
> +
> + src0_ptr += src_stride_2x;
> + _src1 += src2_stride_x;
> + src1_ptr += src2_stride_x;
> + dst += dst_stride_2x;
> + }
> }
>
> static
> @@ -1061,7 +1107,8 @@ static void hevc_hz_4t_24w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> {
> const int16_t *src1_ptr_tmp;
> uint8_t *dst_tmp;
> - uint32_t loop_cnt;
> + uint32_t loop_cnt = height >> 2;
> + uint32_t res = (height & 3) >> 1;
> int32_t dst_stride_2x = (dst_stride << 1);
> int32_t dst_stride_4x = (dst_stride << 2);
> int32_t dst_stride_3x = dst_stride_2x + dst_stride;
> @@ -1086,7 +1133,7 @@ static void hevc_hz_4t_24w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> dst_tmp = dst + 16;
> src1_ptr_tmp = src1_ptr + 16;
>
> - for (loop_cnt = (height >> 2); loop_cnt--;) {
> + for (; loop_cnt--;) {
> DUP2_ARG2(__lsx_vld, src0_ptr, 0, src0_ptr, 16, src0, src1);
> src0_ptr += src_stride;
> DUP2_ARG2(__lsx_vld, src0_ptr, 0, src0_ptr, 16, src2, src3);
> @@ -1155,6 +1202,42 @@ static void hevc_hz_4t_24w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> __lsx_vstelm_d(dst1, dst_tmp + dst_stride_3x, 0, 1);
> dst_tmp += dst_stride_4x;
> }
> + for (; res--;) {
> + DUP2_ARG2(__lsx_vld, src0_ptr, 0, src0_ptr, 16, src0, src1);
> + src0_ptr += src_stride;
> + DUP2_ARG2(__lsx_vld, src0_ptr, 0, src0_ptr, 16, src2, src3);
> + src0_ptr += src_stride;
> + DUP2_ARG2(__lsx_vld, src1_ptr, 0, src1_ptr, 16, in0, in1);
> + src1_ptr += src2_stride;
> + DUP2_ARG2(__lsx_vld, src1_ptr, 0, src1_ptr, 16, in2, in3);
> + src1_ptr += src2_stride;
> +
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src0, mask2, src2,
> + src2, mask0, src3, src2, mask2, vec0, vec1, vec2, vec3);
> + DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
> + vec3, filt0, dst0, dst1, dst2, dst3);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src0, mask3, src2,
> + src2, mask1, src3, src2, mask3, vec0, vec1, vec2, vec3);
> + DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
> + dst2, vec2, filt1, dst3, vec3, filt1, dst0, dst1, dst2,
> dst3);
> + dst0 = hevc_bi_rnd_clip(in0, dst0, in1, dst1);
> + dst1 = hevc_bi_rnd_clip(in2, dst2, in3, dst3);
> + __lsx_vst(dst0, dst, 0);
> + __lsx_vstx(dst1, dst, dst_stride);
> + dst += dst_stride_2x;
> +
> + in0 = __lsx_vld(src1_ptr_tmp, 0);
> + in1 = __lsx_vldx(src1_ptr_tmp, src2_stride_x);
> + src1_ptr_tmp += src2_stride_x;
> + DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src3, src3, mask0,
> vec0, vec1);
> + DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, dst0, dst1);
> + DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask1, src3, src3, mask1, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1,
> filt1, dst0, dst1);
> + dst0 = hevc_bi_rnd_clip(in0, dst0, in1, dst1);
> + __lsx_vstelm_d(dst0, dst_tmp, 0, 0);
> + __lsx_vstelm_d(dst0, dst_tmp + dst_stride, 0, 1);
> + dst_tmp += dst_stride_2x;
> + }
> }
>
> static void hevc_hz_4t_32w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> @@ -1206,7 +1289,7 @@ static void hevc_vt_4t_12w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> uint8_t *dst, int32_t dst_stride,
> const int8_t *filter, int32_t height)
> {
> - int32_t loop_cnt;
> + uint32_t loop_cnt;
> int32_t src_stride_2x = (src_stride << 1);
> int32_t dst_stride_2x = (dst_stride << 1);
> int32_t dst_stride_4x = (dst_stride << 2);
> @@ -1295,7 +1378,7 @@ static void hevc_vt_4t_16w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> uint8_t *dst, int32_t dst_stride,
> const int8_t *filter, int32_t height)
> {
> - int32_t loop_cnt;
> + uint32_t loop_cnt = height >> 2;
> const int32_t src_stride_2x = (src_stride << 1);
> const int32_t dst_stride_2x = (dst_stride << 1);
> const int32_t src_stride_3x = src_stride_2x + src_stride;
> @@ -1316,7 +1399,7 @@ static void hevc_vt_4t_16w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
> DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_l, src21_l);
>
> - for (loop_cnt = (height >> 2); loop_cnt--;) {
> + for (; loop_cnt--;) {
> src3 = __lsx_vld(src0_ptr, 0);
> src4 = __lsx_vldx(src0_ptr, src_stride);
> src0_ptr += src_stride_2x;
> @@ -1480,193 +1563,6 @@ static void hevc_vt_4t_32w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> dst + 16, dst_stride, filter, height);
> }
>
> -static void hevc_hv_4t_6w_lsx(const uint8_t *src0_ptr, int32_t src_stride,
> - const int16_t *src1_ptr, int32_t src2_stride,
> - uint8_t *dst, int32_t dst_stride,
> - const int8_t *filter_x, const int8_t *filter_y,
> - int32_t height)
> -{
> - int32_t src_stride_2x = (src_stride << 1);
> - int32_t dst_stride_2x = (dst_stride << 1);
> - int32_t src_stride_4x = (src_stride << 2);
> - int32_t dst_stride_4x = (dst_stride << 2);
> - int32_t src2_stride_2x = (src2_stride << 1);
> - int32_t src2_stride_4x = (src2_stride << 2);
> - int32_t src_stride_3x = src_stride_2x + src_stride;
> - int32_t dst_stride_3x = dst_stride_2x + dst_stride;
> - int32_t src2_stride_3x = src2_stride_2x + src2_stride;
> - __m128i out0, out1;
> - __m128i src0, src1, src2, src3, src4, src5, src6;
> - __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, mask1;
> - __m128i filt0, filt1, filt_h0, filt_h1;
> - __m128i dsth0, dsth1, dsth2, dsth3, dsth4, dsth5;
> - __m128i dsth6, dsth7, dsth8, dsth9, dsth10;
> - __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
> - __m128i dst4_r, dst5_r, dst6_r, dst7_r;
> - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
> - __m128i reg0, reg1, reg2, reg3;
> - __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
> -
> - src0_ptr -= (src_stride + 1);
> - DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);
> -
> - filt_h1 = __lsx_vld(filter_y, 0);
> - filt_h1 = __lsx_vsllwil_h_b(filt_h1, 0);
> - DUP2_ARG2(__lsx_vreplvei_w, filt_h1, 0, filt_h1, 1, filt_h0, filt_h1);
> -
> - mask1 = __lsx_vaddi_bu(mask0, 2);
> -
> - src0 = __lsx_vld(src0_ptr, 0);
> - DUP2_ARG2(__lsx_vldx, src0_ptr, src_stride, src0_ptr, src_stride_2x,
> - src1, src2);
> - src0_ptr += src_stride_3x;
> -
> - DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0,
> vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, vec2,
> vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, vec4,
> vec5);
> -
> - DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dsth0, dsth1);
> - dsth2 = __lsx_vdp2_h_bu_b(vec4, filt0);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dsth0, vec1, filt1, dsth1, vec3, filt1,
> - dsth0, dsth1);
> - dsth2 = __lsx_vdp2add_h_bu_b(dsth2, vec5, filt1);
> -
> - DUP2_ARG2(__lsx_vilvl_h, dsth1, dsth0, dsth2, dsth1, tmp0, tmp2);
> - DUP2_ARG2(__lsx_vilvh_h, dsth1, dsth0, dsth2, dsth1, tmp1, tmp3);
> -
> - src3 = __lsx_vld(src0_ptr, 0);
> - DUP2_ARG2(__lsx_vldx, src0_ptr, src_stride, src0_ptr, src_stride_2x,
> - src4, src5);
> - src6 = __lsx_vldx(src0_ptr, src_stride_3x);
> - src0_ptr += src_stride_4x;
> - DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, vec0,
> vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, vec2,
> vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, vec4,
> vec5);
> - DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, vec6,
> vec7);
> -
> - DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0, vec6,
> - filt0, dsth3, dsth4, dsth5, dsth6);
> - DUP4_ARG3(__lsx_vdp2add_h_bu_b, dsth3, vec1, filt1, dsth4, vec3, filt1,
> dsth5,
> - vec5, filt1, dsth6, vec7, filt1, dsth3, dsth4, dsth5, dsth6);
> -
> - src3 = __lsx_vld(src0_ptr, 0);
> - DUP2_ARG2(__lsx_vldx, src0_ptr, src_stride, src0_ptr, src_stride_2x,
> - src4, src5);
> - src6 = __lsx_vldx(src0_ptr, src_stride_3x);
> -
> - DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, vec0,
> vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, vec2,
> vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, vec4,
> vec5);
> - DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, vec6,
> vec7);
> -
> - DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0, vec6,
> - filt0, dsth7, dsth8, dsth9, dsth10);
> - DUP4_ARG3(__lsx_vdp2add_h_bu_b, dsth7, vec1, filt1, dsth8, vec3, filt1,
> dsth9,
> - vec5, filt1, dsth10, vec7, filt1, dsth7, dsth8, dsth9, dsth10);
> -
> - DUP2_ARG2(__lsx_vilvl_h, dsth3, dsth2, dsth4, dsth3, tmp4, tmp6);
> - DUP2_ARG2(__lsx_vilvh_h, dsth3, dsth2, dsth4, dsth3, tmp5, tmp7);
> - DUP2_ARG2(__lsx_vilvl_h, dsth5, dsth4, dsth6, dsth5, dsth0, dsth2);
> - DUP2_ARG2(__lsx_vilvh_h, dsth5, dsth4, dsth6, dsth5, dsth1, dsth3);
> - DUP4_ARG2(__lsx_vdp2_w_h, tmp0, filt_h0, tmp2, filt_h0, tmp4, filt_h0,
> - tmp6, filt_h0, dst0_r, dst1_r, dst2_r, dst3_r);
> - DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, tmp4, filt_h1, dst1_r, tmp6,
> - filt_h1, dst2_r, dsth0, filt_h1, dst3_r, dsth2, filt_h1,
> - dst0_r, dst1_r, dst2_r, dst3_r);
> - DUP2_ARG2(__lsx_vpickev_d, tmp3, tmp1, tmp7, tmp5, tmp0, tmp8);
> - dst0_l = __lsx_vdp2_w_h(tmp0, filt_h0);
> - dst0_l = __lsx_vdp2add_w_h(dst0_l, tmp8, filt_h1);
> -
> - DUP2_ARG2(__lsx_vilvl_h, dsth7, dsth6, dsth8, dsth7, tmp0, tmp2);
> - DUP2_ARG2(__lsx_vilvh_h, dsth7, dsth6, dsth8, dsth7, tmp1, tmp3);
> - DUP2_ARG2(__lsx_vilvl_h, dsth9, dsth8, dsth10, dsth9, tmp4, tmp6);
> - DUP2_ARG2(__lsx_vilvh_h, dsth9, dsth8, dsth10, dsth9, tmp5, tmp7);
> - DUP4_ARG2(__lsx_vdp2_w_h, dsth0, filt_h0, dsth2, filt_h0, tmp0, filt_h0,
> - tmp2, filt_h0, dst4_r, dst5_r, dst6_r, dst7_r);
> - DUP4_ARG3(__lsx_vdp2add_w_h, dst4_r, tmp0, filt_h1, dst5_r, tmp2,
> - filt_h1, dst6_r, tmp4, filt_h1, dst7_r, tmp6, filt_h1,
> - dst4_r, dst5_r, dst6_r, dst7_r);
> - DUP2_ARG2(__lsx_vpickev_d, dsth3, dsth1, tmp3, tmp1, tmp0, tmp1);
> - tmp2 = __lsx_vpickev_d(tmp7, tmp5);
> -
> - DUP2_ARG2(__lsx_vdp2_w_h, tmp8, filt_h0, tmp0, filt_h0, dst1_l, dst2_l);
> - dst3_l = __lsx_vdp2_w_h(tmp1, filt_h0);
> - DUP2_ARG3(__lsx_vdp2add_w_h, dst1_l, tmp0, filt_h1, dst2_l, tmp1,
> filt_h1,
> - dst1_l, dst2_l);
> - dst3_l = __lsx_vdp2add_w_h(dst3_l, tmp2, filt_h1);
> -
> - DUP4_ARG2(__lsx_vsrai_d, dst0_r, 6, dst1_r, 6, dst2_r, 6, dst3_r, 6,
> - dst0_r, dst1_r, dst2_r, dst3_r);
> - DUP4_ARG2(__lsx_vsrai_d, dst4_r, 6, dst5_r, 6, dst6_r, 6, dst7_r, 6,
> - dst4_r, dst5_r, dst6_r, dst7_r);
> - DUP4_ARG2(__lsx_vsrai_d, dst0_l, 6, dst1_l, 6, dst2_l, 6, dst3_l, 6,
> - dst0_l, dst1_l, dst2_l, dst3_l);
> - DUP2_ARG2(__lsx_vpickev_h, dst1_r, dst0_r, dst3_r, dst2_r, tmp0, tmp1);
> - DUP2_ARG2(__lsx_vpickev_h, dst5_r, dst4_r, dst7_r, dst6_r, tmp2, tmp3);
> - DUP2_ARG2(__lsx_vpickev_h, dst1_l, dst0_l, dst3_l, dst2_l, tmp4, tmp5);
> -
> - reg0 = __lsx_vldrepl_d(src1_ptr, 0);
> - reg1 = __lsx_vldrepl_d(src1_ptr + src2_stride, 0);
> - dsth0 = __lsx_vilvl_d(reg1, reg0);
> - reg0 = __lsx_vldrepl_d(src1_ptr + src2_stride_2x, 0);
> - reg1 = __lsx_vldrepl_d(src1_ptr + src2_stride_3x, 0);
> - dsth1 = __lsx_vilvl_d(reg1, reg0);
> - src1_ptr += src2_stride_4x;
> - reg0 = __lsx_vldrepl_d(src1_ptr, 0);
> - reg1 = __lsx_vldrepl_d(src1_ptr + src2_stride, 0);
> - dsth2 = __lsx_vilvl_d(reg1, reg0);
> - reg0 = __lsx_vldrepl_d(src1_ptr + src2_stride_2x, 0);
> - reg1 = __lsx_vldrepl_d(src1_ptr + src2_stride_3x, 0);
> - dsth3 = __lsx_vilvl_d(reg1, reg0);
> -
> - DUP4_ARG2(__lsx_vsadd_h, dsth0, tmp0, dsth1, tmp1, dsth2, tmp2, dsth3,
> - tmp3, tmp0, tmp1, tmp2, tmp3);
> - DUP4_ARG2(__lsx_vmaxi_h, tmp0, 0, tmp1, 0, tmp2, 0, tmp3, 0,
> - tmp0, tmp1, tmp2, tmp3);
> - DUP2_ARG3(__lsx_vssrlrni_bu_h, tmp1, tmp0, 7, tmp3, tmp2, 7, out0, out1);
> -
> - __lsx_vstelm_w(out0, dst, 0, 0);
> - __lsx_vstelm_w(out0, dst + dst_stride, 0, 1);
> - __lsx_vstelm_w(out0, dst + dst_stride_2x, 0, 2);
> - __lsx_vstelm_w(out0, dst + dst_stride_3x, 0, 3);
> - dst += dst_stride_4x;
> - __lsx_vstelm_w(out1, dst, 0, 0);
> - __lsx_vstelm_w(out1, dst + dst_stride, 0, 1);
> - __lsx_vstelm_w(out1, dst + dst_stride_2x, 0, 2);
> - __lsx_vstelm_w(out1, dst + dst_stride_3x, 0, 3);
> - dst -= dst_stride_4x;
> -
> - src1_ptr -= src2_stride_4x;
> -
> - reg0 = __lsx_vldrepl_w(src1_ptr, 8);
> - reg1 = __lsx_vldrepl_w(src1_ptr + src2_stride, 8);
> - reg2 = __lsx_vldrepl_w(src1_ptr + src2_stride_2x, 8);
> - reg3 = __lsx_vldrepl_w(src1_ptr + src2_stride_3x, 8);
> - DUP2_ARG2(__lsx_vilvl_w, reg1, reg0, reg3, reg2, tmp0, tmp1);
> - dsth4 = __lsx_vilvl_d(tmp1, tmp0);
> - src1_ptr += src2_stride_4x;
> -
> - reg0 = __lsx_vldrepl_w(src1_ptr, 8);
> - reg1 = __lsx_vldrepl_w(src1_ptr + src2_stride, 8);
> - reg2 = __lsx_vldrepl_w(src1_ptr + src2_stride_2x, 8);
> - reg3 = __lsx_vldrepl_w(src1_ptr + src2_stride_3x, 8);
> - DUP2_ARG2(__lsx_vilvl_w, reg1, reg0, reg3, reg2, tmp0, tmp1);
> - dsth5 = __lsx_vilvl_d(tmp1, tmp0);
> - DUP2_ARG2(__lsx_vsadd_h, dsth4, tmp4, dsth5, tmp5, tmp4, tmp5);
> - DUP2_ARG2(__lsx_vmaxi_h, tmp4, 0, tmp5, 7, tmp4, tmp5);
> - out0 = __lsx_vssrlrni_bu_h(tmp5, tmp4, 7);
> -
> - __lsx_vstelm_h(out0, dst, 4, 0);
> - __lsx_vstelm_h(out0, dst + dst_stride, 4, 1);
> - __lsx_vstelm_h(out0, dst + dst_stride_2x, 4, 2);
> - __lsx_vstelm_h(out0, dst + dst_stride_3x, 4, 3);
> - dst += dst_stride_4x;
> - __lsx_vstelm_h(out0, dst, 4, 4);
> - __lsx_vstelm_h(out0, dst + dst_stride, 4, 5);
> - __lsx_vstelm_h(out0, dst + dst_stride_2x, 4, 6);
> - __lsx_vstelm_h(out0, dst + dst_stride_3x, 4, 7);
> -}
> -
> static av_always_inline
> void hevc_hv_4t_8x2_lsx(const uint8_t *src0_ptr, int32_t src_stride, const
> int16_t *src1_ptr,
> int32_t src2_stride, uint8_t *dst, int32_t
> dst_stride,
> @@ -2281,7 +2177,6 @@ BI_MC_HV(qpel, 48, 8);
> BI_MC_HV(qpel, 64, 8);
>
> BI_MC_HV(epel, 8, 4);
> -BI_MC_HV(epel, 6, 4);
> BI_MC_HV(epel, 16, 4);
> BI_MC_HV(epel, 24, 4);
> BI_MC_HV(epel, 32, 4);
> diff --git a/libavcodec/loongarch/hevc_mc_uni_lsx.c b/libavcodec/loongarch/hevc_mc_uni_lsx.c
> index 6bdc27a824..5ec115bab1 100644
> --- a/libavcodec/loongarch/hevc_mc_uni_lsx.c
> +++ b/libavcodec/loongarch/hevc_mc_uni_lsx.c
> @@ -545,7 +545,7 @@ static void hevc_hv_8t_64w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *
> }
>
> static av_always_inline
> -void common_vt_4t_24w_lsx(const uint8_t *src, int32_t src_stride,
> +void common_vt_2t_24w_lsx(const uint8_t *src, int32_t src_stride,
> uint8_t *dst, int32_t dst_stride,
> const int8_t *filter, int32_t height)
> {
> @@ -554,8 +554,8 @@ void common_vt_4t_24w_lsx(const uint8_t *src, int32_t src_stride,
> int32_t src_stride_3x = src_stride_2x + src_stride;
> const uint8_t *_src;
>
> - __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9,
> src10;
> - __m128i src11, filt0, filt1;
> + __m128i src0, src1, src2, src3, src4, src6, src7, src8, src9, src10;
> + __m128i filt0, filt1;
> __m128i src10_r, src32_r, src76_r, src98_r, src21_r, src43_r, src87_r;
> __m128i src109_r, src10_l, src32_l, src21_l, src43_l;
> __m128i out0_r, out1_r, out2_r, out3_r, out0_l, out1_l;
> @@ -578,7 +578,7 @@ void common_vt_4t_24w_lsx(const uint8_t *src, int32_t src_stride,
> _src += src_stride_3x;
> DUP2_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src76_r, src87_r);
>
> - for (loop_cnt = 8; loop_cnt--;) {
> + for (loop_cnt = height >> 1; loop_cnt--;) {
> /* 16 width */
> DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src3, src9);
> DUP2_ARG2(__lsx_vldx, src, src_stride, _src, src_stride, src4,
> src10);
> @@ -605,53 +605,27 @@ void common_vt_4t_24w_lsx(const uint8_t *src, int32_t src_stride,
>
> /* 16 + 8 width */
> DUP4_ARG3(__lsx_vssrarni_bu_h, out0_l, out0_r, 6, out2_r, out2_r, 6,
> - out3_r, out3_r, 6, out1_l, out1_r, 6, out1, out2, out3,
> out4);
> - __lsx_vst(out1, dst, 0);
> - __lsx_vstelm_d(out2, dst, 16, 0);
> - dst += dst_stride;
> - __lsx_vst(out4, dst, 0);
> - __lsx_vstelm_d(out3, dst, 16, 0);
> - dst += dst_stride;
> -
> - /* 16 width */
> - DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src5, src11);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, _src, src_stride, src2, src8);
> - DUP2_ARG2(__lsx_vilvl_b, src5, src4, src2, src5, src10_r, src21_r);
> - DUP2_ARG2(__lsx_vilvh_b, src5, src4, src2, src5, src10_l, src21_l);
> -
> - /* 8 width */
> - src += src_stride_2x;
> - _src += src_stride_2x;
> - DUP2_ARG2(__lsx_vilvl_b, src11, src10, src8, src11, src76_r,
> src87_r);
> -
> - /* 16 width */
> - DUP4_ARG2(__lsx_vdp2_h_bu_b, src32_r, filt0, src32_l, filt0, src43_r,
> - filt0, src43_l, filt0, out0_r, out0_l, out1_r, out1_l);
> - DUP4_ARG3(__lsx_vdp2add_h_bu_b, out0_r, src10_r, filt1, out0_l,
> src10_l,
> - filt1, out1_r, src21_r, filt1, out1_l, src21_l, filt1,
> - out0_r, out0_l, out1_r, out1_l);
> -
> - /* 8 width */
> - DUP2_ARG2(__lsx_vdp2_h_bu_b, src98_r, filt0, src109_r, filt0,
> - out2_r, out3_r);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, out2_r, src76_r, filt1, out3_r,
> - src87_r, filt1, out2_r, out3_r);
> -
> - /* 16 + 8 width */
> - DUP4_ARG3(__lsx_vssrarni_bu_h, out0_l, out0_r, 6, out2_r, out2_r, 6,
> - out1_l, out1_r, 6, out3_r, out3_r, 6, out1, out2, out3,
> out4);
> -
> + out1_l, out1_r, 6, out3_r, out3_r, 6, out1, out2, out3,
> out4);
> __lsx_vst(out1, dst, 0);
> __lsx_vstelm_d(out2, dst, 16, 0);
> dst += dst_stride;
> __lsx_vst(out3, dst, 0);
> __lsx_vstelm_d(out4, dst, 16, 0);
> dst += dst_stride;
> +
> + src10_r = src32_r;
> + src21_r = src43_r;
> + src10_l = src32_l;
> + src21_l = src43_l;
> + src2 = src4;
> + src76_r = src98_r;
> + src87_r = src109_r;
> + src8 = src10;
> }
> }
>
> static av_always_inline
> -void common_vt_4t_32w_lsx(const uint8_t *src, int32_t src_stride,
> +void common_vt_2t_32w_lsx(const uint8_t *src, int32_t src_stride,
> uint8_t *dst, int32_t dst_stride,
> const int8_t *filter, int32_t height)
> {
> @@ -805,102 +779,6 @@ void hevc_hv_4t_8x2_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
> __lsx_vstelm_d(out, dst + dst_stride, 0, 1);
> }
>
> -static av_always_inline
> -void hevc_hv_4t_8multx4_lsx(const uint8_t *src, int32_t src_stride, uint8_t
> *dst,
> - int32_t dst_stride, const int8_t *filter_x,
> - const int8_t *filter_y, int32_t width8mult)
> -{
> - uint32_t cnt;
> - const int32_t src_stride_2x = (src_stride << 1);
> - const int32_t dst_stride_2x = (dst_stride << 1);
> - const int32_t src_stride_4x = (src_stride << 2);
> - const int32_t src_stride_3x = src_stride_2x + src_stride;
> - const int32_t dst_stride_3x = dst_stride_2x + dst_stride;
> -
> - __m128i out0, out1;
> - __m128i src0, src1, src2, src3, src4, src5, src6, mask0, mask1;
> - __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
> - __m128i filt0, filt1, filt_h0, filt_h1, filter_vec;
> - __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, tmp0, tmp1, tmp2, tmp3;
> - __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
> - __m128i dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
> - __m128i dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
> -
> - src -= (src_stride + 1);
> - DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);
> -
> - filter_vec = __lsx_vld(filter_y, 0);
> - filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
> - DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0,
> filt_h1);
> -
> - mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
> - mask1 = __lsx_vaddi_bu(mask0, 2);
> -
> - for (cnt = width8mult; cnt--;) {
> - src0 = __lsx_vld(src, 0);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1,
> src2);
> - src3 = __lsx_vldx(src, src_stride_3x);
> - src += src_stride_4x;
> - src4 = __lsx_vld(src, 0);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5,
> src6);
> - src += (8 - src_stride_4x);
> - DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1,
> - vec0, vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1,
> - vec2, vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1,
> - vec4, vec5);
> -
> - DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst0, dst1);
> - dst2 = __lsx_vdp2_h_bu_b(vec4, filt0);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
> - dst0, dst1);
> - dst2 = __lsx_vdp2add_h_bu_b(dst2, vec5, filt1);
> -
> - DUP2_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst10_r, dst21_r);
> - DUP2_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst10_l, dst21_l);
> -
> - DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1,
> - vec0, vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1,
> - vec2, vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1,
> - vec4, vec5);
> - DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1,
> - vec6, vec7);
> -
> - DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
> - vec6, filt0, dst3, dst4, dst5, dst6);
> - DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3, filt1,
> - dst5, vec5, filt1, dst6, vec7, filt1, dst3, dst4, dst5,
> dst6);
> -
> - DUP4_ARG2(__lsx_vilvl_h, dst3, dst2, dst4, dst3, dst5, dst4, dst6,
> - dst5, dst32_r, dst43_r, dst54_r, dst65_r);
> - DUP4_ARG2(__lsx_vilvh_h, dst3, dst2, dst4, dst3, dst5, dst4, dst6,
> - dst5, dst32_l, dst43_l, dst54_l, dst65_l);
> -
> - DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0,
> dst21_r,
> - filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
> - DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0,
> dst43_r,
> - filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
> - DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l,
> dst32_l,
> - filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l,
> filt_h1,
> - dst0_r, dst0_l, dst1_r, dst1_l);
> - DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l,
> dst54_l,
> - filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l,
> filt_h1,
> - dst2_r, dst2_l, dst3_r, dst3_l);
> -
> - DUP4_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6,
> - dst2_l, dst2_r, 6, dst3_l, dst3_r, 6, tmp0, tmp1, tmp2,
> tmp3);
> - DUP2_ARG3(__lsx_vssrarni_bu_h, tmp1, tmp0, 6, tmp3, tmp2, 6, out0,
> out1);
> - __lsx_vstelm_d(out0, dst, 0, 0);
> - __lsx_vstelm_d(out0, dst + dst_stride, 0, 1);
> - __lsx_vstelm_d(out1, dst + dst_stride_2x, 0, 0);
> - __lsx_vstelm_d(out1, dst + dst_stride_3x, 0, 1);
> - dst += 8;
> - }
> -}
> -
> static av_always_inline
> void hevc_hv_4t_8x6_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
> int32_t dst_stride, const int8_t *filter_x,
> @@ -1009,10 +887,9 @@ void hevc_hv_4t_8x6_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
> }
>
> static av_always_inline
> -void hevc_hv_4t_8multx4mult_lsx(const uint8_t *src, int32_t src_stride,
> uint8_t *dst,
> - int32_t dst_stride, const int8_t *filter_x,
> - const int8_t *filter_y, int32_t height,
> - int32_t width8mult)
> +void hevc_hv_4t_8mult_lsx(const uint8_t *src, int32_t src_stride, uint8_t
> *dst,
> + int32_t dst_stride, const int8_t *filter_x,
> + const int8_t *filter_y, int32_t height, int32_t
> width8mult)
> {
> uint32_t loop_cnt, cnt;
> const uint8_t *src_tmp;
> @@ -1134,14 +1011,10 @@ void hevc_hv_4t_8w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
> {
> if (2 == height) {
> hevc_hv_4t_8x2_lsx(src, src_stride, dst, dst_stride, filter_x,
> filter_y);
> - } else if (4 == height) {
> - hevc_hv_4t_8multx4_lsx(src, src_stride, dst, dst_stride,
> - filter_x, filter_y, 1);
> } else if (6 == height) {
> hevc_hv_4t_8x6_lsx(src, src_stride, dst, dst_stride, filter_x,
> filter_y);
> } else if (0 == (height & 0x03)) {
> - hevc_hv_4t_8multx4mult_lsx(src, src_stride, dst, dst_stride,
> - filter_x, filter_y, height, 1);
> + hevc_hv_4t_8mult_lsx(src, src_stride, dst, dst_stride, filter_x,
> filter_y, height, 1);
> }
> }
>
> @@ -1160,17 +1033,14 @@ void hevc_hv_4t_12w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
> const int32_t src_stride_3x = src_stride_2x + src_stride;
> const int32_t dst_stride_3x = dst_stride_2x + dst_stride;
> __m128i out0, out1;
> - __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9,
> src10;
> + __m128i src0, src1, src2, src3, src4, src5, src6;
> __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
> - __m128i mask0, mask1, mask2, mask3;
> + __m128i mask0, mask1;
> __m128i filt0, filt1, filt_h0, filt_h1, filter_vec, tmp0, tmp1, tmp2,
> tmp3;
> __m128i dsth0, dsth1, dsth2, dsth3, dsth4, dsth5, dsth6;
> - __m128i dst10, dst21, dst22, dst73, dst84, dst95, dst106;
> - __m128i dst76_r, dst98_r, dst87_r, dst109_r;
> __m128i dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
> __m128i dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
> __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
> - __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
>
> src -= (src_stride + 1);
> DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);
> @@ -1186,8 +1056,7 @@ void hevc_hv_4t_12w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
> dst_tmp = dst;
>
> src0 = __lsx_vld(src_tmp, 0);
> - DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
> - src1, src2);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x, src1,
> src2);
> src_tmp += src_stride_3x;
>
> DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0,
> vec1);
> @@ -1196,17 +1065,15 @@ void hevc_hv_4t_12w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
>
> DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dsth0, dsth1);
> dsth2 = __lsx_vdp2_h_bu_b(vec4, filt0);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dsth0, vec1, filt1, dsth1, vec3, filt1,
> - dsth0, dsth1);
> + DUP2_ARG3(__lsx_vdp2add_h_bu_b, dsth0, vec1, filt1, dsth1, vec3, filt1,
> dsth0, dsth1);
> dsth2 = __lsx_vdp2add_h_bu_b(dsth2, vec5, filt1);
>
> DUP2_ARG2(__lsx_vilvl_h, dsth1, dsth0, dsth2, dsth1, dst10_r, dst21_r);
> DUP2_ARG2(__lsx_vilvh_h, dsth1, dsth0, dsth2, dsth1, dst10_l, dst21_l);
>
> - for (loop_cnt = 4; loop_cnt--;) {
> + for (loop_cnt = height >> 2; loop_cnt--;) {
> src3 = __lsx_vld(src_tmp, 0);
> - DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
> - src4, src5);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
> src4, src5);
> src6 = __lsx_vldx(src_tmp, src_stride_3x);
> src_tmp += src_stride_4x;
>
> @@ -1254,83 +1121,73 @@ void hevc_hv_4t_12w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *dst,
> dsth2 = dsth6;
> }
>
> - src += 8;
> - dst += 8;
> + src_tmp = src + 8;
> + dst_tmp = dst + 8;
>
> - mask2 = __lsx_vld(ff_hevc_mask_arr, 16);
> - mask3 = __lsx_vaddi_bu(mask2, 2);
> + src0 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x, src1,
> src2);
> + src_tmp += src_stride_3x;
>
> - src0 = __lsx_vld(src, 0);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
> - src += src_stride_3x;
> - DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask2, src1, src0, mask3, vec0,
> vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src2, src1, mask2, src2, src1, mask3, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, vec4,
> vec5);
>
> - DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst10, dst21);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst10, vec1, filt1, dst21, vec3, filt1,
> - dst10, dst21);
> + DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dsth0, dsth1);
> + dsth2 = __lsx_vdp2_h_bu_b(vec4, filt0);
> + DUP2_ARG3(__lsx_vdp2add_h_bu_b, dsth0, vec1, filt1, dsth1, vec3, filt1,
> dsth0, dsth1);
> + dsth2 = __lsx_vdp2add_h_bu_b(dsth2, vec5, filt1);
>
> - dst10_r = __lsx_vilvl_h(dst21, dst10);
> - dst21_r = __lsx_vilvh_h(dst21, dst10);
> - dst22 = __lsx_vreplvei_d(dst21, 1);
> + DUP2_ARG2(__lsx_vilvl_h, dsth1, dsth0, dsth2, dsth1, dst10_r, dst21_r);
> + DUP2_ARG2(__lsx_vilvh_h, dsth1, dsth0, dsth2, dsth1, dst10_l, dst21_l);
>
> - for (loop_cnt = 2; loop_cnt--;) {
> - src3 = __lsx_vld(src, 0);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src4,
> src5);
> - src6 = __lsx_vldx(src, src_stride_3x);
> - src += src_stride_4x;
> - src7 = __lsx_vld(src, 0);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8,
> src9);
> - src10 = __lsx_vldx(src, src_stride_3x);
> - src += src_stride_4x;
> - DUP4_ARG3(__lsx_vshuf_b, src7, src3, mask2, src7, src3, mask3, src8,
> - src4, mask2, src8, src4, mask3, vec0, vec1, vec2, vec3);
> - DUP4_ARG3(__lsx_vshuf_b, src9, src5, mask2, src9, src5, mask3, src10,
> - src6, mask2, src10, src6, mask3, vec4, vec5, vec6, vec7);
> + for (loop_cnt = height >> 2; loop_cnt--;) {
> + src3 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
> src4, src5);
> + src6 = __lsx_vldx(src_tmp, src_stride_3x);
> + src_tmp += src_stride_4x;
> +
> + DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src4,
> + src4, mask0, src4, src4, mask1, vec0, vec1, vec2, vec3);
> + DUP4_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, src6,
> + src6, mask0, src6, src6, mask1, vec4, vec5, vec6, vec7);
>
> DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
> - vec6, filt0, dst73, dst84, dst95, dst106);
> - DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst73, vec1, filt1, dst84, vec3,
> - filt1, dst95, vec5, filt1, dst106, vec7, filt1,
> - dst73, dst84, dst95, dst106);
> -
> - dst32_r = __lsx_vilvl_h(dst73, dst22);
> - DUP2_ARG2(__lsx_vilvl_h, dst84, dst73, dst95, dst84, dst43_r,
> dst54_r);
> - DUP2_ARG2(__lsx_vilvh_h, dst84, dst73, dst95, dst84, dst87_r,
> dst98_r);
> - dst65_r = __lsx_vilvl_h(dst106, dst95);
> - dst109_r = __lsx_vilvh_h(dst106, dst95);
> - dst22 = __lsx_vreplvei_d(dst73, 1);
> - dst76_r = __lsx_vilvl_h(dst22, dst106);
> -
> - DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst21_r, filt_h0,
> dst32_r,
> - filt_h0, dst43_r, filt_h0, dst0, dst1, dst2, dst3);
> - DUP4_ARG2(__lsx_vdp2_w_h, dst54_r, filt_h0, dst65_r, filt_h0,
> dst76_r,
> - filt_h0, dst87_r, filt_h0, dst4, dst5, dst6, dst7);
> - DUP4_ARG3(__lsx_vdp2add_w_h, dst0, dst32_r, filt_h1, dst1, dst43_r,
> - filt_h1, dst2, dst54_r, filt_h1, dst3, dst65_r, filt_h1,
> - dst0, dst1, dst2, dst3);
> - DUP4_ARG3(__lsx_vdp2add_w_h, dst4, dst76_r, filt_h1, dst5, dst87_r,
> - filt_h1, dst6, dst98_r, filt_h1, dst7, dst109_r, filt_h1,
> - dst4, dst5, dst6, dst7);
> -
> - DUP4_ARG3(__lsx_vsrani_h_w, dst1, dst0, 6, dst3, dst2, 6, dst5, dst4,
> - 6, dst7, dst6, 6, tmp0, tmp1, tmp2, tmp3);
> + vec6, filt0, dsth3, dsth4, dsth5, dsth6);
> + DUP4_ARG3(__lsx_vdp2add_h_bu_b, dsth3, vec1, filt1, dsth4,
> + vec3, filt1, dsth5, vec5, filt1, dsth6, vec7, filt1,
> + dsth3, dsth4, dsth5, dsth6);
> +
> + DUP4_ARG2(__lsx_vilvl_h, dsth3, dsth2, dsth4, dsth3, dsth5, dsth4,
> + dsth6, dsth5, dst32_r, dst43_r, dst54_r, dst65_r);
> + DUP4_ARG2(__lsx_vilvh_h, dsth3, dsth2, dsth4, dsth3, dsth5, dsth4,
> + dsth6, dsth5, dst32_l, dst43_l, dst54_l, dst65_l);
> +
> + DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0,
> dst21_r,
> + filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
> + DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0,
> dst43_r,
> + filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
> + DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l,
> dst32_l,
> + filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l,
> filt_h1,
> + dst0_r, dst0_l, dst1_r, dst1_l);
> + DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l,
> dst54_l,
> + filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l,
> filt_h1,
> + dst2_r, dst2_l, dst3_r, dst3_l);
> +
> + DUP4_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6,
> dst2_l,
> + dst2_r, 6, dst3_l, dst3_r, 6, tmp0, tmp1, tmp2, tmp3);
> DUP2_ARG3(__lsx_vssrarni_bu_h, tmp1, tmp0, 6, tmp3, tmp2, 6, out0,
> out1);
>
> - __lsx_vstelm_w(out0, dst, 0, 0);
> - __lsx_vstelm_w(out0, dst + dst_stride, 0, 1);
> - __lsx_vstelm_w(out0, dst + dst_stride_2x, 0, 2);
> - __lsx_vstelm_w(out0, dst + dst_stride_3x, 0, 3);
> - dst += dst_stride_4x;
> - __lsx_vstelm_w(out1, dst, 0, 0);
> - __lsx_vstelm_w(out1, dst + dst_stride, 0, 1);
> - __lsx_vstelm_w(out1, dst + dst_stride_2x, 0, 2);
> - __lsx_vstelm_w(out1, dst + dst_stride_3x, 0, 3);
> - dst += dst_stride_4x;
> + __lsx_vstelm_w(out0, dst_tmp, 0, 0);
> + __lsx_vstelm_w(out0, dst_tmp + dst_stride, 0, 2);
> + __lsx_vstelm_w(out1, dst_tmp + dst_stride_2x, 0, 0);
> + __lsx_vstelm_w(out1, dst_tmp + dst_stride_3x, 0, 2);
> + dst_tmp += dst_stride_4x;
>
> - dst10_r = dst98_r;
> - dst21_r = dst109_r;
> - dst22 = __lsx_vreplvei_d(dst106, 1);
> + dst10_r = dst54_r;
> + dst10_l = dst54_l;
> + dst21_r = dst65_r;
> + dst21_l = dst65_l;
> + dsth2 = dsth6;
> }
> }
>
> @@ -1338,29 +1195,21 @@ static void hevc_hv_4t_16w_lsx(const uint8_t *src, int32_t src_stride, uint8_t *
> int32_t dst_stride, const int8_t *filter_x,
> const int8_t *filter_y, int32_t height)
> {
> - if (4 == height) {
> - hevc_hv_4t_8multx4_lsx(src, src_stride, dst, dst_stride, filter_x,
> - filter_y, 2);
> - } else {
> - hevc_hv_4t_8multx4mult_lsx(src, src_stride, dst, dst_stride,
> - filter_x, filter_y, height, 2);
> - }
> + hevc_hv_4t_8mult_lsx(src, src_stride, dst, dst_stride, filter_x,
> filter_y, height, 2);
> }
>
> static void hevc_hv_4t_24w_lsx(const uint8_t *src, int32_t src_stride,
> uint8_t *dst,
> int32_t dst_stride, const int8_t *filter_x,
> const int8_t *filter_y, int32_t height)
> {
> - hevc_hv_4t_8multx4mult_lsx(src, src_stride, dst, dst_stride,
> - filter_x, filter_y, height, 3);
> + hevc_hv_4t_8mult_lsx(src, src_stride, dst, dst_stride, filter_x,
> filter_y, height, 3);
> }
>
> static void hevc_hv_4t_32w_lsx(const uint8_t *src, int32_t src_stride,
> uint8_t *dst,
> int32_t dst_stride, const int8_t *filter_x,
> const int8_t *filter_y, int32_t height)
> {
> - hevc_hv_4t_8multx4mult_lsx(src, src_stride, dst, dst_stride,
> - filter_x, filter_y, height, 4);
> + hevc_hv_4t_8mult_lsx(src, src_stride, dst, dst_stride, filter_x,
> filter_y, height, 4);
> }
>
> #define UNI_MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR)
> \
> @@ -1386,8 +1235,8 @@ UNI_MC(qpel, v, 32, 8, vt, my);
> UNI_MC(qpel, v, 48, 8, vt, my);
> UNI_MC(qpel, v, 64, 8, vt, my);
>
> -UNI_MC(epel, v, 24, 4, vt, my);
> -UNI_MC(epel, v, 32, 4, vt, my);
> +UNI_MC(epel, v, 24, 2, vt, my);
> +UNI_MC(epel, v, 32, 2, vt, my);
>
> #undef UNI_MC
>
> diff --git a/libavcodec/loongarch/hevcdsp_init_loongarch.c b/libavcodec/loongarch/hevcdsp_init_loongarch.c
> index 1585bda276..55fe6b15ad 100644
> --- a/libavcodec/loongarch/hevcdsp_init_loongarch.c
> +++ b/libavcodec/loongarch/hevcdsp_init_loongarch.c
> @@ -139,7 +139,6 @@ void ff_hevc_dsp_init_loongarch(HEVCDSPContext *c, const int bit_depth)
> c->put_hevc_epel_bi[6][1][0] =
> ff_hevc_put_hevc_bi_epel_v24_8_lsx;
> c->put_hevc_epel_bi[7][1][0] =
> ff_hevc_put_hevc_bi_epel_v32_8_lsx;
>
> - c->put_hevc_epel_bi[2][1][1] =
> ff_hevc_put_hevc_bi_epel_hv6_8_lsx;
> c->put_hevc_epel_bi[3][1][1] =
> ff_hevc_put_hevc_bi_epel_hv8_8_lsx;
> c->put_hevc_epel_bi[5][1][1] =
> ff_hevc_put_hevc_bi_epel_hv16_8_lsx;
> c->put_hevc_epel_bi[6][1][1] =
> ff_hevc_put_hevc_bi_epel_hv24_8_lsx;
> diff --git a/libavcodec/loongarch/hevcdsp_lsx.c b/libavcodec/loongarch/hevcdsp_lsx.c
> index 5747925525..993944164d 100644
> --- a/libavcodec/loongarch/hevcdsp_lsx.c
> +++ b/libavcodec/loongarch/hevcdsp_lsx.c
> @@ -772,56 +772,40 @@ static void hevc_hz_8t_12w_lsx(const uint8_t *src, int32_t src_stride,
> DUP2_ARG2(__lsx_vaddi_bu, mask4, 2, mask4, 4, mask5, mask6);
> mask7 = __lsx_vaddi_bu(mask4, 6);
>
> - for (loop_cnt = 4; loop_cnt--;) {
> + for (loop_cnt = height >> 2; loop_cnt--;) {
> src0 = __lsx_vld(src, 0);
> DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1,
> src2);
> src3 = __lsx_vldx(src, src_stride_3x);
> src4 = __lsx_vld(_src, 0);
> - DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x,
> - src5, src6);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x, src5,
> src6);
> src7 = __lsx_vldx(_src, src_stride_3x);
> src += src_stride_4x;
> _src += src_stride_4x;
>
> - DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
> - vec0, vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0,
> - vec2, vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask4, src7, src6, mask4,
> - vec4, vec5);
> + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask4, src7, src6, mask4, vec4,
> vec5);
> DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
> vec3, filt0, dst0, dst1, dst2, dst3);
> DUP2_ARG2(__lsx_vdp2_h_bu_b, vec4, filt0, vec5, filt0, dst4, dst5);
> - DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
> - vec0, vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1,
> - vec2, vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask5, src7, src6, mask5,
> - vec4, vec5);
> + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask5, src7, src6, mask5, vec4,
> vec5);
> DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
> dst2, vec2, filt1, dst3, vec3, filt1, dst0, dst1, dst2,
> dst3);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt1, dst5, vec5, filt1,
> - dst4, dst5);
> - DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
> - vec0, vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2,
> - vec2, vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask6, src7, src6, mask6,
> - vec4, vec5);
> + DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt1, dst5, vec5,
> filt1, dst4, dst5);
> + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask6, src7, src6, mask6, vec4,
> vec5);
> DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt2, dst1, vec1, filt2,
> dst2, vec2, filt2, dst3, vec3, filt2, dst0, dst1, dst2,
> dst3);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt2, dst5, vec5, filt2,
> - dst4, dst5);
> - DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
> - vec0, vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask3, src3, src3, mask3,
> - vec2, vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask7, src7, src6, mask7,
> - vec4, vec5);
> + DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt2, dst5, vec5,
> filt2, dst4, dst5);
> + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask3, src3, src3, mask3, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask7, src7, src6, mask7, vec4,
> vec5);
> DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt3, dst1, vec1, filt3,
> dst2, vec2, filt3, dst3, vec3, filt3, dst0, dst1, dst2,
> dst3);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt3, dst5, vec5, filt3,
> - dst4, dst5);
> + DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt3, dst5, vec5,
> filt3, dst4, dst5);
>
> __lsx_vst(dst0, dst, 0);
> __lsx_vstelm_d(dst4, dst, 16, 0);
> @@ -2972,16 +2956,14 @@ static void hevc_hv_4t_12w_lsx(const uint8_t *src,
> int32_t src_stride_3x = src_stride_2x + src_stride;
> int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;
>
> - __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9,
> src10;
> + __m128i src0, src1, src2, src3, src4, src5, src6;
> __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
> - __m128i mask0, mask1, mask2, mask3;
> + __m128i mask0, mask1;
> __m128i filt0, filt1, filt_h0, filt_h1, filter_vec, dst0;
> - __m128i dst1, dst2, dst3, dst4, dst5, dst6, dst10, dst21, dst22, dst73;
> - __m128i dst84, dst95, dst106, dst76_r, dst98_r, dst87_r, dst109_r;
> + __m128i dst1, dst2, dst3, dst4, dst5, dst6;
> __m128i dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
> __m128i dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
> __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
> - __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>
> src -= (src_stride + 1);
> DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);
> @@ -2997,8 +2979,7 @@ static void hevc_hv_4t_12w_lsx(const uint8_t *src,
> dst_tmp = dst;
>
> src0 = __lsx_vld(src_tmp, 0);
> - DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
> - src1, src2);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x, src1,
> src2);
> src_tmp += src_stride_3x;
>
> DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0,
> vec1);
> @@ -3007,34 +2988,27 @@ static void hevc_hv_4t_12w_lsx(const uint8_t *src,
>
> DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst0, dst1);
> dst2 = __lsx_vdp2_h_bu_b(vec4, filt0);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
> - dst0, dst1);
> + DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
> dst0, dst1);
> dst2 = __lsx_vdp2add_h_bu_b(dst2, vec5, filt1);
>
> DUP2_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst10_r, dst21_r);
> DUP2_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst10_l, dst21_l);
>
> - for (loop_cnt = 4; loop_cnt--;) {
> + for (loop_cnt = height >> 2; loop_cnt--;) {
> src3 = __lsx_vld(src_tmp, 0);
> - DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
> - src4, src5);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
> src4, src5);
> src6 = __lsx_vldx(src_tmp, src_stride_3x);
> src_tmp += src_stride_4x;
>
> - DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1,
> - vec0, vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1,
> - vec2, vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1,
> - vec4, vec5);
> - DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1,
> - vec6, vec7);
> + DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, vec4,
> vec5);
> + DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, vec6,
> vec7);
>
> DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
> vec6, filt0, dst3, dst4, dst5, dst6);
> - DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3,
> - filt1, dst5, vec5, filt1, dst6, vec7, filt1, dst3,
> - dst4, dst5, dst6);
> + DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3, filt1,
> + dst5, vec5, filt1, dst6, vec7, filt1, dst3, dst4, dst5,
> dst6);
>
> DUP2_ARG2(__lsx_vilvl_h, dst3, dst2, dst4, dst3, dst32_r, dst43_r);
> DUP2_ARG2(__lsx_vilvh_h, dst3, dst2, dst4, dst3, dst32_l, dst43_l);
> @@ -3051,12 +3025,8 @@ static void hevc_hv_4t_12w_lsx(const uint8_t *src,
> DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l,
> dst54_l,
> filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l,
> filt_h1,
> dst2_r, dst2_l, dst3_r, dst3_l);
> - DUP4_ARG2(__lsx_vsrai_w, dst0_r, 6, dst0_l, 6, dst1_r, 6, dst1_l, 6,
> - dst0_r, dst0_l, dst1_r, dst1_l);
> - DUP4_ARG2(__lsx_vsrai_w, dst2_r, 6, dst2_l, 6, dst3_r, 6, dst3_l, 6,
> - dst2_r, dst2_l, dst3_r, dst3_l);
> - DUP4_ARG2(__lsx_vpickev_h, dst0_l, dst0_r, dst1_l, dst1_r, dst2_l,
> dst2_r,
> - dst3_l, dst3_r, dst0_r, dst1_r, dst2_r, dst3_r);
> + DUP2_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6,
> dst0_r, dst1_r);
> + DUP2_ARG3(__lsx_vsrani_h_w, dst2_l, dst2_r, 6, dst3_l, dst3_r, 6,
> dst2_r, dst3_r);
> __lsx_vst(dst0_r, dst_tmp, 0);
> __lsx_vstx(dst1_r, dst_tmp, dst_stride_x);
> __lsx_vstx(dst2_r, dst_tmp, dst_stride_2x);
> @@ -3070,93 +3040,74 @@ static void hevc_hv_4t_12w_lsx(const uint8_t *src,
> dst2 = dst6;
> }
>
> - src += 8;
> - dst += 8;
> + src_tmp = src + 8;
> + dst_tmp = dst + 8;
>
> - mask2 = __lsx_vld(ff_hevc_mask_arr, 16);
> - mask3 = __lsx_vaddi_bu(mask2, 2);
> + src0 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x, src1,
> src2);
> + src_tmp += src_stride_3x;
>
> - src0 = __lsx_vld(src, 0);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
> - src += src_stride_3x;
> - DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask2, src1, src0, mask3, vec0,
> vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src2, src1, mask2, src2, src1, mask3, vec2,
> vec3);
> - DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst10, dst21);
> - DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst10, vec1, filt1, dst21, vec3, filt1,
> - dst10, dst21);
> - dst10_r = __lsx_vilvl_h(dst21, dst10);
> - dst21_r = __lsx_vilvh_h(dst21, dst10);
> - dst22 = __lsx_vreplvei_d(dst21, 1);
> -
> - for (loop_cnt = 2; loop_cnt--;) {
> - src3 = __lsx_vld(src, 0);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src4,
> src5);
> - src6 = __lsx_vldx(src, src_stride_3x);
> - src += src_stride_4x;
> - src7 = __lsx_vld(src, 0);
> - DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8,
> src9);
> - src10 = __lsx_vldx(src, src_stride_3x);
> - src += src_stride_4x;
> - DUP2_ARG3(__lsx_vshuf_b, src7, src3, mask2, src7, src3, mask3,
> - vec0, vec1);
> - DUP2_ARG3(__lsx_vshuf_b, src8, src4, mask2, src8, src4, mask3,
> - vec2, vec3);
> - DUP2_ARG3(__lsx_vshuf_b, src9, src5, mask2, src9, src5, mask3,
> - vec4, vec5);
> - DUP2_ARG3(__lsx_vshuf_b, src10, src6, mask2, src10, src6, mask3,
> - vec6, vec7);
> + DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, vec4,
> vec5);
> +
> + DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst0, dst1);
> + dst2 = __lsx_vdp2_h_bu_b(vec4, filt0);
> + DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
> dst0, dst1);
> + dst2 = __lsx_vdp2add_h_bu_b(dst2, vec5, filt1);
> +
> + DUP2_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst10_r, dst21_r);
> + DUP2_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst10_l, dst21_l);
> +
> + for (loop_cnt = height >> 2; loop_cnt--;) {
> + src3 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
> src4, src5);
> + src6 = __lsx_vldx(src_tmp, src_stride_3x);
> + src_tmp += src_stride_4x;
> +
> + DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, vec0,
> vec1);
> + DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, vec2,
> vec3);
> + DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, vec4,
> vec5);
> + DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, vec6,
> vec7);
>
> DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
> - vec6, filt0, dst73, dst84, dst95, dst106);
> - DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst73, vec1, filt1, dst84, vec3,
> - filt1, dst95, vec5, filt1, dst106, vec7, filt1, dst73,
> - dst84, dst95, dst106);
> -
> - DUP2_ARG2(__lsx_vilvl_h, dst73, dst22, dst84, dst73, dst32_r,
> dst43_r);
> - DUP2_ARG2(__lsx_vilvh_h, dst84, dst73, dst95, dst84, dst87_r,
> dst98_r);
> - DUP2_ARG2(__lsx_vilvl_h, dst95, dst84, dst106, dst95, dst54_r,
> dst65_r);
> - dst109_r = __lsx_vilvh_h(dst106, dst95);
> - dst22 = __lsx_vreplvei_d(dst73, 1);
> - dst76_r = __lsx_vilvl_h(dst22, dst106);
> + vec6, filt0, dst3, dst4, dst5, dst6);
> + DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3, filt1,
> + dst5, vec5, filt1, dst6, vec7, filt1, dst3, dst4, dst5,
> dst6);
>
> - DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst21_r, filt_h0,
> dst32_r,
> - filt_h0, dst43_r, filt_h0, tmp0, tmp1, tmp2, tmp3);
> - DUP4_ARG2(__lsx_vdp2_w_h, dst54_r, filt_h0, dst65_r, filt_h0,
> dst76_r,
> - filt_h0, dst87_r, filt_h0, tmp4, tmp5, tmp6, tmp7);
> - DUP4_ARG3(__lsx_vdp2add_w_h, tmp0, dst32_r, filt_h1, tmp1, dst43_r,
> - filt_h1, tmp2, dst54_r, filt_h1, tmp3, dst65_r, filt_h1,
> - tmp0, tmp1, tmp2, tmp3);
> - DUP4_ARG3(__lsx_vdp2add_w_h, tmp4, dst76_r, filt_h1, tmp5, dst87_r,
> - filt_h1, tmp6, dst98_r, filt_h1, tmp7, dst109_r, filt_h1,
> - tmp4, tmp5, tmp6, tmp7);
> - DUP4_ARG2(__lsx_vsrai_w, tmp0, 6, tmp1, 6, tmp2, 6, tmp3, 6,
> - tmp0, tmp1, tmp2, tmp3);
> - DUP4_ARG2(__lsx_vsrai_w, tmp4, 6, tmp5, 6, tmp6, 6, tmp7, 6,
> - tmp4, tmp5, tmp6, tmp7);
> - DUP4_ARG2(__lsx_vpickev_h, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4,
> - tmp7, tmp6, tmp0, tmp1, tmp2, tmp3);
> + DUP2_ARG2(__lsx_vilvl_h, dst3, dst2, dst4, dst3, dst32_r, dst43_r);
> + DUP2_ARG2(__lsx_vilvh_h, dst3, dst2, dst4, dst3, dst32_l, dst43_l);
> + DUP2_ARG2(__lsx_vilvl_h, dst5, dst4, dst6, dst5, dst54_r, dst65_r);
> + DUP2_ARG2(__lsx_vilvh_h, dst5, dst4, dst6, dst5, dst54_l, dst65_l);
>
> - __lsx_vstelm_d(tmp0, dst, 0, 0);
> - dst += dst_stride;
> - __lsx_vstelm_d(tmp0, dst, 0, 1);
> - dst += dst_stride;
> - __lsx_vstelm_d(tmp1, dst, 0, 0);
> - dst += dst_stride;
> - __lsx_vstelm_d(tmp1, dst, 0, 1);
> - dst += dst_stride;
> - __lsx_vstelm_d(tmp2, dst, 0, 0);
> - dst += dst_stride;
> - __lsx_vstelm_d(tmp2, dst, 0, 1);
> - dst += dst_stride;
> - __lsx_vstelm_d(tmp3, dst, 0, 0);
> - dst += dst_stride;
> - __lsx_vstelm_d(tmp3, dst, 0, 1);
> - dst += dst_stride;
> + DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0,
> dst21_r,
> + filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
> + DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0,
> dst43_r,
> + filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
> + DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l,
> dst32_l,
> + filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l,
> filt_h1,
> + dst0_r, dst0_l, dst1_r, dst1_l);
> + DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l,
> dst54_l,
> + filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l,
> filt_h1,
> + dst2_r, dst2_l, dst3_r, dst3_l);
> + DUP2_ARG3(__lsx_vsrani_h_w, dst0_l, dst0_r, 6, dst1_l, dst1_r, 6,
> dst0_r, dst1_r);
> + DUP2_ARG3(__lsx_vsrani_h_w, dst2_l, dst2_r, 6, dst3_l, dst3_r, 6,
> dst2_r, dst3_r);
> + __lsx_vstelm_d(dst0_r, dst_tmp, 0, 0);
> + dst_tmp += dst_stride;
> + __lsx_vstelm_d(dst1_r, dst_tmp, 0, 0);
> + dst_tmp += dst_stride;
> + __lsx_vstelm_d(dst2_r, dst_tmp, 0, 0);
> + dst_tmp += dst_stride;
> + __lsx_vstelm_d(dst3_r, dst_tmp, 0, 0);
> + dst_tmp += dst_stride;
>
> - dst10_r = dst98_r;
> - dst21_r = dst109_r;
> - dst22 = __lsx_vreplvei_d(dst106, 1);
> + dst10_r = dst54_r;
> + dst10_l = dst54_l;
> + dst21_r = dst65_r;
> + dst21_l = dst65_l;
> + dst2 = dst6;
> }
> +
> }
>
> static void hevc_hv_4t_16w_lsx(const uint8_t *src,
> diff --git a/libavcodec/loongarch/hevcdsp_lsx.h b/libavcodec/loongarch/hevcdsp_lsx.h
> index cf2a519e94..8562a0d681 100644
> --- a/libavcodec/loongarch/hevcdsp_lsx.h
> +++ b/libavcodec/loongarch/hevcdsp_lsx.h
> @@ -141,7 +141,6 @@ BI_MC(epel, v, 16);
> BI_MC(epel, v, 24);
> BI_MC(epel, v, 32);
>
> -BI_MC(epel, hv, 6);
> BI_MC(epel, hv, 8);
> BI_MC(epel, hv, 16);
> BI_MC(epel, hv, 24);
> --
> 2.20.1
>
LGTM
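
For anyone skimming the thread: the common pattern in these hunks is that several per-width helpers used hard-coded iteration counts (e.g. "for (loop_cnt = 4; loop_cnt--;)") that only matched one particular height, so other heights exercised by checkasm were mishandled. The patch derives the count from "height" and adds a residual loop for the leftover rows. Below is a minimal scalar sketch of that loop layout only, not the actual LSX code: copy_rows() and copy_block_sketch() are hypothetical names, plain memcpy stands in for the LSX intrinsics, and the real functions use different chunk sizes per width (the 8w case works in 8-row chunks, for example).

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical scalar stand-in for one vectorised row-copy step. */
    static void copy_rows(uint8_t *dst, ptrdiff_t dst_stride,
                          const uint8_t *src, ptrdiff_t src_stride,
                          int width, int rows)
    {
        for (int i = 0; i < rows; i++)
            memcpy(dst + i * dst_stride, src + i * src_stride, width);
    }

    /* Sketch of the corrected structure: full 4-row iterations derived
     * from height, then a residual loop for a trailing 2-row chunk
     * (e.g. height == 6). */
    static void copy_block_sketch(uint8_t *dst, ptrdiff_t dst_stride,
                                  const uint8_t *src, ptrdiff_t src_stride,
                                  int width, int height)
    {
        uint32_t loop_cnt = height >> 2;       /* main 4-row iterations  */
        uint32_t res      = (height & 3) >> 1; /* leftover 2-row chunks  */

        for (; loop_cnt--;) {
            copy_rows(dst, dst_stride, src, src_stride, width, 4);
            src += 4 * src_stride;
            dst += 4 * dst_stride;
        }
        for (; res--;) {
            copy_rows(dst, dst_stride, src, src_stride, width, 2);
            src += 2 * src_stride;
            dst += 2 * dst_stride;
        }
    }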