On 2016-11-11 21:43:09 +0200, Martin Storsjö wrote:
> This work is sponsored by, and copyright, Google.
> 
> These are ported from the ARM version; thanks to the larger
> amount of registers available, we can do the loop filters with
> 16 pixels at a time. The implementation is fully templated, with
> a single macro which can generate versions for both 8 and
> 16 pixels wide, for both 4, 8 and 16 pixels loop filters
> (and the 4/8 mixed versions as well).
> 
> For the 8 pixel wide versions, it is pretty close in speed (the
> v_4_8 and v_8_8 filters are the best examples of this; the h_4_8
> and h_8_8 filters seem to get some gain in the load/transpose/store
> part). For the 16 pixels wide ones, we get a speedup of around
> 1.2-1.4x compared to the 32 bit version.
> 
> Examples of runtimes vs the 32 bit version, on a Cortex A53:
>                                        ARM AArch64
> vp9_loop_filter_h_4_8_neon:          144.0   128.2
> vp9_loop_filter_h_8_8_neon:          207.0   182.5
> vp9_loop_filter_h_16_8_neon:         415.0   329.7
> vp9_loop_filter_h_16_16_neon:        672.0   558.0
> vp9_loop_filter_mix2_h_44_16_neon:   302.0   202.5
> vp9_loop_filter_mix2_h_48_16_neon:   365.0   307.2
> vp9_loop_filter_mix2_h_84_16_neon:   365.0   307.2
> vp9_loop_filter_mix2_h_88_16_neon:   376.0   307.2
> vp9_loop_filter_mix2_v_44_16_neon:   193.2   128.2
> vp9_loop_filter_mix2_v_48_16_neon:   246.7   219.2
> vp9_loop_filter_mix2_v_84_16_neon:   248.0   219.5
> vp9_loop_filter_mix2_v_88_16_neon:   302.0   219.2
> vp9_loop_filter_v_4_8_neon:           89.0    89.7
> vp9_loop_filter_v_8_8_neon:          141.0   137.7
> vp9_loop_filter_v_16_8_neon:         295.0   273.7
> vp9_loop_filter_v_16_16_neon:        546.0   454.7

Apple A7 benchmarks were just too unreliable, so I skipped them.

                         A57 gcc-5.3  neon
loop_filter_h_4_8_neon:        256.6  93.4
loop_filter_h_8_8_neon:        307.3 139.1
loop_filter_h_16_8_neon:       340.1 254.1
loop_filter_h_16_16_neon:      827.0 407.9
loop_filter_mix2_h_44_16_neon: 524.5 155.4
loop_filter_mix2_h_48_16_neon: 644.5 173.3
loop_filter_mix2_h_84_16_neon: 630.5 222.0
loop_filter_mix2_h_88_16_neon: 697.3 222.0
loop_filter_mix2_v_44_16_neon: 598.5 100.6
loop_filter_mix2_v_48_16_neon: 651.5 127.0
loop_filter_mix2_v_84_16_neon: 591.5 167.1
loop_filter_mix2_v_88_16_neon: 855.1 166.7
loop_filter_v_4_8_neon:        271.7  65.3
loop_filter_v_8_8_neon:        312.5 106.9
loop_filter_v_16_8_neon:       473.3 206.5
loop_filter_v_16_16_neon:      976.1 327.8

The speed-up compared to the C functions is 2.5x to 6x, and the
Cortex-A57 is again 30-50% faster than the Cortex-A53.

> The speedup vs C code in checkasm tests is around 2-7x, which is
> pretty much the same as for the 32 bit version. Even if these functions
> are faster than their 32 bit equivalent, the C version that we compare
> to also became around 1.3-1.7x faster than the C version in 32 bit.
> 
> Based on START_TIMER/STOP_TIMER wrapping around a few individual
> functions, the speedup vs C code is around 4-5x.
> ---
> v2: Instantiating the loop filter core once per configuration,
> instead of once per frontend filter, reducing the code size from
> 16560 bytes to 9756 bytes, with very low extra overhead. Added a
> missing comma from a macro invocation.
> ---
>  libavcodec/aarch64/Makefile              |    1 +
>  libavcodec/aarch64/vp9dsp_init_aarch64.c |   45 +
>  libavcodec/aarch64/vp9lpf_neon.S         | 1406 ++++++++++++++++++++++++++++++
>  3 files changed, 1452 insertions(+)
>  create mode 100644 libavcodec/aarch64/vp9lpf_neon.S
> 
> diff --git a/libavcodec/aarch64/Makefile b/libavcodec/aarch64/Makefile
> index 0b06b33..5c1d118 100644
> --- a/libavcodec/aarch64/Makefile
> +++ b/libavcodec/aarch64/Makefile
> @@ -45,4 +45,5 @@ NEON-OBJS-$(CONFIG_DCA_DECODER)         += aarch64/dcadsp_neon.o               \
>                                             aarch64/synth_filter_neon.o
>  NEON-OBJS-$(CONFIG_VORBIS_DECODER)      += aarch64/vorbisdsp_neon.o
>  NEON-OBJS-$(CONFIG_VP9_DECODER)         += aarch64/vp9itxfm_neon.o            \
> +                                           aarch64/vp9lpf_neon.o              \
>                                             aarch64/vp9mc_neon.o
> diff --git a/libavcodec/aarch64/vp9dsp_init_aarch64.c b/libavcodec/aarch64/vp9dsp_init_aarch64.c
> index da6f7f9..486ae1e 100644
> --- a/libavcodec/aarch64/vp9dsp_init_aarch64.c
> +++ b/libavcodec/aarch64/vp9dsp_init_aarch64.c
> @@ -195,8 +195,53 @@ static av_cold void vp9dsp_itxfm_init_aarch64(VP9DSPContext *dsp)
>      }
>  }
>  
> +#define define_loop_filter(dir, wd, len) \
> +void ff_vp9_loop_filter_##dir##_##wd##_##len##_neon(uint8_t *dst, ptrdiff_t stride, int E, int I, int H)
> +
> +#define define_loop_filters(wd, len) \
> +    define_loop_filter(h, wd, len);  \
> +    define_loop_filter(v, wd, len)
> +
> +define_loop_filters(4, 8);
> +define_loop_filters(8, 8);
> +define_loop_filters(16, 8);
> +
> +define_loop_filters(16, 16);
> +
> +define_loop_filters(44, 16);
> +define_loop_filters(48, 16);
> +define_loop_filters(84, 16);
> +define_loop_filters(88, 16);
> +
> +static av_cold void vp9dsp_loopfilter_init_aarch64(VP9DSPContext *dsp)
> +{
> +    int cpu_flags = av_get_cpu_flags();
> +
> +    if (have_neon(cpu_flags)) {
> +        dsp->loop_filter_8[0][1] = ff_vp9_loop_filter_v_4_8_neon;
> +        dsp->loop_filter_8[0][0] = ff_vp9_loop_filter_h_4_8_neon;
> +        dsp->loop_filter_8[1][1] = ff_vp9_loop_filter_v_8_8_neon;
> +        dsp->loop_filter_8[1][0] = ff_vp9_loop_filter_h_8_8_neon;
> +        dsp->loop_filter_8[2][1] = ff_vp9_loop_filter_v_16_8_neon;
> +        dsp->loop_filter_8[2][0] = ff_vp9_loop_filter_h_16_8_neon;
> +
> +        dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_neon;
> +        dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_neon;
> +
> +        dsp->loop_filter_mix2[0][0][0] = ff_vp9_loop_filter_h_44_16_neon;
> +        dsp->loop_filter_mix2[0][0][1] = ff_vp9_loop_filter_v_44_16_neon;
> +        dsp->loop_filter_mix2[0][1][0] = ff_vp9_loop_filter_h_48_16_neon;
> +        dsp->loop_filter_mix2[0][1][1] = ff_vp9_loop_filter_v_48_16_neon;
> +        dsp->loop_filter_mix2[1][0][0] = ff_vp9_loop_filter_h_84_16_neon;
> +        dsp->loop_filter_mix2[1][0][1] = ff_vp9_loop_filter_v_84_16_neon;
> +        dsp->loop_filter_mix2[1][1][0] = ff_vp9_loop_filter_h_88_16_neon;
> +        dsp->loop_filter_mix2[1][1][1] = ff_vp9_loop_filter_v_88_16_neon;
> +    }
> +}
> +
>  av_cold void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp)
>  {
>      vp9dsp_mc_init_aarch64(dsp);
> +    vp9dsp_loopfilter_init_aarch64(dsp);
>      vp9dsp_itxfm_init_aarch64(dsp);
>  }
> diff --git a/libavcodec/aarch64/vp9lpf_neon.S b/libavcodec/aarch64/vp9lpf_neon.S
> new file mode 100644
> index 0000000..c07eebf
> --- /dev/null
> +++ b/libavcodec/aarch64/vp9lpf_neon.S
> @@ -0,0 +1,1406 @@
> +/*
> + * Copyright (c) 2016 Google Inc.
> + *
> + * This file is part of Libav.
> + *
> + * Libav is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * Libav is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with Libav; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "libavutil/aarch64/asm.S"
> +#include "neon.S"
> +
> +
> +// The main loop filter macro is templated and can produce filters for
> +// vectors of 8 or 16 bytes. The register mapping throughout the filter
> +// is close to identical to the arm version (please try to maintain this,
> +// if either is changed!). When the arm version uses e.g. d20 for the
> +// input variable p3, the aarch64 version uses v20.8b or v20.16b, depending
> +// on vector length.
> +//
> +// The number of elements in the vector is passed in via the macro parameter
> +// \sz, which is either .8b or .16b. For simple instructions that don't
> +// lengthen or narrow things, this can easily be templated like this:
> +//      uabd            v4\sz,  v20\sz, v21\sz
> +//
> +// For instructions that lengthen or narrow content, the arm version would
> +// have used q registers. For these instructions, we have macros that expand
> +// into either a single e.g. uaddl instruction, or into a uaddl + uaddl2
> +// pair, depending on the \sz parameter. Wherever the arm version would have
> +// used a q register, these macros instead take two v registers, i.e. q3
> +// is mapped to v6+v7. For the case with 8 byte input vectors, such a
> +// lengthening operation is only stored in v6.8h (what was in q3 in the arm
> +// case), while the 16 byte input vectors will use v6.8h + v7.8h.
> +// Such a macro invocation would look like this:
> +//      uaddl_sz        v8.8h,  v9.8h,  v17, v18, \sz
> +//
> +// That is, in the 8 byte input vector case, the second register in these
> +// register pairs will be unused.
> +// Unfortunately, this makes the code quite hard to read. For readability,
> +// see the arm version instead.
> +
> +
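
As a concrete example of the templating (derived from the uaddl_sz
definition below), the invocation

        uaddl_sz        v8.8h,  v9.8h,  v17, v18, .16b

expands to

        uaddl           v8.8h,  v17.8b,  v18.8b
        uaddl2          v9.8h,  v17.16b, v18.16b

while with a .8b argument only the first uaddl is emitted and v9 stays
untouched.
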
> +.macro uabdl_sz dst1, dst2, in1, in2, sz
> +        uabdl           \dst1,  \in1\().8b,  \in2\().8b
> +.ifc \sz, .16b
> +        uabdl2          \dst2,  \in1\().16b, \in2\().16b
> +.endif
> +.endm
> +
> +.macro add_sz dst1, dst2, in1, in2, in3, in4, sz
> +        add             \dst1,  \in1,  \in3
> +.ifc \sz, .16b
> +        add             \dst2,  \in2,  \in4
> +.endif
> +.endm
> +
> +.macro sub_sz dst1, dst2, in1, in2, in3, in4, sz
> +        sub             \dst1,  \in1,  \in3
> +.ifc \sz, .16b
> +        sub             \dst2,  \in2,  \in4
> +.endif
> +.endm
> +
> +.macro uaddw_sz dst1, dst2, in1, in2, in3, sz
> +        uaddw           \dst1,  \in1, \in3\().8b
> +.ifc \sz, .16b
> +        uaddw2          \dst2,  \in2, \in3\().16b
> +.endif
> +.endm
> +
> +.macro usubw_sz dst1, dst2, in1, in2, in3, sz
> +        usubw           \dst1,  \in1, \in3\().8b
> +.ifc \sz, .16b
> +        usubw2          \dst2,  \in2, \in3\().16b
> +.endif
> +.endm
> +
> +.macro cmhs_sz dst1, dst2, in1, in2, in3, in4, sz
> +        cmhs            \dst1,  \in1,  \in3
> +.ifc \sz, .16b
> +        cmhs            \dst2,  \in2,  \in4
> +.endif
> +.endm
> +
> +.macro xtn_sz dst, in1, in2, sz
> +        xtn             \dst\().8b,  \in1
> +.ifc \sz, .16b
> +        xtn2            \dst\().16b, \in2
> +.endif
> +.endm
> +
> +.macro usubl_sz dst1, dst2, in1, in2, sz
> +        usubl           \dst1,  \in1\().8b,  \in2\().8b
> +.ifc \sz, .16b
> +        usubl2          \dst2,  \in1\().16b, \in2\().16b
> +.endif
> +.endm
> +
> +.macro sqxtn_sz dst, in1, in2, sz
> +        sqxtn           \dst\().8b,  \in1
> +.ifc \sz, .16b
> +        sqxtn2          \dst\().16b, \in2
> +.endif
> +.endm
> +
> +.macro sqxtun_sz dst, in1, in2, sz
> +        sqxtun          \dst\().8b,  \in1
> +.ifc \sz, .16b
> +        sqxtun2         \dst\().16b, \in2
> +.endif
> +.endm
> +
> +.macro mul_sz dst1, dst2, in1, in2, in3, in4, sz
> +        mul             \dst1,  \in1,  \in3
> +.ifc \sz, .16b
> +        mul             \dst2,  \in2,  \in4
> +.endif
> +.endm
> +
> +.macro saddw_sz dst1, dst2, in1, in2, in3, sz
> +        saddw           \dst1,  \in1, \in3\().8b
> +.ifc \sz, .16b
> +        saddw2          \dst2,  \in2, \in3\().16b
> +.endif
> +.endm
> +
> +.macro ssubw_sz dst1, dst2, in1, in2, in3, sz
> +        ssubw           \dst1,  \in1, \in3\().8b
> +.ifc \sz, .16b
> +        ssubw2          \dst2,  \in2, \in3\().16b
> +.endif
> +.endm
> +
> +.macro uxtl_sz dst1, dst2, in, sz
> +        uxtl            \dst1,  \in\().8b
> +.ifc \sz, .16b
> +        uxtl2           \dst2,  \in\().16b
> +.endif
> +.endm
> +
> +.macro uaddl_sz dst1, dst2, in1, in2, sz
> +        uaddl           \dst1,  \in1\().8b,  \in2\().8b
> +.ifc \sz, .16b
> +        uaddl2          \dst2,  \in1\().16b, \in2\().16b
> +.endif
> +.endm
> +
> +.macro rshrn_sz dst, in1, in2, shift, sz
> +        rshrn           \dst\().8b,  \in1, \shift
> +.ifc \sz, .16b
> +        rshrn2          \dst\().16b, \in2, \shift
> +.endif
> +.endm
> +
> +.macro ushll_sz dst1, dst2, in, shift, sz
> +        ushll           \dst1,  \in\().8b,  \shift
> +.ifc \sz, .16b
> +        ushll2          \dst2,  \in\().16b, \shift
> +.endif
> +.endm
> +
> +// The input to and output from this macro is in the registers v16-v31,
> +// and v0-v7 are used as scratch registers.
> +// p7 = v16 .. p3 = v20, p0 = v23, q0 = v24, q3 = v27, q7 = v31
> +// Depending on the width of the loop filter, we either use v16-v19
> +// and v28-v31 as temp registers, or v8-v15.
> +// When comparing to the arm version, tmpq1 == tmp1 + tmp2,
> +// tmpq2 == tmp3 + tmp4, etc.
> +.macro loop_filter wd, sz, mix, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8
> +.if \mix == 0
> +        dup             v0.8h,  w2        // E
> +        dup             v1.8h,  w2        // E
> +        dup             v2\sz,  w3        // I
> +        dup             v3\sz,  w4        // H
> +.else
> +        and             w5,     w2,  #0xff
> +        and             w6,     w3,  #0xff
> +        and             w7,     w4,  #0xff
> +        dup             v0.8h,  w5        // E

        dup             v0.8h,  w2        // E
        ushr            v1.8h,  v0.8h,  #8
        bic             v0.8h,  #255, lsl #8

is one instruction less and looks faster on a Cortex-A57.

> +        dup             v2.8b,  w6        // I
> +        dup             v3.8b,  w7        // H

the last two can use w3 and w4 directly, without the and with #0xff;
dup to a .8b vector only reads the low byte of the source register
anyway:
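
        dup             v2.8b,  w3        // I
        dup             v3.8b,  w4        // H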

> +        lsr             w5,     w2,  #8
> +        lsr             w6,     w3,  #8
> +        lsr             w7,     w4,  #8
> +        dup             v1.8h,  w5        // E
> +        dup             v4.8b,  w6        // I
> +        dup             v5.8b,  w7        // H
> +        trn1            v2.2d,  v2.2d,  v4.2d
> +        trn1            v3.2d,  v3.2d,  v5.2d
> +.endif
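
(For the mix variants, E/I/H arrive with the parameters for the two 8
pixel halves packed into one int each, e.g. w2 = (E_second << 8) |
E_first, hence the and/lsr pairs above.)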
> +
> +        uabd            v4\sz,  v20\sz, v21\sz        // abs(p3 - p2)
> +        uabd            v5\sz,  v21\sz, v22\sz        // abs(p2 - p1)
> +        uabd            v6\sz,  v22\sz, v23\sz        // abs(p1 - p0)
> +        uabd            v7\sz,  v24\sz, v25\sz        // abs(q0 - q1)
> +        uabd            \tmp1\sz,  v25\sz, v26\sz     // abs(q1 - q2)
> +        uabd            \tmp2\sz,  v26\sz, v27\sz     // abs(q2 - q3)
> +        umax            v4\sz,  v4\sz,  v5\sz
> +        umax            v5\sz,  v6\sz,  v7\sz
> +        umax            \tmp1\sz, \tmp1\sz, \tmp2\sz
> +        uabdl_sz        v6.8h,  v7.8h,  v23, v24, \sz // abs(p0 - q0)
> +        umax            v4\sz,  v4\sz,  v5\sz
> +        add_sz          v6.8h,  v7.8h,  v6.8h,  v7.8h,  v6.8h,  v7.8h, \sz // abs(p0 - q0) * 2
> +        uabd            v5\sz,  v22\sz, v25\sz        // abs(p1 - q1)
> +        umax            v4\sz,  v4\sz,  \tmp1\sz      // max(abs(p3 - p2), ..., abs(q2 - q3))
> +        ushr            v5\sz,  v5\sz,  #1
> +        cmhs            v4\sz,  v2\sz,  v4\sz         // max(abs()) <= I
> +        uaddw_sz        v6.8h,  v7.8h,  v6.8h,  v7.8h,  v5, \sz // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
> +        cmhs_sz         v6.8h,  v7.8h,  v0.8h,  v1.8h,  v6.8h,  v7.8h, \sz
> +        xtn_sz          v5,     v6.8h,  v7.8h,  \sz
> +        and             v4\sz,  v4\sz,  v5\sz         // fm
> +
> +        mov             x5,  v4.d[0]
> +.ifc \sz, .16b
> +        mov             x6,  v4.d[1]
> +        orr             x5,  x5,  x6
> +.endif
> +        // If no pixels need filtering, just exit as soon as possible
> +        cbz             x5,  9f
> +
> +.if \wd >= 8
> +        movi            v0\sz,  #1
> +
> +        uabd            v6\sz,  v20\sz, v23\sz    // abs(p3 - p0)
> +        uabd            v2\sz,  v21\sz, v23\sz    // abs(p2 - p0)
> +        uabd            v1\sz,  v22\sz, v23\sz    // abs(p1 - p0)
> +        uabd            \tmp1\sz,  v25\sz, v24\sz // abs(q1 - q0)
> +        uabd            \tmp2\sz,  v26\sz, v24\sz // abs(q2 - q0)
> +        uabd            \tmp3\sz,  v27\sz, v24\sz // abs(q3 - q0)
> +        umax            v6\sz,  v6\sz,  v2\sz
> +        umax            v1\sz,  v1\sz,  \tmp1\sz
> +        umax            \tmp2\sz,  \tmp2\sz,  \tmp3\sz
> +.if \wd == 16
> +        uabd            v7\sz,  v16\sz, v23\sz    // abs(p7 - p0)
> +        umax            v6\sz,  v6\sz,  v1\sz
> +        uabd            v2\sz,  v17\sz, v23\sz    // abs(p6 - p0)
> +        umax            v6\sz,  v6\sz,  \tmp2\sz
> +        uabd            v1\sz,  v18\sz, v23\sz    // abs(p5 - p0)
> +        cmhs            v6\sz,  v0\sz,  v6\sz     // flat8in
> +        uabd            v8\sz,  v19\sz, v23\sz    // abs(p4 - p0)
> +        and             v6\sz,  v6\sz,  v4\sz     // flat8in && fm
> +        uabd            v9\sz,  v28\sz, v24\sz    // abs(q4 - q0)
> +        bic             v4\sz,  v4\sz,  v6\sz     // fm && !flat8in
> +        uabd            v10\sz, v29\sz, v24\sz    // abs(q5 - q0)
> +        uabd            v11\sz, v30\sz, v24\sz    // abs(q6 - q0)
> +        uabd            v12\sz, v31\sz, v24\sz    // abs(q7 - q0)
> +
> +        umax            v7\sz,  v7\sz,  v2\sz
> +        umax            v1\sz,  v1\sz,  v8\sz
> +        umax            v9\sz,  v9\sz,  v10\sz
> +        umax            v11\sz, v11\sz, v12\sz
> +        // The rest of the calculation of flat8out is interleaved below
> +.else
> +        // The rest of the calculation of flat8in is interleaved below
> +.endif
> +.endif
> +
> +        // Calculate the normal inner loop filter for 2 or 4 pixels
> +        uabd            v5\sz,  v22\sz, v23\sz // abs(p1 - p0)
> +.if \wd == 16
> +        umax            v7\sz,  v7\sz,  v1\sz
> +        umax            v9\sz,  v9\sz,  v11\sz
> +.elseif \wd == 8
> +        umax            v6\sz,  v6\sz,  v1\sz
> +.endif
> +        uabd            v1\sz,  v25\sz, v24\sz // abs(q1 - q0)
> +.if \wd == 16
> +        umax            v7\sz,  v7\sz,  v9\sz
> +.elseif \wd == 8
> +        umax            v6\sz,  v6\sz,  \tmp2\sz
> +.endif
> +        usubl_sz        \tmp1\().8h,  \tmp2\().8h,  v22,  v25, \sz // p1 - q1
> +        umax            v5\sz,  v5\sz,  v1\sz  // max(abs(p1 - p0), abs(q1 - q0))
> +        usubl_sz        \tmp3\().8h,  \tmp4\().8h,  v24,  v23, \sz // q0 - p0
> +        movi            \tmp5\().8h,  #3
> +.if \wd == 8
> +        cmhs            v6\sz,  v0\sz,  v6\sz  // flat8in
> +.endif
> +        cmhi            v5\sz,  v5\sz,  v3\sz  // hev
> +.if \wd == 8
> +        // If a 4/8 or 8/4 mix is used, clear the relevant half of v6
> +.if \mix != 0
> +        mov             v1.2d[0], x11
> +        sxtl            v1.8h,  v1.8b
> +        and             v6\sz,  v6\sz,  v1.16b
> +.endif
> +        and             v6\sz,  v6\sz,  v4\sz  // flat8in && fm
> +.endif
> +        sqxtn_sz        \tmp1,        \tmp1\().8h,  \tmp2\().8h, \sz // av_clip_int8(p1 - q1)
> +.if \wd == 16
> +        cmhs            v7\sz,  v0\sz,  v7\sz  // flat8out
> +.elseif \wd == 8
> +        bic             v4\sz,  v4\sz,  v6\sz  // fm && !flat8in
> +.endif
> +        mvn             v5\sz,  v5\sz          // !hev
> +.if \wd == 16
> +        and             v7\sz,  v7\sz,  v6\sz  // flat8out && flat8in && fm
> +.endif
> +        and             v5\sz,  v5\sz,  v4\sz  // !hev && fm && !flat8in
> +
> +        mul_sz          \tmp3\().8h,  \tmp4\().8h,  \tmp3\().8h, \tmp4\().8h,  \tmp5\().8h,  \tmp5\().8h, \sz // 3 * (q0 - p0)
> +        bic             \tmp1\sz,  \tmp1\sz,  v5\sz    // if (!hev) av_clip_int8 = 0
> +        movi            v2\sz,  #4
> +        saddw_sz        \tmp3\().8h,  \tmp4\().8h,  \tmp3\().8h, \tmp4\().8h,  \tmp1, \sz // 3 * (q0 - p0) [+ av_clip_int8(p1 - q1)]
> +        movi            v3\sz,  #3
> +        sqxtn_sz        \tmp1,        \tmp3\().8h,  \tmp4\().8h, \sz       // f
> +.if \wd == 16
> +        bic             v6\sz,  v6\sz,  v7\sz  // fm && flat8in && !flat8out
> +.endif
> +
> +        sqadd           \tmp3\sz,  \tmp1\sz,  v2\sz // FFMIN(f + 4, 127)
> +        sqadd           \tmp4\sz,  \tmp1\sz,  v3\sz // FFMIN(f + 3, 127)
> +        uxtl_sz         v0.8h,  v1.8h,  v23, \sz    // p0
> +        sshr            \tmp3\sz,  \tmp3\sz,  #3    // f1
> +        sshr            \tmp4\sz,  \tmp4\sz,  #3    // f2
> +
> +        uxtl_sz         v2.8h,  v3.8h,  v24, \sz    // q0
> +        saddw_sz        v0.8h,  v1.8h,  v0.8h,  v1.8h,  \tmp4, \sz // p0 + f2
> +        ssubw_sz        v2.8h,  v3.8h,  v2.8h,  v3.8h,  \tmp3, \sz // q0 - f1
> +        sqxtun_sz       v0,  v0.8h,  v1.8h,  \sz    // out p0
> +        sqxtun_sz       v1,  v2.8h,  v3.8h,  \sz    // out q0
> +        srshr           \tmp3\sz, \tmp3\sz, #1      // f = (f1 + 1) >> 1
> +        bit             v23\sz, v0\sz,  v4\sz       // if (fm && !flat8in)
> +        bit             v24\sz, v1\sz,  v4\sz
> +
> +        uxtl_sz         v0.8h,  v1.8h,  v22, \sz    // p1
> +        uxtl_sz         v2.8h,  v3.8h,  v25, \sz    // q1
> +        saddw_sz        v0.8h,  v1.8h,  v0.8h,  v1.8h,  \tmp3, \sz // p1 + f
> +        ssubw_sz        v2.8h,  v3.8h,  v2.8h,  v3.8h,  \tmp3, \sz // q1 - f
> +        sqxtun_sz       v0,  v0.8h,  v1.8h, \sz     // out p1
> +        sqxtun_sz       v2,  v2.8h,  v3.8h, \sz     // out q1
> +        bit             v22\sz, v0\sz,  v5\sz       // if (!hev && fm && !flat8in)
> +        bit             v25\sz, v2\sz,  v5\sz
> +
> +.if \wd >= 8
> +        mov             x5,  v6.d[0]
> +.ifc \sz, .16b
> +        mov             x6,  v6.d[1]
> +        orr             x5,  x5,  x6
> +.endif
> +        // If no pixels need flat8in, jump to flat8out
> +        // (or to a writeout of the inner 4 pixels, for wd=8)
> +        cbz             x5,  6f
> +
> +        // flat8in
> +        uaddl_sz        \tmp1\().8h, \tmp2\().8h,  v20, v21, \sz
> +        uaddl_sz        \tmp3\().8h, \tmp4\().8h,  v22, v25, \sz
> +        uaddl_sz        \tmp5\().8h, \tmp6\().8h,  v20, v22, \sz
> +        uaddl_sz        \tmp7\().8h, \tmp8\().8h,  v23, v26, \sz
> +        add_sz          v0.8h,  v1.8h,  \tmp1\().8h, \tmp2\().8h, \tmp1\().8h, \tmp2\().8h, \sz
> +        uaddw_sz        v0.8h,  v1.8h,  v0.8h,  v1.8h,  v23, \sz
> +        uaddw_sz        v0.8h,  v1.8h,  v0.8h,  v1.8h,  v24, \sz
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  \tmp5\().8h, \tmp6\().8h, \sz
> +        sub_sz          \tmp3\().8h, \tmp4\().8h,  \tmp3\().8h, \tmp4\().8h,  \tmp1\().8h, \tmp2\().8h, \sz
> +        sub_sz          \tmp7\().8h, \tmp8\().8h,  \tmp7\().8h, \tmp8\().8h,  \tmp5\().8h, \tmp6\().8h, \sz
> +        rshrn_sz        v2,  v0.8h,  v1.8h,  #3,  \sz // out p2
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  \tmp3\().8h, \tmp4\().8h, \sz
> +        uaddl_sz        \tmp1\().8h, \tmp2\().8h,  v20,  v23, \sz
> +        uaddl_sz        \tmp3\().8h, \tmp4\().8h,  v24,  v27, \sz
> +        rshrn_sz        v3,  v0.8h,  v1.8h,  #3,  \sz // out p1
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  \tmp7\().8h, \tmp8\().8h, \sz
> +        sub_sz          \tmp3\().8h, \tmp4\().8h,  \tmp3\().8h, \tmp4\().8h,  \tmp1\().8h, \tmp2\().8h, \sz
> +        uaddl_sz        \tmp5\().8h, \tmp6\().8h,  v21,  v24, \sz
> +        uaddl_sz        \tmp7\().8h, \tmp8\().8h,  v25,  v27, \sz
> +        rshrn_sz        v4,  v0.8h,  v1.8h,  #3,  \sz // out p0
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  \tmp3\().8h, \tmp4\().8h, \sz
> +        sub_sz          \tmp7\().8h, \tmp8\().8h,  \tmp7\().8h, \tmp8\().8h,  \tmp5\().8h, \tmp6\().8h, \sz
> +        uaddl_sz        \tmp1\().8h, \tmp2\().8h,  v22,  v25, \sz
> +        uaddl_sz        \tmp3\().8h, \tmp4\().8h,  v26,  v27, \sz
> +        rshrn_sz        v5,  v0.8h,  v1.8h,  #3,  \sz // out q0
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  \tmp7\().8h, \tmp8\().8h, \sz
> +        sub_sz          \tmp3\().8h, \tmp4\().8h,  \tmp3\().8h, \tmp4\().8h,  \tmp1\().8h, \tmp2\().8h, \sz
> +        rshrn_sz        \tmp5,  v0.8h,  v1.8h,  #3,  \sz // out q1
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  \tmp3\().8h, \tmp4\().8h, \sz
> +        // The output here is written back into the input registers. This doesn't
> +        // matter for the flat8part below, since we only update those pixels
> +        // which won't be touched below.
> +        bit             v21\sz, v2\sz,  v6\sz
> +        bit             v22\sz, v3\sz,  v6\sz
> +        bit             v23\sz, v4\sz,  v6\sz
> +        rshrn_sz        \tmp6,  v0.8h,  v1.8h,  #3,  \sz // out q2
> +        bit             v24\sz, v5\sz,  v6\sz
> +        bit             v25\sz, \tmp5\sz,  v6\sz
> +        bit             v26\sz, \tmp6\sz,  v6\sz
> +.endif
> +.if \wd == 16
> +6:
> +        orr             v2\sz,  v6\sz,  v7\sz
> +        mov             x5,  v2.d[0]
> +.ifc \sz, .16b
> +        mov             x6,  v2.d[1]
> +        orr             x5,  x5,  x6
> +.endif
> +        // If no pixels needed flat8in nor flat8out, jump to a
> +        // writeout of the inner 4 pixels
> +        cbz             x5,  7f
> +        mov             x5,  v7.d[0]
> +.ifc \sz, .16b
> +        mov             x6,  v7.d[1]
> +        orr             x5,  x5,  x6
> +.endif
> +        // If no pixels need flat8out, jump to a writeout of the inner 6 pixels
> +        cbz             x5,  8f
> +
> +        // flat8out
> +        // This writes all outputs into v2-v17 (skipping v7 and v16).
> +        // If this part is skipped, the output is read from v21-v26 (which is the input
> +        // to this section).
> +        ushll_sz        v0.8h,  v1.8h,  v16,  #3,  \sz           // 8 * v16
> +        usubw_sz        v0.8h,  v1.8h,  v0.8h,  v1.8h,  v16, \sz // 7 * v16
> +        uaddw_sz        v0.8h,  v1.8h,  v0.8h,  v1.8h,  v17, \sz
> +        uaddl_sz        v8.8h,  v9.8h,  v17, v18, \sz
> +        uaddl_sz        v10.8h, v11.8h, v19, v20, \sz
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v8.8h,  v9.8h,  \sz
> +        uaddl_sz        v8.8h,  v9.8h,  v16, v17, \sz
> +        uaddl_sz        v12.8h, v13.8h, v21, v22, \sz
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v10.8h, v11.8h, \sz
> +        uaddl_sz        v10.8h, v11.8h, v18, v25, \sz
> +        uaddl_sz        v14.8h, v15.8h, v23, v24, \sz
> +        sub_sz          v10.8h, v11.8h, v10.8h, v11.8h, v8.8h,  v9.8h,  \sz
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v12.8h, v13.8h, \sz
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v14.8h, v15.8h, \sz
> +        uaddl_sz        v12.8h, v13.8h, v16, v18, \sz
> +        uaddl_sz        v14.8h, v15.8h, v19, v26, \sz
> +        rshrn_sz        v2,  v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v10.8h, v11.8h, \sz
> +        uaddl_sz        v8.8h,  v9.8h,  v16, v19, \sz
> +        uaddl_sz        v10.8h, v11.8h, v20, v27, \sz
> +        sub_sz          v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
> +        bif             v2\sz,  v17\sz, v7\sz
> +        rshrn_sz        v3,  v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v14.8h, v15.8h, \sz
> +        uaddl_sz        v12.8h, v13.8h, v16, v20, \sz
> +        uaddl_sz        v14.8h, v15.8h, v21, v28, \sz
> +        sub_sz          v10.8h, v11.8h, v10.8h, v11.8h, v8.8h,  v9.8h,  \sz
> +        bif             v3\sz,  v18\sz, v7\sz
> +        rshrn_sz        v4,  v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v10.8h, v11.8h, \sz
> +        uaddl_sz        v8.8h,  v9.8h,  v16, v21, \sz
> +        uaddl_sz        v10.8h, v11.8h, v22, v29, \sz
> +        sub_sz          v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
> +        bif             v4\sz,  v19\sz, v7\sz
> +        rshrn_sz        v5,  v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v14.8h, v15.8h, \sz
> +        uaddl_sz        v12.8h, v13.8h, v16, v22, \sz
> +        uaddl_sz        v14.8h, v15.8h, v23, v30, \sz
> +        sub_sz          v10.8h, v11.8h, v10.8h, v11.8h, v8.8h,  v9.8h,  \sz
> +        bif             v5\sz,  v20\sz, v7\sz
> +        rshrn_sz        v6,  v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v10.8h, v11.8h, \sz
> +        uaddl_sz        v10.8h, v11.8h, v16, v23, \sz
> +        sub_sz          v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
> +        uaddl_sz        v12.8h, v13.8h, v24, v31, \sz
> +        bif             v6\sz,  v21\sz, v7\sz
> +        rshrn_sz        v8,  v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v14.8h, v15.8h, \sz
> +        sub_sz          v10.8h, v11.8h, v12.8h, v13.8h, v10.8h, v11.8h, \sz
> +        uaddl_sz        v12.8h, v13.8h, v17, v24, \sz
> +        uaddl_sz        v14.8h, v15.8h, v25, v31, \sz
> +        bif             v8\sz,  v22\sz, v7\sz
> +        rshrn_sz        v9,  v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v10.8h, v11.8h, \sz
> +        sub_sz          v14.8h, v15.8h, v14.8h, v15.8h, v12.8h, v13.8h, \sz
> +        uaddl_sz        v12.8h, v13.8h, v26, v31, \sz
> +        bif             v9\sz,  v23\sz, v7\sz
> +        rshrn_sz        v10, v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v14.8h, v15.8h, \sz
> +        uaddl_sz        v14.8h, v15.8h, v18, v25, \sz
> +        uaddl_sz        v18.8h, v19.8h, v19, v26, \sz
> +        sub_sz          v12.8h, v13.8h, v12.8h, v13.8h, v14.8h, v15.8h, \sz
> +        uaddl_sz        v14.8h, v15.8h, v27, v31, \sz
> +        bif             v10\sz, v24\sz, v7\sz
> +        rshrn_sz        v11, v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v12.8h, v13.8h, \sz
> +        uaddl_sz        v12.8h, v13.8h, v20, v27, \sz
> +        sub_sz          v14.8h, v15.8h, v14.8h, v15.8h, v18.8h, v19.8h, \sz
> +        uaddl_sz        v18.8h, v19.8h, v28, v31, \sz
> +        bif             v11\sz, v25\sz, v7\sz
> +        sub_sz          v18.8h, v19.8h, v18.8h, v19.8h, v12.8h, v13.8h, \sz
> +        rshrn_sz        v12, v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v14.8h, v15.8h, \sz
> +        uaddl_sz        v14.8h, v15.8h, v21, v28, \sz
> +        uaddl_sz        v20.8h, v21.8h, v29, v31, \sz
> +        bif             v12\sz, v26\sz, v7\sz
> +        rshrn_sz        v13, v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v18.8h, v19.8h, \sz
> +        sub_sz          v20.8h, v21.8h, v20.8h, v21.8h, v14.8h, v15.8h, \sz
> +        uaddl_sz        v18.8h, v19.8h, v22, v29, \sz
> +        uaddl_sz        v22.8h, v23.8h, v30, v31, \sz
> +        bif             v13\sz, v27\sz, v7\sz
> +        rshrn_sz        v14, v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v20.8h, v21.8h, \sz
> +        sub_sz          v22.8h, v23.8h, v22.8h, v23.8h, v18.8h, v19.8h, \sz
> +        bif             v14\sz, v28\sz, v7\sz
> +        rshrn_sz        v15, v0.8h,  v1.8h,  #4,  \sz
> +
> +        add_sz          v0.8h,  v1.8h,  v0.8h,  v1.8h,  v22.8h, v23.8h, \sz
> +        bif             v15\sz, v29\sz, v7\sz
> +        rshrn_sz        v17, v0.8h,  v1.8h,  #4,  \sz
> +        bif             v17\sz, v30\sz, v7\sz
> +.endif
> +.endm
> +
> +// For wd <= 8, we use v16-v19 and v28-v31 for temp registers,
> +// while we need those for inputs/outputs in wd=16 and use v8-v15
> +// for temp registers there instead.
> +function vp9_loop_filter_4
> +        loop_filter     4,  .8b,  0,    v16, v17, v18, v19, v28, v29, v30, v31
> +        mov             x5,  #0

unneeded; nothing checks x5 after vp9_loop_filter_4 returns

> +        ret
> +9:
> +        br              x10
> +endfunc
> +
> +function vp9_loop_filter_4_16b_mix_44
> +        loop_filter     4,  .16b, 44,   v16, v17, v18, v19, v28, v29, v30, v31
> +        mov             x5,  #0

same

> +        ret
> +9:
> +        br              x10
> +endfunc
> +
> +function vp9_loop_filter_8
> +        loop_filter     8,  .8b,  0,    v16, v17, v18, v19, v28, v29, v30, v31
> +        mov             x5,  #0
> +        ret
> +6:
> +        mov             x5,  #6
> +        ret

I tried to return directly by loading the labels into registers and
branching to them directly, but with mixed results.
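
(Roughly: have the caller do

        adr             x6,  6f
        bl              vp9_loop_filter_8

and end the shared function with "br x6", instead of returning a code in
x5 and re-dispatching on it with cbnz/cmp in the wrapper macros.)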

> +9:
> +        br              x10
> +endfunc
> +
> +function vp9_loop_filter_8_16b_mix
> +        loop_filter     8,  .16b, 88,   v16, v17, v18, v19, v28, v29, v30, v31
> +        mov             x5,  #0
> +        ret
> +6:
> +        mov             x5,  #6
> +        ret
> +9:
> +        br              x10
> +endfunc
> +
> +function vp9_loop_filter_16
> +        loop_filter     16, .8b,  0,    v8,  v9,  v10, v11, v12, v13, v14, v15
> +        mov             x5,  #0
> +        ret
> +7:
> +        mov             x5,  #7
> +        ret
> +8:
> +        mov             x5,  #8
> +        ret
> +9:
> +        ldp             d8,  d9,  [sp]
> +        ldp             d10, d11, [sp, #0x10]
> +        ldp             d12, d13, [sp, #0x20]
> +        ldp             d14, d15, [sp, #0x30]
> +        add             sp,  sp,  #0x40

If the post-index variant is not slower, we could use that, e.g.
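
        ldp             d10, d11, [sp, #0x10]
        ldp             d12, d13, [sp, #0x20]
        ldp             d14, d15, [sp, #0x30]
        ldp             d8,  d9,  [sp], #0x40

folding the final add of sp into the last ldp.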

> +        br              x10
> +endfunc
> +
> +function vp9_loop_filter_16_16b
> +        loop_filter     16, .16b, 0,    v8,  v9,  v10, v11, v12, v13, v14, v15
> +        mov             x5,  #0
> +        ret
> +7:
> +        mov             x5,  #7
> +        ret
> +8:
> +        mov             x5,  #8
> +        ret
> +9:
> +        ldp             d8,  d9,  [sp]
> +        ldp             d10, d11, [sp, #0x10]
> +        ldp             d12, d13, [sp, #0x20]
> +        ldp             d14, d15, [sp, #0x30]
> +        add             sp,  sp,  #0x40
> +        br              x10
> +endfunc
> +
> +.macro loop_filter_4
> +        bl              vp9_loop_filter_4
> +.endm
> +
> +.macro loop_filter_4_16b_mix mix
> +        bl              vp9_loop_filter_4_16b_mix_\mix
> +.endm
> +
> +.macro loop_filter_8
> +        bl              vp9_loop_filter_8
> +        cbnz            x5,  6f
> +.endm
> +
> +.macro loop_filter_8_16b_mix mix
> +.if \mix == 48
> +        mov             x11, #0xffffffff00000000
> +.elseif \mix == 84
> +        mov             x11, #0x00000000ffffffff
> +.else
> +        mov             x11, #0xffffffffffffffff
> +.endif
> +        bl              vp9_loop_filter_8_16b_mix
> +        cbnz            x5,  6f
> +.endm
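
(For reference, on how these x11 masks take effect: the loop_filter
macro does

        mov             v1.2d[0], x11
        sxtl            v1.8h,  v1.8b
        and             v6\sz,  v6\sz,  v1.16b

so each 0x00/0xff byte of x11 widens to 0x0000/0xffff, and flat8in is
only kept for the half that actually runs the 8 wide filter.)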
> +
> +.macro loop_filter_16
> +        bl              vp9_loop_filter_16
> +        cmp             x5,  7
> +        b.gt            8f
> +        b.eq            7f
> +.endm
> +
> +.macro loop_filter_16_16b
> +        bl              vp9_loop_filter_16_16b
> +        cmp             x5,  7
> +        b.gt            8f
> +        b.eq            7f
> +.endm
> +
> +
> +// The public functions in this file have got the following signature:
> +// void loop_filter(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr);
> +
> +function ff_vp9_loop_filter_v_4_8_neon, export=1
> +        mov             x10, x30
> +        sub             x9,  x0,  x1, lsl #2
> +        ld1             {v20.8b}, [x9], x1 // p3
> +        ld1             {v24.8b}, [x0], x1 // q0
> +        ld1             {v21.8b}, [x9], x1 // p2
> +        ld1             {v25.8b}, [x0], x1 // q1
> +        ld1             {v22.8b}, [x9], x1 // p1
> +        ld1             {v26.8b}, [x0], x1 // q2
> +        ld1             {v23.8b}, [x9], x1 // p0
> +        ld1             {v27.8b}, [x0], x1 // q3
> +        sub             x0,  x0,  x1, lsl #2
> +        sub             x9,  x9,  x1, lsl #1
> +
> +        loop_filter_4
> +
> +        st1             {v22.8b}, [x9], x1
> +        st1             {v24.8b}, [x0], x1
> +        st1             {v23.8b}, [x9], x1
> +        st1             {v25.8b}, [x0], x1
> +
> +        br              x10
> +endfunc
> +
> +function ff_vp9_loop_filter_v_44_16_neon, export=1
> +        mov             x10, x30
> +        sub             x9,  x0,  x1, lsl #2
> +        ld1             {v20.16b}, [x9], x1 // p3
> +        ld1             {v24.16b}, [x0], x1 // q0
> +        ld1             {v21.16b}, [x9], x1 // p2
> +        ld1             {v25.16b}, [x0], x1 // q1
> +        ld1             {v22.16b}, [x9], x1 // p1
> +        ld1             {v26.16b}, [x0], x1 // q2
> +        ld1             {v23.16b}, [x9], x1 // p0
> +        ld1             {v27.16b}, [x0], x1 // q3
> +        sub             x0,  x0,  x1, lsl #2
> +        sub             x9,  x9,  x1, lsl #1
> +
> +        loop_filter_4_16b_mix 44
> +
> +        st1             {v22.16b}, [x9], x1
> +        st1             {v24.16b}, [x0], x1
> +        st1             {v23.16b}, [x9], x1
> +        st1             {v25.16b}, [x0], x1
> +
> +        br              x10
> +endfunc
> +
> +function ff_vp9_loop_filter_h_4_8_neon, export=1
> +        mov             x10, x30
> +        sub             x9,  x0,  #4
> +        add             x0,  x9,  x1, lsl #2
> +        ld1             {v20.8b}, [x9], x1
> +        ld1             {v24.8b}, [x0], x1
> +        ld1             {v21.8b}, [x9], x1
> +        ld1             {v25.8b}, [x0], x1
> +        ld1             {v22.8b}, [x9], x1
> +        ld1             {v26.8b}, [x0], x1
> +        ld1             {v23.8b}, [x9], x1
> +        ld1             {v27.8b}, [x0], x1
> +
> +        sub             x9,  x9,  x1, lsl #2
> +        sub             x0,  x0,  x1, lsl #2
> +        // Move x0/x9 forward by 2 pixels; we don't need to rewrite the
> +        // outermost 2 pixels since they aren't changed.
> +        add             x9,  x9,  #2
> +        add             x0,  x0,  #2
> +
> +        transpose_8x8B  v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
> +
> +        loop_filter_4
> +
> +        // We only will write the mid 4 pixels back; after the loop filter,
> +        // these are in v22, v23, v24, v25, ordered as rows (8x4 pixels).
> +        // We need to transpose them to columns, done with a 4x8 transpose
> +        // (which in practice is two 4x4 transposes of the two 4x4 halves
> +        // of the 8x4 pixels; into 4x8 pixels).
> +        transpose_4x8B  v22, v23, v24, v25, v26, v27, v28, v29
> +        st1             {v22.s}[0], [x9], x1
> +        st1             {v22.s}[1], [x0], x1
> +        st1             {v23.s}[0], [x9], x1
> +        st1             {v23.s}[1], [x0], x1
> +        st1             {v24.s}[0], [x9], x1
> +        st1             {v24.s}[1], [x0], x1
> +        st1             {v25.s}[0], [x9], x1
> +        st1             {v25.s}[1], [x0], x1
> +
> +        br              x10
> +endfunc
> +
> +function ff_vp9_loop_filter_h_44_16_neon, export=1
> +        mov             x10, x30
> +        sub             x9,  x0,  #4
> +        add             x0,  x9,  x1, lsl #3
> +        ld1             {v20.8b},   [x9], x1
> +        ld1             {v20.d}[1], [x0], x1
> +        ld1             {v21.8b},   [x9], x1
> +        ld1             {v21.d}[1], [x0], x1
> +        ld1             {v22.8b},   [x9], x1
> +        ld1             {v22.d}[1], [x0], x1
> +        ld1             {v23.8b},   [x9], x1
> +        ld1             {v23.d}[1], [x0], x1
> +        ld1             {v24.8b},   [x9], x1
> +        ld1             {v24.d}[1], [x0], x1
> +        ld1             {v25.8b},   [x9], x1
> +        ld1             {v25.d}[1], [x0], x1
> +        ld1             {v26.8b},   [x9], x1
> +        ld1             {v26.d}[1], [x0], x1
> +        ld1             {v27.8b},   [x9], x1
> +        ld1             {v27.d}[1], [x0], x1
> +
> +        sub             x9,  x9,  x1, lsl #3
> +        sub             x0,  x0,  x1, lsl #3
> +        add             x9,  x9,  #2
> +        add             x0,  x0,  #2
> +
> +        transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
> +
> +        loop_filter_4_16b_mix 44
> +
> +        transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
> +
> +        st1             {v22.s}[0], [x9], x1
> +        st1             {v22.s}[2], [x0], x1
> +        st1             {v23.s}[0], [x9], x1
> +        st1             {v23.s}[2], [x0], x1
> +        st1             {v24.s}[0], [x9], x1
> +        st1             {v24.s}[2], [x0], x1
> +        st1             {v25.s}[0], [x9], x1
> +        st1             {v25.s}[2], [x0], x1
> +        st1             {v22.s}[1], [x9], x1
> +        st1             {v22.s}[3], [x0], x1
> +        st1             {v23.s}[1], [x9], x1
> +        st1             {v23.s}[3], [x0], x1
> +        st1             {v24.s}[1], [x9], x1
> +        st1             {v24.s}[3], [x0], x1
> +        st1             {v25.s}[1], [x9], x1
> +        st1             {v25.s}[3], [x0], x1
> +
> +        br              x10
> +endfunc
> +
> +function ff_vp9_loop_filter_v_8_8_neon, export=1
> +        mov             x10, x30
> +        sub             x9,  x0,  x1, lsl #2
> +        ld1             {v20.8b}, [x9], x1 // p3
> +        ld1             {v24.8b}, [x0], x1 // q0
> +        ld1             {v21.8b}, [x9], x1 // p2
> +        ld1             {v25.8b}, [x0], x1 // q1
> +        ld1             {v22.8b}, [x9], x1 // p1
> +        ld1             {v26.8b}, [x0], x1 // q2
> +        ld1             {v23.8b}, [x9], x1 // p0
> +        ld1             {v27.8b}, [x0], x1 // q3
> +        sub             x9,  x9,  x1, lsl #2
> +        sub             x0,  x0,  x1, lsl #2
> +        add             x9,  x9,  x1
> +
> +        loop_filter_8
> +
> +        st1             {v21.8b}, [x9], x1
> +        st1             {v24.8b}, [x0], x1
> +        st1             {v22.8b}, [x9], x1
> +        st1             {v25.8b}, [x0], x1
> +        st1             {v23.8b}, [x9], x1
> +        st1             {v26.8b}, [x0], x1
> +
> +        br              x10
> +6:
> +        sub             x9,  x0,  x1, lsl #1
> +        st1             {v22.8b}, [x9], x1
> +        st1             {v24.8b}, [x0], x1
> +        st1             {v23.8b}, [x9], x1
> +        st1             {v25.8b}, [x0], x1
> +        br              x10
> +endfunc
> +
> +.macro mix_v_16 mix
> +function ff_vp9_loop_filter_v_\mix\()_16_neon, export=1
> +        mov             x10, x30
> +        sub             x9,  x0,  x1, lsl #2
> +        ld1             {v20.16b}, [x9], x1 // p3
> +        ld1             {v24.16b}, [x0], x1 // q0
> +        ld1             {v21.16b}, [x9], x1 // p2
> +        ld1             {v25.16b}, [x0], x1 // q1
> +        ld1             {v22.16b}, [x9], x1 // p1
> +        ld1             {v26.16b}, [x0], x1 // q2
> +        ld1             {v23.16b}, [x9], x1 // p0
> +        ld1             {v27.16b}, [x0], x1 // q3
> +        sub             x9,  x9,  x1, lsl #2
> +        sub             x0,  x0,  x1, lsl #2
> +        add             x9,  x9,  x1
> +
> +        loop_filter_8_16b_mix \mix
> +
> +        st1             {v21.16b}, [x9], x1
> +        st1             {v24.16b}, [x0], x1
> +        st1             {v22.16b}, [x9], x1
> +        st1             {v25.16b}, [x0], x1
> +        st1             {v23.16b}, [x9], x1
> +        st1             {v26.16b}, [x0], x1
> +
> +        br              x10
> +6:
> +        sub             x9,  x0,  x1, lsl #1
> +        st1             {v22.16b}, [x9], x1
> +        st1             {v24.16b}, [x0], x1
> +        st1             {v23.16b}, [x9], x1
> +        st1             {v25.16b}, [x0], x1
> +        br              x10
> +endfunc
> +.endm
> +
> +mix_v_16 48
> +mix_v_16 84
> +mix_v_16 88
> +
> +function ff_vp9_loop_filter_h_8_8_neon, export=1
> +        mov             x10, x30
> +        sub             x9,  x0,  #4
> +        add             x0,  x9,  x1, lsl #2
> +        ld1             {v20.8b}, [x9], x1
> +        ld1             {v24.8b}, [x0], x1
> +        ld1             {v21.8b}, [x9], x1
> +        ld1             {v25.8b}, [x0], x1
> +        ld1             {v22.8b}, [x9], x1
> +        ld1             {v26.8b}, [x0], x1
> +        ld1             {v23.8b}, [x9], x1
> +        ld1             {v27.8b}, [x0], x1
> +
> +        sub             x9,  x9,  x1, lsl #2
> +        sub             x0,  x0,  x1, lsl #2
> +
> +        transpose_8x8B  v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
> +
> +        loop_filter_8
> +
> +        // Even though only 6 pixels per row have been changed, we write the
> +        // full 8 pixel registers.
> +        transpose_8x8B  v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
> +
> +        st1             {v20.8b}, [x9], x1
> +        st1             {v24.8b}, [x0], x1
> +        st1             {v21.8b}, [x9], x1
> +        st1             {v25.8b}, [x0], x1
> +        st1             {v22.8b}, [x9], x1
> +        st1             {v26.8b}, [x0], x1
> +        st1             {v23.8b}, [x9], x1
> +        st1             {v27.8b}, [x0], x1
> +
> +        br              x10
> +6:
> +        // If we didn't need to do the flat8in part, we use the same writeback
> +        // as in loop_filter_h_4_8.
> +        add             x9,  x9,  #2
> +        add             x0,  x0,  #2
> +        transpose_4x8B  v22, v23, v24, v25, v26, v27, v28, v29
> +        st1             {v22.s}[0], [x9], x1
> +        st1             {v22.s}[1], [x0], x1
> +        st1             {v23.s}[0], [x9], x1
> +        st1             {v23.s}[1], [x0], x1
> +        st1             {v24.s}[0], [x9], x1
> +        st1             {v24.s}[1], [x0], x1
> +        st1             {v25.s}[0], [x9], x1
> +        st1             {v25.s}[1], [x0], x1
> +        br              x10
> +endfunc
> +
> +.macro mix_h_16 mix
> +function ff_vp9_loop_filter_h_\mix\()_16_neon, export=1
> +        mov             x10, x30
> +        sub             x9,  x0,  #4
> +        add             x0,  x9,  x1, lsl #3
> +        ld1             {v20.8b},   [x9], x1
> +        ld1             {v20.d}[1], [x0], x1
> +        ld1             {v21.8b},   [x9], x1
> +        ld1             {v21.d}[1], [x0], x1
> +        ld1             {v22.8b},   [x9], x1
> +        ld1             {v22.d}[1], [x0], x1
> +        ld1             {v23.8b},   [x9], x1
> +        ld1             {v23.d}[1], [x0], x1
> +        ld1             {v24.8b},   [x9], x1
> +        ld1             {v24.d}[1], [x0], x1
> +        ld1             {v25.8b},   [x9], x1
> +        ld1             {v25.d}[1], [x0], x1
> +        ld1             {v26.8b},   [x9], x1
> +        ld1             {v26.d}[1], [x0], x1
> +        ld1             {v27.8b},   [x9], x1
> +        ld1             {v27.d}[1], [x0], x1
> +
> +        sub             x9,  x9,  x1, lsl #3
> +        sub             x0,  x0,  x1, lsl #3
> +
> +        transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
> +
> +        loop_filter_8_16b_mix \mix
> +
> +        transpose_8x16B v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
> +
> +        st1             {v20.8b},   [x9], x1
> +        st1             {v20.d}[1], [x0], x1
> +        st1             {v21.8b},   [x9], x1
> +        st1             {v21.d}[1], [x0], x1
> +        st1             {v22.8b},   [x9], x1
> +        st1             {v22.d}[1], [x0], x1
> +        st1             {v23.8b},   [x9], x1
> +        st1             {v23.d}[1], [x0], x1
> +        st1             {v24.8b},   [x9], x1
> +        st1             {v24.d}[1], [x0], x1
> +        st1             {v25.8b},   [x9], x1
> +        st1             {v25.d}[1], [x0], x1
> +        st1             {v26.8b},   [x9], x1
> +        st1             {v26.d}[1], [x0], x1
> +        st1             {v27.8b},   [x9], x1
> +        st1             {v27.d}[1], [x0], x1
> +
> +        br              x10
> +6:
> +        add             x9,  x9,  #2
> +        add             x0,  x0,  #2
> +        transpose_4x16B v22, v23, v24, v25, v26, v27, v28, v29
> +        st1             {v22.s}[0], [x9], x1
> +        st1             {v22.s}[2], [x0], x1
> +        st1             {v23.s}[0], [x9], x1
> +        st1             {v23.s}[2], [x0], x1
> +        st1             {v24.s}[0], [x9], x1
> +        st1             {v24.s}[2], [x0], x1
> +        st1             {v25.s}[0], [x9], x1
> +        st1             {v25.s}[2], [x0], x1
> +        st1             {v22.s}[1], [x9], x1
> +        st1             {v22.s}[3], [x0], x1
> +        st1             {v23.s}[1], [x9], x1
> +        st1             {v23.s}[3], [x0], x1
> +        st1             {v24.s}[1], [x9], x1
> +        st1             {v24.s}[3], [x0], x1
> +        st1             {v25.s}[1], [x9], x1
> +        st1             {v25.s}[3], [x0], x1
> +        br              x10
> +endfunc
> +.endm
> +
> +mix_h_16 48
> +mix_h_16 84
> +mix_h_16 88
> +
> +function ff_vp9_loop_filter_v_16_8_neon, export=1
> +        mov             x10, x30
> +        sub             sp,  sp,  #0x40
> +        stp             d8,  d9,  [sp]
> +        stp             d10, d11, [sp, #0x10]
> +        stp             d12, d13, [sp, #0x20]
> +        stp             d14, d15, [sp, #0x30]
> +        sub             x9,  x0,  x1, lsl #3
> +        ld1             {v16.8b}, [x9], x1 // p7
> +        ld1             {v24.8b}, [x0], x1 // q0
> +        ld1             {v17.8b}, [x9], x1 // p6
> +        ld1             {v25.8b}, [x0], x1 // q1
> +        ld1             {v18.8b}, [x9], x1 // p5
> +        ld1             {v26.8b}, [x0], x1 // q2
> +        ld1             {v19.8b}, [x9], x1 // p4
> +        ld1             {v27.8b}, [x0], x1 // q3
> +        ld1             {v20.8b}, [x9], x1 // p3
> +        ld1             {v28.8b}, [x0], x1 // q4
> +        ld1             {v21.8b}, [x9], x1 // p2
> +        ld1             {v29.8b}, [x0], x1 // q5
> +        ld1             {v22.8b}, [x9], x1 // p1
> +        ld1             {v30.8b}, [x0], x1 // q6
> +        ld1             {v23.8b}, [x9], x1 // p0
> +        ld1             {v31.8b}, [x0], x1 // q7
> +        sub             x9,  x9,  x1, lsl #3
> +        sub             x0,  x0,  x1, lsl #3
> +        add             x9,  x9,  x1
> +
> +        loop_filter_16
> +
> +        // If we did the flat8out part, we get the output in
> +        // v2-v17 (skipping v7 and v16). x9 points to x0 - 7 * stride,
> +        // store v2-v9 there, and v10-v17 into x0.
> +        st1             {v2.8b},  [x9], x1
> +        st1             {v10.8b}, [x0], x1
> +        st1             {v3.8b},  [x9], x1
> +        st1             {v11.8b}, [x0], x1
> +        st1             {v4.8b},  [x9], x1
> +        st1             {v12.8b}, [x0], x1
> +        st1             {v5.8b},  [x9], x1
> +        st1             {v13.8b}, [x0], x1
> +        st1             {v6.8b},  [x9], x1
> +        st1             {v14.8b}, [x0], x1
> +        st1             {v8.8b},  [x9], x1
> +        st1             {v15.8b}, [x0], x1
> +        st1             {v9.8b},  [x9], x1
> +        st1             {v17.8b}, [x0], x1
> +
> +        ldp             d8,  d9,  [sp]
> +        ldp             d10, d11, [sp, #0x10]
> +        ldp             d12, d13, [sp, #0x20]
> +        ldp             d14, d15, [sp, #0x30]
> +        add             sp,  sp,  #0x40
> +        br              x10

I don't think it makes sense to duplicate this for every variant; just
branch to the tail of the function instead. The same applies to the
other 16_8/16 functions.
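
E.g. (label name made up):

9:
        b               vp9_loop_filter_16_end

with a single shared

vp9_loop_filter_16_end:
        ldp             d8,  d9,  [sp]
        ldp             d10, d11, [sp, #0x10]
        ldp             d12, d13, [sp, #0x20]
        ldp             d14, d15, [sp, #0x30]
        add             sp,  sp,  #0x40
        br              x10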

otherwise ok

Janne