On 2016-10-16 23:18:58 +0300, Martin Storsjö wrote:
> This work is sponsored by, and copyright, Google.
> 
> The implementation tries to have smart handling of cases
> where no pixels need the full filtering for the 8/16 width
> filters, skipping both calculation and writeback of the
> unmodified pixels in those cases. The actual effect of this
> is hard to test with checkasm though, since it tests the
> full filtering, and the benefit depends on how many filtered
> blocks use the shortcut.

Did you benchmark it with START/STOP_TIMER in the decoder? It will still 
depend on the amount of filtering required by the test stream.
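
For reference, something like this around the call site would do (a minimal
sketch using the timer macros from libavutil/timer.h; the exact call site
shown is just an example):

    #include "libavutil/timer.h"

    {
        START_TIMER
        dsp->loop_filter_16[0](dst, stride, E, I, H);
        STOP_TIMER("loop_filter_16")
    }

That prints running cycle averages to stderr; as noted, the numbers will
still depend on how much filtering the test stream actually triggers.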

> Examples of relative speedup compared to the C version, from checkasm:
>                           Cortex       A7     A8     A9    A53
> vp9_loop_filter_h_4_8_neon:          2.81   2.59   2.29   3.33
> vp9_loop_filter_h_8_8_neon:          2.17   2.09   1.81   2.67
> vp9_loop_filter_h_16_8_neon:         2.72   2.29   2.20   3.10
> vp9_loop_filter_h_16_16_neon:        2.49   2.38   2.23   2.82
> vp9_loop_filter_mix2_h_44_16_neon:   2.88   2.71   2.30   3.31
> vp9_loop_filter_mix2_h_48_16_neon:   2.59   2.40   2.05   3.00
> vp9_loop_filter_mix2_h_84_16_neon:   2.58   2.33   2.04   3.05
> vp9_loop_filter_mix2_h_88_16_neon:   2.32   2.25   1.91   2.81
> vp9_loop_filter_mix2_v_44_16_neon:   4.28   4.13   3.49   5.64
> vp9_loop_filter_mix2_v_48_16_neon:   4.32   4.15   3.54   5.96
> vp9_loop_filter_mix2_v_84_16_neon:   4.13   3.71   3.31   5.33
> vp9_loop_filter_mix2_v_88_16_neon:   4.64   4.50   3.94   6.74
> vp9_loop_filter_v_4_8_neon:          4.10   3.78   3.12   5.44
> vp9_loop_filter_v_8_8_neon:          4.50   4.24   3.66   6.45
> vp9_loop_filter_v_16_8_neon:         5.21   4.42   4.12   7.00
> vp9_loop_filter_v_16_16_neon:        3.76   3.57   3.23   5.25
> 
> The speedup vs C code is around 2-7x. The numbers are quite
> inconclusive though, since the checkasm test runs multiple filterings
> on top of each other, so later rounds might end up with different
> codepaths (different decisions on which filter to apply, based
> on input pixel differences). Disabling the early-exit in the asm
> doesn't give a fair comparison either though, since the C code
> only does the necessary calculations for each row.

The only way to get meaningful numbers is timing the actual use in the 
decoder.

> This is pretty similar in runtime to the corresponding routines
> in libvpx. (This is comparing vpx_lpf_vertical_16_neon,
> vpx_lpf_horizontal_edge_8_neon and vpx_lpf_horizontal_edge_16_neon
> to vp9_loop_filter_h_16_8_neon, vp9_loop_filter_v_16_8_neon
> and vp9_loop_filter_v_16_16_neon - note that the naming of horizontal
> and vertical is flipped between the libraries.)
> 
> In order to have stable, comparable numbers, the early exits in both
> asm versions were disabled, forcing the full filtering codepath.

This is probably ok since the implementations should be pretty similar.

>                            Cortex           A7      A8      A9     A53
> vp9_loop_filter_h_16_8_neon:             594.7   441.2   459.5   406.0
> libvpx vpx_lpf_vertical_16_neon:         626.0   464.5   470.7   445.0
> vp9_loop_filter_v_16_8_neon:             516.3   390.2   406.7   285.0
> libvpx vpx_lpf_horizontal_edge_8_neon:   586.5   414.5   415.6   383.2
> vp9_loop_filter_v_16_16_neon:            955.4   762.2   780.9   558.0
> libvpx vpx_lpf_horizontal_edge_16_neon: 1060.2   751.7   743.5   685.2
> 
> Our version is consistently faster on A7 and A53; on A8 and A9 it is
> faster in two of the three tests, but marginally slower in the third.
> ---
> I did some more optimizations to the core filter macro, and added
> assembly frontends for the 16_16 functions to avoid doing vpush/vpop
> twice, and added lots more code comments. Assembly frontends for
> the mix functions aren't necessary since none of them clobber any
> callee-saved registers.
> 
> Now the comparison to libvpx is much closer; we're rarely slower
> at all, and even much faster in some cases.
> ---
>  libavcodec/arm/Makefile          |   1 +
>  libavcodec/arm/vp9dsp_init_arm.c |  57 +++
>  libavcodec/arm/vp9lpf_neon.S     | 764 +++++++++++++++++++++++++++++++++++++++
>  3 files changed, 822 insertions(+)
>  create mode 100644 libavcodec/arm/vp9lpf_neon.S
> 
> diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
> index 01630ac..77452b1 100644
> --- a/libavcodec/arm/Makefile
> +++ b/libavcodec/arm/Makefile
> @@ -140,4 +140,5 @@ NEON-OBJS-$(CONFIG_RV40_DECODER)       += arm/rv34dsp_neon.o            \
>  NEON-OBJS-$(CONFIG_VORBIS_DECODER)     += arm/vorbisdsp_neon.o
>  NEON-OBJS-$(CONFIG_VP6_DECODER)        += arm/vp6dsp_neon.o
>  NEON-OBJS-$(CONFIG_VP9_DECODER)        += arm/vp9itxfm_neon.o           \
> +                                          arm/vp9lpf_neon.o             \
>                                            arm/vp9mc_neon.o
> diff --git a/libavcodec/arm/vp9dsp_init_arm.c b/libavcodec/arm/vp9dsp_init_arm.c
> index b2d91d9..7f3b970 100644
> --- a/libavcodec/arm/vp9dsp_init_arm.c
> +++ b/libavcodec/arm/vp9dsp_init_arm.c
> @@ -182,8 +182,65 @@ static av_cold void vp9dsp_itxfm_init_arm(VP9DSPContext *dsp)
>      }
>  }
>  
> +#define define_loop_filter(dir, wd, size) \
> +void ff_vp9_loop_filter_##dir##_##wd##_##size##_neon(uint8_t *dst, ptrdiff_t stride, int E, int I, int H)
> +
> +#define define_loop_filters(wd, size) \
> +    define_loop_filter(h, wd, size);  \
> +    define_loop_filter(v, wd, size)
> +
> +define_loop_filters(4, 8);
> +define_loop_filters(8, 8);
> +define_loop_filters(16, 8);
> +define_loop_filters(16, 16);
> +
> +#define lf_mix_fn(dir, wd1, wd2, stridea)                                                       \
> +static void loop_filter_##dir##_##wd1##wd2##_16_neon(uint8_t *dst,                              \
> +                                                     ptrdiff_t stride,                          \
> +                                                     int E, int I, int H)                       \
> +{                                                                                               \
> +    ff_vp9_loop_filter_##dir##_##wd1##_8_neon(dst, stride, E & 0xff, I & 0xff, H & 0xff);       \
> +    ff_vp9_loop_filter_##dir##_##wd2##_8_neon(dst + 8 * stridea, stride, E >> 8, I >> 8, H >> 8); \
> +}
> +
> +#define lf_mix_fns(wd1, wd2)       \
> +    lf_mix_fn(h, wd1, wd2, stride) \
> +    lf_mix_fn(v, wd1, wd2, sizeof(uint8_t))
> +
> +lf_mix_fns(4, 4)
> +lf_mix_fns(4, 8)
> +lf_mix_fns(8, 4)
> +lf_mix_fns(8, 8)
> +
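
(For anyone reading along: each lf_mix_fns invocation above expands to a
pair of small C wrappers; lf_mix_fns(4, 8), for instance, gives roughly:

    static void loop_filter_h_48_16_neon(uint8_t *dst, ptrdiff_t stride,
                                         int E, int I, int H)
    {
        ff_vp9_loop_filter_h_4_8_neon(dst, stride, E & 0xff, I & 0xff, H & 0xff);
        ff_vp9_loop_filter_h_8_8_neon(dst + 8 * stride, stride, E >> 8, I >> 8, H >> 8);
    }

plus the corresponding _v wrapper, which offsets the second call by 8
pixels (dst + 8) instead of 8 rows.)
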
> +static av_cold void vp9dsp_loopfilter_init_arm(VP9DSPContext *dsp)
> +{
> +    int cpu_flags = av_get_cpu_flags();
> +
> +    if (have_neon(cpu_flags)) {
> +        dsp->loop_filter_8[0][1] = ff_vp9_loop_filter_v_4_8_neon;
> +        dsp->loop_filter_8[0][0] = ff_vp9_loop_filter_h_4_8_neon;
> +        dsp->loop_filter_8[1][1] = ff_vp9_loop_filter_v_8_8_neon;
> +        dsp->loop_filter_8[1][0] = ff_vp9_loop_filter_h_8_8_neon;
> +        dsp->loop_filter_8[2][1] = ff_vp9_loop_filter_v_16_8_neon;
> +        dsp->loop_filter_8[2][0] = ff_vp9_loop_filter_h_16_8_neon;
> +
> +        dsp->loop_filter_16[0] = ff_vp9_loop_filter_h_16_16_neon;
> +        dsp->loop_filter_16[1] = ff_vp9_loop_filter_v_16_16_neon;
> +
> +        dsp->loop_filter_mix2[0][0][0] = loop_filter_h_44_16_neon;
> +        dsp->loop_filter_mix2[0][0][1] = loop_filter_v_44_16_neon;
> +        dsp->loop_filter_mix2[0][1][0] = loop_filter_h_48_16_neon;
> +        dsp->loop_filter_mix2[0][1][1] = loop_filter_v_48_16_neon;
> +        dsp->loop_filter_mix2[1][0][0] = loop_filter_h_84_16_neon;
> +        dsp->loop_filter_mix2[1][0][1] = loop_filter_v_84_16_neon;
> +        dsp->loop_filter_mix2[1][1][0] = loop_filter_h_88_16_neon;
> +        dsp->loop_filter_mix2[1][1][1] = loop_filter_v_88_16_neon;
> +    }
> +}
> +
>  av_cold void ff_vp9dsp_init_arm(VP9DSPContext *dsp)
>  {
>      vp9dsp_mc_init_arm(dsp);
> +    vp9dsp_loopfilter_init_arm(dsp);
>      vp9dsp_itxfm_init_arm(dsp);
>  }
> diff --git a/libavcodec/arm/vp9lpf_neon.S b/libavcodec/arm/vp9lpf_neon.S
> new file mode 100644
> index 0000000..7cb691c
> --- /dev/null
> +++ b/libavcodec/arm/vp9lpf_neon.S
> @@ -0,0 +1,764 @@
> +/*
> + * Copyright (c) 2016 Google Inc.
> + *
> + * This file is part of Libav.
> + *
> + * Libav is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * Libav is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with Libav; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "libavutil/arm/asm.S"
> +#include "neon.S"
> +
> +@ Do an 8x8 transpose, using q registers for the subtransposes that don't
> +@ need to address the individual d registers.
> +@ r0,r1 == rq0, r2,r3 == rq1, etc
> +.macro transpose_q_8x8 rq0, rq1, rq2, rq3, r0, r1, r2, r3, r4, r5, r6, r7
> +        vtrn.32         \rq0, \rq2
> +        vtrn.32         \rq1, \rq3
> +        vtrn.16         \rq0, \rq1
> +        vtrn.16         \rq2, \rq3
> +        vtrn.8          \r0,  \r1
> +        vtrn.8          \r2,  \r3
> +        vtrn.8          \r4,  \r5
> +        vtrn.8          \r6,  \r7
> +.endm
> +
> +@ Do a 4x4 transpose, using q registers for the subtransposes that don't
> +@ need to address the individual d registers.
> +@ r0,r1 == rq0, r2,r3 == rq1
> +.macro transpose_q_4x4 rq0, rq1, r0, r1, r2, r3
> +        vtrn.16         \rq0, \rq1
> +        vtrn.8          \r0,  \r1
> +        vtrn.8          \r2,  \r3
> +.endm
> +
> +@ The input to and output from this macro is in the registers d16-d31,
> +@ and d0-d7 are used as scratch registers.
> +@ p7 = d16 .. p3 = d20, p0 = d23, q0 = d24, q3 = d27, q7 = d31
> +@ Depending on the width of the loop filter, we either use d16-d19
> +@ and d28-d31 as temp registers, or d8-d15.
> +@ tmp1,tmp2 = tmpq1, tmp3,tmp4 = tmpq2, tmp5,tmp6 = tmpq3, tmp7,tmp8 = tmpq4
> +.macro loop_filter wd, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmpq1, tmpq2, tmpq3, tmpq4
> +        vdup.u16        q0,  r2 @ E
> +        vdup.u8         d2,  r3 @ I
> +        ldr             r3,  [sp]
> +
> +        vabd.u8         d4,  d20, d21    @ abs(p3 - p2)
> +        vabd.u8         d5,  d21, d22    @ abs(p2 - p1)
> +        vabd.u8         d6,  d22, d23    @ abs(p1 - p0)
> +        vabd.u8         d7,  d24, d25    @ abs(q0 - q1)
> +        vabd.u8         \tmp1,  d25, d26 @ abs(q1 - q2)
> +        vabd.u8         \tmp2,  d26, d27 @ abs(q2 - q3)
> +        vmax.u8         d4,  d4,  d5
> +        vmax.u8         d5,  d6,  d7
> +        vmax.u8         \tmp1,  \tmp1,  \tmp2
> +        vabdl.u8        q3,  d23, d24    @ abs(p0 - q0)
> +        vmax.u8         d4,  d4,  d5
> +        vadd.u16        q3,  q3,  q3     @ abs(p0 - q0) * 2
> +        vabd.u8         d5,  d22, d25    @ abs(p1 - q1)
> +        vmax.u8         d4,  d4,  \tmp1  @ max(abs(p3 - p2), ..., abs(q2 - q3))
> +        vshr.u8         d5,  d5,  #1
> +        vcle.u8         d4,  d4,  d2     @ max(abs()) <= I
> +        vaddw.u8        q3,  q3,  d5     @ abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
> +        vcle.u16        q3,  q3,  q0
> +        vmovn.u16       d5,  q3
> +        vand            d4,  d4,  d5  @ fm
> +
> +        vcmp.f64        d4,  #0
> +        vmrs            APSR_nzcv, FPSCR
> +        @ If no pixels need filtering, just exit as soon as possible
> +        beq             9f
> +
> +.if \wd >= 8
> +        vmov.u8         d0,  #1
> +
> +        vabd.u8         d6,  d20, d23    @ abs(p3 - p0)
> +        vabd.u8         d2,  d21, d23    @ abs(p2 - p0)
> +        vabd.u8         d1,  d22, d23    @ abs(p1 - p0)
> +        vabd.u8         \tmp1,  d25, d24 @ abs(q1 - q0)
> +        vabd.u8         \tmp2,  d26, d24 @ abs(q2 - q0)
> +        vabd.u8         \tmp3,  d27, d24 @ abs(q3 - q0)
> +        vmax.u8         d6,  d6,  d2
> +        vmax.u8         d1,  d1,  \tmp1
> +        vmax.u8         \tmp2,  \tmp2,  \tmp3
> +.if \wd == 16
> +        vabd.u8         d7,  d16, d23    @ abs(p7 - p0)
> +        vmax.u8         d6,  d6,  d1
> +        vabd.u8         d2,  d17, d23    @ abs(p6 - p0)
> +        vmax.u8         d6,  d6,  \tmp2
> +        vabd.u8         d1,  d18, d23    @ abs(p5 - p0)
> +        vcle.u8         d6,  d6,  d0     @ flat8in
> +        vabd.u8         d8,  d19, d23    @ abs(p4 - p0)
> +        vand            d6,  d6,  d4     @ flat8in && fm
> +        vabd.u8         d9,  d28, d24    @ abs(q4 - q0)
> +        vbic            d4,  d4,  d6     @ fm && !flat8in
> +        vabd.u8         d10, d29, d24    @ abs(q5 - q0)
> +        vabd.u8         d11, d30, d24    @ abs(q6 - q0)
> +        vabd.u8         d12, d31, d24    @ abs(q7 - q0)
> +
> +        vmax.u8         d7,  d7,  d2
> +        vmax.u8         d1,  d1,  d8
> +        vmax.u8         d9,  d9,  d10
> +        vmax.u8         d11, d11, d12
> +        @ The rest of the calculation of flat8out is interleaved below
> +.else
> +        @ The rest of the calculation of flat8in is interleaved below
> +.endif
> +.endif
> +
> +        @ Calculate the normal inner loop filter for 2 or 4 pixels
> +        vdup.u8         d3,  r3                 @ H
> +        vabd.u8         d5,  d22, d23           @ abs(p1 - p0)
> +.if \wd == 16
> +        vmax.u8         d7,  d7,  d1
> +        vmax.u8         d9,  d9,  d11
> +.elseif \wd == 8
> +        vmax.u8         d6,  d6,  d1
> +.endif
> +        vabd.u8         d1,  d24, d25           @ abs(q1 - q0)
> +.if \wd == 16
> +        vmax.u8         d7,  d7,  d9
> +.elseif \wd == 8
> +        vmax.u8         d6,  d6,  \tmp2
> +.endif
> +        vsubl.u8        \tmpq1,  d22, d25       @ p1 - q1
> +        vmax.u8         d5,  d5,  d1            @ max(abs(p1 - p0), abs(q1 - q0))
> +        vsubl.u8        \tmpq2,  d24, d23       @ q0 - p0
> +        vmov.s16        \tmpq3,  #3
> +.if \wd == 8
> +        vcle.u8         d6,  d6,  d0            @ flat8in
> +.endif
> +        vcgt.u8         d5,  d5,  d3            @ hev
> +.if \wd == 8
> +        vand            d6,  d6,  d4            @ flat8in && fm
> +.endif
> +        vqmovn.s16      \tmp1,   \tmpq1         @ av_clip_int8(p1 - q1)
> +.if \wd == 16
> +        vcle.u8         d7,  d7,  d0            @ flat8out
> +.elseif \wd == 8
> +        vbic            d4,  d4,  d6            @ fm && !flat8in
> +.endif
> +        vmvn            d5,  d5                 @ !hev
> +.if \wd == 16
> +        vand            d7,  d7,  d6            @ flat8out && flat8in && fm
> +.endif
> +        vand            d5,  d5,  d4            @ !hev && fm && !flat8in
> +
> +        vmul.s16        \tmpq2,  \tmpq2, \tmpq3 @ 3 * (q0 - p0)
> +        vbic            \tmp1,   \tmp1,   d5    @ if (!hev) av_clip_int8 = 0
> +        vmov.s8         d2,  #4
> +        vaddw.s8        \tmpq2,  \tmpq2,  \tmp1 @ 3 * (q0 - p0) [+ av_clip_int8(p1 - q1)]
> +        vmov.s8         d3,  #3
> +        vqmovn.s16      \tmp1,   \tmpq2         @ f
> +.if \wd == 16
> +        vbic            d6,  d6,  d7            @ fm && flat8in && !flat8out
> +.endif
> +
> +        vqadd.s8        \tmp3, \tmp1,  d2       @ FFMIN(f + 4, 127)
> +        vqadd.s8        \tmp4, \tmp1,  d3       @ FFMIN(f + 3, 127)
> +        vmovl.u8        q0,  d23                @ p0
> +        vshr.s8         \tmp3, \tmp3,  #3       @ f1
> +        vshr.s8         \tmp4, \tmp4,  #3       @ f2
> +
> +        vmovl.u8        q1,  d24                @ q0
> +        vaddw.s8        q0,  q0,  \tmp4         @ p0 + f2
> +        vsubw.s8        q1,  q1,  \tmp3         @ q0 - f1
> +        vqmovun.s16     d0,  q0                 @ out p0
> +        vqmovun.s16     d1,  q1                 @ out q0
> +        vrshr.s8        \tmp3, \tmp3, #1        @ f = (f1 + 1) >> 1
> +        vbit            d23, d0,  d4            @ if (fm && !flat8in)
> +        vbit            d24, d1,  d4
> +
> +        vmovl.u8        q0,  d22                @ p1
> +        vmovl.u8        q1,  d25                @ q1
> +        vaddw.s8        q0,  q0,  \tmp3         @ p1 + f
> +        vsubw.s8        q1,  q1,  \tmp3         @ q1 - f
> +        vqmovun.s16     d0,  q0                 @ out p1
> +        vqmovun.s16     d2,  q1                 @ out q1
> +        vbit            d22, d0,  d5            @ if (!hev && fm && !flat8in)
> +        vbit            d25, d2,  d5
> +
> +.if \wd >= 8
> +        vcmp.f64        d6,  #0
> +        vmrs            APSR_nzcv, FPSCR
> +        @ If no pixels need flat8in, jump to flat8out
> +        @ (or to a writeout of the inner 4 pixels, for wd=8)
> +        beq             6f
> +
> +        @ flat8in
> +        vaddl.u8        \tmpq1, d20, d21
> +        vaddl.u8        \tmpq2, d22, d25
> +        vaddl.u8        \tmpq3, d20, d22
> +        vaddl.u8        \tmpq4, d23, d26
> +        vadd.u16        q0,  \tmpq1, \tmpq1
> +        vaddw.u8        q0,  q0,  d23
> +        vaddw.u8        q0,  q0,  d24
> +        vadd.u16        q0,  q0,  \tmpq3
> +        vsub.s16        \tmpq2, \tmpq2, \tmpq1
> +        vsub.s16        \tmpq4, \tmpq4, \tmpq3
> +        vrshrn.u16      d2,  q0,  #3            @ out p2
> +
> +        vadd.u16        q0,  q0,  \tmpq2
> +        vaddl.u8        \tmpq1, d20, d23
> +        vaddl.u8        \tmpq2, d24, d27
> +        vrshrn.u16      d3,  q0,  #3            @ out p1
> +
> +        vadd.u16        q0,  q0,  \tmpq4
> +        vsub.s16        \tmpq2, \tmpq2, \tmpq1
> +        vaddl.u8        \tmpq3, d21, d24
> +        vaddl.u8        \tmpq4, d25, d27
> +        vrshrn.u16      d4,  q0,  #3            @ out p0
> +
> +        vadd.u16        q0,  q0,  \tmpq2
> +        vsub.s16        \tmpq4, \tmpq4, \tmpq3
> +        vaddl.u8        \tmpq1, d22, d25
> +        vaddl.u8        \tmpq2, d26, d27
> +        vrshrn.u16      d5,  q0,  #3            @ out q0
> +
> +        vadd.u16        q0,  q0,  \tmpq4
> +        vsub.s16        \tmpq2, \tmpq2, \tmpq1
> +        vrshrn.u16      \tmp5,  q0,  #3         @ out q1
> +
> +        vadd.u16        q0,  q0,  \tmpq2
> +        @ The output here is written back into the input registers. This
> +        @ doesn't matter for the flat8out part below, since we only update
> +        @ those pixels which won't be touched below.
> +        vbit            d21, d2,  d6
> +        vbit            d22, d3,  d6
> +        vbit            d23, d4,  d6
> +        vrshrn.u16      \tmp6,  q0,  #3         @ out q2
> +        vbit            d24, d5,  d6
> +        vbit            d25, \tmp5,  d6
> +        vbit            d26, \tmp6,  d6
> +.endif
> +.if \wd == 16
> +6:
> +        vorr            d2,  d6,  d7
> +        vcmp.f64        d2,  #0
> +        vmrs            APSR_nzcv, FPSCR
> +        @ If no pixels need flat8in or flat8out, jump to a
> +        @ writeout of the inner 4 pixels
> +        beq             7f
> +        vcmp.f64        d7,  #0
> +        vmrs            APSR_nzcv, FPSCR
> +        @ If no pixels need flat8out, jump to a writeout of the inner 6 pixels
> +        beq             8f
> +
> +        @ flat8out
> +        @ This writes all outputs into d2-d17 (skipping d7 and d16).
> +        @ If this part is skipped, the output is read from d21-d26
> +        @ (which is the input to this section).
> +        vshll.u8        q0,  d16, #3  @ 8 * d16
> +        vsubw.u8        q0,  q0,  d16 @ 7 * d16
> +        vaddw.u8        q0,  q0,  d17
> +        vaddl.u8        q4,  d17, d18
> +        vaddl.u8        q5,  d19, d20
> +        vadd.s16        q0,  q0,  q4
> +        vaddl.u8        q4,  d16, d17
> +        vaddl.u8        q6,  d21, d22
> +        vadd.s16        q0,  q0,  q5
> +        vaddl.u8        q5,  d18, d25
> +        vaddl.u8        q7,  d23, d24
> +        vsub.s16        q5,  q5,  q4
> +        vadd.s16        q0,  q0,  q6
> +        vadd.s16        q0,  q0,  q7
> +        vaddl.u8        q6,  d16, d18
> +        vaddl.u8        q7,  d19, d26
> +        vrshrn.u16      d2,  q0,  #4
> +
> +        vadd.s16        q0,  q0,  q5
> +        vaddl.u8        q4,  d16, d19
> +        vaddl.u8        q5,  d20, d27
> +        vsub.s16        q7,  q7,  q6
> +        vbif            d2,  d17, d7
> +        vrshrn.u16      d3,  q0,  #4
> +
> +        vadd.s16        q0,  q0,  q7
> +        vaddl.u8        q6,  d16, d20
> +        vaddl.u8        q7,  d21, d28
> +        vsub.s16        q5,  q5,  q4
> +        vbif            d3,  d18, d7
> +        vrshrn.u16      d4,  q0,  #4
> +
> +        vadd.s16        q0,  q0,  q5
> +        vaddl.u8        q4,  d16, d21
> +        vaddl.u8        q5,  d22, d29
> +        vsub.s16        q7,  q7,  q6
> +        vbif            d4,  d19, d7
> +        vrshrn.u16      d5,  q0,  #4
> +
> +        vadd.s16        q0,  q0,  q7
> +        vaddl.u8        q6,  d16, d22
> +        vaddl.u8        q7,  d23, d30
> +        vsub.s16        q5,  q5,  q4
> +        vbif            d5,  d20, d7
> +        vrshrn.u16      d6,  q0,  #4
> +
> +        vadd.s16        q0,  q0,  q5
> +        vaddl.u8        q5,  d16, d23
> +        vsub.s16        q7,  q7,  q6
> +        vaddl.u8        q6,  d24, d31
> +        vbif            d6,  d21, d7
> +        vrshrn.u16      d8,  q0,  #4
> +
> +        vadd.s16        q0,  q0,  q7
> +        vsub.s16        q5,  q6,  q5
> +        vaddl.u8        q6,  d17, d24
> +        vaddl.u8        q7,  d25, d31
> +        vbif            d8,  d22, d7
> +        vrshrn.u16      d9,  q0,  #4
> +
> +        vadd.s16        q0,  q0,  q5
> +        vsub.s16        q7,  q7,  q6
> +        vaddl.u8        q6,  d26, d31
> +        vbif            d9,  d23, d7
> +        vrshrn.u16      d10, q0,  #4
> +
> +        vadd.s16        q0,  q0,  q7
> +        vaddl.u8        q7,  d18, d25
> +        vaddl.u8        q9,  d19, d26
> +        vsub.s16        q6,  q6,  q7
> +        vaddl.u8        q7,  d27, d31
> +        vbif            d10, d24, d7
> +        vrshrn.u16      d11, q0,  #4
> +
> +        vadd.s16        q0,  q0,  q6
> +        vaddl.u8        q6,  d20, d27
> +        vsub.s16        q7,  q7,  q9
> +        vaddl.u8        q9,  d28, d31
> +        vbif            d11, d25, d7
> +        vsub.s16        q9,  q9,  q6
> +        vrshrn.u16      d12, q0,  #4
> +
> +        vadd.s16        q0,  q0,  q7
> +        vaddl.u8        q7,  d21, d28
> +        vaddl.u8        q10, d29, d31
> +        vbif            d12, d26, d7
> +        vrshrn.u16      d13, q0,  #4
> +
> +        vadd.s16        q0,  q0,  q9
> +        vsub.s16        q10, q10, q7
> +        vaddl.u8        q9,  d22, d29
> +        vaddl.u8        q11, d30, d31
> +        vbif            d13, d27, d7
> +        vrshrn.u16      d14, q0,  #4
> +
> +        vadd.s16        q0,  q0,  q10
> +        vsub.s16        q11, q11, q9
> +        vbif            d14, d28, d7
> +        vrshrn.u16      d15, q0,  #4
> +
> +        vadd.s16        q0,  q0,  q11
> +        vbif            d15, d29, d7
> +        vrshrn.u16      d17, q0,  #4
> +        vbif            d17, d30, d7
> +.endif
> +.endm
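
As a cross-check while reviewing: reconstructed from the inline comments,
the mask logic this macro computes corresponds to roughly the following
plain C (my own sketch, not code from the patch):

    #include <stdlib.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Per-column filter decisions, following the comments in the macro. */
    static void vp9_lpf_masks(int p3, int p2, int p1, int p0,
                              int q0, int q1, int q2, int q3,
                              int E, int I, int H,
                              int *fm, int *hev, int *flat8in)
    {
        int m = MAX(abs(p3 - p2), abs(p2 - p1));
        m = MAX(m, abs(p1 - p0));
        m = MAX(m, abs(q0 - q1));
        m = MAX(m, abs(q1 - q2));
        m = MAX(m, abs(q2 - q3));
        /* fm: whether this column gets filtered at all */
        *fm = m <= I && 2 * abs(p0 - q0) + (abs(p1 - q1) >> 1) <= E;
        /* hev: high edge variance, selects the narrow filter */
        *hev = MAX(abs(p1 - p0), abs(q1 - q0)) > H;
        /* flat8in (wd >= 8): inner pixels all within 1 of p0/q0; flat8out
         * (wd == 16) is the analogous test on p7..p4 and q4..q7 */
        *flat8in = MAX(MAX(abs(p3 - p0), abs(p2 - p0)),
                       MAX(MAX(abs(p1 - p0), abs(q1 - q0)),
                           MAX(abs(q2 - q0), abs(q3 - q0)))) <= 1;
    }
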
> +
> +@ For wd <= 8, we use d16-d19 and d28-d31 for temp registers,
> +@ while we need those for inputs/outputs in wd=16 and use d8-d15
> +@ for temp registers there instead.
> +.macro loop_filter_4
> +        loop_filter     4,  d16, d17, d18, d19, d28, d29, d30, d31, q8,  q9,  q14, q15
> +.endm
> +
> +.macro loop_filter_8
> +        loop_filter     8,  d16, d17, d18, d19, d28, d29, d30, d31, q8,  q9,  q14, q15
> +.endm
> +
> +.macro loop_filter_16
> +        loop_filter     16, d8,  d9,  d10, d11, d12, d13, d14, d15, q4,  q5,  q6,  q7
> +.endm
> +
> +
> +@ The public functions in this file have the following signature:
> +@ void loop_filter(uint8_t *dst, ptrdiff_t stride, int mb_lim, int lim, int hev_thr);
> +
> +function ff_vp9_loop_filter_v_4_8_neon, export=1
> +        sub             r12, r0,  r1, lsl #2
> +        vld1.8          {d20}, [r12,:64], r1 @ p3
> +        vld1.8          {d24}, [r0, :64], r1 @ q0
> +        vld1.8          {d21}, [r12,:64], r1 @ p2
> +        vld1.8          {d25}, [r0, :64], r1 @ q1
> +        vld1.8          {d22}, [r12,:64], r1 @ p1
> +        vld1.8          {d26}, [r0, :64], r1 @ q2
> +        vld1.8          {d23}, [r12,:64], r1 @ p0
> +        vld1.8          {d27}, [r0, :64], r1 @ q3
> +        sub             r0,  r0,  r1, lsl #2
> +        sub             r12, r12, r1, lsl #1
> +
> +        loop_filter_4
> +
> +        vst1.8          {d22}, [r12,:64], r1
> +        vst1.8          {d24}, [r0, :64], r1
> +        vst1.8          {d23}, [r12,:64], r1
> +        vst1.8          {d25}, [r0, :64], r1
> +9:
> +        bx              lr
> +endfunc
> +
> +function ff_vp9_loop_filter_h_4_8_neon, export=1
> +        sub             r12, r0,  #4
> +        add             r0,  r12, r1, lsl #2
> +        vld1.8          {d20}, [r12], r1
> +        vld1.8          {d24}, [r0],  r1
> +        vld1.8          {d21}, [r12], r1
> +        vld1.8          {d25}, [r0],  r1
> +        vld1.8          {d22}, [r12], r1
> +        vld1.8          {d26}, [r0],  r1
> +        vld1.8          {d23}, [r12], r1
> +        vld1.8          {d27}, [r0],  r1
> +
> +        sub             r12, r12, r1, lsl #2
> +        sub             r0,  r0,  r1, lsl #2
> +        @ Move r0/r12 forward by 2 pixels; we don't need to rewrite the
> +        @ outermost 2 pixels since they aren't changed.
> +        add             r12, r12, #2
> +        add             r0,  r0,  #2
> +
> +        @ Transpose the 8x8 pixels, taking advantage of q registers, to get
> +        @ one register per column.
> +        transpose_q_8x8 q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
> +
> +        loop_filter_4

Since the loop filter core is huge and doesn't differ between horizontal 
and vertical filtering, it's probably beneficial to have a loop filter 
core function and call it from both _h and _v.
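
Something along these lines, perhaps (untested sketch; note that the
'ldr r3, [sp]' for H inside the macro would need its stack offset adjusted
for the extra pushed lr, and a scratch register such as r2 - free once E
has been consumed at the top of the macro - can signal the early exit back
to the caller):

function vp9_loop_filter_4_core
        loop_filter_4
        mov             r2,  #0         @ filtered; caller writes back
        bx              lr
9:
        mov             r2,  #1         @ early exit, nothing to write
        bx              lr
endfunc

function ff_vp9_loop_filter_h_4_8_neon, export=1
        @ ... loads and transpose as before ...
        push            {lr}
        bl              vp9_loop_filter_4_core
        pop             {lr}
        cmp             r2,  #0
        bne             9f
        @ ... transpose back and writeback as before ...
9:
        bx              lr
endfunc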

> +        @ these are in d22, d23, d24, d25 (q11, q12), ordered as rows
> +        @ (8x4 pixels). We need to transpose them to columns, done with a
> +        @ 4x4 transpose (which in practice is two 4x4 transposes of the two
> +        @ 4x4 halves of the 8x4 pixels; into 4x8 pixels).
> +        transpose_q_4x4 q11, q12, d22, d23, d24, d25
> +
> +        vst1.32         {d22[0]}, [r12], r1
> +        vst1.32         {d22[1]}, [r0],  r1
> +        vst1.32         {d23[0]}, [r12], r1
> +        vst1.32         {d23[1]}, [r0],  r1
> +        vst1.32         {d24[0]}, [r12], r1
> +        vst1.32         {d24[1]}, [r0],  r1
> +        vst1.32         {d25[0]}, [r12], r1
> +        vst1.32         {d25[1]}, [r0],  r1
> +9:
> +        bx              lr
> +endfunc
> +
> +function ff_vp9_loop_filter_v_8_8_neon, export=1
> +        sub             r12, r0,  r1, lsl #2
> +        vld1.8          {d20}, [r12,:64], r1 @ p3
> +        vld1.8          {d24}, [r0, :64], r1 @ q0
> +        vld1.8          {d21}, [r12,:64], r1 @ p2
> +        vld1.8          {d25}, [r0, :64], r1 @ q1
> +        vld1.8          {d22}, [r12,:64], r1 @ p1
> +        vld1.8          {d26}, [r0, :64], r1 @ q2
> +        vld1.8          {d23}, [r12,:64], r1 @ p0
> +        vld1.8          {d27}, [r0, :64], r1 @ q3
> +        sub             r12, r0,  r1, lsl #3
> +        sub             r0,  r0,  r1, lsl #2
> +        add             r12, r12, r1
> +
> +        loop_filter_8
> +
> +        vst1.8          {d21}, [r12,:64], r1
> +        vst1.8          {d24}, [r0, :64], r1
> +        vst1.8          {d22}, [r12,:64], r1
> +        vst1.8          {d25}, [r0, :64], r1
> +        vst1.8          {d23}, [r12,:64], r1
> +        vst1.8          {d26}, [r0, :64], r1
> +9:
> +        bx              lr
> +6:
> +        sub             r12, r0,  r1, lsl #1
> +        vst1.8          {d22}, [r12,:64], r1
> +        vst1.8          {d24}, [r0, :64], r1
> +        vst1.8          {d23}, [r12,:64], r1
> +        vst1.8          {d25}, [r0, :64], r1
> +        bx              lr
> +endfunc
> +
> +function ff_vp9_loop_filter_h_8_8_neon, export=1
> +        sub             r12, r0,  #4
> +        add             r0,  r12, r1, lsl #2
> +        vld1.8          {d20}, [r12], r1
> +        vld1.8          {d24}, [r0],  r1
> +        vld1.8          {d21}, [r12], r1
> +        vld1.8          {d25}, [r0],  r1
> +        vld1.8          {d22}, [r12], r1
> +        vld1.8          {d26}, [r0],  r1
> +        vld1.8          {d23}, [r12], r1
> +        vld1.8          {d27}, [r0],  r1
> +
> +        sub             r12, r12, r1, lsl #2
> +        sub             r0,  r0,  r1, lsl #2
> +
> +        transpose_q_8x8 q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
> +
> +        loop_filter_8

ditto

> +        @ Even though only 6 pixels per row have been changed, we write the
> +        @ full 8 pixel registers.
> +        transpose_q_8x8 q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
> +
> +        vst1.8          {d20}, [r12], r1
> +        vst1.8          {d24}, [r0],  r1
> +        vst1.8          {d21}, [r12], r1
> +        vst1.8          {d25}, [r0],  r1
> +        vst1.8          {d22}, [r12], r1
> +        vst1.8          {d26}, [r0],  r1
> +        vst1.8          {d23}, [r12], r1
> +        vst1.8          {d27}, [r0],  r1
> +9:
> +        bx              lr
> +6:
> +        @ If we didn't need to do the flat8in part, we use the same writeback
> +        @ as in loop_filter_h_4_8.
> +        add             r12, r12, #2
> +        add             r0,  r0,  #2
> +        transpose_q_4x4 q11, q12, d22, d23, d24, d25
> +        vst1.32         {d22[0]}, [r12], r1
> +        vst1.32         {d22[1]}, [r0],  r1
> +        vst1.32         {d23[0]}, [r12], r1
> +        vst1.32         {d23[1]}, [r0],  r1
> +        vst1.32         {d24[0]}, [r12], r1
> +        vst1.32         {d24[1]}, [r0],  r1
> +        vst1.32         {d25[0]}, [r12], r1
> +        vst1.32         {d25[1]}, [r0],  r1
> +        bx              lr
> +endfunc
> +
> +function vp9_loop_filter_v_16_neon
> +        sub             r12, r0,  r1, lsl #3
> +        @ Read p7-p0 using r12 and q0-q7 using r0
> +        vld1.8          {d16}, [r12,:64], r1 @ p7
> +        vld1.8          {d24}, [r0, :64], r1 @ q0
> +        vld1.8          {d17}, [r12,:64], r1 @ p6
> +        vld1.8          {d25}, [r0, :64], r1 @ q1
> +        vld1.8          {d18}, [r12,:64], r1 @ p5
> +        vld1.8          {d26}, [r0, :64], r1 @ q2
> +        vld1.8          {d19}, [r12,:64], r1 @ p4
> +        vld1.8          {d27}, [r0, :64], r1 @ q3
> +        vld1.8          {d20}, [r12,:64], r1 @ p3
> +        vld1.8          {d28}, [r0, :64], r1 @ q4
> +        vld1.8          {d21}, [r12,:64], r1 @ p2
> +        vld1.8          {d29}, [r0, :64], r1 @ q5
> +        vld1.8          {d22}, [r12,:64], r1 @ p1
> +        vld1.8          {d30}, [r0, :64], r1 @ q6
> +        vld1.8          {d23}, [r12,:64], r1 @ p0
> +        vld1.8          {d31}, [r0, :64], r1 @ q7
> +        sub             r12, r12, r1, lsl #3
> +        sub             r0,  r0,  r1, lsl #3
> +        add             r12, r12, r1
> +
> +        loop_filter_16
> +
> +        @ If we did the flat8out part, we get the output in
> +        @ d2-d17 (skipping d7 and d16). r12 points to r0 - 7 * stride,
> +        @ store d2-d9 there, and d10-d17 into r0.
> +        vst1.8          {d2},  [r12,:64], r1
> +        vst1.8          {d10}, [r0, :64], r1
> +        vst1.8          {d3},  [r12,:64], r1
> +        vst1.8          {d11}, [r0, :64], r1
> +        vst1.8          {d4},  [r12,:64], r1
> +        vst1.8          {d12}, [r0, :64], r1
> +        vst1.8          {d5},  [r12,:64], r1
> +        vst1.8          {d13}, [r0, :64], r1
> +        vst1.8          {d6},  [r12,:64], r1
> +        vst1.8          {d14}, [r0, :64], r1
> +        vst1.8          {d8},  [r12,:64], r1
> +        vst1.8          {d15}, [r0, :64], r1
> +        vst1.8          {d9},  [r12,:64], r1
> +        vst1.8          {d17}, [r0, :64], r1
> +        sub             r0,  r0,  r1, lsl #3
> +        add             r0,  r0,  r1
> +
> +9:
> +        bx              lr
> +
> +8:
> +        add             r12, r12, r1, lsl #2
> +        @ If we didn't do the flat8out part, the output is left in the
> +        @ input registers.
> +        vst1.8          {d21}, [r12,:64], r1
> +        vst1.8          {d24}, [r0, :64], r1
> +        vst1.8          {d22}, [r12,:64], r1
> +        vst1.8          {d25}, [r0, :64], r1
> +        vst1.8          {d23}, [r12,:64], r1
> +        vst1.8          {d26}, [r0, :64], r1
> +        sub             r0,  r0,  r1, lsl #1
> +        sub             r0,  r0,  r1
> +        bx              lr
> +7:
> +        sub             r12, r0,  r1, lsl #1
> +        vst1.8          {d22}, [r12,:64], r1
> +        vst1.8          {d24}, [r0, :64], r1
> +        vst1.8          {d23}, [r12,:64], r1
> +        vst1.8          {d25}, [r0, :64], r1
> +        sub             r0,  r0,  r1, lsl #1
> +        bx              lr
> +endfunc
> +
> +function ff_vp9_loop_filter_v_16_8_neon, export=1
> +        ldr             r12, [sp]
> +        push            {lr}
> +        vpush           {q4-q7}
> +        push            {r12}
> +        bl              vp9_loop_filter_v_16_neon
> +        add             sp,  sp,  #4
> +        vpop            {q4-q7}
> +        pop             {pc}
> +endfunc
> +
> +function ff_vp9_loop_filter_v_16_16_neon, export=1
> +        ldr             r12, [sp]
> +        // The filter clobbers r3, but we need to keep it for the second round
> +        push            {r3, lr}
> +        vpush           {q4-q7}
> +        push            {r12}
> +        bl              vp9_loop_filter_v_16_neon
> +        add             r0,  #8
> +        ldr             r3,  [sp, #68]
> +        bl              vp9_loop_filter_v_16_neon
> +        add             sp,  sp,  #4
> +        vpop            {q4-q7}
> +        pop             {r3, pc}
> +endfunc
> +
> +function vp9_loop_filter_h_16_neon
> +        sub             r12,  r0,  #8
> +        vld1.8          {d16}, [r12,:64], r1
> +        vld1.8          {d24}, [r0, :64], r1
> +        vld1.8          {d17}, [r12,:64], r1
> +        vld1.8          {d25}, [r0, :64], r1
> +        vld1.8          {d18}, [r12,:64], r1
> +        vld1.8          {d26}, [r0, :64], r1
> +        vld1.8          {d19}, [r12,:64], r1
> +        vld1.8          {d27}, [r0, :64], r1
> +        vld1.8          {d20}, [r12,:64], r1
> +        vld1.8          {d28}, [r0, :64], r1
> +        vld1.8          {d21}, [r12,:64], r1
> +        vld1.8          {d29}, [r0, :64], r1
> +        vld1.8          {d22}, [r12,:64], r1
> +        vld1.8          {d30}, [r0, :64], r1
> +        vld1.8          {d23}, [r12,:64], r1
> +        vld1.8          {d31}, [r0, :64], r1
> +        sub             r0,  r0,  r1, lsl #3
> +        sub             r12, r12, r1, lsl #3
> +
> +        @ The 16x8 pixels read above are in two 8x8 blocks; the left
> +        @ half in d16-d23, and the right half in d24-d31. Do two 8x8
> +        @ transposes of this, to get one column per register. This could
> +        @ be done with two transpose_8x8 as below, but this takes
> +        @ advantage of the q registers.
> +        transpose16_4x4 q8,  q9,  q10, q11, q12, q13, q14, q15
> +        vtrn.8          d16, d17
> +        vtrn.8          d18, d19
> +        vtrn.8          d20, d21
> +        vtrn.8          d22, d23
> +        vtrn.8          d24, d25
> +        vtrn.8          d26, d27
> +        vtrn.8          d28, d29
> +        vtrn.8          d30, d31
> +
> +        loop_filter_16

In theory the same applies here, but I'm not sure how easily the 
different store routines can be triggered from a shared core. Should be 
doable with a 'return value' in a free register, though.
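
Something like this, maybe (untested; again using r2, which is free once
E has been read at the top of the macro):

function vp9_loop_filter_16_core
        loop_filter_16
        mov             r2,  #0         @ full flat8out output in d2-d17
        bx              lr
8:
        mov             r2,  #1         @ flat8in only, output in d21-d26
        bx              lr
7:
        mov             r2,  #2         @ only the inner 4 pixels, d22-d25
        bx              lr
9:
        mov             r2,  #3         @ nothing filtered
        bx              lr
endfunc

with the _v and _h frontends branching on r2 to their own writeback (and,
for _h, transpose) variants.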
 
> +
> +        @ Transpose back; this is the same transpose as above, but
> +        @ we can't take advantage of q registers for the transpose, since
> +        @ all d registers in the transpose aren't consecutive.
> +        transpose_8x8   d16, d2,  d3,  d4,  d5,  d6,  d8,  d9
> +        transpose_8x8   d10, d11, d12, d13, d14, d15, d17, d31
> +
> +        vst1.8          {d16}, [r12,:64], r1
> +        vst1.8          {d10}, [r0, :64], r1
> +
> +        vst1.8          {d2},  [r12,:64], r1
> +        vst1.8          {d11}, [r0, :64], r1
> +
> +        vst1.8          {d3},  [r12,:64], r1
> +        vst1.8          {d12}, [r0, :64], r1
> +
> +        vst1.8          {d4},  [r12,:64], r1
> +        vst1.8          {d13}, [r0, :64], r1
> +
> +        vst1.8          {d5},  [r12,:64], r1
> +        vst1.8          {d14}, [r0, :64], r1
> +
> +        vst1.8          {d6},  [r12,:64], r1
> +        vst1.8          {d15}, [r0, :64], r1
> +
> +        vst1.8          {d8},  [r12,:64], r1
> +        vst1.8          {d17}, [r0, :64], r1
> +
> +        vst1.8          {d9},  [r12,:64], r1
> +        vst1.8          {d31}, [r0, :64], r1
> +        sub             r0,  r0,  r1, lsl #3
> +9:
> +        bx              lr
> +8:
> +        @ The same writeback as in loop_filter_h_8_8
> +        sub             r12, r0,  #4
> +        add             r0,  r12, r1, lsl #2
> +        transpose_q_8x8 q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
> +
> +        vst1.8          {d20}, [r12], r1
> +        vst1.8          {d24}, [r0],  r1
> +        vst1.8          {d21}, [r12], r1
> +        vst1.8          {d25}, [r0],  r1
> +        vst1.8          {d22}, [r12], r1
> +        vst1.8          {d26}, [r0],  r1
> +        vst1.8          {d23}, [r12], r1
> +        vst1.8          {d27}, [r0],  r1
> +        sub             r0,  r0,  r1, lsl #3
> +        add             r0,  r0,  #4
> +        bx              lr
> +7:
> +        @ The same writeback as in loop_filter_h_4_8
> +        sub             r12, r0,  #2
> +        add             r0,  r12, r1, lsl #2
> +        transpose_q_4x4 q11, q12, d22, d23, d24, d25
> +        vst1.32         {d22[0]}, [r12], r1
> +        vst1.32         {d22[1]}, [r0],  r1
> +        vst1.32         {d23[0]}, [r12], r1
> +        vst1.32         {d23[1]}, [r0],  r1
> +        vst1.32         {d24[0]}, [r12], r1
> +        vst1.32         {d24[1]}, [r0],  r1
> +        vst1.32         {d25[0]}, [r12], r1
> +        vst1.32         {d25[1]}, [r0],  r1
> +        sub             r0,  r0,  r1, lsl #3
> +        add             r0,  r0,  #2
> +        bx              lr
> +endfunc
> +
> +function ff_vp9_loop_filter_h_16_8_neon, export=1
> +        ldr             r12, [sp]
> +        push            {lr}
> +        vpush           {q4-q7}
> +        push            {r12}
> +        bl              vp9_loop_filter_h_16_neon
> +        add             sp,  sp,  #4
> +        vpop            {q4-q7}
> +        pop             {pc}
> +endfunc
> +
> +function ff_vp9_loop_filter_h_16_16_neon, export=1
> +        ldr             r12, [sp]
> +        // The filter clobbers r3, but we need to keep it for the second round
> +        push            {r3, lr}
> +        vpush           {q4-q7}
> +        push            {r12}
> +        bl              vp9_loop_filter_h_16_neon
> +        add             r0,  r0,  r1, lsl #3
> +        ldr             r3,  [sp, #68]
> +        bl              vp9_loop_filter_h_16_neon
> +        add             sp,  sp,  #4
> +        vpop            {q4-q7}
> +        pop             {r3, pc}
> +endfunc

patch ok

Janne
