On 2016-11-02 14:58:41 +0200, Martin Storsjö wrote:
> ---
> libavcodec/aarch64/Makefile | 2 +
> libavcodec/aarch64/vp9dsp_init_aarch64.c | 139 ++++++
> libavcodec/aarch64/vp9mc_neon.S | 733 +++++++++++++++++++++++++++++++
> libavcodec/vp9.h | 1 +
> libavcodec/vp9dsp.c | 2 +
> 5 files changed, 877 insertions(+)
> create mode 100644 libavcodec/aarch64/vp9dsp_init_aarch64.c
> create mode 100644 libavcodec/aarch64/vp9mc_neon.S
>
> diff --git a/libavcodec/aarch64/Makefile b/libavcodec/aarch64/Makefile
> index 764bedc..6f1227a 100644
> --- a/libavcodec/aarch64/Makefile
> +++ b/libavcodec/aarch64/Makefile
> @@ -17,6 +17,7 @@ OBJS-$(CONFIG_DCA_DECODER) += aarch64/dcadsp_init.o
> OBJS-$(CONFIG_RV40_DECODER) += aarch64/rv40dsp_init_aarch64.o
> OBJS-$(CONFIG_VC1_DECODER) += aarch64/vc1dsp_init_aarch64.o
> OBJS-$(CONFIG_VORBIS_DECODER) += aarch64/vorbisdsp_init.o
> +OBJS-$(CONFIG_VP9_DECODER) += aarch64/vp9dsp_init_aarch64.o
>
> # ARMv8 optimizations
>
> @@ -43,3 +44,4 @@ NEON-OBJS-$(CONFIG_MPEGAUDIODSP) += aarch64/mpegaudiodsp_neon.o
> NEON-OBJS-$(CONFIG_DCA_DECODER) += aarch64/dcadsp_neon.o \
>                                    aarch64/synth_filter_neon.o
> NEON-OBJS-$(CONFIG_VORBIS_DECODER) += aarch64/vorbisdsp_neon.o
> +NEON-OBJS-$(CONFIG_VP9_DECODER) += aarch64/vp9mc_neon.o
> diff --git a/libavcodec/aarch64/vp9dsp_init_aarch64.c b/libavcodec/aarch64/vp9dsp_init_aarch64.c
> new file mode 100644
> index 0000000..3d414af
> --- /dev/null
> +++ b/libavcodec/aarch64/vp9dsp_init_aarch64.c
> + LOCAL_ALIGNED_16(uint8_t, temp, [((sz < 64 ? 2 * sz : 64) + 8) * sz]); \
((1 + (sz < 64)) * sz + 8) * sz
nit: this looks nicer imo, but no need to change it.
> diff --git a/libavcodec/aarch64/vp9mc_neon.S b/libavcodec/aarch64/vp9mc_neon.S
> new file mode 100644
> index 0000000..9ca2a32
> --- /dev/null
> +++ b/libavcodec/aarch64/vp9mc_neon.S
> @@ -0,0 +1,733 @@
> +/*
> + * Copyright (c) 2016 Google Inc.
> + *
> + * This file is part of Libav.
> + *
> + * Libav is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * Libav is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with Libav; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "libavutil/aarch64/asm.S"
> +
> +const regular_filter, align=4
can't we use the filter constants from C? the single sign extend will be lost
in the noise. same applies to the ARM version.
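Something along these lines, assuming the C table gets exported (the name
ff_vp9_subpel_filters, the [3][16][8] int16_t layout and the \idx parameter
selecting the filter type are assumptions here; an EXTERN_ASM/X()-style
prefix wrapper may also be needed for the external symbol):

  movrel  x9, ff_vp9_subpel_filters    // base of the exported C table (assumed name)
  add     x9, x9, #256*\idx            // 16 entries * 8 coeffs * 2 bytes per filter type
  add     x9, x9, w5, uxtw #4          // 16 bytes per fractional position
  ld1     {v0.8h}, [x9]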
> +// All public functions in this file have the following signature:
> +// typedef void (*vp9_mc_func)(uint8_t *dst, ptrdiff_t dst_stride,
> +// const uint8_t *ref, ptrdiff_t ref_stride,
> +// int h, int mx, int my);
> +
> +function ff_vp9_copy64_neon, export=1
> +1:
> + ldp x5, x6, [x2]
> + stp x5, x6, [x0]
> + ldp x5, x6, [x2, #16]
use more registers, we have enough; probably not noticeable, though.
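E.g. something like this (just a sketch, spreading the loads over the free
temporaries x5-x12):

  ldp  x5,  x6,  [x2]
  ldp  x7,  x8,  [x2, #16]
  ldp  x9,  x10, [x2, #32]
  ldp  x11, x12, [x2, #48]
  stp  x5,  x6,  [x0]
  stp  x7,  x8,  [x0, #16]
  subs w4,  w4,  #1
  stp  x9,  x10, [x0, #32]
  stp  x11, x12, [x0, #48]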
> + stp x5, x6, [x0, #16]
> + subs w4, w4, #1
> + ldp x5, x6, [x2, #32]
> + stp x5, x6, [x0, #32]
> + ldp x5, x6, [x2, #48]
> + stp x5, x6, [x0, #48]
> + add x2, x2, x3
> + add x0, x0, x1
> + b.ne 1b
> + ret
> +endfunc
> +
> +function ff_vp9_avg64_neon, export=1
> + mov x5, x0
> +1:
> + ld1 {v4.16b, v5.16b, v6.16b, v7.16b}, [x2], x3
> + ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x0], x1
> + ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [x2], x3
> + urhadd v0.16b, v0.16b, v4.16b
> + urhadd v1.16b, v1.16b, v5.16b
> + ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x0], x1
> + urhadd v2.16b, v2.16b, v6.16b
> + urhadd v3.16b, v3.16b, v7.16b
> + subs w4, w4, #2
> + urhadd v16.16b, v16.16b, v20.16b
> + urhadd v17.16b, v17.16b, v21.16b
> + st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x5], x1
> + urhadd v18.16b, v18.16b, v22.16b
> + urhadd v19.16b, v19.16b, v23.16b
> + st1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x5], x1
> + b.ne 1b
> + ret
> +endfunc
> +
> +function ff_vp9_copy32_neon, export=1
> +1:
> + ldp x5, x6, [x2]
> + stp x5, x6, [x0]
> + subs w4, w4, #1
> + ldp x5, x6, [x2, #16]
same as for copy64: could use more registers here too.
> + stp x5, x6, [x0, #16]
> + add x2, x2, x3
> + add x0, x0, x1
> + b.ne 1b
> + ret
> +endfunc
> +
> +function ff_vp9_avg32_neon, export=1
> +1:
> + ld1 {v2.16b, v3.16b}, [x2], x3
> + ld1 {v0.16b, v1.16b}, [x0]
> + urhadd v0.16b, v0.16b, v2.16b
> + urhadd v1.16b, v1.16b, v3.16b
> + subs w4, w4, #1
> + st1 {v0.16b, v1.16b}, [x0], x1
> + b.ne 1b
> + ret
> +endfunc
> +
> +function ff_vp9_copy16_neon, export=1
> + add x5, x0, x1
> + add x1, x1, x1
I think lsl 'x1, x1, #1' would be a little clearer
> + add x6, x2, x3
> + add x3, x3, x3
> +1:
> + ld1 {v0.16b}, [x2], x3
> + ld1 {v1.16b}, [x6], x3
> + ld1 {v2.16b}, [x2], x3
> + ld1 {v3.16b}, [x6], x3
> + subs w4, w4, #4
> + st1 {v0.16b}, [x0], x1
> + st1 {v1.16b}, [x5], x1
> + st1 {v2.16b}, [x0], x1
> + st1 {v3.16b}, [x5], x1
> + b.ne 1b
> + ret
> +endfunc
> +
> +function ff_vp9_avg16_neon, export=1
> +1:
> + ld1 {v2.16b}, [x2], x3
> + ld1 {v0.16b}, [x0], x1
> + ld1 {v3.16b}, [x2], x3
> + urhadd v0.16b, v0.16b, v2.16b
> + ld1 {v1.16b}, [x0]
> + sub x0, x0, x1
use another register for the st1, then the sub on x0 isn't needed.
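Sketch, using x5 as a separate store pointer like avg64 already does:

  mov  x5, x0
1:
  ld1  {v2.16b}, [x2], x3
  ld1  {v0.16b}, [x0], x1
  ld1  {v3.16b}, [x2], x3
  urhadd v0.16b, v0.16b, v2.16b
  ld1  {v1.16b}, [x0], x1
  urhadd v1.16b, v1.16b, v3.16b
  subs w4, w4, #2
  st1  {v0.16b}, [x5], x1
  st1  {v1.16b}, [x5], x1
  b.ne 1b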
> + urhadd v1.16b, v1.16b, v3.16b
> + subs w4, w4, #2
> + st1 {v0.16b}, [x0], x1
> + st1 {v1.16b}, [x0], x1
> + b.ne 1b
> + ret
> +endfunc
...
> +function ff_vp9_copy4_neon, export=1
> +1:
> + ld1 {v0.s}[0], [x2], x3
> + ld1 {v1.s}[0], [x2], x3
> + st1 {v0.s}[0], [x0], x1
> + ld1 {v2.s}[0], [x2], x3
> + st1 {v1.s}[0], [x0], x1
> + ld1 {v3.s}[0], [x2], x3
> + subs w4, w4, #4
> + st1 {v2.s}[0], [x0], x1
> + st1 {v3.s}[0], [x0], x1
> + b.ne 1b
> + ret
> +endfunc
> +
> +function ff_vp9_avg4_neon, export=1
> +1:
> + ld1r {v4.2s}, [x2], x3
is there a difference between ld1r and ld1 {}[0]? Either way, we should
just use one variant for loading 4 bytes unless we specifically need
one of them. IIRC the Cortex-A8 is the only core where a load to all
lanes is faster than a load to a single lane.
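For reference, the two variants in question:

  ld1r {v4.2s},   [x2], x3    // load 4 bytes and replicate them to both lanes
  ld1  {v4.s}[0], [x2], x3    // load 4 bytes into lane 0 only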
> + ld1r {v0.2s}, [x0], x1
> + ld1r {v5.2s}, [x2], x3
> + ld1r {v1.2s}, [x0], x1
> + ld1r {v6.2s}, [x2], x3
> + ld1r {v2.2s}, [x0], x1
> + ld1r {v7.2s}, [x2], x3
> + ld1r {v3.2s}, [x0], x1
> + sub x0, x0, x1, lsl #2
> + subs w4, w4, #4
> + urhadd v0.8b, v0.8b, v4.8b
> + urhadd v1.8b, v1.8b, v5.8b
> + urhadd v2.8b, v2.8b, v6.8b
> + urhadd v3.8b, v3.8b, v7.8b
in theory a single urhadd instruction is enough, but I doubt it's faster;
doing 2 urhadds with 8 values each might be, though.
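Sketch of the two-rows-per-register variant (two urhadds of 8 bytes each,
untested, just to illustrate the idea):

  ld1  {v4.s}[0], [x2], x3
  ld1  {v0.s}[0], [x0], x1
  ld1  {v4.s}[1], [x2], x3
  ld1  {v0.s}[1], [x0], x1
  ld1  {v5.s}[0], [x2], x3
  ld1  {v1.s}[0], [x0], x1
  ld1  {v5.s}[1], [x2], x3
  ld1  {v1.s}[1], [x0], x1
  sub  x0, x0, x1, lsl #2
  subs w4, w4, #4
  urhadd v0.8b, v0.8b, v4.8b
  urhadd v1.8b, v1.8b, v5.8b
  st1  {v0.s}[0], [x0], x1
  st1  {v0.s}[1], [x0], x1
  st1  {v1.s}[0], [x0], x1
  st1  {v1.s}[1], [x0], x1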
> + st1 {v0.s}[0], [x0], x1
> + st1 {v1.s}[0], [x0], x1
> + st1 {v2.s}[0], [x0], x1
> + st1 {v3.s}[0], [x0], x1
> + b.ne 1b
> + ret
> +endfunc
> +
> +
> +// Extract a vector from src1-src2 and src4-src5 (src1-src3 and src4-src6
> +// for size >= 16), and multiply-accumulate into dst1 and dst3 (or
> +// dst1-dst2 and dst3-dst4 for size >= 16)
> +.macro extmla dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, offset, size
> + ext v20.16b, \src1, \src2, #(2*\offset)
> + ext v22.16b, \src4, \src5, #(2*\offset)
> +.if \size >= 16
> + mla \dst1, v20.8h, v0.h[\offset]
> + ext v21.16b, \src2, \src3, #(2*\offset)
> + mla \dst3, v22.8h, v0.h[\offset]
> + ext v23.16b, \src5, \src6, #(2*\offset)
> + mla \dst2, v21.8h, v0.h[\offset]
> + mla \dst4, v23.8h, v0.h[\offset]
> +.else
> + mla \dst1, v20.8h, v0.h[\offset]
> + mla \dst3, v22.8h, v0.h[\offset]
> +.endif
> +.endm
> +// The same as above, but don't accumulate straight into the
> +// destination, but use a temp register and accumulate with saturation.
> +.macro extmulqadd dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, offset, size
> + ext v20.16b, \src1, \src2, #(2*\offset)
> + ext v22.16b, \src4, \src5, #(2*\offset)
> +.if \size >= 16
> + mul v20.8h, v20.8h, v0.h[\offset]
> + ext v21.16b, \src2, \src3, #(2*\offset)
> + mul v22.8h, v22.8h, v0.h[\offset]
> + ext v23.16b, \src5, \src6, #(2*\offset)
> + mul v21.8h, v21.8h, v0.h[\offset]
> + mul v23.8h, v23.8h, v0.h[\offset]
> +.else
> + mul v20.8h, v20.8h, v0.h[\offset]
> + mul v22.8h, v22.8h, v0.h[\offset]
> +.endif
> + sqadd \dst1, \dst1, v20.8h
> + sqadd \dst3, \dst3, v22.8h
> +.if \size >= 16
> + sqadd \dst2, \dst2, v21.8h
> + sqadd \dst4, \dst4, v23.8h
> +.endif
> +.endm
> +
> +
> +// Instantiate a horizontal filter function for the given size.
> +// This can work on 4, 8 or 16 pixels in parallel; for larger
> +// widths it will do 16 pixels at a time and loop horizontally.
> +// The actual width is passed in x5, the height in w4 and the
> +// filter coefficients in x9. idx2 is the index of the largest
> +// filter coefficient (3 or 4) and idx1 is the other one of them.
> +.macro do_8tap_h type, size, idx1, idx2
> +function \type\()_8tap_\size\()h_\idx1\idx2
> + sub x2, x2, #3
> + add x6, x0, x1
> + add x7, x2, x3
> + add x1, x1, x1
> + add x3, x3, x3
> + // Only size >= 16 loops horizontally and needs
> + // reduced dst stride
> +.if \size >= 16
> + sub x1, x1, x5
> +.endif
> + // size >= 16 loads two qwords and increments r2,
> + // for size 4/8 it's enough with one qword and no
> + // postincrement
> +.if \size >= 16
> + sub x3, x3, x5
> + sub x3, x3, #8
> +.endif
> + // Load the filter vector
> + ld1 {v0.8h}, [x9]
> +1:
> +.if \size >= 16
> + mov x9, x5
> +.endif
> + // Load src
> +.if \size >= 16
> + ld1 {v4.16b}, [x2], #16
> + ld1 {v16.16b}, [x7], #16
> + ld1 {v6.8b}, [x2], #8
> + ld1 {v18.8b}, [x7], #8
ld1 {v4, v5, v6}, ...
ld1 {v16, v17, v18}, ...
and adapt the rest
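I.e. something like this, loading the 24 bytes as three .8b registers so the
uxtl2 halves go away (sketch only; the tail of the loop would need the same
treatment):

  ld1  {v4.8b,  v5.8b,  v6.8b},  [x2], #24
  ld1  {v16.8b, v17.8b, v18.8b}, [x7], #24
  uxtl v4.8h,  v4.8b
  uxtl v5.8h,  v5.8b
  uxtl v6.8h,  v6.8b
  uxtl v16.8h, v16.8b
  uxtl v17.8h, v17.8b
  uxtl v18.8h, v18.8b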
> +.else
> + ld1 {v4.16b}, [x2]
> + ld1 {v16.16b}, [x7]
> +.endif
> + uxtl2 v5.8h, v4.16b
> + uxtl v4.8h, v4.8b
> + uxtl2 v17.8h, v16.16b
> + uxtl v16.8h, v16.8b
> +.if \size >= 16
> + uxtl v6.8h, v6.8b
> + uxtl v18.8h, v18.8b
> +.endif
> +2:
> +
> + // Accumulate, adding idx2 last with a separate
> + // saturating add. The positive filter coefficients
> + // for all indices except idx2 must add up to less
> + // than 127 for this not to overflow.
> + mul v1.8h, v4.8h, v0.h[0]
> + mul v24.8h, v16.8h, v0.h[0]
> +.if \size >= 16
> + mul v2.8h, v5.8h, v0.h[0]
> + mul v25.8h, v17.8h, v0.h[0]
> +.endif
> + extmla v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 1, \size
> + extmla v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 2, \size
> + extmla v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, \idx1, \size
> + extmla v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 5, \size
> + extmla v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 6, \size
> + extmla v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, 7, \size
> + extmulqadd v1.8h, v2.8h, v24.8h, v25.8h, v4.16b, v5.16b, v6.16b, v16.16b, v17.16b, v18.16b, \idx2, \size
> +
> + // Round, shift and saturate
> + sqrshrun v1.8b, v1.8h, #7
> + sqrshrun v24.8b, v24.8h, #7
> +.if \size >= 16
> + sqrshrun2 v1.16b, v2.8h, #7
> + sqrshrun2 v24.16b, v25.8h, #7
> +.endif
> + // Average
> +.ifc \type,avg
> +.if \size >= 16
> + ld1 {v2.16b}, [x0]
> + ld1 {v3.16b}, [x6]
> + urhadd v1.16b, v1.16b, v2.16b
> + urhadd v24.16b, v24.16b, v3.16b
> +.elseif \size == 8
> + ld1 {v2.8b}, [x0]
> + ld1 {v3.8b}, [x6]
> + urhadd v1.8b, v1.8b, v2.8b
> + urhadd v24.8b, v24.8b, v3.8b
> +.else
> + ld1r {v2.2s}, [x0]
> + ld1r {v3.2s}, [x6]
> + urhadd v1.8b, v1.8b, v2.8b
> + urhadd v24.8b, v24.8b, v3.8b
> +.endif
> +.endif
> + // Store and loop horizontally (for size >= 16)
> +.if \size >= 16
> + st1 {v1.16b}, [x0], #16
> + st1 {v24.16b}, [x6], #16
> + mov v4.16b, v6.16b
> + mov v16.16b, v18.16b
> + subs x9, x9, #16
> + beq 3f
you can branch before the 2 movs
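I.e.:

  st1  {v1.16b},  [x0], #16
  st1  {v24.16b}, [x6], #16
  subs x9, x9, #16
  b.eq 3f
  mov  v4.16b,  v6.16b
  mov  v16.16b, v18.16b
  ld1  {v6.16b},  [x2], #16
  ld1  {v18.16b}, [x7], #16

since the movs are only needed when the horizontal loop continues.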
> + ld1 {v6.16b}, [x2], #16
> + ld1 {v18.16b}, [x7], #16
> + uxtl v5.8h, v6.8b
> + uxtl2 v6.8h, v6.16b
> + uxtl v17.8h, v18.8b
> + uxtl2 v18.8h, v18.16b
> + b 2b
> +.elseif \size == 8
> + st1 {v1.8b}, [x0]
> + st1 {v24.8b}, [x6]
> +.else // \size == 4
> + st1 {v1.s}[0], [x0]
> + st1 {v24.s}[0], [x6]
> +.endif
> +3:
> + // Loop vertically
> + add x0, x0, x1
> + add x6, x6, x1
> + add x2, x2, x3
> + add x7, x7, x3
> + subs w4, w4, #2
> + b.ne 1b
> + ret
> +endfunc
> +.endm
> +
> +.macro do_8tap_h_size size
> +do_8tap_h put, \size, 3, 4
> +do_8tap_h avg, \size, 3, 4
> +do_8tap_h put, \size, 4, 3
> +do_8tap_h avg, \size, 4, 3
> +.endm
> +
> +do_8tap_h_size 4
> +do_8tap_h_size 8
> +do_8tap_h_size 16
> +
> +.macro do_8tap_h_func type, filter, size
> +function ff_vp9_\type\()_\filter\()\size\()_h_neon, export=1
> + movrel x6, \filter\()_filter-16
> + cmp w5, #8
> + add x9, x6, w5, uxtw #4
> + mov x5, #\size
> +.if \size >= 16
> + bge \type\()_8tap_16h_34
> + b \type\()_8tap_16h_43
> +.else
> + bge \type\()_8tap_\size\()h_34
> + b \type\()_8tap_\size\()h_43
> +.endif
> +endfunc
> +.endm
> +
> +.macro do_8tap_h_filters size
> +do_8tap_h_func put, regular, \size
> +do_8tap_h_func avg, regular, \size
> +do_8tap_h_func put, sharp, \size
> +do_8tap_h_func avg, sharp, \size
> +do_8tap_h_func put, smooth, \size
> +do_8tap_h_func avg, smooth, \size
> +.endm
> +
> +do_8tap_h_filters 64
> +do_8tap_h_filters 32
> +do_8tap_h_filters 16
> +do_8tap_h_filters 8
> +do_8tap_h_filters 4
> +
> +
> +// Vertical filters
> +
> +// Round, shift and saturate and store reg1-reg2 over 4 lines
> +.macro do_store4 reg1, reg2, tmp1, tmp2, type
> + sqrshrun \reg1\().8b, \reg1\().8h, #7
> + sqrshrun \reg2\().8b, \reg2\().8h, #7
> +.ifc \type,avg
> + ld1r {\tmp1\().2s}, [x0], x1
> + ld1r {\tmp2\().2s}, [x0], x1
> + ld1 {\tmp1\().s}[1], [x0], x1
> + ld1 {\tmp2\().s}[1], [x0], x1
use a copy of x0 for loading in avg
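Something like this (sketch; x7 is just an example, assuming a scratch
register is free here), which also drops the sub afterwards:

  mov  x7, x0
  ld1r {\tmp1\().2s},   [x7], x1
  ld1r {\tmp2\().2s},   [x7], x1
  ld1  {\tmp1\().s}[1], [x7], x1
  ld1  {\tmp2\().s}[1], [x7], x1
  urhadd \reg1\().8b, \reg1\().8b, \tmp1\().8b
  urhadd \reg2\().8b, \reg2\().8b, \tmp2\().8b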
> + urhadd \reg1\().8b, \reg1\().8b, \tmp1\().8b
> + urhadd \reg2\().8b, \reg2\().8b, \tmp2\().8b
> + sub x0, x0, x1, lsl #2
> +.endif
> + st1 {\reg1\().s}[0], [x0], x1
> + st1 {\reg2\().s}[0], [x0], x1
> + st1 {\reg1\().s}[1], [x0], x1
> + st1 {\reg2\().s}[1], [x0], x1
> +.endm
> +
> +// Round, shift and saturate and store reg1-4
> +.macro do_store reg1, reg2, reg3, reg4, tmp1, tmp2, tmp3, tmp4, type
> + sqrshrun \reg1\().8b, \reg1\().8h, #7
> + sqrshrun \reg2\().8b, \reg2\().8h, #7
> + sqrshrun \reg3\().8b, \reg3\().8h, #7
> + sqrshrun \reg4\().8b, \reg4\().8h, #7
> +.ifc \type,avg
> + ld1 {\tmp1\().8b}, [x0], x1
> + ld1 {\tmp2\().8b}, [x0], x1
> + ld1 {\tmp3\().8b}, [x0], x1
> + ld1 {\tmp4\().8b}, [x0], x1
use a copy of x0
> + urhadd \reg1\().8b, \reg1\().8b, \tmp1\().8b
> + urhadd \reg2\().8b, \reg2\().8b, \tmp2\().8b
> + urhadd \reg3\().8b, \reg3\().8b, \tmp3\().8b
> + urhadd \reg4\().8b, \reg4\().8b, \tmp4\().8b
> + sub x0, x0, x1, lsl #2
> +.endif
> + st1 {\reg1\().8b}, [x0], x1
> + st1 {\reg2\().8b}, [x0], x1
> + st1 {\reg3\().8b}, [x0], x1
> + st1 {\reg4\().8b}, [x0], x1
> +.endm
...
> +// Instantiate a vertical filter function for filtering a 4 pixels wide
> +// slice. The first half of the registers contain one row, while the second
> +// half of a register contains the second-next row (also stored in the first
> +// half of the register two steps ahead). The convolution does two outputs
> +// at a time; the output of v17-v24 into one, and v18-v25 into another one.
> +// The first half of first output is the first output row, the first half
> +// of the other output is the second output row. The second halves of the
> +// registers are rows 3 and 4.
> +// This only is designed to work for 4 or 8 output lines.
> +.macro do_8tap_4v type, idx1, idx2
> +function \type\()_8tap_4v_\idx1\idx2
> + sub x2, x2, x3, lsl #1
> + sub x2, x2, x3
> + ld1 {v0.8h}, [x6]
> +
> + ld1r {v1.2s}, [x2], x3
> + ld1r {v2.2s}, [x2], x3
> + ld1r {v3.2s}, [x2], x3
> + ld1r {v4.2s}, [x2], x3
> + ld1r {v5.2s}, [x2], x3
> + ld1r {v6.2s}, [x2], x3
> + ext v1.8b, v1.8b, v3.8b, #4
I think '{zip,trn}1 v1.2s, v1.2s, v3.2s' is the clearer pattern on
64-bit. It also works with ld1 {}[0] loads, in case there is no
difference between single-lane ld1 and ld1r.
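I.e. something like:

  ld1  {v1.s}[0], [x2], x3
  ld1  {v2.s}[0], [x2], x3
  ld1  {v3.s}[0], [x2], x3
  ld1  {v4.s}[0], [x2], x3
  zip1 v1.2s, v1.2s, v3.2s    // rows 1 and 3
  zip1 v2.2s, v2.2s, v4.2s    // rows 2 and 4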
> + ld1r {v7.2s}, [x2], x3
> + ext v2.8b, v2.8b, v4.8b, #4
> + ld1r {v30.2s}, [x2], x3
> + uxtl v17.8h, v1.8b
> + ext v3.8b, v3.8b, v5.8b, #4
> + ld1r {v31.2s}, [x2], x3
> + uxtl v18.8h, v2.8b
> + ext v4.8b, v4.8b, v6.8b, #4
> + uxtl v19.8h, v3.8b
> + ext v5.8b, v5.8b, v7.8b, #4
> + ld1 {v30.s}[1], [x2], x3
same applies as for the 32-bit version: we should do the load-and-combine
pattern for all but the last two loads. Small cost for height == 4, larger
gain for height == 8.
I can give numbers for Cortex-A57 and probably Kryo. I don't think I can
enable the cycle counter on my Cortex-A72 device.
The rest is ok for me
Janne