Re: [FFmpeg-devel] [PATCH] huffyuvencdsp: Add ff_diff_bytes_{sse2, avx2}

2015-10-21 Thread Timothy Gu
On Wed, Oct 21, 2015 at 10:32 AM Timothy Gu  wrote:

> On Tue, Oct 20, 2015 at 7:36 PM James Almer  wrote:
>
>> On 10/20/2015 10:32 PM, Timothy Gu wrote:
>>
>> > +; mov type used for src1q, dstq, first reg, second reg
>> > +%macro DIFF_BYTES_LOOP_CORE 4
>> > +%if regsize != 16
>>
>> %if mmsize != 16
>>
>> By checking regsize you're using the SSE2 version in the AVX2 xmm loop.
>> Check for mmsize instead, which is always 32 since you used INIT_YMM.
>>
>
> Fixed locally.
>

Reviewed by Henrik on IRC and pushed.

Timothy


Re: [FFmpeg-devel] [PATCH] huffyuvencdsp: Add ff_diff_bytes_{sse2, avx2}

2015-10-21 Thread Timothy Gu
On Tue, Oct 20, 2015 at 7:36 PM James Almer  wrote:

> On 10/20/2015 10:32 PM, Timothy Gu wrote:
> > +; mov type used for src1q, dstq, first reg, second reg
> > +%macro DIFF_BYTES_LOOP_CORE 4
> > +%if regsize != 16
>
> %if mmsize != 16
>
> By checking regsize you're using the SSE2 version in the AVX2 xmm loop.
> Check for mmsize instead, which is always 32 since you used INIT_YMM.
>

Fixed locally.

Timothy


[FFmpeg-devel] [PATCH] huffyuvencdsp: Add ff_diff_bytes_{sse2, avx2}

2015-10-20 Thread Timothy Gu
SSE2 version 4%-35% faster than MMX depending on the width.
AVX2 version 1%-13% faster than SSE2 depending on the width.
---

Addressed James's and Henrik's advice. Removed the width-based heuristics.
Made both aligned and unaligned versions available. The AVX2 version
gracefully falls back on SSE2.
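
For reference, here is a minimal C sketch of the operation being vectorized
(illustrative only; the function name is made up, and the arguments simply
mirror the prototype in the .asm comment below):

#include <stdint.h>

static void diff_bytes_ref(uint8_t *dst, const uint8_t *src1,
                           const uint8_t *src2, intptr_t w)
{
    intptr_t i;
    for (i = 0; i < w; i++)
        dst[i] = src1[i] - src2[i];   /* wraps mod 256, like psubb */
}

sub_left_prediction() below calls this with src2 = src1 - 1, so each output
byte is the left-predicted residual src[i] - src[i-1].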

---
 libavcodec/huffyuvenc.c            |   4 +-
 libavcodec/x86/huffyuvencdsp.asm   | 110 +++--
 libavcodec/x86/huffyuvencdsp_mmx.c |  14 -
 3 files changed, 107 insertions(+), 21 deletions(-)

diff --git a/libavcodec/huffyuvenc.c b/libavcodec/huffyuvenc.c
index 49d711a..e080cd9 100644
--- a/libavcodec/huffyuvenc.c
+++ b/libavcodec/huffyuvenc.c
@@ -60,12 +60,12 @@ static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
             }
             return left;
         } else {
-            for (i = 0; i < 16; i++) {
+            for (i = 0; i < 32; i++) {
                 const int temp = src[i];
                 dst[i] = temp - left;
                 left   = temp;
             }
-            s->hencdsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
+            s->hencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
             return src[w-1];
         }
     } else {
diff --git a/libavcodec/x86/huffyuvencdsp.asm b/libavcodec/x86/huffyuvencdsp.asm
index e001906..699fd38 100644
--- a/libavcodec/x86/huffyuvencdsp.asm
+++ b/libavcodec/x86/huffyuvencdsp.asm
@@ -27,9 +27,9 @@
 
 section .text
 
-INIT_MMX mmx
 ; void ff_diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
 ;                        intptr_t w);
+%macro DIFF_BYTES_PROLOGUE 0
 %if ARCH_X86_32
 cglobal diff_bytes, 3,5,2, dst, src1, src2
 %define wq r4q
@@ -40,34 +40,108 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
 DECLARE_REG_TMP 4
 %endif ; ARCH_X86_32
 %define i t0q
+%endmacro
+
+; label to jump to if w < regsize
+%macro DIFF_BYTES_LOOP_PREP 1
     mov            i, wq
-    and            i, -2 * mmsize
-    jz  .setup_loop2
+    and            i, -2 * regsize
+    jz  %1
     add         dstq, i
     add        src1q, i
     add        src2q, i
     neg            i
-.loop:
-    mova          m0, [src1q + i]
-    mova          m1, [src1q + i + mmsize]
-    psubb         m0, [src2q + i]
-    psubb         m1, [src2q + i + mmsize]
-    mova  [dstq + i], m0
-    mova [mmsize + dstq + i], m1
-    add            i, 2 * mmsize
-    jl .loop
-.setup_loop2:
-    and           wq, 2 * mmsize - 1
-    jz  .end
+%endmacro
+
+; mov type used for src1q, dstq, first reg, second reg
+%macro DIFF_BYTES_LOOP_CORE 4
+%if regsize != 16
+    mov%1         %3, [src1q + i]
+    mov%1         %4, [src1q + i + regsize]
+    psubb         %3, [src2q + i]
+    psubb         %4, [src2q + i + regsize]
+    mov%2 [dstq + i], %3
+    mov%2 [regsize + dstq + i], %4
+%else
+; SSE enforces alignment of psubb operand
+    mov%1         %3, [src1q + i]
+    movu          %4, [src2q + i]
+    psubb         %3, %4
+    mov%2 [dstq + i], %3
+    mov%1         %3, [src1q + i + regsize]
+    movu          %4, [src2q + i + regsize]
+    psubb         %3, %4
+    mov%2 [regsize + dstq + i], %3
+%endif
+%endmacro
+
+%macro DIFF_BYTES_BODY 2 ; mov type used for src1q, for dstq
+%define regsize mmsize
+.loop_%1%2:
+    DIFF_BYTES_LOOP_CORE %1, %2, m0, m1
+    add            i, 2 * regsize
+    jl  .loop_%1%2
+.skip_main_%1%2:
+    and           wq, 2 * regsize - 1
+    jz  .end_%1%2
+%if mmsize > 16
+; fall back to narrower xmm
+%define regsize mmsize / 2
+    DIFF_BYTES_LOOP_PREP .setup_loop_gpr_aa
+.loop2_%1%2:
+    DIFF_BYTES_LOOP_CORE %1, %2, xm0, xm1
+    add            i, 2 * regsize
+    jl  .loop2_%1%2
+.setup_loop_gpr_%1%2:
+    and           wq, 2 * regsize - 1
+    jz  .end_%1%2
+%endif
     add         dstq, wq
     add        src1q, wq
     add        src2q, wq
     neg           wq
-.loop2:
+.loop_gpr_%1%2:
     mov          t0b, [src1q + wq]
     sub          t0b, [src2q + wq]
     mov [dstq + wq], t0b
     inc           wq
-    jl .loop2
-.end:
+    jl  .loop_gpr_%1%2
+.end_%1%2:
     REP_RET
+%endmacro
+
+%if ARCH_X86_32
+INIT_MMX mmx
+DIFF_BYTES_PROLOGUE
+%define regsize mmsize
+DIFF_BYTES_LOOP_PREP .skip_main_aa
+DIFF_BYTES_BODY a, a
+%endif
+
+INIT_XMM sse2
+DIFF_BYTES_PROLOGUE
+%define regsize mmsize
+DIFF_BYTES_LOOP_PREP .skip_main_aa
+    test        dstq, regsize - 1
+    jnz .loop_uu
+    test       src1q, regsize - 1
+    jnz .loop_ua
+DIFF_BYTES_BODY a, a
+DIFF_BYTES_BODY u, a
+DIFF_BYTES_BODY u, u
+
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+DIFF_BYTES_PROLOGUE
+%define regsize mmsize
+; Directly using unaligned SSE2 version is marginally faster than

Re: [FFmpeg-devel] [PATCH] huffyuvencdsp: Add ff_diff_bytes_{sse2, avx2}

2015-10-20 Thread James Almer
On 10/20/2015 10:32 PM, Timothy Gu wrote:
> SSE2 version 4%-35% faster than MMX depending on the width.
> AVX2 version 1%-13% faster than SSE2 depending on the width.
> ---
> 
> Addressed James's and Henrik's advice. Removed the width-based heuristics.
> Made both aligned and unaligned versions available. The AVX2 version
> gracefully falls back on SSE2.
> 
> ---
>  libavcodec/huffyuvenc.c            |   4 +-
>  libavcodec/x86/huffyuvencdsp.asm   | 110 +++--
>  libavcodec/x86/huffyuvencdsp_mmx.c |  14 -
>  3 files changed, 107 insertions(+), 21 deletions(-)
> 
> diff --git a/libavcodec/huffyuvenc.c b/libavcodec/huffyuvenc.c
> index 49d711a..e080cd9 100644
> --- a/libavcodec/huffyuvenc.c
> +++ b/libavcodec/huffyuvenc.c
> @@ -60,12 +60,12 @@ static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
>              }
>              return left;
>          } else {
> -            for (i = 0; i < 16; i++) {
> +            for (i = 0; i < 32; i++) {
>                  const int temp = src[i];
>                  dst[i] = temp - left;
>                  left   = temp;
>              }
> -            s->hencdsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
> +            s->hencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
>              return src[w-1];
>          }
>      } else {
> diff --git a/libavcodec/x86/huffyuvencdsp.asm b/libavcodec/x86/huffyuvencdsp.asm
> index e001906..699fd38 100644
> --- a/libavcodec/x86/huffyuvencdsp.asm
> +++ b/libavcodec/x86/huffyuvencdsp.asm
> @@ -27,9 +27,9 @@
>  
>  section .text
>  
> -INIT_MMX mmx
> ; void ff_diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
> ;                        intptr_t w);
> +%macro DIFF_BYTES_PROLOGUE 0
>  %if ARCH_X86_32
>  cglobal diff_bytes, 3,5,2, dst, src1, src2
>  %define wq r4q
> @@ -40,34 +40,108 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
>  DECLARE_REG_TMP 4
>  %endif ; ARCH_X86_32
>  %define i t0q
> +%endmacro
> +
> +; label to jump to if w < regsize
> +%macro DIFF_BYTES_LOOP_PREP 1
>      mov            i, wq
> -    and            i, -2 * mmsize
> -    jz  .setup_loop2
> +    and            i, -2 * regsize
> +    jz  %1
>      add         dstq, i
>      add        src1q, i
>      add        src2q, i
>      neg            i
> -.loop:
> -    mova          m0, [src1q + i]
> -    mova          m1, [src1q + i + mmsize]
> -    psubb         m0, [src2q + i]
> -    psubb         m1, [src2q + i + mmsize]
> -    mova  [dstq + i], m0
> -    mova [mmsize + dstq + i], m1
> -    add            i, 2 * mmsize
> -    jl .loop
> -.setup_loop2:
> -    and           wq, 2 * mmsize - 1
> -    jz  .end
> +%endmacro
> +
> +; mov type used for src1q, dstq, first reg, second reg
> +%macro DIFF_BYTES_LOOP_CORE 4
> +%if regsize != 16

%if mmsize != 16

By checking regsize you're using the SSE2 version in the AVX2 xmm loop. Check
for mmsize instead, which is always 32 since you used INIT_YMM.

Should be good otherwise, but wait for Hendrik in case he wants to comment.
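
To spell out the distinction with a hypothetical C-preprocessor sketch (MMSIZE
and REGSIZE stand in for the NASM symbols mmsize and regsize; this is
illustrative only, not part of the patch): under INIT_YMM, mmsize stays 32 for
the whole function, while regsize is redefined to 16 for the xmm fallback loop,
so only the mmsize check keeps selecting the AVX-style body there.

#include <stdio.h>

#define MMSIZE  32               /* fixed by INIT_YMM                         */
#define REGSIZE (MMSIZE / 2)     /* value of regsize inside the xmm fallback  */

int main(void)
{
#if REGSIZE != 16                /* false here, so the %else branch is taken  */
    puts("regsize check: direct psubb-from-memory (AVX-style) body");
#else
    puts("regsize check: movu + register psubb (SSE2-safe) body");
#endif
#if MMSIZE != 16                 /* still true under INIT_YMM                 */
    puts("mmsize check: keeps the AVX-style body, as suggested");
#endif
    return 0;
}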
