Hi,

The following patches add an AVX (an Intel x86 extension) FFT implementation. Since I do not have a Sandy Bridge machine myself, I have no idea of its performance. Benchmarks (for example, using fft-test -s) are thus very welcome. Also welcome are suggestions for optimizing it further, in particular the 8-point FFT (in the T8_AVX macro), which is not much faster than the SSE version.
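
In case someone wants a quick number outside the tree, below is a rough, hedged timing sketch against the public avfft.h API (fft-test -s remains the reference benchmark; the transform size and run count are arbitrary):

/* Standalone timing sketch, not part of the patches.
 * Build roughly as: gcc bench.c -lavcodec -lavutil */
#include <stdio.h>
#include <time.h>
#include <libavcodec/avfft.h>
#include <libavutil/mem.h>

int main(void)
{
    const int nbits = 10, n = 1 << nbits, runs = 100000;
    FFTContext *ctx = av_fft_init(nbits, 0);
    /* av_malloc() gives 32-byte alignment once patch 2 is applied */
    FFTComplex *z   = av_malloc(n * sizeof(*z));
    clock_t t0;
    int i;

    for (i = 0; i < n; i++) {
        z[i].re = (FFTSample)i;
        z[i].im = 0;
    }

    t0 = clock();
    for (i = 0; i < runs; i++) {
        av_fft_permute(ctx, z);
        av_fft_calc(ctx, z);
    }
    printf("%d runs of a %d-point FFT: %.3f s\n", runs, n,
           (double)(clock() - t0) / CLOCKS_PER_SEC);

    av_fft_end(ctx);
    av_free(z);
    return 0;
}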

One thing noteworthy about AVX is that it uses 256-bit registers, so av_malloc now needs to align pointers to 32-byte boundaries. If this patch is accepted, I'll have to change a bunch of audio decoders to increase their buffers' alignment (note that AVX does not crash if a 256-bit load is done on a pointer that is only 128-bit aligned, but the load can split across a cache line and thus cause a performance hit).
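
To give an idea of the decoder-side follow-up, here is a hedged sketch (the decoder context and buffer name are made up) of bumping a static FFT buffer from 16- to 32-byte alignment with the existing DECLARE_ALIGNED macro:

#include "libavutil/mem.h"   /* DECLARE_ALIGNED() */
#include "libavcodec/fft.h"  /* FFTComplex */

/* Hypothetical decoder context: 16-byte alignment was enough for SSE, but
 * 256-bit AVX loads want 32 bytes to avoid crossing a cache line. */
typedef struct SomeDecoderContext {
    DECLARE_ALIGNED(32, FFTComplex, fft_buf)[1024]; /* was DECLARE_ALIGNED(16, ...) */
} SomeDecoderContext;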

-Vitor

PS: cross-posted to both lists since I'm interested in feedback from both groups.
From 7eb9c4997e3e37af34e7179a22ffaeaa6bb387f5 Mon Sep 17 00:00:00 2001
From: Vitor Sessak <[email protected]>
Date: Wed, 30 Mar 2011 18:47:27 +0200
Subject: [PATCH 1/2] Change x86 asm FFT permutation to prepare for the later AVX FFT addition

---
 libavcodec/fft.c           |    2 +-
 libavcodec/x86/fft_mmx.asm |   12 ++++++------
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/libavcodec/fft.c b/libavcodec/fft.c
index 76e9c41..0636d97 100644
--- a/libavcodec/fft.c
+++ b/libavcodec/fft.c
@@ -117,7 +117,7 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
     for(i=0; i<n; i++) {
         int j = i;
         if (s->fft_permutation == FF_FFT_PERM_SWAP_LSBS)
-            j = (j&~3) | ((j>>1)&1) | ((j<<1)&2);
+            j = (j&~7) | ((j>>1)&3) | ((j<<2)&4);
         s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] = j;
     }
 
diff --git a/libavcodec/x86/fft_mmx.asm b/libavcodec/x86/fft_mmx.asm
index e3829b8..954046e 100644
--- a/libavcodec/x86/fft_mmx.asm
+++ b/libavcodec/x86/fft_mmx.asm
@@ -299,9 +299,9 @@ fft4_sse:
 align 16
 fft8_sse:
     mova     m0, Z(0)
-    mova     m1, Z(1)
+    mova     m1, Z(2)
     T4_SSE   m0, m1, m2
-    mova     m2, Z(2)
+    mova     m2, Z(1)
     mova     m3, Z(3)
     T8_SSE   m0, m1, m2, m3, m4, m5
     mova   Z(0), m0
@@ -313,19 +313,19 @@ fft8_sse:
 align 16
 fft16_sse:
     mova     m0, Z(0)
-    mova     m1, Z(1)
+    mova     m1, Z(2)
     T4_SSE   m0, m1, m2
-    mova     m2, Z(2)
+    mova     m2, Z(1)
     mova     m3, Z(3)
     T8_SSE   m0, m1, m2, m3, m4, m5
     mova     m4, Z(4)
-    mova     m5, Z(5)
+    mova     m5, Z(6)
     mova   Z(0), m0
     mova   Z(1), m1
     mova   Z(2), m2
     mova   Z(3), m3
     T4_SSE   m4, m5, m6
-    mova     m6, Z2(6)
+    mova     m6, Z2(5)
     mova     m7, Z2(7)
     T4_SSE   m6, m7, m0
     PASS_SMALL 0, [cos_16], [cos_16+16]
-- 
1.7.1
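
Not part of the patches themselves, but for reviewers, a small standalone illustration of what the revtab change in patch 1 does to the low index bits (the old code swaps bits 0 and 1, the new code rotates the low three bits right by one):

#include <stdio.h>

int main(void)
{
    int i;
    for (i = 0; i < 8; i++) {
        int old_j = (i & ~3) | ((i >> 1) & 1) | ((i << 1) & 2); /* swap bits 0,1  */
        int new_j = (i & ~7) | ((i >> 1) & 3) | ((i << 2) & 4); /* ror low 3 bits */
        printf("%d -> old %d, new %d\n", i, old_j, new_j);
    }
    /* prints: 0->0,0  1->2,4  2->1,1  3->3,5  4->4,2  5->6,6  6->5,3  7->7,7 */
    return 0;
}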

From dd9e6a9633baf7a114e483a476bc9ab72b2fbc47 Mon Sep 17 00:00:00 2001
From: Vitor Sessak <[email protected]>
Date: Fri, 1 Apr 2011 18:30:28 +0200
Subject: [PATCH 2/2] Add AVX FFT implementation

---
 libavcodec/x86/fft.c       |    9 +-
 libavcodec/x86/fft.h       |    1 +
 libavcodec/x86/fft_mmx.asm |  320 ++++++++++++++++++++++++++++++++++++++++++--
 libavcodec/x86/fft_sse.c   |   12 ++
 libavcodec/x86/x86inc.asm  |   17 +++
 libavutil/mem.c            |   16 +--
 6 files changed, 355 insertions(+), 20 deletions(-)

diff --git a/libavcodec/x86/fft.c b/libavcodec/x86/fft.c
index 2426a3d..8dc3fca 100644
--- a/libavcodec/x86/fft.c
+++ b/libavcodec/x86/fft.c
@@ -25,7 +25,14 @@ av_cold void ff_fft_init_mmx(FFTContext *s)
 {
 #if HAVE_YASM
     int has_vectors = av_get_cpu_flags();
-    if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE) {
+    if (has_vectors & AV_CPU_FLAG_AVX && HAVE_AVX) {
+        /* AVX for Sandy Bridge */
+        s->imdct_calc  = ff_imdct_calc_sse;
+        s->imdct_half  = ff_imdct_half_sse;
+        s->fft_permute = ff_fft_permute_sse;
+        s->fft_calc    = ff_fft_calc_avx;
+        s->fft_permutation = FF_FFT_PERM_SWAP_LSBS;
+    } else if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE) {
         /* SSE for P3/P4/K8 */
         s->imdct_calc  = ff_imdct_calc_sse;
         s->imdct_half  = ff_imdct_half_sse;
diff --git a/libavcodec/x86/fft.h b/libavcodec/x86/fft.h
index 073d408..f879ca2 100644
--- a/libavcodec/x86/fft.h
+++ b/libavcodec/x86/fft.h
@@ -22,6 +22,7 @@
 #include "libavcodec/fft.h"
 
 void ff_fft_permute_sse(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_avx(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z);
 void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z);
diff --git a/libavcodec/x86/fft_mmx.asm b/libavcodec/x86/fft_mmx.asm
index 954046e..b1b28ea 100644
--- a/libavcodec/x86/fft_mmx.asm
+++ b/libavcodec/x86/fft_mmx.asm
@@ -1,6 +1,7 @@
 ;******************************************************************************
 ;* FFT transform with SSE/3DNow optimizations
 ;* Copyright (c) 2008 Loren Merritt
+;* AVX ASM Copyright (c) 2011 Vitor Sessak
 ;*
 ;* This algorithm (though not any of the implementation details) is
 ;* based on libdjbfft by D. J. Bernstein.
@@ -49,11 +50,22 @@ endstruc
 SECTION_RODATA
 
 %define M_SQRT1_2 0.70710678118654752440
-ps_root2: times 4 dd M_SQRT1_2
-ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
-ps_p1p1m1p1: dd 0, 0, 1<<31, 0
+%define M_COS_PI_1_8 0.923879532511287
+%define M_COS_PI_3_8 0.38268343236509
+
+ps_cos16_1: dd 1.0, M_COS_PI_1_8, M_SQRT1_2, M_COS_PI_3_8, 1.0, M_COS_PI_1_8, M_SQRT1_2, M_COS_PI_3_8
+ps_cos16_2: dd 0, M_COS_PI_3_8, M_SQRT1_2, M_COS_PI_1_8, 0, -M_COS_PI_3_8, -M_SQRT1_2, -M_COS_PI_1_8
+
+ps_root2: times 8 dd M_SQRT1_2
+ps_root2mppm: dd -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2, -M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, -M_SQRT1_2
+ps_p1p1m1p1: dd 0, 0, 1<<31, 0, 0, 0, 1<<31, 0
 ps_m1p1: dd 1<<31, 0
 
+perm1: dd 0x00, 0x02, 0x03, 0x01, 0x03, 0x00, 0X02, 0x01
+perm2: dd 0x00, 0x01, 0x02, 0x03, 0x01, 0x00, 0X02, 0x03
+ps_p1p1m1p1root2: dd 1.0, 1.0, -1.0, 1.0, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2
+ps_m1m1p1m1p1m1m1m1: dd 1<<31, 1<<31, 0, 1<<31, 0, 1<<31, 1<<31, 1<<31
+
 %assign i 16
 %rep 13
 cextern cos_ %+ i
@@ -143,6 +155,76 @@ section .text align=16
     SWAP     %4, %5
 %endmacro
 
+;  in: %1 = {r0,i0,r2,i2,r4,i4,r6,i6}
+;      %2 = {r1,i1,r3,i3,r5,i5,r7,i7}
+;      %3, %4, %5 tmp
+; out: %1 = {r0,r1,r2,r3,i0,i1,i2,i3}
+;      %2 = {r4,r5,r6,r7,i4,i5,i6,i7}
+%macro T8_AVX 5
+    vsubps     %5, %1, %2	; v  = %1 - %2
+    vaddps     %3, %1, %2	; w  = %1 + %2
+    vmulps     %2, %5, [ps_p1p1m1p1root2]  ; v *= vals1
+    vpermilps  %2, %2, [perm1]
+    vblendps   %1, %2, %3, 0x33 ; q = {w1,w2,v4,v2,w5,w6,v7,v6}
+    vshufps    %5, %3, %2, 0x4e ; r = {w3,w4,v1,v3,w7,w8,v8,v5}
+    vsubps     %4, %5, %1	; s = r - q
+    vaddps     %1, %5, %1	; u = r + q
+    vpermilps  %1, %1, [perm2]  ; k  = {u1,u2,u3,u4,u6,u5,u7,u8}
+    vshufps    %5, %4, %1, 0xbb
+    vshufps    %3, %4, %1, 0xee
+    vperm2f128 %3, %3, %5, 0x13
+    vxorps     %4, %4, [ps_m1m1p1m1p1m1m1m1]  ; s *= {1,1,-1,-1,1,-1,-1,-1}
+    vshufps    %2, %1, %4, 0xdd
+    vshufps    %1, %1, %4, 0x88
+    vperm2f128 %4, %2, %1, 0x02 ; v  = {k1,k3,s1,s3,k2,k4,s2,s4}
+    vperm2f128 %1, %1, %2, 0x13 ; w  = {k6,k8,s6,s8,k5,k7,s5,s7}
+    vsubps     %5, %1, %3
+    vblendps   %1, %5, %1, 0x55 ; w -= {0,s7,0,k7,0,s8,0,k8}
+    vsubps     %2, %4, %1	; %2 = v - w
+    vaddps     %1, %4, %1	; %1 = v + w
+%endmacro
+
+; Do two fft4 transforms
+;
+; in:  %1={r0,i0,r2,i2,r4,i4,r6,i6} %2={r1,i1,r3,i3,r5,i5,r7,i7}
+; out: %1={r0,r1,r2,r3,r4,r5,r6,r7} %2={i0,i1,i2,i3,i4,i5,i6,i7}
+%macro T4_2_AVX 3
+    vsubps   %3, %1, %2       ; {t3,t4,-t8,t7}
+    vaddps   %1, %1, %2       ; {t1,t2,t6,t5}
+    vxorps   %3, %3, [ps_p1p1m1p1]
+    vshufps  %2, %1, %3, 0xbe ; {t6,t5,t7,t8}
+    vshufps  %1, %1, %3, 0x44 ; {t1,t2,t3,t4}
+    vsubps   %3, %1, %2       ; {r2,i2,r3,i3}
+    vaddps   %1, %1, %2       ; {r0,i0,r1,i1}
+    vshufps  %2, %1, %3, 0xdd ; {i0,i1,i2,i3}
+    vshufps  %1, %1, %3, 0x88 ; {r0,r1,r2,r3}
+%endmacro
+
+; Do two fft8 transforms
+;
+; in:  %1={r0,i0,r2,i2,r8, i8, r10,i10} %2={r1,i1,r3,i3,r9, i9, r11,i11}
+;      %3={r4,i4,r6,i6,r12,i12,r14,i14} %4={r5,i5,r7,i7,r13,i13,r15,i15}
+; out: %1={r0,r1,r2,r3,r8, r9, r10,r11} %2={i0,i1,i2,i3,i8, i9, i10,i11}
+;      %3={r4,r5,r6,r7,r12,r13,r14,r15} %4={i4,i5,i6,i7,i12,i13,i14,i15} 
+%macro T8_2_AVX 6
+    vaddps    %6, %3, %4       ; {t1,t2,t3,t4}
+    vsubps    %3, %3, %4       ; {r5,i5,r7,i7}
+    vshufps   %4, %3, %3, 0xb1 ; {i5,r5,i7,r7}
+    vmulps    %3, %3, [ps_root2mppm] ; {-r5,i5,r7,-i7}
+    vmulps    %4, %4, [ps_root2]
+    vaddps    %3, %3, %4       ; {t8,t7,ta,t9}
+    vshufps   %4, %6, %3, 0x9c ; {t1,t4,t7,ta}
+    vshufps   %6, %6, %3, 0x36 ; {t3,t2,t9,t8}
+    vsubps    %3, %6, %4       ; {t6,t5,tc,tb}
+    vaddps    %6, %6, %4       ; {t1,t2,t9,ta}
+    vshufps   %5, %6, %3, 0x8d ; {t2,ta,t6,tc}
+    vshufps   %6, %6, %3, 0xd8 ; {t1,t9,t5,tb}
+    vsubps    %3, %1, %6       ; {r4,r5,r6,r7}
+    vaddps    %1, %1, %6       ; {r0,r1,r2,r3}
+    vsubps    %4, %2, %5       ; {i4,i5,i6,i7}
+    vaddps    %2, %2, %5       ; {i0,i1,i2,i3}
+%endmacro
+
 ; scheduled for cpu-bound sizes
 %macro PASS_SMALL 3 ; (to load m4-m7), wre, wim
 IF%1 mova    m4, Z(4)
@@ -275,19 +357,224 @@ IF%1 mova  Z(1), m3
 %endif
 %endmacro
 
+; scheduled for cpu-bound sizes
+%macro PASS_SMALL_AVX 3 ; (to load m4-m7), wre, wim
+IF%1 mova     m4, Z(4)
+IF%1 mova     m5, Z(5)
+    mova      m0, %2 ; wre
+    mova      m1, %3 ; wim
+    vmulps    m2, m4, m0 ; r2*wre
+IF%1 mova     m6, Z2(6)
+    vmulps    m3, m5, m1 ; i2*wim
+IF%1 mova     m7, Z2(7)
+    vmulps    m4, m4, m1 ; r2*wim
+    vmulps    m5, m5, m0 ; i2*wre
+    vaddps    m2, m2, m3 ; r2*wre + i2*wim
+    vmulps    m3, m1, m7 ; i3*wim
+    vsubps    m5, m5, m4 ; i2*wre - r2*wim
+    vmulps    m1, m1, m6 ; r3*wim
+    vmulps    m4, m0, m6 ; r3*wre
+    vmulps    m0, m0, m7 ; i3*wre
+    vsubps    m4, m4, m3 ; r3*wre - i3*wim
+    mova      m3, Z(0)
+    vaddps    m0, m0, m1 ; i3*wre + r3*wim
+    vsubps    m1, m4, m2 ; t3
+    vaddps    m4, m4, m2 ; t5
+    vsubps    m3, m3, m4 ; r2
+    vaddps    m4, m4, Z(0) ; r0
+    mova      m6, Z(2)
+    mova    Z(4), m3
+    mova    Z(0), m4
+    vsubps    m3, m5, m0 ; t4
+    vsubps    m4, m6, m3 ; r3
+    vaddps    m3, m3, m6 ; r1
+    mova   Z2(6), m4
+    mova    Z(2), m3
+    mova      m2, Z(3)
+    vaddps    m3, m5, m0 ; t6
+    vsubps    m2, m2, m1 ; i3
+    mova      m7, Z(1)
+    vaddps    m1, m1, Z(3) ; i1
+    mova   Z2(7), m2
+    mova    Z(3), m1
+    vsubps    m4, m7, m3 ; i2
+    vaddps    m3, m3, m7 ; i0
+    mova    Z(5), m4
+    mova    Z(1), m3
+%endmacro
+
+; scheduled to avoid store->load aliasing
+%macro PASS_BIG_AVX 1 ; (!interleave)
+    mova           m4, Z(4) ; r2
+    mova           m5, Z(5) ; i2
+    mova           m0, [wq] ; wre
+    mova           m1, [wq+o1q] ; wim
+    vmulps         m2, m4, m0 ; r2*wre
+    mova           m6, Z2(6) ; r3
+    vmulps         m3, m5, m1 ; i2*wim
+    mova           m7, Z2(7) ; i3
+    vmulps         m4, m4, m1 ; r2*wim
+    vmulps         m5, m5, m0 ; i2*wre
+    vaddps         m2, m2, m3 ; r2*wre + i2*wim
+    vmulps         m3, m1, m7 ; i3*wim
+    vmulps         m1, m1, m6 ; r3*wim
+    vsubps         m5, m5, m4 ; i2*wre - r2*wim
+    vmulps         m4, m0, m6 ; r3*wre
+    vmulps         m0, m0, m7 ; i3*wre
+    vsubps         m4, m4, m3 ; r3*wre - i3*wim
+    mova           m3, Z(0)
+    vaddps         m0, m0, m1 ; i3*wre + r3*wim
+    vsubps         m1, m4, m2 ; t3
+    vaddps         m4, m4, m2 ; t5
+    vsubps         m3, m3, m4 ; r2
+    vaddps         m4, m4, Z(0) ; r0
+    mova           m6, Z(2)
+    mova         Z(4), m3
+    mova         Z(0), m4
+    vsubps         m3, m5, m0 ; t4
+    vsubps         m4, m6, m3 ; r3
+    vaddps         m3, m3, m6 ; r1
+IF%1 mova       Z2(6), m4
+IF%1 mova        Z(2), m3
+    mova           m2, Z(3)
+    vaddps         m5, m5, m0 ; t6
+    vsubps         m2, m2, m1 ; i3
+    mova           m7, Z(1)
+    vaddps         m1, m1, Z(3) ; i1
+IF%1 mova       Z2(7), m2
+IF%1 mova        Z(3), m1
+    vsubps         m6, m7, m5 ; i2
+    vaddps         m5, m5, m7 ; i0
+IF%1 mova          Z(5), m6
+IF%1 mova          Z(1), m5
+%if %1==0
+    vunpckhps      m7, m3, m1
+    vunpcklps      m3, m3, m1
+    vunpckhps      m0, m4, m2
+    vunpcklps      m4, m4, m2
+    mova           m1, Z(0)
+    mova           m2, Z(4)
+    vextractf128   Z(2), m3, 0
+    vextractf128  ZH(2), m7, 0
+    vextractf128   Z(3), m3, 1
+    vextractf128  ZH(3), m7, 1
+    vextractf128  Z2(6), m4, 0
+    vextractf128 Z2H(6), m0, 0
+    vextractf128  Z2(7), m4, 1
+    vextractf128 Z2H(7), m0, 1
+    vunpckhps      m3, m1, m5
+    vunpcklps      m1, m1, m5
+    vunpckhps      m7, m2, m6
+    vunpcklps      m2, m2, m6
+    vextractf128   Z(0), m1, 0
+    vextractf128  ZH(0), m3, 0
+    vextractf128   Z(1), m1, 1
+    vextractf128  ZH(1), m3, 1
+    vextractf128   Z(4), m2, 0
+    vextractf128  ZH(4), m7, 0
+    vextractf128   Z(5), m2, 1
+    vextractf128  ZH(5), m7, 1
+%endif
+%endmacro
+
 %macro PUNPCK 3
     mova      %3, %1
     punpckldq %1, %2
     punpckhdq %3, %2
 %endmacro
 
-INIT_XMM
-%define mova movaps
-
 %define Z(x) [r0+mmsize*x]
 %define Z2(x) [r0+mmsize*x]
+%define ZH(x) [r0 + mmsize*x + mmsize/2]
+
+INIT_YMM
 
 align 16
+fft8_avx:
+    mova      m0, Z(0)
+    mova      m1, Z(1)
+    T8_AVX    m0, m1, m2, m3, m4
+    mova      Z(0), m0
+    mova      Z(1), m1
+    ret
+
+
+align 16
+fft16_avx:
+    mova       m2, Z(2)
+    mova       m3, Z(3)
+    T4_2_AVX   m2, m3, m7
+
+    mova       m0, Z(0)
+    mova       m1, Z(1)
+    T8_AVX     m0, m1, m4, m5, m7
+
+    mova       m4, [ps_cos16_1]
+    mova       m5, [ps_cos16_2]
+    vmulps     m6, m2, m4
+    vmulps     m7, m3, m5
+    vaddps     m7, m7, m6
+    vmulps     m2, m2, m5
+    vmulps     m3, m3, m4
+    vsubps     m3, m3, m2
+    vblendps   m2, m7, m3, 0xf0
+    vperm2f128 m3, m7, m3, 0x21
+    vaddps     m4, m2, m3
+    vsubps     m2, m3, m2
+    vperm2f128 m2, m2, m2, 0x01
+    vsubps     m3, m1, m2
+    vaddps     m1, m1, m2
+    vsubps     m5, m0, m4
+    vaddps     m0, m0, m4
+    vextractf128   Z(0), m0, 0
+    vextractf128  ZH(0), m1, 0
+    vextractf128   Z(1), m0, 1
+    vextractf128  ZH(1), m1, 1
+    vextractf128   Z(2), m5, 0
+    vextractf128  ZH(2), m3, 0
+    vextractf128   Z(3), m5, 1
+    vextractf128  ZH(3), m3, 1
+    ret
+
+align 16
+fft32_avx:
+    call fft16_avx
+
+    ; Z(4)  {r0,i0,r2, i2, r4, i4, r6, i6}
+    ; Z(5)  {r1,i1,r3, i3, r5, i5, r7, i7}
+    ; Z(6)  {r8,i8,r10,i10,r12,i12,r14,i14}
+    ; Z(7)  {r9,i9,r11,i11,r13,i13,r15,i15}
+    vinsertf128 m0, m0, Z(4), 0
+    vinsertf128 m0, m0, Z(6), 1
+    vinsertf128 m1, m1, Z(5), 0
+    vinsertf128 m1, m1, Z(7), 1
+
+    T4_2_AVX    m0, m1, m4
+
+    vinsertf128 m2, m2, ZH(4), 0
+    vinsertf128 m2, m2, ZH(6), 1
+    vinsertf128 m3, m3, ZH(5), 0
+    vinsertf128 m3, m3, ZH(7), 1
+
+    T8_2_AVX    m0, m1, m2, m3, m4, m6
+    ; m0={r0,r1,r2,r3,r8, r9, r10,r11} m1={i0,i1,i2,i3,i8, i9, i10,i11}
+    ; m2={r4,r5,r6,r7,r12,r13,r14,r15} m3={i4,i5,i6,i7,i12,i13,i14,i15} 
+
+    vperm2f128  m4, m0, m2, 0x20
+    vperm2f128  m5, m1, m3, 0x20
+    vperm2f128  m6, m0, m2, 0x31
+    vperm2f128  m7, m1, m3, 0x31
+
+    PASS_SMALL_AVX 0, [cos_32], [cos_32+32]
+
+    add r0, mmsize*2
+    ret
+
+INIT_XMM
+%define mova movaps
+
+align 16
+fft4_avx:
 fft4_sse:
     mova     m0, Z(0)
     mova     m1, Z(1)
@@ -406,6 +693,8 @@ FFT48_3DN _3dn
 
 %define Z(x) [zq + o1q*(x&6) + mmsize*(x&1)]
 %define Z2(x) [zq + o3q + mmsize*(x&1)]
+%define ZH(x) [zq + o1q*(x&6) + mmsize*(x&1) + mmsize/2]
+%define Z2H(x) [zq + o3q + mmsize*(x&1) + mmsize/2]
 
 %macro DECL_PASS 2+ ; name, payload
 align 16
@@ -423,6 +712,10 @@ DEFINE_ARGS z, w, n, o1, o3
     rep ret
 %endmacro
 
+INIT_YMM
+DECL_PASS pass_avx, PASS_BIG_AVX 1
+DECL_PASS pass_interleave_avx, PASS_BIG_AVX 0
+
 INIT_XMM
 %define mova movaps
 DECL_PASS pass_sse, PASS_BIG 1
@@ -456,10 +749,15 @@ DECL_PASS pass_interleave_3dn, PASS_BIG 0
 %endmacro ; FFT_DISPATCH
 
 %macro DECL_FFT 2-3 ; nbits, cpu, suffix
+%xdefine val %1
 %xdefine list_of_fft fft4%2 SECTION_REL, fft8%2 SECTION_REL
-%if %1==5
+%if %1>=5
 %xdefine list_of_fft list_of_fft, fft16%2 SECTION_REL
 %endif
+%if %1>=6
+%xdefine list_of_fft list_of_fft, fft32%3%2 SECTION_REL
+%xdefine val (%1 - 1)
+%endif
 
 %assign n 1<<%1
 %rep 17-%1
@@ -470,11 +768,11 @@ DECL_PASS pass_interleave_3dn, PASS_BIG 0
 align 16
 fft %+ n %+ %3%2:
     call fft %+ n2 %+ %2
-    add r0, n*4 - (n&(-2<<%1))
+    add r0, n*4 - (n&(-2<<val))
     call fft %+ n4 %+ %2
-    add r0, n*2 - (n2&(-2<<%1))
+    add r0, n*2 - (n2&(-2<<val))
     call fft %+ n4 %+ %2
-    sub r0, n*6 + (n2&(-2<<%1))
+    sub r0, n*6 + (n2&(-2<<val))
     lea r1, [cos_ %+ n]
     mov r2d, n4/2
     jmp pass%3%2
@@ -495,6 +793,8 @@ cglobal fft_dispatch%3%2, 2,5,8, z, nbits
     RET
 %endmacro ; DECL_FFT
 
+DECL_FFT 6, _avx
+DECL_FFT 5, _avx, _interleave
 DECL_FFT 5, _sse
 DECL_FFT 5, _sse, _interleave
 DECL_FFT 4, _3dn
diff --git a/libavcodec/x86/fft_sse.c b/libavcodec/x86/fft_sse.c
index 9de4e4c..e1dccf9 100644
--- a/libavcodec/x86/fft_sse.c
+++ b/libavcodec/x86/fft_sse.c
@@ -28,6 +28,18 @@ DECLARE_ASM_CONST(16, int, ff_m1m1m1m1)[4] =
 
 void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
 void ff_fft_dispatch_interleave_sse(FFTComplex *z, int nbits);
+void ff_fft_dispatch_interleave_avx(FFTComplex *z, int nbits);
+
+void ff_fft_calc_avx(FFTContext *s, FFTComplex *z)
+{
+    int n = 1 << s->nbits;
+
+    if(n > 32)
+        ff_fft_dispatch_interleave_avx(z, s->nbits);
+    else
+        ff_fft_calc_sse(s, z);
+
+}
 
 void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
 {
diff --git a/libavcodec/x86/x86inc.asm b/libavcodec/x86/x86inc.asm
index b7d1774..b4ed538 100644
--- a/libavcodec/x86/x86inc.asm
+++ b/libavcodec/x86/x86inc.asm
@@ -538,6 +538,23 @@ SECTION .note.GNU-stack noalloc noexec nowrite progbits
     %endrep
 %endmacro
 
+%macro INIT_YMM 0
+    %define RESET_MM_PERMUTATION INIT_YMM
+    %define mmsize 32
+    %define num_mmregs 8
+    %ifdef ARCH_X86_64
+    %define num_mmregs 16
+    %endif
+    %define mova vmovaps
+    %define movu vmovups
+    %assign %%i 0
+    %rep num_mmregs
+    CAT_XDEFINE m, %%i, ymm %+ %%i
+    CAT_XDEFINE nymm, %%i, %%i
+    %assign %%i %%i+1
+    %endrep
+%endmacro
+
 INIT_MMX
 
 ; I often want to use macros that permute their arguments. e.g. there's no
diff --git a/libavutil/mem.c b/libavutil/mem.c
index 7a54bd0..7e3f9f0 100644
--- a/libavutil/mem.c
+++ b/libavutil/mem.c
@@ -69,21 +69,21 @@ void *av_malloc(FF_INTERNAL_MEM_TYPE size)
 #endif
 
     /* let's disallow possible ambiguous cases */
-    if(size > (INT_MAX-16) )
+    if(size > (INT_MAX-32) )
         return NULL;
 
 #if CONFIG_MEMALIGN_HACK
-    ptr = malloc(size+16);
+    ptr = malloc(size+32);
     if(!ptr)
         return ptr;
-    diff= ((-(long)ptr - 1)&15) + 1;
+    diff= ((-(long)ptr - 1)&31) + 1;
     ptr = (char*)ptr + diff;
     ((char*)ptr)[-1]= diff;
 #elif HAVE_POSIX_MEMALIGN
-    if (posix_memalign(&ptr,16,size))
+    if (posix_memalign(&ptr,32,size))
         ptr = NULL;
 #elif HAVE_MEMALIGN
-    ptr = memalign(16,size);
+    ptr = memalign(32,size);
     /* Why 64?
        Indeed, we should align it:
          on 4 for 386
@@ -93,10 +93,8 @@ void *av_malloc(FF_INTERNAL_MEM_TYPE size)
        Because L1 and L2 caches are aligned on those values.
        But I don't want to code such logic here!
      */
-     /* Why 16?
-        Because some CPUs need alignment, for example SSE2 on P4, & most RISC CPUs
-        it will just trigger an exception and the unaligned load will be done in the
-        exception handler or it will just segfault (SSE2 on P4).
+     /* Why 32?
+        For AVX ASM. SSE and NEON need only 16.
         Why not larger? Because I did not see a difference in benchmarks ...
      */
      /* benchmarks with P3
-- 
1.7.1
