$subj, to make easier to do an AVX implementation.

-Vitor
>From e989e04199ef67247f2c07dc4d16dbb0bd675e06 Mon Sep 17 00:00:00 2001
From: Vitor Sessak <[email protected]>
Date: Fri, 13 May 2011 18:56:02 +0200
Subject: [PATCH 1/3] Port SSE 32-point DCT to YASM

---
 libavcodec/x86/Makefile      |    3 +-
 libavcodec/x86/dct32_sse.asm |  287 ++++++++++++++++++++++++++++++++++++++++
 libavcodec/x86/dct32_sse.c   |  296 ------------------------------------------
 3 files changed, 289 insertions(+), 297 deletions(-)
 create mode 100644 libavcodec/x86/dct32_sse.asm
 delete mode 100644 libavcodec/x86/dct32_sse.c

diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index f8d456d..ea0b16f 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -7,6 +7,8 @@ YASM-OBJS-FFT-$(HAVE_SSE)              += x86/fft_sse.o
 YASM-OBJS-$(CONFIG_FFT)                += x86/fft_mmx.o                 \
                                           $(YASM-OBJS-FFT-yes)
 
+YASM-OBJS-$(CONFIG_DCT)                += x86/dct32_sse.o
+
 MMX-OBJS-$(CONFIG_H264DSP)             += x86/h264dsp_mmx.o
 YASM-OBJS-$(CONFIG_H264DSP)            += x86/h264_deblock.o            \
                                           x86/h264_weight.o             \
@@ -57,4 +59,3 @@ OBJS-$(HAVE_MMX)                       += x86/dnxhd_mmx.o               \
                                           x86/mpegvideo_mmx.o           \
                                           x86/simple_idct_mmx.o         \
 
-MMX-OBJS-$(CONFIG_DCT)                 += x86/dct32_sse.o
diff --git a/libavcodec/x86/dct32_sse.asm b/libavcodec/x86/dct32_sse.asm
new file mode 100644
index 0000000..27ea943
--- /dev/null
+++ b/libavcodec/x86/dct32_sse.asm
@@ -0,0 +1,287 @@
+;******************************************************************************
+;* 32 point SSE-optimized DCT transform
+;* Copyright (c) 2010 Vitor Sessak
+;*
+;* This file is part of Libav.
+;*
+;* Libav is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* Libav is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with Libav; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "x86inc.asm"
+
+SECTION_RODATA
+
+align 32
+ps_cos_vec: dd   0.500603,  0.505471,  0.515447,  0.531043
+            dd   0.553104,  0.582935,  0.622504,  0.674808
+            dd  -1.169440, -0.972568, -0.839350, -0.744536
+            dd -10.190008, -3.407609, -2.057781, -1.484165
+            dd   0.502419,  0.522499,  0.566944,  0.646822
+            dd   0.788155,  1.060678,  1.722447,  5.101149
+            dd   0.509796,  0.601345,  0.899976,  2.562916
+            dd   1.000000,  1.000000,  1.306563,  0.541196
+            dd   1.000000,  0.707107,  1.000000, -0.707107
+
+
+ps_p1p1m1m1: dd 0, 0, 0x80000000, 0x80000000
+
+%macro BUTTERFLY 4
+    movaps %4, %1
+    subps  %1, %2
+    addps  %2, %4
+    mulps  %1, %3
+%endmacro
+
+%macro BUTTERFLY0 5
+    movaps %4, %1
+    shufps %1, %1, %5
+    xorps  %4, %2
+    addps  %1, %4
+    mulps  %1, %3
+%endmacro
+
+%macro BUTTERFLY2 4
+    BUTTERFLY0 %1, %2, %3, %4, 0x1b
+%endmacro
+
+%macro BUTTERFLY3 4
+    BUTTERFLY0 %1, %2, %3, %4, 0xb1
+%endmacro
+
+section .text align=16
+cglobal dct32_float_sse, 2,3,8, out, in, tmp
+    ; pass 1
+
+    movaps xmm0, [inq+0]
+    movaps xmm1, [inq+112]
+    shufps xmm1, xmm1, 0x1b
+    BUTTERFLY     xmm0, xmm1, [ps_cos_vec], xmm3
+
+    movaps xmm7, [inq+64]
+    movaps xmm4, [inq+48]
+    shufps xmm4, xmm4, 0x1b
+    BUTTERFLY     xmm7, xmm4, [ps_cos_vec+48], xmm3
+
+
+    ; pass 2
+    movaps xmm2, [ps_cos_vec+64]
+    BUTTERFLY     xmm1, xmm4, xmm2, xmm3
+    movaps [outq+48], xmm1
+    movaps [outq+0], xmm4
+
+    ; pass 1
+    movaps xmm1, [inq+16]
+    movaps xmm6, [inq+96]
+    shufps xmm6, xmm6, 0x1b
+    BUTTERFLY     xmm1, xmm6, [ps_cos_vec+16], xmm3
+
+    movaps xmm4, [inq+80]
+    movaps xmm5, [inq+32]
+    shufps xmm5, xmm5, 0x1b
+    BUTTERFLY     xmm4, xmm5, [ps_cos_vec+32], xmm3
+
+    ; pass 2
+    BUTTERFLY     xmm0, xmm7, xmm2, xmm3
+
+    movaps xmm2, [ps_cos_vec+80]
+    BUTTERFLY     xmm6, xmm5, xmm2, xmm3
+
+    BUTTERFLY     xmm1, xmm4, xmm2, xmm3
+
+    ; pass 3
+    movaps xmm2, [ps_cos_vec+96]
+    shufps xmm1, xmm1, 0x1b
+    BUTTERFLY     xmm0, xmm1, xmm2, xmm3
+    movaps [outq+112], xmm0
+    movaps [outq+96], xmm1
+
+    movaps xmm0, [outq+0]
+    shufps xmm5, xmm5, 0x1b
+    BUTTERFLY     xmm0, xmm5, xmm2, xmm3
+
+    movaps xmm1, [outq+48]
+    shufps xmm6, xmm6, 0x1b
+    BUTTERFLY     xmm1, xmm6, xmm2, xmm3
+    movaps [outq+48], xmm1
+
+    shufps xmm4, xmm4, 0x1b
+    BUTTERFLY     xmm7, xmm4, xmm2, xmm3
+
+    ; pass 4
+    movaps xmm3, [ps_p1p1m1m1+0]
+    movaps xmm2, [ps_cos_vec+112]
+
+    BUTTERFLY2     xmm5, xmm3, xmm2, xmm1
+
+    BUTTERFLY2     xmm0, xmm3, xmm2, xmm1
+    movaps [outq+16], xmm0
+
+    BUTTERFLY2     xmm6, xmm3, xmm2, xmm1
+    movaps [outq+32], xmm6
+
+    movaps xmm0, [outq+48]
+    BUTTERFLY2     xmm0, xmm3, xmm2, xmm1
+    movaps [outq+48], xmm0
+
+    BUTTERFLY2     xmm4, xmm3, xmm2, xmm1
+
+    BUTTERFLY2     xmm7, xmm3, xmm2, xmm1
+
+    movaps xmm6, [outq+96]
+    BUTTERFLY2     xmm6, xmm3, xmm2, xmm1
+
+    movaps xmm0, [outq+112]
+    BUTTERFLY2     xmm0, xmm3, xmm2, xmm1
+
+    ; pass 5
+    movaps xmm2, [ps_cos_vec+128]
+    shufps xmm3, xmm3, 0xcc
+
+    BUTTERFLY3     xmm5, xmm3, xmm2, xmm1
+    movaps [outq+0], xmm5
+
+    movaps xmm1, [outq+16]
+    BUTTERFLY3     xmm1, xmm3, xmm2, xmm5
+    movaps [outq+16], xmm1
+
+    BUTTERFLY3     xmm4, xmm3, xmm2, xmm5
+    movaps [outq+64], xmm4
+
+    BUTTERFLY3     xmm7, xmm3, xmm2, xmm5
+    movaps [outq+80], xmm7
+
+    movaps xmm5, [outq+32]
+    BUTTERFLY3     xmm5, xmm3, xmm2, xmm7
+    movaps [outq+32], xmm5
+
+    movaps xmm4, [outq+48]
+    BUTTERFLY3     xmm4, xmm3, xmm2, xmm7
+    movaps [outq+48], xmm4
+
+    BUTTERFLY3     xmm6, xmm3, xmm2, xmm7
+    movaps [outq+96], xmm6
+
+    BUTTERFLY3     xmm0, xmm3, xmm2, xmm7
+    movaps [outq+112], xmm0
+
+
+    ; pass 6, no SIMD...
+    movss xmm3, [outq+56]
+    mov     tmpd, [outq+4]
+    addss xmm3, [outq+60]
+    movss xmm7, [outq+72]
+    addss xmm4, xmm3
+    movss xmm2, [outq+52]
+    addss xmm2, xmm3
+    movss xmm3, [outq+24]
+    addss xmm3, [outq+28]
+    addss xmm7, [outq+76]
+    addss xmm1, xmm3
+    addss xmm5, xmm4
+    movss [outq+16], xmm1
+    movss xmm1, [outq+20]
+    addss xmm1, xmm3
+    movss xmm3, [outq+40]
+    movss [outq+48], xmm1
+    addss xmm3, [outq+44]
+    movss xmm1, [outq+20]
+    addss xmm4, xmm3
+    addss xmm3, xmm2
+    addss xmm1, [outq+28]
+    movss [outq+40], xmm3
+    addss xmm2, [outq+36]
+    movss xmm3, [outq+8]
+    movss [outq+56], xmm2
+    addss xmm3, [outq+12]
+    movss [outq+8], xmm5
+    movss [outq+32], xmm3
+    movss xmm2, [outq+52]
+    movss xmm3, [outq+80]
+    movss xmm5, [outq+120]
+    movss [outq+80], xmm1
+    movss [outq+24], xmm4
+    addss xmm5, [outq+124]
+    movss xmm1, [outq+64]
+    addss xmm2, [outq+60]
+    addss xmm0, xmm5
+    addss xmm5, [outq+116]
+    mov  [outq+64], tmpd
+    addss xmm6, xmm0
+    addss xmm1, xmm6
+    mov  tmpd, [outq+12]
+    movss [outq+4], xmm1
+    movss xmm1, [outq+88]
+    mov  [outq+96], tmpd
+    addss xmm1, [outq+92]
+    movss xmm4, [outq+104]
+    mov  tmpd, [outq+28]
+    addss xmm4, [outq+108]
+    addss xmm0, xmm4
+    addss xmm3, xmm1
+    addss xmm1, [outq+84]
+    addss xmm4, xmm5
+    addss xmm6, xmm3
+    addss xmm3, xmm0
+    addss xmm0, xmm7
+    addss xmm5, [outq+100]
+    addss xmm7, xmm4
+    mov  [outq+112], tmpd
+    movss [outq+28], xmm0
+    movss xmm0, [outq+36]
+    movss [outq+36], xmm7
+    addss xmm4, xmm1
+    movss xmm7, [outq+116]
+    addss xmm0, xmm2
+    addss xmm7, [outq+124]
+    movss [outq+72], xmm0
+    movss xmm0, [outq+44]
+    movss [outq+12], xmm6
+    movss [outq+20], xmm3
+    addss xmm2, xmm0
+    movss [outq+44], xmm4
+    movss [outq+88], xmm2
+    addss xmm0, [outq+60]
+    mov  tmpd, [outq+60]
+    mov  [outq+120], tmpd
+    movss [outq+104], xmm0
+    addss xmm1, xmm5
+    addss xmm5, [outq+68]
+    movss [outq+52], xmm1
+    movss [outq+60], xmm5
+    movss xmm1, [outq+68]
+    movss xmm5, [outq+100]
+    addss xmm5, xmm7
+    addss xmm7, [outq+108]
+    addss xmm1, xmm5
+    movss xmm2, [outq+84]
+    addss xmm2, [outq+92]
+    addss xmm5, xmm2
+    movss [outq+68], xmm1
+    addss xmm2, xmm7
+    movss xmm1, [outq+76]
+    movss [outq+84], xmm2
+    movss [outq+76], xmm5
+    movss xmm2, [outq+108]
+    addss xmm7, xmm1
+    addss xmm2, [outq+124]
+    addss xmm1, xmm2
+    addss xmm2, [outq+92]
+    movss [outq+100], xmm1
+    movss [outq+108], xmm2
+    movss xmm2, [outq+92]
+    movss [outq+92], xmm7
+    addss xmm2, [outq+124]
+    movss [outq+116], xmm2
+    REP_RET
diff --git a/libavcodec/x86/dct32_sse.c b/libavcodec/x86/dct32_sse.c
deleted file mode 100644
index 5303c6d..0000000
--- a/libavcodec/x86/dct32_sse.c
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * 32 point SSE-optimized DCT transform
- * Copyright (c) 2010 Vitor Sessak
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <stdint.h>
-
-#include "libavutil/x86_cpu.h"
-#include "libavutil/mem.h"
-#include "libavcodec/dsputil.h"
-#include "fft.h"
-
-DECLARE_ALIGNED(16, static const float, b1)[] = {
-     0.500603,  0.505471,  0.515447,  0.531043,
-     0.553104,  0.582935,  0.622504,  0.674808,
-    -1.169440, -0.972568, -0.839350, -0.744536,
-   -10.190008, -3.407609, -2.057781, -1.484165,
-     0.502419,  0.522499,  0.566944,  0.646822,
-     0.788155,  1.060678,  1.722447,  5.101149,
-     0.509796,  0.601345,  0.899976,  2.562916,
-     1.000000,  1.000000,  1.306563,  0.541196,
-     1.000000,  0.707107,  1.000000, -0.707107
-};
-
-DECLARE_ALIGNED(16, static const int32_t, smask)[4] = {
-    0, 0, 0x80000000, 0x80000000
-};
-
-/* butterfly operator */
-#define BUTTERFLY(a,b,c,tmp)                            \
-    "movaps  %%" #a    ", %%" #tmp  "             \n\t" \
-    "subps   %%" #b    ", %%" #a    "             \n\t" \
-    "addps   %%" #tmp  ", %%" #b    "             \n\t" \
-    "mulps     " #c    ", %%" #a    "             \n\t"
-
-///* Same as BUTTERFLY when vectors a and b overlap */
-#define BUTTERFLY0(val, mask, cos, tmp, shuf)                            \
-    "movaps  %%" #val  ", %%" #tmp  "             \n\t"                  \
-    "shufps    " #shuf ", %%" #val  ",%%" #val "  \n\t"                  \
-    "xorps   %%" #mask ", %%" #tmp  "             \n\t" /* flip signs */ \
-    "addps   %%" #tmp  ", %%" #val  "             \n\t"                  \
-    "mulps   %%" #cos  ", %%" #val  "             \n\t"
-
-#define BUTTERFLY2(val, mask, cos, tmp) BUTTERFLY0(val, mask, cos, tmp, $0x1b)
-#define BUTTERFLY3(val, mask, cos, tmp) BUTTERFLY0(val, mask, cos, tmp, $0xb1)
-
-void ff_dct32_float_sse(FFTSample *out, const FFTSample *in)
-{
-    int32_t tmp1 = 0;
-    __asm__ volatile(
-        /* pass 1 */
-
-        "movaps    (%4), %%xmm0           \n\t"
-        "movaps 112(%4), %%xmm1           \n\t"
-        "shufps   $0x1b, %%xmm1, %%xmm1   \n\t"
-        BUTTERFLY(xmm0, xmm1, (%2), xmm3)
-
-        "movaps  64(%4), %%xmm7           \n\t"
-        "movaps  48(%4), %%xmm4           \n\t"
-        "shufps   $0x1b, %%xmm4, %%xmm4   \n\t"
-        BUTTERFLY(xmm7, xmm4, 48(%2), xmm3)
-
-
-        /* pass 2 */
-        "movaps  64(%2), %%xmm2           \n\t"
-        BUTTERFLY(xmm1, xmm4, %%xmm2, xmm3)
-        "movaps  %%xmm1, 48(%1)           \n\t"
-        "movaps  %%xmm4, (%1)             \n\t"
-
-        /* pass 1 */
-        "movaps  16(%4), %%xmm1           \n\t"
-        "movaps  96(%4), %%xmm6           \n\t"
-        "shufps   $0x1b, %%xmm6, %%xmm6   \n\t"
-        BUTTERFLY(xmm1, xmm6, 16(%2), xmm3)
-
-        "movaps  80(%4), %%xmm4           \n\t"
-        "movaps  32(%4), %%xmm5           \n\t"
-        "shufps   $0x1b, %%xmm5, %%xmm5   \n\t"
-        BUTTERFLY(xmm4, xmm5, 32(%2), xmm3)
-
-        /* pass 2 */
-        BUTTERFLY(xmm0, xmm7, %%xmm2, xmm3)
-
-        "movaps  80(%2), %%xmm2           \n\t"
-        BUTTERFLY(xmm6, xmm5, %%xmm2, xmm3)
-
-        BUTTERFLY(xmm1, xmm4, %%xmm2, xmm3)
-
-        /* pass 3 */
-        "movaps  96(%2), %%xmm2           \n\t"
-        "shufps   $0x1b, %%xmm1, %%xmm1   \n\t"
-        BUTTERFLY(xmm0, xmm1, %%xmm2, xmm3)
-        "movaps  %%xmm0, 112(%1)          \n\t"
-        "movaps  %%xmm1,  96(%1)          \n\t"
-
-        "movaps   0(%1), %%xmm0           \n\t"
-        "shufps   $0x1b, %%xmm5, %%xmm5   \n\t"
-        BUTTERFLY(xmm0, xmm5, %%xmm2, xmm3)
-
-        "movaps  48(%1), %%xmm1           \n\t"
-        "shufps   $0x1b, %%xmm6, %%xmm6   \n\t"
-        BUTTERFLY(xmm1, xmm6, %%xmm2, xmm3)
-        "movaps  %%xmm1,  48(%1)          \n\t"
-
-        "shufps   $0x1b, %%xmm4, %%xmm4   \n\t"
-        BUTTERFLY(xmm7, xmm4, %%xmm2, xmm3)
-
-        /* pass 4 */
-        "movaps    (%3), %%xmm3           \n\t"
-        "movaps 112(%2), %%xmm2           \n\t"
-
-        BUTTERFLY2(xmm5, xmm3, xmm2, xmm1)
-
-        BUTTERFLY2(xmm0, xmm3, xmm2, xmm1)
-        "movaps  %%xmm0, 16(%1)           \n\t"
-
-        BUTTERFLY2(xmm6, xmm3, xmm2, xmm1)
-        "movaps  %%xmm6, 32(%1)           \n\t"
-
-        "movaps  48(%1), %%xmm0           \n\t"
-        BUTTERFLY2(xmm0, xmm3, xmm2, xmm1)
-        "movaps  %%xmm0, 48(%1)           \n\t"
-
-        BUTTERFLY2(xmm4, xmm3, xmm2, xmm1)
-
-        BUTTERFLY2(xmm7, xmm3, xmm2, xmm1)
-
-        "movaps  96(%1), %%xmm6           \n\t"
-        BUTTERFLY2(xmm6, xmm3, xmm2, xmm1)
-
-        "movaps 112(%1), %%xmm0           \n\t"
-        BUTTERFLY2(xmm0, xmm3, xmm2, xmm1)
-
-        /* pass 5 */
-        "movaps 128(%2), %%xmm2           \n\t"
-        "shufps   $0xCC, %%xmm3,%%xmm3    \n\t"
-
-        BUTTERFLY3(xmm5, xmm3, xmm2, xmm1)
-        "movaps  %%xmm5, (%1)             \n\t"
-
-        "movaps  16(%1), %%xmm1           \n\t"
-        BUTTERFLY3(xmm1, xmm3, xmm2, xmm5)
-        "movaps  %%xmm1, 16(%1)           \n\t"
-
-        BUTTERFLY3(xmm4, xmm3, xmm2, xmm5)
-        "movaps  %%xmm4, 64(%1)           \n\t"
-
-        BUTTERFLY3(xmm7, xmm3, xmm2, xmm5)
-        "movaps  %%xmm7, 80(%1)           \n\t"
-
-        "movaps  32(%1), %%xmm5           \n\t"
-        BUTTERFLY3(xmm5, xmm3, xmm2, xmm7)
-        "movaps  %%xmm5, 32(%1)           \n\t"
-
-        "movaps  48(%1), %%xmm4           \n\t"
-        BUTTERFLY3(xmm4, xmm3, xmm2, xmm7)
-        "movaps  %%xmm4, 48(%1)           \n\t"
-
-        BUTTERFLY3(xmm6, xmm3, xmm2, xmm7)
-        "movaps  %%xmm6, 96(%1)           \n\t"
-
-        BUTTERFLY3(xmm0, xmm3, xmm2, xmm7)
-        "movaps  %%xmm0, 112(%1)          \n\t"
-
-
-        /* pass 6, no SIMD... */
-        "movss    56(%1),  %%xmm3           \n\t"
-        "movl      4(%1),      %0           \n\t"
-        "addss    60(%1),  %%xmm3           \n\t"
-        "movss    72(%1),  %%xmm7           \n\t"
-        "addss    %%xmm3,  %%xmm4           \n\t"
-        "movss    52(%1),  %%xmm2           \n\t"
-        "addss    %%xmm3,  %%xmm2           \n\t"
-        "movss    24(%1),  %%xmm3           \n\t"
-        "addss    28(%1),  %%xmm3           \n\t"
-        "addss    76(%1),  %%xmm7           \n\t"
-        "addss    %%xmm3,  %%xmm1           \n\t"
-        "addss    %%xmm4,  %%xmm5           \n\t"
-        "movss    %%xmm1,  16(%1)           \n\t"
-        "movss    20(%1),  %%xmm1           \n\t"
-        "addss    %%xmm3,  %%xmm1           \n\t"
-        "movss    40(%1),  %%xmm3           \n\t"
-        "movss    %%xmm1,  48(%1)           \n\t"
-        "addss    44(%1),  %%xmm3           \n\t"
-        "movss    20(%1),  %%xmm1           \n\t"
-        "addss    %%xmm3,  %%xmm4           \n\t"
-        "addss    %%xmm2,  %%xmm3           \n\t"
-        "addss    28(%1),  %%xmm1           \n\t"
-        "movss    %%xmm3,  40(%1)           \n\t"
-        "addss    36(%1),  %%xmm2           \n\t"
-        "movss     8(%1),  %%xmm3           \n\t"
-        "movss    %%xmm2,  56(%1)           \n\t"
-        "addss    12(%1),  %%xmm3           \n\t"
-        "movss    %%xmm5,   8(%1)           \n\t"
-        "movss    %%xmm3,  32(%1)           \n\t"
-        "movss    52(%1),  %%xmm2           \n\t"
-        "movss    80(%1),  %%xmm3           \n\t"
-        "movss   120(%1),  %%xmm5           \n\t"
-        "movss    %%xmm1,  80(%1)           \n\t"
-        "movss    %%xmm4,  24(%1)           \n\t"
-        "addss   124(%1),  %%xmm5           \n\t"
-        "movss    64(%1),  %%xmm1           \n\t"
-        "addss    60(%1),  %%xmm2           \n\t"
-        "addss    %%xmm5,  %%xmm0           \n\t"
-        "addss   116(%1),  %%xmm5           \n\t"
-        "movl         %0,  64(%1)           \n\t"
-        "addss    %%xmm0,  %%xmm6           \n\t"
-        "addss    %%xmm6,  %%xmm1           \n\t"
-        "movl     12(%1),      %0           \n\t"
-        "movss    %%xmm1,   4(%1)           \n\t"
-        "movss    88(%1),  %%xmm1           \n\t"
-        "movl         %0,  96(%1)           \n\t"
-        "addss    92(%1),  %%xmm1           \n\t"
-        "movss   104(%1),  %%xmm4           \n\t"
-        "movl     28(%1),      %0           \n\t"
-        "addss   108(%1),  %%xmm4           \n\t"
-        "addss    %%xmm4,  %%xmm0           \n\t"
-        "addss    %%xmm1,  %%xmm3           \n\t"
-        "addss    84(%1),  %%xmm1           \n\t"
-        "addss    %%xmm5,  %%xmm4           \n\t"
-        "addss    %%xmm3,  %%xmm6           \n\t"
-        "addss    %%xmm0,  %%xmm3           \n\t"
-        "addss    %%xmm7,  %%xmm0           \n\t"
-        "addss   100(%1),  %%xmm5           \n\t"
-        "addss    %%xmm4,  %%xmm7           \n\t"
-        "movl         %0, 112(%1)           \n\t"
-        "movss    %%xmm0,  28(%1)           \n\t"
-        "movss    36(%1),  %%xmm0           \n\t"
-        "movss    %%xmm7,  36(%1)           \n\t"
-        "addss    %%xmm1,  %%xmm4           \n\t"
-        "movss   116(%1),  %%xmm7           \n\t"
-        "addss    %%xmm2,  %%xmm0           \n\t"
-        "addss   124(%1),  %%xmm7           \n\t"
-        "movss    %%xmm0,  72(%1)           \n\t"
-        "movss    44(%1),  %%xmm0           \n\t"
-        "movss    %%xmm6,  12(%1)           \n\t"
-        "movss    %%xmm3,  20(%1)           \n\t"
-        "addss    %%xmm0,  %%xmm2           \n\t"
-        "movss    %%xmm4,  44(%1)           \n\t"
-        "movss    %%xmm2,  88(%1)           \n\t"
-        "addss    60(%1),  %%xmm0           \n\t"
-        "movl     60(%1),      %0           \n\t"
-        "movl         %0, 120(%1)           \n\t"
-        "movss    %%xmm0, 104(%1)           \n\t"
-        "addss    %%xmm5,  %%xmm1           \n\t"
-        "addss    68(%1),  %%xmm5           \n\t"
-        "movss    %%xmm1,  52(%1)           \n\t"
-        "movss    %%xmm5,  60(%1)           \n\t"
-        "movss    68(%1),  %%xmm1           \n\t"
-        "movss   100(%1),  %%xmm5           \n\t"
-        "addss    %%xmm7,  %%xmm5           \n\t"
-        "addss   108(%1),  %%xmm7           \n\t"
-        "addss    %%xmm5,  %%xmm1           \n\t"
-        "movss    84(%1),  %%xmm2           \n\t"
-        "addss    92(%1),  %%xmm2           \n\t"
-        "addss    %%xmm2,  %%xmm5           \n\t"
-        "movss    %%xmm1,  68(%1)           \n\t"
-        "addss    %%xmm7,  %%xmm2           \n\t"
-        "movss    76(%1),  %%xmm1           \n\t"
-        "movss    %%xmm2,  84(%1)           \n\t"
-        "movss    %%xmm5,  76(%1)           \n\t"
-        "movss   108(%1),  %%xmm2           \n\t"
-        "addss    %%xmm1,  %%xmm7           \n\t"
-        "addss   124(%1),  %%xmm2           \n\t"
-        "addss    %%xmm2,  %%xmm1           \n\t"
-        "addss    92(%1),  %%xmm2           \n\t"
-        "movss    %%xmm1, 100(%1)           \n\t"
-        "movss    %%xmm2, 108(%1)           \n\t"
-        "movss    92(%1),  %%xmm2           \n\t"
-        "movss    %%xmm7,  92(%1)           \n\t"
-        "addss   124(%1),  %%xmm2           \n\t"
-        "movss    %%xmm2, 116(%1)           \n\t"
-        :"+&r"(tmp1)
-        :"r"(out), "r"(b1), "r"(smask), "r"(in)
-        :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",
-                      "%xmm4", "%xmm5", "%xmm6", "%xmm7",)
-         "memory"
-        );
-}
-
-- 
1.7.4.1

_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel

Reply via email to