On Mon, 22 Aug 2022, Hubert Mazur wrote:

Provide an optimized implementation of vsse16 for arm64.

Performance comparison tests are shown below.
- vsse_0_c: 254.4
- vsse_0_neon: 64.7

Benchmarks and tests are run with checkasm tool on AWS Graviton 3.
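
For reference, the function being optimized sums squared vertical differences over a 16-pixel-wide block; a simplified scalar sketch of the computation (vsse16_ref is just an illustrative name, not the exact code from libavcodec/me_cmp.c) looks roughly like this:

    #include <stdint.h>
    #include <stddef.h>

    static int vsse16_ref(const uint8_t *s1, const uint8_t *s2,
                          ptrdiff_t stride, int h)
    {
        int score = 0;
        // h-1 row pairs: compare the vertical gradient of the two blocks
        for (int y = 0; y < h - 1; y++) {
            for (int x = 0; x < 16; x++) {
                int d = s1[x] - s2[x] - s1[x + stride] + s2[x + stride];
                score += d * d;
            }
            s1 += stride;
            s2 += stride;
        }
        return score;
    }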

Signed-off-by: Hubert Mazur <[email protected]>
---
libavcodec/aarch64/me_cmp_init_aarch64.c |  4 +
libavcodec/aarch64/me_cmp_neon.S         | 97 ++++++++++++++++++++++++
2 files changed, 101 insertions(+)

diff --git a/libavcodec/aarch64/me_cmp_init_aarch64.c b/libavcodec/aarch64/me_cmp_init_aarch64.c
index ddc5d05611..7b81e48d16 100644
--- a/libavcodec/aarch64/me_cmp_init_aarch64.c
+++ b/libavcodec/aarch64/me_cmp_init_aarch64.c
@@ -43,6 +43,8 @@ int sse4_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,

int vsad16_neon(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
                ptrdiff_t stride, int h);
+int vsse16_neon(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
+                ptrdiff_t stride, int h);

av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
{
@@ -62,5 +64,7 @@ av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
        c->sse[2] = sse4_neon;

        c->vsad[0] = vsad16_neon;
+
+        c->vsse[0] = vsse16_neon;
    }
}
diff --git a/libavcodec/aarch64/me_cmp_neon.S b/libavcodec/aarch64/me_cmp_neon.S
index d4c0099854..279bae7cb5 100644
--- a/libavcodec/aarch64/me_cmp_neon.S
+++ b/libavcodec/aarch64/me_cmp_neon.S
@@ -659,3 +659,100 @@ function vsad16_neon, export=1

        ret
endfunc
+
+function vsse16_neon, export=1
+        // x0           unused
+        // x1           uint8_t *pix1
+        // x2           uint8_t *pix2
+        // x3           ptrdiff_t stride
+        // w4           int h
+
+        movi            v30.4s, #0
+        movi            v29.4s, #0
+
+        add             x5, x1, x3                      // pix1 + stride
+        add             x6, x2, x3                      // pix2 + stride
+        sub             w4, w4, #1                      // we need to make h-1 iterations
+        cmp             w4, #3                          // check if we can make 4 iterations at once
+        b.le            2f
+
+// make 4 iterations at once

The comments talk about doing 4 iterations at once, while the code actually only does 3.
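
If the unroll really is meant to stay at 3 rows per pass, it would be clearer to have the comments say so, e.g. something like:

        // make 3 iterations at once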

+1:
+        // x = abs(pix1[0] - pix2[0] - pix1[0 + stride] + pix2[0 + stride]) =

The comment seems outdated here, since there's no abs() involved.
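
Maybe something along these lines would describe what the code actually does (just a suggestion):

        // x = pix1[0] - pix2[0] - pix1[0 + stride] + pix2[0 + stride]
        // res = x * x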

+        // res = (x) * (x)
+        ld1             {v0.16b}, [x1], x3              // Load pix1[0], first iteration
+        ld1             {v1.16b}, [x2], x3              // Load pix2[0], first iteration
+        ld1             {v2.16b}, [x5], x3              // Load pix1[0 + stride], first iteration
+        usubl           v28.8h, v0.8b, v1.8b            // Signed difference of pix1[0] - pix2[0], first iteration
+        ld1             {v3.16b}, [x6], x3              // Load pix2[0 + stride], first iteration
+        usubl2          v27.8h, v0.16b, v1.16b          // Signed difference of pix1[0] - pix2[0], first iteration
+        usubl           v26.8h, v3.8b, v2.8b            // Signed difference of pix1[0 + stride] - pix2[0 + stride], first iteration
+        usubl2          v25.8h, v3.16b, v2.16b          // Signed difference of pix1[0 + stride] - pix2[0 + stride], first iteration

The same comment as for the previous patch applies here, about reusing the data from the previous row.
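
That is, the structure could look roughly like this scalar sketch (vsse16_row_reuse is just an illustrative name, not anything in the tree): each row difference is computed once and carried over into the next iteration, instead of loading and subtracting both rows every time.

    static int vsse16_row_reuse(const uint8_t *s1, const uint8_t *s2,
                                ptrdiff_t stride, int h)
    {
        int score = 0;
        int prev[16];
        // difference of the first row, computed once up front
        for (int x = 0; x < 16; x++)
            prev[x] = s1[x] - s2[x];
        for (int y = 1; y < h; y++) {
            s1 += stride;
            s2 += stride;
            for (int x = 0; x < 16; x++) {
                int cur = s1[x] - s2[x]; // this row's difference
                int d   = cur - prev[x]; // opposite sign to the comment above, but it is squared anyway
                score  += d * d;
                prev[x] = cur;           // becomes the "previous row" of the next iteration
            }
        }
        return score;
    }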

// Martin
