cedric pushed a commit to branch master.

http://git.enlightenment.org/core/efl.git/commit/?id=2c2983aadb8cd7351d821c0a5b01efafd445665d

commit 2c2983aadb8cd7351d821c0a5b01efafd445665d
Author: Cedric BAIL <ced...@osg.samsung.com>
Date:   Tue Apr 28 23:39:18 2015 +0200

    evas: implement _op_blend_rel_{p,pan}_dp_neon using NEON intrinsics
    
    Summary: NEON intrinsics can be built for both armv7 and armv8 (a build sketch follows the diffstat below).
    
    Reviewers: raster, cedric
    
    Subscribers: cedric
    
    Projects: #efl
    
    Differential Revision: https://phab.enlightenment.org/D2441
    
    Signed-off-by: Cedric BAIL <ced...@osg.samsung.com>
---
 .../common/evas_op_blend/op_blend_pixel_neon.c     | 182 ++++++++++++++++++++-
 1 file changed, 176 insertions(+), 6 deletions(-)
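
As a quick illustration of the summary's portability point, here is a minimal
sketch (not part of the patch; the file name and compiler invocations are
hypothetical but typical): the same intrinsics source compiles unchanged for
both targets, which is what lets the BUILD_NEON_INTRINSICS guard around
<arm_neon.h> go away below.

    /* neon_demo.c */
    #include <arm_neon.h>
    #include <stdint.h>

    void fill_ones(uint8_t *p)
    {
       /* store 16 bytes of 0x01, as the patch does for x1_8x16 */
       vst1q_u8(p, vdupq_n_u8(0x1));
    }

    /* armv7: arm-linux-gnueabihf-gcc -mfpu=neon -c neon_demo.c */
    /* armv8: aarch64-linux-gnu-gcc -c neon_demo.c */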

diff --git a/src/lib/evas/common/evas_op_blend/op_blend_pixel_neon.c b/src/lib/evas/common/evas_op_blend/op_blend_pixel_neon.c
index 8d70b9d..0db97be 100644
--- a/src/lib/evas/common/evas_op_blend/op_blend_pixel_neon.c
+++ b/src/lib/evas/common/evas_op_blend/op_blend_pixel_neon.c
@@ -1,8 +1,6 @@
 #ifdef BUILD_NEON
-#ifdef BUILD_NEON_INTRINSICS
 #include <arm_neon.h>
 #endif
-#endif
 /* blend pixel --> dst */
 
 #ifdef BUILD_NEON
@@ -747,8 +745,114 @@ init_blend_pixel_pt_funcs_neon(void)
 #ifdef BUILD_NEON
 static void
 _op_blend_rel_p_dp_neon(DATA32 *s, DATA8 *m, DATA32 c, DATA32 *d, int l) {
-   DATA32 *e = d + l;
-   while (d < e) {
+   uint16x8_t cs0_16x8;
+   uint16x8_t cs1_16x8;
+   uint16x8_t ld0_16x8;
+   uint16x8_t ld1_16x8;
+   uint32x4_t c_32x4;
+   uint32x4_t cond_32x4;
+   uint32x4_t cs_32x4;
+   uint32x4_t d_32x4;
+   uint32x4_t l_32x4;
+   uint32x4_t ld_32x4;
+   uint32x4_t s_32x4;
+   uint32x4_t x0_32x4;
+   uint32x4_t x1_32x4;
+   uint8x16_t c_8x16;
+   uint8x16_t cs_8x16;
+   uint8x16_t d_8x16;
+   uint8x16_t l_8x16;
+   uint8x16_t ld_8x16;
+   uint8x16_t s_8x16;
+   uint8x16_t x0_8x16;
+   uint8x16_t x1_8x16;
+   uint8x8_t c0_8x8;
+   uint8x8_t c1_8x8;
+   uint8x8_t cs0_8x8;
+   uint8x8_t cs1_8x8;
+   uint8x8_t d0_8x8;
+   uint8x8_t d1_8x8;
+   uint8x8_t l0_8x8;
+   uint8x8_t l1_8x8;
+   uint8x8_t ld0_8x8;
+   uint8x8_t ld1_8x8;
+   uint8x8_t s0_8x8;
+   uint8x8_t s1_8x8;
+
+   x1_8x16 = vdupq_n_u8(0x1);
+   x1_32x4 = vreinterpretq_u32_u8(x1_8x16);
+   x0_8x16 = vdupq_n_u8(0x0);
+   x0_32x4 = vreinterpretq_u32_u8(x0_8x16);
+
+   DATA32 *end = d + (l & ~3);
+   while (d < end)
+   {
+      // load 4 elements from d
+      d_32x4 = vld1q_u32(d);
+      d_8x16 = vreinterpretq_u8_u32(d_32x4);
+      d0_8x8 = vget_low_u8(d_8x16);
+      d1_8x8 = vget_high_u8(d_8x16);
+
+      // load 4 elements from s
+      s_32x4 = vld1q_u32(s);
+      s_8x16 = vreinterpretq_u8_u32(s_32x4);
+      s0_8x8 = vget_low_u8(s_8x16);
+      s1_8x8 = vget_high_u8(s_8x16);
+
+      // calculate l = 256 - (*s >> 24)
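+      // (*s >> 24) leaves alpha in the low byte of each 32-bit lane;
+      // multiplying by x1_32x4 (0x01010101 per lane) replicates it into
+      // all four bytes, and the byte-wise 0 - alpha below wraps modulo
+      // 256 to give 256 - alpha (alpha == 0 wraps to 0; the select
+      // further down patches that case up)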
+      l_32x4 = vshrq_n_u32(s_32x4, 24);
+      l_32x4 = vmulq_u32(x1_32x4, l_32x4);
+      l_8x16 = vreinterpretq_u8_u32(l_32x4);
+      l_8x16 = vsubq_u8(x0_8x16, l_8x16);
+      l0_8x8 = vget_low_u8(l_8x16);
+      l1_8x8 = vget_high_u8(l_8x16);
+
+      // multiply MUL_256(l, *d)
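+      // vmull_u8 widens each byte product to 16 bits and vshrn_n_u16
+      // narrows it back with a >> 8, i.e. (l * d) >> 8 per channel,
+      // matching what MUL_256 computes for l in 1..255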
+      ld0_16x8 = vmull_u8(l0_8x8, d0_8x8);
+      ld1_16x8 = vmull_u8(l1_8x8, d1_8x8);
+      ld0_8x8 = vshrn_n_u16(ld0_16x8, 8);
+      ld1_8x8 = vshrn_n_u16(ld1_16x8, 8);
+      ld_8x16 = vcombine_u8(ld0_8x8, ld1_8x8);
+      ld_32x4 = vreinterpretq_u32_u8(ld_8x16);
+
+      // select d where l should be 256
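+      // alpha == 0 means l should be 256, which the byte math wrapped
+      // to 0; vceqq flags those lanes (replicated alpha == 0) and vbslq
+      // substitutes *d unscaled, since MUL_256(256, *d) == *d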
+      cond_32x4 = vceqq_u32(l_32x4, x0_32x4);
+      ld_32x4 = vbslq_u32(cond_32x4, d_32x4, ld_32x4);
+
+      // calculate 1 + (*d >> 24)
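+      // same replication trick on the destination alpha; the byte-wise
+      // + 1 wraps 255 to 0, which stands for c == 256 and is handled by
+      // the select below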
+      c_32x4 = vshrq_n_u32(d_32x4, 24);
+      c_32x4 = vmulq_u32(x1_32x4, c_32x4);
+      c_8x16 = vreinterpretq_u8_u32(c_32x4);
+      c_8x16 = vaddq_u8(c_8x16, x1_8x16);
+      c0_8x8 = vget_low_u8(c_8x16);
+      c1_8x8 = vget_high_u8(c_8x16);
+
+      // multiply MUL_256(c, *s)
+      cs0_16x8 = vmull_u8(c0_8x8, s0_8x8);
+      cs1_16x8 = vmull_u8(c1_8x8, s1_8x8);
+      cs0_8x8 = vshrn_n_u16(cs0_16x8, 8);
+      cs1_8x8 = vshrn_n_u16(cs1_16x8, 8);
+      cs_8x16 = vcombine_u8(cs0_8x8, cs1_8x8);
+      cs_32x4 = vreinterpretq_u32_u8(cs_8x16);
+
+      // select s where c should be 256
+      c_32x4 = vreinterpretq_u32_u8(c_8x16);
+      cond_32x4 = vceqq_u32(c_32x4, x0_32x4);
+      cs_32x4 = vbslq_u32(cond_32x4, s_32x4, cs_32x4);
+
+      // sum the two blend terms: MUL_256(c, *s) + MUL_256(l, *d)
+      d_32x4 = vaddq_u32(cs_32x4, ld_32x4);
+
+      // save result
+      vst1q_u32(d, d_32x4);
+
+      d += 4;
+      s += 4;
+   }
+
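+   // scalar tail: finish the up-to-three pixels the vector loop left over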
+   end += (l & 3);
+   while (d < end)
+   {
       l = 256 - (*s >> 24);
       c = 1 + (*d >> 24);
       *d = MUL_256(c, *s) + MUL_256(l, *d);
@@ -759,8 +863,74 @@ _op_blend_rel_p_dp_neon(DATA32 *s, DATA8 *m, DATA32 c, DATA32 *d, int l) {
 
 static void
 _op_blend_rel_pan_dp_neon(DATA32 *s, DATA8 *m, DATA32 c, DATA32 *d, int l) {
-   DATA32 *e = d + l;
-   while (d < e) {
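+   // Same scheme as _op_blend_rel_p_dp_neon above, but the source is
+   // opaque here (alpha == 255), so l == 256 - 255 == 1 and the
+   // MUL_256(1, *d) term contributes nothing; only MUL_256(c, *s) with
+   // c == 1 + (*d >> 24) survives.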
+   uint16x8_t cs0_16x8;
+   uint16x8_t cs1_16x8;
+   uint32x4_t c_32x4;
+   uint32x4_t cond_32x4;
+   uint32x4_t cs_32x4;
+   uint32x4_t d_32x4;
+   uint32x4_t s_32x4;
+   uint32x4_t x0_32x4;
+   uint32x4_t x1_32x4;
+   uint8x16_t c_8x16;
+   uint8x16_t cs_8x16;
+   uint8x16_t s_8x16;
+   uint8x16_t x0_8x16;
+   uint8x16_t x1_8x16;
+   uint8x8_t c0_8x8;
+   uint8x8_t c1_8x8;
+   uint8x8_t cs0_8x8;
+   uint8x8_t cs1_8x8;
+   uint8x8_t s0_8x8;
+   uint8x8_t s1_8x8;
+
+   x1_8x16 = vdupq_n_u8(0x1);
+   x1_32x4 = vreinterpretq_u32_u8(x1_8x16);
+   x0_8x16 = vdupq_n_u8(0x0);
+   x0_32x4 = vreinterpretq_u32_u8(x0_8x16);
+   DATA32 *end = d + (l & ~3);
+   while (d < end)
+   {
+      // load 4 elements from d
+      d_32x4 = vld1q_u32(d);
+
+      // load 4 elements from s
+      s_32x4 = vld1q_u32(s);
+      s_8x16 = vreinterpretq_u8_u32(s_32x4);
+      s0_8x8 = vget_low_u8(s_8x16);
+      s1_8x8 = vget_high_u8(s_8x16);
+
+      // calculate 1 + (*d >> 24)
+      c_32x4 = vshrq_n_u32(d_32x4, 24);
+      c_32x4 = vmulq_u32(x1_32x4, c_32x4);
+      c_8x16 = vreinterpretq_u8_u32(c_32x4);
+      c_8x16 = vaddq_u8(c_8x16, x1_8x16);
+      c0_8x8 = vget_low_u8(c_8x16);
+      c1_8x8 = vget_high_u8(c_8x16);
+
+      // multiply MUL_256(c, *s)
+      cs0_16x8 = vmull_u8(c0_8x8, s0_8x8);
+      cs1_16x8 = vmull_u8(c1_8x8, s1_8x8);
+      cs0_8x8 = vshrn_n_u16(cs0_16x8, 8);
+      cs1_8x8 = vshrn_n_u16(cs1_16x8, 8);
+      cs_8x16 = vcombine_u8(cs0_8x8, cs1_8x8);
+      cs_32x4 = vreinterpretq_u32_u8(cs_8x16);
+
+      // select s where c should be 256
+      c_32x4 = vreinterpretq_u32_u8(c_8x16);
+      cond_32x4 = vceqq_u32(c_32x4, x0_32x4);
+      cs_32x4 = vbslq_u32(cond_32x4, s_32x4, cs_32x4);
+
+      // save result
+      vst1q_u32(d, cs_32x4);
+
+      d += 4;
+      s += 4;
+   }
+
+   end += (l & 3);
+   while (d < end)
+   {
       c = 1 + (*d >> 24);
       *d++ = MUL_256(c, *s);
       s++;
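
For anyone wanting to sanity-check the vector path against the scalar
formula in the remainder loops, a small harness along these lines works
(a sketch only: MUL_256 is assumed to match its definition in
evas_blend_ops.h, and _op_blend_rel_p_dp_neon is static in-tree, so the
test would need to #include the .c file or drop the static; sizes and
seed are arbitrary):

    /* neon_blend_check.c (hypothetical test harness) */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint32_t DATA32;
    typedef uint8_t DATA8;

    /* assumed to match MUL_256 in evas_blend_ops.h */
    #define MUL_256(a, c) \
      ( (((((c) >> 8) & 0x00ff00ff) * (a)) & 0xff00ff00) + \
        (((((c) & 0x00ff00ff) * (a)) >> 8) & 0x00ff00ff) )

    /* function under test, assumed made visible for testing */
    extern void _op_blend_rel_p_dp_neon(DATA32 *s, DATA8 *m, DATA32 c,
                                        DATA32 *d, int l);

    int main(void)
    {
       enum { N = 1021 }; /* odd length so the scalar tail runs too */
       static DATA32 s[N], ref[N], neon[N];

       srand(42);
       for (int i = 0; i < N; i++)
         {
            /* mix two rand() calls so all 32 bits, alpha included, vary */
            s[i] = ((DATA32)rand() << 16) ^ (DATA32)rand();
            ref[i] = neon[i] = ((DATA32)rand() << 16) ^ (DATA32)rand();
         }

       /* scalar reference: same formula as the remainder loop above */
       for (int i = 0; i < N; i++)
         {
            DATA32 l = 256 - (s[i] >> 24);
            DATA32 c = 1 + (ref[i] >> 24);
            ref[i] = MUL_256(c, s[i]) + MUL_256(l, ref[i]);
         }

       _op_blend_rel_p_dp_neon(s, NULL, 0, neon, N);

       for (int i = 0; i < N; i++)
         if (ref[i] != neon[i])
           {
              printf("mismatch at %d: %08x != %08x\n",
                     i, (unsigned)ref[i], (unsigned)neon[i]);
              return 1;
           }
       puts("ok");
       return 0;
    }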
