Hi,

As subject, this patch rewrites the floating-point mla_n/mls_n Neon
intrinsics to use the C expressions a + b * c and a - b * c rather
than inline assembly code, allowing for better instruction scheduling
and optimization.
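For illustration only (not part of the patch), here is a minimal
sketch of what the rewrite enables. The function name is hypothetical;
with the intrinsic now expressed as plain C arithmetic, the compiler
can see through it and, subject to -ffp-contract, contract the
multiply and add into a single fmla-by-element instruction rather
than treating an opaque asm block as a scheduling barrier:

/* Illustrative sketch, not part of the patch.  vmla_n_f32 is now
   defined as plain C (__a + __b * __c), so the optimizers can
   contract and schedule it instead of working around inline asm.  */
#include <arm_neon.h>

float32x2_t
scale_accumulate (float32x2_t acc, float32x2_t x, float32_t k)
{
  return vmla_n_f32 (acc, x, k);
}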
Regression tested and bootstrapped on aarch64-none-linux-gnu - no
issues.

Ok for master?

Thanks,
Jonathan

---

gcc/ChangeLog:

2021-01-18  Jonathan Wright  <jonathan.wri...@arm.com>

	* config/aarch64/arm_neon.h (vmla_n_f32): Use C rather than asm.
	(vmlaq_n_f32): Likewise.
	(vmls_n_f32): Likewise.
	(vmlsq_n_f32): Likewise.
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d891067f021a0bcc24af79dfbe2d9dd5889b23bc..d1ab3b7d54cd5b965f91e685139677864fcfe3e1 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7233,13 +7233,7 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
 {
-  float32x2_t __result;
-  float32x2_t __t1;
-  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __a + __b * __c;
 }
 
 __extension__ extern __inline int16x4_t
@@ -7734,13 +7728,7 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
 {
-  float32x4_t __result;
-  float32x4_t __t1;
-  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __a + __b * __c;
 }
 
 __extension__ extern __inline int16x8_t
@@ -7827,13 +7815,7 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
 {
-  float32x2_t __result;
-  float32x2_t __t1;
-  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __a - __b * __c;
 }
 
 __extension__ extern __inline int16x4_t
@@ -8324,13 +8306,7 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
 {
-  float32x4_t __result;
-  float32x4_t __t1;
-  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __a - __b * __c;
 }
 
 __extension__ extern __inline int16x8_t
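As an illustrative sanity check (again, not part of the patch), the
lanewise semantics are unchanged by the rewrite: vmlsq_n_f32 (a, b, c)
still yields a - b * c in each lane. A throwaway test along these
lines exercises the new definition:

/* Illustrative self-check, not part of the patch: the rewritten
   vmlsq_n_f32 still computes a - b * c in every lane.  */
#include <arm_neon.h>
#include <assert.h>

int
main (void)
{
  float32x4_t a = vdupq_n_f32 (10.0f);
  float32x4_t b = vdupq_n_f32 (3.0f);
  float32x4_t r = vmlsq_n_f32 (a, b, 2.0f);  /* 10 - 3 * 2 = 4 per lane.  */

  float out[4];
  vst1q_f32 (out, r);
  for (int i = 0; i < 4; i++)
    assert (out[i] == 4.0f);
  return 0;
}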