Hello,
This patch adds support for the following MVE ACLE vaddq intrinsics. The RTL
patterns for these intrinsics are defined using the arithmetic "plus"
operator.
vaddq_s8, vaddq_s16, vaddq_s32, vaddq_u8, vaddq_u16, vaddq_u32, vaddq_f16,
vaddq_f32.
Please refer to M-profile Vector Extension (MVE) intrinsics [1] for more
details.
[1]
https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics
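For illustration only (not part of the patch), a minimal usage sketch is shown
below; it assumes arm_mve.h as modified by this patch and options similar to
the new tests (e.g. -march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2):

#include "arm_mve.h"

int32x4_t
add_s32 (int32x4_t a, int32x4_t b)
{
  /* Explicitly typed intrinsic; expected to emit "vadd.i32".  */
  return vaddq_s32 (a, b);
}

float32x4_t
add_f32 (float32x4_t a, float32x4_t b)
{
  /* Polymorphic variant; _Generic selection resolves to __arm_vaddq_f32
     and is expected to emit "vadd.f32".  */
  return vaddq (a, b);
}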
Regression tested on arm-none-eabi; no regressions found.
Ok for trunk?
Thanks,
Srinath.
gcc/ChangeLog:
2019-11-05 Andre Vieira <[email protected]>
Mihail Ionescu <[email protected]>
Srinath Parvathaneni <[email protected]>
* config/arm/arm_mve.h (vaddq_s8): Define macro.
(vaddq_s16): Likewise.
(vaddq_s32): Likewise.
(vaddq_u8): Likewise.
(vaddq_u16): Likewise.
(vaddq_u32): Likewise.
(vaddq_f16): Likewise.
(vaddq_f32): Likewise.
(__arm_vaddq_s8): Define intrinsic.
(__arm_vaddq_s16): Likewise.
(__arm_vaddq_s32): Likewise.
(__arm_vaddq_u8): Likewise.
(__arm_vaddq_u16): Likewise.
(__arm_vaddq_u32): Likewise.
(__arm_vaddq_f16): Likewise.
(__arm_vaddq_f32): Likewise.
(vaddq): Define polymorphic variant.
* config/arm/iterators.md (VNIM): Define mode iterator for types common to
Neon, IWMMXT and MVE.
(VNINOTM): Define mode iterator for types supported by Neon and IWMMXT but
not MVE.
* config/arm/mve.md (mve_vaddq<mode>): Define RTL pattern.
(mve_vaddq_f<mode>): Define RTL pattern.
* config/arm/neon.md (add<mode>3): Rename to addv4hf3 and restrict to the
V4HF mode.
(addv8hf3_neon): Define RTL pattern for the V8HF mode.
* config/arm/vec-common.md (add<mode>3): Modify standard add RTL pattern
to support MVE.
(addv8hf3): Define standard RTL pattern for MVE and Neon.
(add<mode>3): Modify existing standard add RTL pattern for Neon and
IWMMXT.
gcc/testsuite/ChangeLog:
2019-11-05 Andre Vieira <[email protected]>
Mihail Ionescu <[email protected]>
Srinath Parvathaneni <[email protected]>
* gcc.target/arm/mve/intrinsics/vaddq_f16.c: New test.
* gcc.target/arm/mve/intrinsics/vaddq_f32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vaddq_s16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vaddq_s32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vaddq_s8.c: Likewise.
* gcc.target/arm/mve/intrinsics/vaddq_u16.c: Likewise.
* gcc.target/arm/mve/intrinsics/vaddq_u32.c: Likewise.
* gcc.target/arm/mve/intrinsics/vaddq_u8.c: Likewise.
############### Attachment also inlined for ease of reply ###############
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index
42e98f9ad1e357fe974e58378a49bcaaf36c302a..89456589c9dcdff5b56e8707dd720fb151466661
100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -1898,6 +1898,14 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
#define vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
__arm_vstrwq_scatter_shifted_offset_p_u32(__base, __offset, __value, __p)
#define vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
__arm_vstrwq_scatter_shifted_offset_s32(__base, __offset, __value)
#define vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
__arm_vstrwq_scatter_shifted_offset_u32(__base, __offset, __value)
+#define vaddq_s8(__a, __b) __arm_vaddq_s8(__a, __b)
+#define vaddq_s16(__a, __b) __arm_vaddq_s16(__a, __b)
+#define vaddq_s32(__a, __b) __arm_vaddq_s32(__a, __b)
+#define vaddq_u8(__a, __b) __arm_vaddq_u8(__a, __b)
+#define vaddq_u16(__a, __b) __arm_vaddq_u16(__a, __b)
+#define vaddq_u32(__a, __b) __arm_vaddq_u32(__a, __b)
+#define vaddq_f16(__a, __b) __arm_vaddq_f16(__a, __b)
+#define vaddq_f32(__a, __b) __arm_vaddq_f32(__a, __b)
#endif
__extension__ extern __inline void
@@ -12341,6 +12349,48 @@ __arm_vstrwq_scatter_shifted_offset_u32 (uint32_t *
__base, uint32x4_t __offset,
__builtin_mve_vstrwq_scatter_shifted_offset_uv4si ((__builtin_neon_si *)
__base, __offset, __value);
}
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s8 (int8x16_t __a, int8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s16 (int16x8_t __a, int16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_s32 (int32x4_t __a, int32x4_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
+{
+ return __a + __b;
+}
+
#if (__ARM_FEATURE_MVE & 2) /* MVE Floating point. */
__extension__ extern __inline void
@@ -14707,6 +14757,20 @@ __arm_vstrwq_scatter_shifted_offset_p_f32 (float32_t *
__base, uint32x4_t __offs
__builtin_mve_vstrwq_scatter_shifted_offset_p_fv4sf (__base, __offset,
__value, __p);
}
+__extension__ extern __inline float16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_f16 (float16x8_t __a, float16x8_t __b)
+{
+ return __a + __b;
+}
+
+__extension__ extern __inline float32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+ return __a + __b;
+}
+
#endif
enum {
@@ -16557,6 +16621,25 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_uint32_t_ptr][__ARM_mve_type_uint32x4_t]:
__arm_vstrwq_scatter_shifted_offset_u32 (__ARM_mve_coerce(__p0, uint32_t *),
__p1, __ARM_mve_coerce(__p2, uint32x4_t)), \
int (*)[__ARM_mve_type_float32_t_ptr][__ARM_mve_type_float32x4_t]:
__arm_vstrwq_scatter_shifted_offset_f32 (__ARM_mve_coerce(__p0, float32_t *),
__p1, __ARM_mve_coerce(__p2, float32x4_t)));})
+#define vaddq(p0,p1) __arm_vaddq(p0,p1)
+#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
+ __typeof(p1) __p1 = (p1); \
+ _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8x16_t]: __arm_vaddq_s8
(__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8x16_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16x8_t]: __arm_vaddq_s16
(__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16x8_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32
(__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]:
__arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1,
uint8x16_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]:
__arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1,
uint16x8_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]:
__arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1,
uint32x4_t)), \
+ int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8
(__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
+ int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]:
__arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1,
uint16_t)), \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]:
__arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1,
uint32_t)), \
+ int (*)[__ARM_mve_type_int8x16_t][__ARM_mve_type_int8_t]: __arm_vaddq_n_s8
(__ARM_mve_coerce(__p0, int8x16_t), __ARM_mve_coerce(__p1, int8_t)), \
+ int (*)[__ARM_mve_type_int16x8_t][__ARM_mve_type_int16_t]: __arm_vaddq_n_s16
(__ARM_mve_coerce(__p0, int16x8_t), __ARM_mve_coerce(__p1, int16_t)), \
+ int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32_t]: __arm_vaddq_n_s32
(__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32_t)), \
+ int (*)[__ARM_mve_type_float16x8_t][__ARM_mve_type_float16x8_t]:
__arm_vaddq_f16 (__ARM_mve_coerce(p0, float16x8_t), __ARM_mve_coerce(p1,
float16x8_t)), \
+ int (*)[__ARM_mve_type_float32x4_t][__ARM_mve_type_float32x4_t]:
__arm_vaddq_f32 (__ARM_mve_coerce(p0, float32x4_t), __ARM_mve_coerce(p1,
float32x4_t)));})
+
#else /* MVE Interger. */
#define vst4q(p0,p1) __arm_vst4q(p0,p1)
@@ -17213,12 +17296,7 @@ extern void *__ARM_undef;
int (*)[__ARM_mve_type_int32x4_t][__ARM_mve_type_int32x4_t]: __arm_vaddq_s32
(__ARM_mve_coerce(__p0, int32x4_t), __ARM_mve_coerce(__p1, int32x4_t)), \
int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8x16_t]:
__arm_vaddq_u8 (__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1,
uint8x16_t)), \
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16x8_t]:
__arm_vaddq_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1,
uint16x8_t)), \
- int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]:
__arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1,
uint32x4_t)));})
-
-#define vaddq(p0,p1) __arm_vaddq(p0,p1)
-#define __arm_vaddq(p0,p1) ({ __typeof(p0) __p0 = (p0); \
- __typeof(p1) __p1 = (p1); \
- _Generic( (int (*)[__ARM_mve_typeid(__p0)][__ARM_mve_typeid(__p1)])0, \
+ int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32x4_t]:
__arm_vaddq_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1,
uint32x4_t)), \
int (*)[__ARM_mve_type_uint8x16_t][__ARM_mve_type_uint8_t]: __arm_vaddq_n_u8
(__ARM_mve_coerce(__p0, uint8x16_t), __ARM_mve_coerce(__p1, uint8_t)), \
int (*)[__ARM_mve_type_uint16x8_t][__ARM_mve_type_uint16_t]:
__arm_vaddq_n_u16 (__ARM_mve_coerce(__p0, uint16x8_t), __ARM_mve_coerce(__p1,
uint16_t)), \
int (*)[__ARM_mve_type_uint32x4_t][__ARM_mve_type_uint32_t]:
__arm_vaddq_n_u32 (__ARM_mve_coerce(__p0, uint32x4_t), __ARM_mve_coerce(__p1,
uint32_t)), \
diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index
e30325bc1652d378be2544fa32269c5c4294d7e9..a0cd35eadc8e9fa6d1a8330778c0b5f83a5e8a37
100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -62,6 +62,14 @@
;; Integer and float modes supported by Neon and IWMMXT.
(define_mode_iterator VALL [V2DI V2SI V4HI V8QI V2SF V4SI V8HI V16QI V4SF])
+;; Integer and float modes supported by Neon, IWMMXT and MVE, used by
+;; arithmetic expand patterns.
+(define_mode_iterator VNIM [V16QI V8HI V4SI V4SF])
+
+;; Integer and float modes supported by Neon and IWMMXT but not MVE, used by
+;; arithmetic expand patterns.
+(define_mode_iterator VNINOTM [V2SI V4HI V8QI V2SF V2DI])
+
;; Integer and float modes supported by Neon, IWMMXT and MVE.
(define_mode_iterator VNIM1 [V16QI V8HI V4SI V4SF V2DI])
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index
c3fdc8b60843332ee0e59e8ec537d00c41407622..a3f5de300a4a83d916ebbed44ebce30b7d143b22
100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -9636,3 +9636,31 @@
return "";
}
[(set_attr "length" "4")])
+
+;;
+;; [vaddq_s, vaddq_u])
+;;
+(define_insn "mve_vaddq<mode>"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (plus:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE"
+ "vadd.i%#<V_sz_elem> %q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
+
+;;
+;; [vaddq_f])
+;;
+(define_insn "mve_vaddq_f<mode>"
+ [
+ (set (match_operand:MVE_0 0 "s_register_operand" "=w")
+ (plus:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
+ (match_operand:MVE_0 2 "s_register_operand" "w")))
+ ]
+ "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
+ "vadd.f%#<V_sz_elem> %q0, %q1, %q2"
+ [(set_attr "type" "mve_move")
+])
diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index
c23783e0ed914ec21a92828388ada58ada3c6132..df8f4fd4166b0d9cd08f01f3ac7ac3958f20b9db
100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -519,18 +519,30 @@
;; As with SFmode, full support for HFmode vector arithmetic is only available
;; when flag-unsafe-math-optimizations is enabled.
-(define_insn "add<mode>3"
+;; The add pattern with modes V8HF and V4HF is split into separate patterns
+;; to add support for the standard pattern addv8hf3 in MVE.  The following
+;; pattern is called from the "addv8hf3" standard pattern in vec-common.md.
+
+(define_insn "addv8hf3_neon"
[(set
- (match_operand:VH 0 "s_register_operand" "=w")
- (plus:VH
- (match_operand:VH 1 "s_register_operand" "w")
- (match_operand:VH 2 "s_register_operand" "w")))]
+ (match_operand:V8HF 0 "s_register_operand" "=w")
+ (plus:V8HF
+ (match_operand:V8HF 1 "s_register_operand" "w")
+ (match_operand:V8HF 2 "s_register_operand" "w")))]
"TARGET_NEON_FP16INST && flag_unsafe_math_optimizations"
- "vadd.<V_if_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
- [(set (attr "type")
- (if_then_else (match_test "<Is_float_mode>")
- (const_string "neon_fp_addsub_s<q>")
- (const_string "neon_add<q>")))]
+ "vadd.f16\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_fp_addsub_s_q")]
+)
+
+(define_insn "addv4hf3"
+ [(set
+ (match_operand:V4HF 0 "s_register_operand" "=w")
+ (plus:V4HF
+ (match_operand:V4HF 1 "s_register_operand" "w")
+ (match_operand:V4HF 2 "s_register_operand" "w")))]
+ "TARGET_NEON_FP16INST && flag_unsafe_math_optimizations"
+ "vadd.f16\t%<V_reg>0, %<V_reg>1, %<V_reg>2"
+ [(set_attr "type" "neon_fp_addsub_s_q")]
)
(define_insn "add<mode>3_fp16"
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index
5f5c113cf95afafbb733e1bfd2a7c7b8a55651a2..82a1a6bd7698fa25db88a5cdb5b3e762dc80a589
100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -77,19 +77,51 @@
}
})
+;; Vector arithmetic. Expanders are blank, then unnamed insns implement
+;; patterns separately for Neon, IWMMXT and MVE.
+
+(define_expand "add<mode>3"
+ [(set (match_operand:VNIM 0 "s_register_operand")
+ (plus:VNIM (match_operand:VNIM 1 "s_register_operand")
+ (match_operand:VNIM 2 "s_register_operand")))]
+ "(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode)
+ || flag_unsafe_math_optimizations))
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))
+ || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE(<MODE>mode))
+ || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE(<MODE>mode))"
+{
+})
+
+;; Vector arithmetic. Expanders are blank, then unnamed insns implement
+;; patterns separately for Neon and MVE.
+
+(define_expand "addv8hf3"
+ [(set (match_operand:V8HF 0 "s_register_operand")
+ (plus:V8HF (match_operand:V8HF 1 "s_register_operand")
+ (match_operand:V8HF 2 "s_register_operand")))]
+ "(TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE(V8HFmode))
+ || (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)"
+{
+ if (TARGET_NEON_FP16INST && flag_unsafe_math_optimizations)
+ emit_insn (gen_addv8hf3_neon (operands[0], operands[1], operands[2]));
+})
+
+;; Vector arithmetic. Expanders are blank, then unnamed insns implement
+;; patterns separately for Neon and IWMMXT.
+
+(define_expand "add<mode>3"
+ [(set (match_operand:VNINOTM 0 "s_register_operand")
+ (plus:VNINOTM (match_operand:VNINOTM 1 "s_register_operand")
+ (match_operand:VNINOTM 2 "s_register_operand")))]
+ "(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode)
+ || flag_unsafe_math_optimizations))
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
;; Vector arithmetic. Expanders are blank, then unnamed insns implement
;; patterns separately for IWMMXT and Neon.
-(define_expand "add<mode>3"
- [(set (match_operand:VALL 0 "s_register_operand")
- (plus:VALL (match_operand:VALL 1 "s_register_operand")
- (match_operand:VALL 2 "s_register_operand")))]
- "(TARGET_NEON && ((<MODE>mode != V2SFmode && <MODE>mode != V4SFmode)
- || flag_unsafe_math_optimizations))
- || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
-{
-})
-
(define_expand "sub<mode>3"
[(set (match_operand:VALL 0 "s_register_operand")
(minus:VALL (match_operand:VALL 1 "s_register_operand")
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
new file mode 100644
index
0000000000000000000000000000000000000000..4b6fa72adfdf2b68d766c6a4ce1226a84d357f36
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f16.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2"
} */
+/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+
+#include "arm_mve.h"
+
+float16x8_t
+foo (float16x8_t a, float16x8_t b)
+{
+ return vaddq_f16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f16" } } */
+
+float16x8_t
+foo1 (float16x8_t a, float16x8_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
new file mode 100644
index
0000000000000000000000000000000000000000..0aa7d4b7d9b8f4cb0c6611fe47d494762297c24c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_f32.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.1-m.main+mve.fp -mfloat-abi=hard -O2"
} */
+/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+
+#include "arm_mve.h"
+
+float32x4_t
+foo (float32x4_t a, float32x4_t b)
+{
+ return vaddq_f32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f32" } } */
+
+float32x4_t
+foo1 (float32x4_t a, float32x4_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.f32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
new file mode 100644
index
0000000000000000000000000000000000000000..34f1e0b1d0e11b899505a314418f0f2e1acb7b51
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s16.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" }
*/
+/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, int16x8_t b)
+{
+ return vaddq_s16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
+
+int16x8_t
+foo1 (int16x8_t a, int16x8_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
new file mode 100644
index
0000000000000000000000000000000000000000..ae35932b3dcf49eddd9e537e8833daac7050ad31
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s32.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" }
*/
+/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, int32x4_t b)
+{
+ return vaddq_s32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
+
+int32x4_t
+foo1 (int32x4_t a, int32x4_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
new file mode 100644
index
0000000000000000000000000000000000000000..277c9f7f7c1a1054392e0dbd55800e0686e9e8fb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_s8.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" }
*/
+/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, int8x16_t b)
+{
+ return vaddq_s8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
+
+int8x16_t
+foo1 (int8x16_t a, int8x16_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c
new file mode 100644
index
0000000000000000000000000000000000000000..de2f299ab46181c696db6daa726744bca17f1588
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u16.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" }
*/
+/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint16x8_t b)
+{
+ return vaddq_u16 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint16x8_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i16" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c
new file mode 100644
index
0000000000000000000000000000000000000000..81a96d427a5783a99e4962df99c0f64b6b16e6a5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u32.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" }
*/
+/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32x4_t b)
+{
+ return vaddq_u32 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32x4_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i32" } } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c
b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c
new file mode 100644
index
0000000000000000000000000000000000000000..2a754210e139074728ff5af00787aed411d3597c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vaddq_u8.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-march=armv8.1-m.main+mve -mfloat-abi=hard -O2" }
*/
+/* { dg-skip-if "Skip if not auto" {*-*-*} {"-mfpu=*"} {"-mfpu=auto"} } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint8x16_t b)
+{
+ return vaddq_u8 (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint8x16_t b)
+{
+ return vaddq (a, b);
+}
+
+/* { dg-final { scan-assembler "vadd.i8" } } */