Re: [AArch64][12/14] ARMv8.2-A testsuite for new data movement intrinsics

2016-10-10 Thread James Greenhalgh
On Thu, Jul 07, 2016 at 05:19:09PM +0100, Jiong Wang wrote:
> This patch contains testcases for the new scalar intrinsics that are only
> available on AArch64.

OK.

Thanks,
James

> 
> gcc/testsuite/
> 2016-07-07  Jiong Wang 
> 
> * gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
> (FP16_SUPPORTED): Enable AArch64.
> * gcc.target/aarch64/advsimd-intrinsics/vdup_lane.c: Add support
> for vdup*_laneq.
> * gcc.target/aarch64/advsimd-intrinsics/vduph_lane.c: New.
> * gcc.target/aarch64/advsimd-intrinsics/vtrn_half.c: New.
> * gcc.target/aarch64/advsimd-intrinsics/vuzp_half.c: New.
> * gcc.target/aarch64/advsimd-intrinsics/vzip_half.c: New.
> 

[AArch64][12/14] ARMv8.2-A testsuite for new data movement intrinsics

2016-07-07 Thread Jiong Wang

This patch contains testcases for the new scalar intrinsics that are only
available on AArch64.
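
For readers who have not followed the series: the vdup*_laneq forms
broadcast one lane of a 128-bit source vector (the existing vdup*_lane
forms take a 64-bit source), and vduph_lane/vduph_laneq return a single
half-precision lane as a scalar.  A minimal usage sketch, not taken from
the patch, built with an AArch64 compiler (-march=armv8.2-a+fp16 for the
float16_t variant):

  #include <arm_neon.h>

  /* Broadcast lane 5 of a 128-bit vector into every lane of a
     64-bit result.  */
  int16x4_t
  broadcast_lane5 (int16x8_t v)
  {
    return vdup_laneq_s16 (v, 5);
  }

  /* Scalar form exercised by the new vduph_lane.c test: extract
     lane 3 as a float16_t scalar.  */
  float16_t
  get_lane3 (float16x4_t v)
  {
    return vduph_lane_f16 (v, 3);
  }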

gcc/testsuite/
2016-07-07  Jiong Wang 

* gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
(FP16_SUPPORTED): Enable AArch64.
* gcc.target/aarch64/advsimd-intrinsics/vdup_lane.c: Add support
for vdup*_laneq.
* gcc.target/aarch64/advsimd-intrinsics/vduph_lane.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vtrn_half.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vuzp_half.c: New.
* gcc.target/aarch64/advsimd-intrinsics/vzip_half.c: New.

From 7bf705fa1bacf7a0275b28e6bfa33397f7037415 Mon Sep 17 00:00:00 2001
From: Jiong Wang 
Date: Wed, 6 Jul 2016 14:51:35 +0100
Subject: [PATCH 12/14] [12/14] TESTSUITE for new data movement intrinsics

---
 .../aarch64/advsimd-intrinsics/arm-neon-ref.h  |  16 +-
 .../aarch64/advsimd-intrinsics/vdup_lane.c | 119 +-
 .../aarch64/advsimd-intrinsics/vduph_lane.c| 137 +++
 .../aarch64/advsimd-intrinsics/vtrn_half.c | 263 +
 .../aarch64/advsimd-intrinsics/vuzp_half.c | 259 
 .../aarch64/advsimd-intrinsics/vzip_half.c | 263 +
 6 files changed, 1042 insertions(+), 15 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vduph_lane.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vtrn_half.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vuzp_half.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vzip_half.c

diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
index 1297137..4621415 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/arm-neon-ref.h
@@ -17,9 +17,8 @@ extern void *memcpy(void *, const void *, size_t);
 extern size_t strlen(const char *);
 
 /* Helper macro to select FP16 tests.  */
-#if (!defined (__aarch64__)					\
-     && (defined (__ARM_FP16_FORMAT_IEEE)			\
-	 || defined (__ARM_FP16_FORMAT_ALTERNATIVE)))
+#if (defined (__ARM_FP16_FORMAT_IEEE) \
+     || defined (__ARM_FP16_FORMAT_ALTERNATIVE))
 #define FP16_SUPPORTED (1)
 #else
 #undef FP16_SUPPORTED
@@ -520,17 +519,6 @@ static void clean_results (void)
 /* Helpers to initialize vectors.  */
 #define VDUP(VAR, Q, T1, T2, W, N, V)			\
   VECT_VAR(VAR, T1, W, N) = vdup##Q##_n_##T2##W(V)
-#if (defined (__aarch64__)					\
-     && (defined (__ARM_FP16_FORMAT_IEEE)			\
-	 || defined (__ARM_FP16_FORMAT_ALTERNATIVE)))
-/* Work around that there is no vdup_n_f16 intrinsic.  */
-#define vdup_n_f16(VAL)		\
-  __extension__			\
-    ({				\
-      float16_t f = VAL;	\
-      vld1_dup_f16(&f);		\
-    })
-#endif
 
 #define VSET_LANE(VAR, Q, T1, T2, W, N, L, V)			\
   VECT_VAR(VAR, T1, W, N) = vset##Q##_lane_##T2##W(V,			\
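
For context on the arm-neon-ref.h change above: FP16_SUPPORTED no longer
excludes __aarch64__, so it is defined whenever one of the FP16 format
macros is, on both targets.  Tests guard their float16 data and checks
with that single predicate; an illustrative fragment (not from this
patch) in the style of the tests below:

  #if defined (FP16_SUPPORTED)
    /* float16 expected values now run on aarch64 as well as arm.  */
    VECT_VAR_DECL (expected, hfloat, 16, 4) [] = { 0xca80, 0xca80,
						   0xca80, 0xca80 };
  #endif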
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vdup_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vdup_lane.c
index c4b8f14..5d0dba3 100644
--- a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vdup_lane.c
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vdup_lane.c
@@ -56,7 +56,7 @@ VECT_VAR_DECL (expected, hfloat, 16, 8) [] = { 0xca80, 0xca80,
 VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0xc170, 0xc170,
 	   0xc170, 0xc170 };
 
-#define TEST_MSG "VDUP_LANE/VDUP_LANEQ"
+#define TEST_MSG "VDUP_LANE/VDUPQ_LANE"
 void exec_vdup_lane (void)
 {
   /* Basic test: vec1=vdup_lane(vec2, lane), then store the result.  */
@@ -114,6 +114,123 @@ void exec_vdup_lane (void)
 #else
   CHECK_RESULTS_NO_FP16 (TEST_MSG, "");
 #endif
+
+#if defined (__aarch64__)
+
+#undef TEST_MSG
+#define TEST_MSG "VDUP_LANEQ/VDUPQ_LANEQ"
+
+  /* Expected results for vdup*_laneq tests.  */
+VECT_VAR_DECL(expected2,int,8,8) [] = { 0xfd, 0xfd, 0xfd, 0xfd,
+	0xfd, 0xfd, 0xfd, 0xfd };
+VECT_VAR_DECL(expected2,int,16,4) [] = { 0xfff2, 0xfff2, 0xfff2, 0xfff2 };
+VECT_VAR_DECL(expected2,int,32,2) [] = { 0xfff1, 0xfff1 };
+VECT_VAR_DECL(expected2,int,64,1) [] = { 0xfff0 };
+VECT_VAR_DECL(expected2,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+	 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected2,uint,16,4) [] = { 0xfff3, 0xfff3, 0xfff3, 0xfff3 };
+VECT_VAR_DECL(expected2,uint,32,2) [] = { 0xfff1, 0xfff1 };
+VECT_VAR_DECL(expected2,uint,64,1) [] = { 0xfff0 };
+VECT_VAR_DECL(expected2,poly,8,8) [] = { 0xf7, 0xf7, 0xf7, 0xf7,
+	 0xf7, 0xf7, 0xf7, 0xf7 };
+VECT_VAR_DECL(expected2,poly,16,4) [] = { 0xfff3, 0xfff3, 0xfff3, 0xfff3 };
+VECT_VAR_DECL(expected2,hfloat,32,2) [] = { 0xc170, 0xc170 };
+#if defined (FP16_SUPPORTED)
+VECT_VAR_DECL (expected2, hfloat, 16, 4) [] = { 0xca80, 0xca80,
+		0xca80, 0xca80 };
+#endif
+VECT_VAR_DECL(expected2,int,8,16) [] = {