Hi, 

Add testcase coverage for the vec_st (vector store)
intrinsic builtins.
    
Tested across power platforms (p6 and newer). OK for trunk?
    
Thanks,
-Will
    
[gcc/testsuite]
    
2017-09-21  Will Schmidt  <will_schm...@vnet.ibm.com>
    
    * gcc.target/powerpc/fold-vec-st-char.c: New.
    * gcc.target/powerpc/fold-vec-st-double.c: New.
    * gcc.target/powerpc/fold-vec-st-float.c: New.
    * gcc.target/powerpc/fold-vec-st-int.c: New.
    * gcc.target/powerpc/fold-vec-st-longlong.c: New.
    * gcc.target/powerpc/fold-vec-st-pixel.c: New.
    * gcc.target/powerpc/fold-vec-st-short.c: New.

diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-st-char.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-char.c
new file mode 100644
index 0000000..4c45827
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-char.c
@@ -0,0 +1,94 @@
+/* Verify that overloaded built-ins for vec_st* with char
+   inputs produce the right code.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -O2" } */
+
+#include <altivec.h>
+
+void
+testst_1 (vector signed char vsc1, int i1, vector signed char * vscp)
+{
+       vec_st(vsc1, i1, vscp);
+}
+
+void
+testst_2 (vector signed char vsc1, int i1, signed char * scp)
+{
+       vec_st(vsc1, i1, scp);
+}
+
+void
+testst_3 (vector unsigned char vuc1, int i1, vector unsigned char * vscp)
+{
+       vec_st(vuc1, i1, vscp);
+}
+
+void
+testst_4 (vector unsigned char vuc1, int i1, unsigned char * scp)
+{
+       vec_st(vuc1, i1, scp);
+}
+
+void
+testst_5 (vector bool char vbc1, int i1, vector bool char * vbcp)
+{
+       vec_st(vbc1, i1, vbcp);
+}
+
+void
+testst_6 (vector bool char vbc1, int i1, unsigned char * vucp)
+{
+       vec_st(vbc1, i1, vucp);
+}
+
+void
+testst_7 (vector bool char vbc1, int i1, signed char * vscp)
+{
+       vec_st(vbc1, i1, vscp);
+}
+
+void
+testst_cst1 (vector signed char vsc1, int i1, vector signed char * vscp)
+{
+       vec_st(vsc1, 12, vscp);
+}
+
+void
+testst_cst2 (vector signed char vsc1, int i1, signed char * scp)
+{
+       vec_st(vsc1, 16, scp);
+}
+
+void
+testst_cst3 (vector unsigned char vuc1, int i1, vector unsigned char * vscp)
+{
+       vec_st(vuc1, 20, vscp);
+}
+
+void
+testst_cst4 (vector unsigned char vuc1, int i1, unsigned char * scp)
+{
+       vec_st(vuc1, 24, scp);
+}
+
+void
+testst_cst5 (vector bool char vbc1, int i1, vector bool char * vbcp)
+{
+       vec_st(vbc1, 28, vbcp);
+}
+
+void
+testst_cst6 (vector bool char vbc1, int i1, unsigned char * vucp)
+{
+       vec_st(vbc1, 32, vucp);
+}
+
+void
+testst_cst7 (vector bool char vbc1, int i1, signed char * vscp)
+{
+       vec_st(vbc1, 36, vscp);
+}
+
+/* { dg-final { scan-assembler-times {\mstvx\M} 14 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-st-double.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-double.c
new file mode 100644
index 0000000..100caf4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-double.c
@@ -0,0 +1,22 @@
+/* Verify that overloaded built-ins for vec_st with
+   double inputs produce the right code.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-mvsx -O2" } */
+
+#include <altivec.h>
+
+void
+testst_1 (vector double vd1, int i1, vector double * vdp)
+{
+       vec_st(vd1, i1, vdp);
+}
+
+void
+testst_cst1 (vector double vd1, int i1, vector double * vdp)
+{
+       vec_st(vd1, 12, vdp);
+}
+
+/* { dg-final { scan-assembler-times {\mstvx\M} 2 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-st-float.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-float.c
new file mode 100644
index 0000000..5a8fc66
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-float.c
@@ -0,0 +1,34 @@
+/* Verify that overloaded built-ins for vec_st with float
+   inputs produce the right code.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -O2" } */
+
+#include <altivec.h>
+
+void
+testst_1 (vector float vf1, int i1, vector float * vfp)
+{
+       vec_st(vf1, i1, vfp);
+}
+
+void
+testst_2 (vector float vf1, int i1, float * fp)
+{
+       vec_st(vf1, i1, fp);
+}
+
+void
+testst_cst1 (vector float vf1, int i1, vector float * vfp)
+{
+       vec_st(vf1, 16, vfp);
+}
+
+void
+testst_cst2 (vector float vf1, int i1, float * fp)
+{
+       vec_st(vf1, 24, fp);
+}
+
+/* { dg-final { scan-assembler-times {\mstvx\M} 4 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-st-int.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-int.c
new file mode 100644
index 0000000..4db35f0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-int.c
@@ -0,0 +1,84 @@
+/* Verify that overloaded built-ins for vec_st* with int
+   inputs produce the right code.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -O2" } */
+
+#include <altivec.h>
+// void vec_st (vector signed int, int, vector signed int *);
+
+void
+testst_1 (vector signed int vsi1, int i1, vector signed int * vsip)
+{
+       vec_st(vsi1, i1, vsip);
+}
+void
+testst_2 (vector signed int vsi1, int i1, signed int * sip)
+{
+       vec_st(vsi1, i1, sip);
+}
+void
+testst_3 (vector unsigned int vui1, int i1, vector unsigned int * vsip)
+{
+       vec_st(vui1, i1, vsip);
+}
+void
+testst_4 (vector unsigned int vui1, int i1, unsigned int * sip)
+{
+       vec_st(vui1, i1, sip);
+}
+void
+testst_5 (vector bool int vbi1, int i1, vector bool int * vbip)
+{
+       vec_st(vbi1, i1, vbip);
+}
+void
+testst_6 (vector bool int vbi1, int i1, unsigned int * vuip)
+{
+       vec_st(vbi1, i1, vuip);
+}
+void
+testst_7 (vector bool int vbi1, int i1, signed int * vsip)
+{
+       vec_st(vbi1, i1, vsip);
+}
+
+void
+testst_cst1 (vector signed int vsi1, int i1, vector signed int * vsip)
+{
+       vec_st(vsi1, 12, vsip);
+}
+void
+testst_cst2 (vector signed int vsi1, int i1, signed int * sip)
+{
+       vec_st(vsi1, 16, sip);
+}
+void
+testst_cst3 (vector unsigned int vui1, int i1, vector unsigned int * vsip)
+{
+       vec_st(vui1, 20, vsip);
+}
+void
+testst_cst4 (vector unsigned int vui1, int i1, unsigned int * sip)
+{
+       vec_st(vui1, 24, sip);
+}
+void
+testst_cst5 (vector bool int vbi1, int i1, vector bool int * vbip)
+{
+       vec_st(vbi1, 28, vbip);
+}
+void
+testst_cst6 (vector bool int vbi1, int i1, unsigned int * vuip)
+{
+       vec_st(vbi1, 32, vuip);
+}
+void
+testst_cst7 (vector bool int vbi1, int i1, signed int * vsip)
+{
+       vec_st(vbi1, 36, vsip);
+}
+
+/* { dg-final { scan-assembler-times {\mstvx\M} 14 } } */
+
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-st-longlong.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-longlong.c
new file mode 100644
index 0000000..a33f64e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-longlong.c
@@ -0,0 +1,41 @@
+/* Verify that overloaded built-ins for vec_st* with long long
+   inputs produce the right code.  */
+
+/* { dg-do compile { target lp64 } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-options "-mpower8-vector -O2" } */
+
+#include <altivec.h>
+
+void
+testst_1 (vector signed long long vsll1, int i1, vector signed long long * vsllp)
+{
+       vec_st(vsll1, i1, vsllp);
+}
+void
+testst_3 (vector unsigned long long vull1, int i1, vector unsigned long long * vsllp)
+{
+       vec_st(vull1, i1, vsllp);
+}
+void
+testst_5 (vector bool long long vbll1, int i1, vector bool long long * vbllp)
+{
+       vec_st(vbll1, i1, vbllp);
+}
+void
+testst_cst1 (vector signed long long vsll1, int i1, vector signed long long * vsllp)
+{
+       vec_st(vsll1, 12, vsllp);
+}
+void
+testst_cst3 (vector unsigned long long vull1, int i1, vector unsigned long long * vsllp)
+{
+       vec_st(vull1, 24, vsllp);
+}
+void
+testst_cst5 (vector bool long long vbll1, int i1, vector bool long long * vbllp)
+{
+       vec_st(vbll1, 36, vbllp);
+}
+
+/* { dg-final { scan-assembler-times {\mstvx\M} 6 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-st-pixel.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-pixel.c
new file mode 100644
index 0000000..5b95cc7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-pixel.c
@@ -0,0 +1,22 @@
+/* Verify that overloaded built-ins for vec_st* with pixel
+   inputs produce the right code.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-mvsx -O2" } */
+
+#include <altivec.h>
+
+void
+testst_1 (vector pixel vp1, int i1, vector pixel * vpp)
+{
+       vec_st(vp1, i1, vpp);
+}
+
+void
+testst_cst1 (vector pixel vp1, int i1, vector pixel * vpp)
+{
+       vec_st(vp1, 12, vpp);
+}
+
+/* { dg-final { scan-assembler-times {\mstvx\M} 2 } } */
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-st-short.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-short.c
new file mode 100644
index 0000000..ba8397e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-st-short.c
@@ -0,0 +1,83 @@
+/* Verify that overloaded built-ins for vec_st* with short
+   inputs produce the right code.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -O2" } */
+
+#include <altivec.h>
+// vector signed short vec_ld (int, const vector signed short *);
+// void vec_st (vector signed short, int, vector signed short *);
+
+void
+testst_1 (vector signed short vss1, int i1, vector signed short * vssp)
+{
+       vec_st(vss1, i1, vssp);
+}
+void
+testst_2 (vector signed short vss1, int i1, signed short * ssp)
+{
+       vec_st(vss1, i1, ssp);
+}
+void
+testst_3 (vector unsigned short vus1, int i1, vector unsigned short * vusp)
+{
+       vec_st(vus1, i1, vusp);
+}
+void
+testst_4 (vector unsigned short vus1, int i1, unsigned short * usp)
+{
+       vec_st(vus1, i1, usp);
+}
+void
+testst_5 (vector bool short vbs1, int i1, vector bool short * vbsp)
+{
+       vec_st(vbs1, i1, vbsp);
+}
+void
+testst_6 (vector bool short vbs1, int i1, unsigned short * vusp)
+{
+       vec_st(vbs1, i1, vusp);
+}
+void
+testst_7 (vector bool short vbs1, int i1, signed short * vssp)
+{
+       vec_st(vbs1, i1, vssp);
+}
+void
+testst_cst1 (vector signed short vss1, int i1, vector signed short * vssp)
+{
+       vec_st(vss1, 12, vssp);
+}
+void
+testst_cst2 (vector signed short vss1, int i1, signed short * ssp)
+{
+       vec_st(vss1, 16, ssp);
+}
+void
+testst_cst3 (vector unsigned short vus1, int i1, vector unsigned short * vusp)
+{
+       vec_st(vus1, 20, vusp);
+}
+void
+testst_cst4 (vector unsigned short vus1, int i1, unsigned short * usp)
+{
+       vec_st(vus1, 24, usp);
+}
+void
+testst_cst5 (vector bool short vbs1, int i1, vector bool short * vbsp)
+{
+       vec_st(vbs1, 28, vbsp);
+}
+void
+testst_cst6 (vector bool short vbs1, int i1, unsigned short * vusp)
+{
+       vec_st(vbs1, 32, vusp);
+}
+void
+testst_cst7 (vector bool short vbs1, int i1, signed short * vssp)
+{
+       vec_st(vbs1, 36, vssp);
+}
+
+/* { dg-final { scan-assembler-times {\mstvx\M} 14 } } */


Reply via email to