Re: [PATCH] D17979: [OPENMP] Add regression test for codegen of distribute pragma for NVPTX

2016-09-26 Thread Eugene Zelenko via cfe-commits
Eugene.Zelenko added a subscriber: Eugene.Zelenko.
Eugene.Zelenko added a comment.

Looks like patch was not committed.


Repository:
  rL LLVM

https://reviews.llvm.org/D17979



___
cfe-commits mailing list
cfe-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


Re: [PATCH] D17979: [OPENMP] Add regression test for codegen of distribute pragma for NVPTX

2016-03-08 Thread Alexey Bataev via cfe-commits
ABataev accepted this revision.
ABataev added a comment.
This revision is now accepted and ready to land.

LG


Repository:
  rL LLVM

http://reviews.llvm.org/D17979



___
cfe-commits mailing list
cfe-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits


[PATCH] D17979: [OPENMP] Add regression test for codegen of distribute pragma for NVPTX

2016-03-08 Thread Carlo Bertolli via cfe-commits
carlo.bertolli created this revision.
carlo.bertolli added reviewers: ABataev, fraggamuffin, kkwli0.
carlo.bertolli added subscribers: sfantao, arpith-jacob, caomhin, cfe-commits.
carlo.bertolli set the repository for this revision to rL LLVM.

The support for distribute pragma is the same on host and NVPTX. However, the 
generated code looks different from an interface perspective (e.g. parameters to 
offloading function have different type on host and NVPTX). I added a 
regression test that checks for the distinct NVPTX generated code.

Repository:
  rL LLVM

http://reviews.llvm.org/D17979

Files:
  test/OpenMP/nvptx_distribute_codegen.cpp

Index: test/OpenMP/nvptx_distribute_codegen.cpp
===
--- /dev/null
+++ test/OpenMP/nvptx_distribute_codegen.cpp
@@ -0,0 +1,233 @@
+// Test device codegen.
+
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -omptargets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -omptargets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -omp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
+// RXUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -omptargets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
+// RXUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -omptargets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -omp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
+
+
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+// CHECK-DAG: %ident_t = type { i32, i32, i32, i32, i8* }
+// CHECK-DAG: [[STR:@.+]] = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00"
+// CHECK-DAG: [[DEF_LOC_0:@.+]] = private unnamed_addr constant %ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* [[STR]], i32 0, i32 0) }
+
+void without_schedule_clause(float *a, float *b, float *c, float *d) {
+  #pragma omp target
+  #pragma omp teams
+  #pragma omp distribute
+  for (int i = 33; i < 3200; i += 7) {
+a[i] = b[i] * c[i] * d[i];
+  }
+}
+
+// CHECK:  define void @__omp_offloading_{{.+}}without_schedule_clause{{.+}}(float* [[APTR:%.+]], float* [[BPTR:%.+]], float* [[CPTR:%.+]], float* [[DPTR:%.+]])
+// skip address vars allocation
+// CHECK:  {{%.+}} = alloca {{.+}},
+// CHECK:  {{%.+}} = alloca {{.+}},
+// CHECK:  {{%.+}} = alloca {{.+}},
+// CHECK:  {{%.+}} = alloca {{.+}},
+// CHECK:  [[IV:%.+iv]] = alloca i32,
+// CHECK:  [[LB:%.+lb]] = alloca i32,
+// CHECK:  [[UB:%.+ub]] = alloca i32,
+// CHECK:  [[ST:%.+stride]] = alloca i32,
+// CHECK:  [[LAST:%.+last]] = alloca i32,
+// CHECK:  [[I:%.+]] = alloca i32,
+// CHECK:  [[GBL_TIDV:%.+]] = call i32 @__kmpc_global_thread_num(%ident_t* [[DEF_LOC_0]])
+// CHECK-DAG:  store i32 0, i32* [[LB]]
+// CHECK-DAG:  store i32 4571423, i32* [[UB]]
+// CHECK-DAG:  store i32 1, i32* [[ST]]
+// CHECK-DAG:  store i32 0, i32* [[LAST]]
+// CHECK:  call void @__kmpc_for_static_init_{{.+}}(%ident_t* [[DEF_LOC_0]], i32 [[GBL_TIDV]], i32 92, i32* %.omp.is_last, i32* %.omp.lb, i32* %.omp.ub, i32* %.omp.stride, i32 1, i32 1)
+// CHECK-DAG:  [[UBV0:%.+]] = load i32, i32* [[UB]]
+// CHECK-DAG:  [[USWITCH:%.+]] = icmp sgt i32 [[UBV0]], 4571423
+// CHECK:  br i1 [[USWITCH]], label %[[BBCT:.+]], label %[[BBCF:.+]]
+// CHECK-DAG:  [[BBCT]]:
+// CHECK-DAG:  br label %[[BBCE:.+]]
+// CHECK-DAG:  [[BBCF]]:
+// CHECK-DAG:  [[UBV1:%.+]] = load i32, i32* [[UB]]
+// CHECK-DAG:  br label %[[BBCE]]
+// CHECK:  [[BBCE]]:
+// CHECK:  [[SELUB:%.+]] = phi i32 [ 4571423, %[[BBCT]] ], [ [[UBV1]], %[[BBCF]] ]
+// CHECK:  store i32 [[SELUB]], i32* [[UB]]
+// CHECK:  [[LBV0:%.+]] = load i32, i32* [[LB]]
+// CHECK:  store i32 [[LBV0]], i32* [[IV]]
+// CHECK:  br label %[[BBINNFOR:.+]]
+// CHECK:  [[BBINNFOR]]:
+// CHECK:  [[IVVAL0:%.+]] = load i32, i32* [[IV]]
+// CHECK:  [[UBV2:%.+]] = load i32, i32* [[UB]]
+// CHECK:  [[IVLEUB:%.+]] = icmp sle i32 [[IVVAL0]], [[UBV2]]
+// CHECK:  br i1 [[IVLEUB]], label %[[BBINNBODY:.+]], label %[[BBINNEND:.+]]
+// CHECK:  [[BBINNBODY]]:
+// CHECK:  {{.+}} = load i32, i32* [[IV]]
+// ... loop body ...
+// CHECK:  br label %[[BBBODYCONT:.+]]
+// CHECK:  [[BBBODYCONT]]:
+// CHECK:  br label %[[BBINNINC:.+]]
+// CHECK:  [[BBINNINC]]:
+// CHECK:  [[IVVAL1:%.+]] = load i32, i32* [[IV]]
+// CHECK:  [[IVINC:%.+]] = add nsw i32 [[IVVAL1]], 1
+// CHECK:  store i32 [[IVINC]], i32* [[IV]]
+// CHECK:  br label %[[BBINNFOR]]
+// CHECK:  [[BBINNEND]]:
+// CHECK:  br label %[[LPEXIT:.+]]
+// CHECK:  [[LPEXIT]]:
+// CHECK:  call void @__kmpc_for_static_fini(%ident_t* [[DEF_LOC_0]], i32 [[GBL_TIDV]])
+// CHECK:  ret void
+
+
+void static_not_chunked(float *a, float *b, float *c, float *d) {
+  #pragma omp target
+  #pragma omp teams
+  #pragma omp distribute dist_schedule(static)
+  for (int i