Re: [PATCH] D24522: [CUDA] Do not merge CUDA target attributes.

2016-09-13 Thread Artem Belevich via cfe-commits
This revision was automatically updated to reflect the committed changes.
Closed by commit rL281406: [CUDA] Do not merge CUDA target attributes. 
(authored by tra).

Changed prior to commit:
  https://reviews.llvm.org/D24522?vs=71244&id=71249#toc

Repository:
  rL LLVM

https://reviews.llvm.org/D24522

Files:
  cfe/trunk/lib/Sema/SemaDecl.cpp
  cfe/trunk/test/SemaCUDA/function-overload.cu
  cfe/trunk/test/SemaCUDA/target_attr_inheritance.cu

Index: cfe/trunk/test/SemaCUDA/function-overload.cu
===
--- cfe/trunk/test/SemaCUDA/function-overload.cu
+++ cfe/trunk/test/SemaCUDA/function-overload.cu
@@ -379,3 +379,14 @@
   HostReturnTy ret3 = host_only_function(1);
   HostReturnTy2 ret4 = host_only_function(1.0f);
 }
+
+// Verify that we allow overloading function templates.
+template <typename T> __host__ T template_overload(const T &a) { return a; };
+template <typename T> __device__ T template_overload(const T &a) { return a; };
+
+__host__ void test_host_template_overload() {
+  template_overload(1); // OK. Attribute-based overloading picks __host__ variant.
+}
+__device__ void test_device_template_overload() {
+  template_overload(1); // OK. Attribute-based overloading picks __device__ variant.
+}
Index: cfe/trunk/test/SemaCUDA/target_attr_inheritance.cu
===
--- cfe/trunk/test/SemaCUDA/target_attr_inheritance.cu
+++ cfe/trunk/test/SemaCUDA/target_attr_inheritance.cu
@@ -0,0 +1,29 @@
+// Verifies correct inheritance of target attributes during template
+// instantiation and specialization.
+
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify %s
+// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fsyntax-only -fcuda-is-device -verify %s
+
+#include "Inputs/cuda.h"
+
+// Function must inherit target attributes during instantiation, but not during
+// specialization.
+template <typename T> __host__ __device__ T function_template(const T &a);
+
+// Specialized functions have their own attributes.
+// expected-note@+1 {{candidate function not viable: call to __host__ function from __device__ function}}
+template <> __host__ float function_template<float>(const float &a);
+
+// expected-note@+1 {{candidate function not viable: call to __device__ function from __host__ function}}
+template <> __device__ double function_template<double>(const double &a);
+
+__host__ void hf() {
+  function_template(1.0f); // OK. Specialization is __host__.
+  function_template(2.0); // expected-error {{no matching function for call to 'function_template'}}
+  function_template(1);   // OK. Instantiated function template is HD.
+}
+__device__ void df() {
+  function_template(3.0f); // expected-error {{no matching function for call to 'function_template'}}
+  function_template(4.0); // OK. Specialization is __device__.
+  function_template(1);   // OK. Instantiated function template is HD.
+}
Index: cfe/trunk/lib/Sema/SemaDecl.cpp
===
--- cfe/trunk/lib/Sema/SemaDecl.cpp
+++ cfe/trunk/lib/Sema/SemaDecl.cpp
@@ -2290,7 +2290,13 @@
     NewAttr = S.mergeAlwaysInlineAttr(D, AA->getRange(),
                                       &S.Context.Idents.get(AA->getSpelling()),
                                       AttrSpellingListIndex);
-  else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
+  else if (S.getLangOpts().CUDA && isa<FunctionDecl>(D) &&
+           (isa<CUDAHostAttr>(Attr) || isa<CUDADeviceAttr>(Attr) ||
+            isa<CUDAGlobalAttr>(Attr))) {
+    // CUDA target attributes are part of function signature for
+    // overloading purposes and must not be merged.
+    return false;
+  } else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
     NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex);
   else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
     NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);


Index: cfe/trunk/test/SemaCUDA/function-overload.cu
===
--- cfe/trunk/test/SemaCUDA/function-overload.cu
+++ cfe/trunk/test/SemaCUDA/function-overload.cu
@@ -379,3 +379,14 @@
   HostReturnTy ret3 = host_only_function(1);
   HostReturnTy2 ret4 = host_only_function(1.0f);
 }
+
+// Verify that we allow overloading function templates.
+template <typename T> __host__ T template_overload(const T &a) { return a; };
+template <typename T> __device__ T template_overload(const T &a) { return a; };
+
+__host__ void test_host_template_overload() {
+  template_overload(1); // OK. Attribute-based overloading picks __host__ variant.
+}
+__device__ void test_device_template_overload() {
+  template_overload(1); // OK. Attribute-based overloading picks __device__ variant.
+}
Index: cfe/trunk/test/SemaCUDA/target_attr_inheritance.cu
===
--- cfe/trunk/test/SemaCUDA/target_attr_inheritance.cu
+++ cfe/trunk/test/SemaCUDA/target_attr_inheritance.cu
@@ -0,0 +1,29 @@
+// Verifies correct inheritance of target attributes during template
+// 

Re: [PATCH] D24522: [CUDA] Do not merge CUDA target attributes.

2016-09-13 Thread Artem Belevich via cfe-commits
tra updated this revision to Diff 71244.
tra marked an inline comment as done.
tra added a comment.

Removed REQUIRED lines.


https://reviews.llvm.org/D24522

Files:
  lib/Sema/SemaDecl.cpp
  test/SemaCUDA/function-overload.cu
  test/SemaCUDA/target_attr_inheritance.cu

Index: test/SemaCUDA/target_attr_inheritance.cu
===
--- /dev/null
+++ test/SemaCUDA/target_attr_inheritance.cu
@@ -0,0 +1,29 @@
+// Verifies correct inheritance of target attributes during template
+// instantiation and specialization.
+
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify %s
+// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fsyntax-only -fcuda-is-device -verify %s
+
+#include "Inputs/cuda.h"
+
+// Function must inherit target attributes during instantiation, but not during
+// specialization.
+template <typename T> __host__ __device__ T function_template(const T &a);
+
+// Specialized functions have their own attributes.
+// expected-note@+1 {{candidate function not viable: call to __host__ function from __device__ function}}
+template <> __host__ float function_template<float>(const float &a);
+
+// expected-note@+1 {{candidate function not viable: call to __device__ function from __host__ function}}
+template <> __device__ double function_template<double>(const double &a);
+
+__host__ void hf() {
+  function_template(1.0f); // OK. Specialization is __host__.
+  function_template(2.0); // expected-error {{no matching function for call to 'function_template'}}
+  function_template(1);   // OK. Instantiated function template is HD.
+}
+__device__ void df() {
+  function_template(3.0f); // expected-error {{no matching function for call to 'function_template'}}
+  function_template(4.0); // OK. Specialization is __device__.
+  function_template(1);   // OK. Instantiated function template is HD.
+}
Index: test/SemaCUDA/function-overload.cu
===
--- test/SemaCUDA/function-overload.cu
+++ test/SemaCUDA/function-overload.cu
@@ -379,3 +379,14 @@
   HostReturnTy ret3 = host_only_function(1);
   HostReturnTy2 ret4 = host_only_function(1.0f);
 }
+
+// Verify that we allow overloading function templates.
+template <typename T> __host__ T template_overload(const T &a) { return a; };
+template <typename T> __device__ T template_overload(const T &a) { return a; };
+
+__host__ void test_host_template_overload() {
+  template_overload(1); // OK. Attribute-based overloading picks __host__ variant.
+}
+__device__ void test_device_template_overload() {
+  template_overload(1); // OK. Attribute-based overloading picks __device__ variant.
+}
Index: lib/Sema/SemaDecl.cpp
===
--- lib/Sema/SemaDecl.cpp
+++ lib/Sema/SemaDecl.cpp
@@ -2292,7 +2292,13 @@
     NewAttr = S.mergeAlwaysInlineAttr(D, AA->getRange(),
                                       &S.Context.Idents.get(AA->getSpelling()),
                                       AttrSpellingListIndex);
-  else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
+  else if (S.getLangOpts().CUDA && isa<FunctionDecl>(D) &&
+           (isa<CUDAHostAttr>(Attr) || isa<CUDADeviceAttr>(Attr) ||
+            isa<CUDAGlobalAttr>(Attr))) {
+    // CUDA target attributes are part of function signature for
+    // overloading purposes and must not be merged.
+    return false;
+  } else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
     NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex);
   else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
     NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);


Index: test/SemaCUDA/target_attr_inheritance.cu
===
--- /dev/null
+++ test/SemaCUDA/target_attr_inheritance.cu
@@ -0,0 +1,29 @@
+// Verifies correct inheritance of target attributes during template
+// instantiation and specialization.
+
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify %s
+// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fsyntax-only -fcuda-is-device -verify %s
+
+#include "Inputs/cuda.h"
+
+// Function must inherit target attributes during instantiation, but not during
+// specialization.
+template <typename T> __host__ __device__ T function_template(const T &a);
+
+// Specialized functions have their own attributes.
+// expected-note@+1 {{candidate function not viable: call to __host__ function from __device__ function}}
+template <> __host__ float function_template<float>(const float &a);
+
+// expected-note@+1 {{candidate function not viable: call to __device__ function from __host__ function}}
+template <> __device__ double function_template<double>(const double &a);
+
+__host__ void hf() {
+  function_template(1.0f); // OK. Specialization is __host__.
+  function_template(2.0); // expected-error {{no matching function for call to 'function_template'}}
+  function_template(1);   // OK. Instantiated function template is HD.
+}
+__device__ void df() {
+  function_template(3.0f); // expected-error {{no matching function for call to 

Re: [PATCH] D24522: [CUDA] Do not merge CUDA target attributes.

2016-09-13 Thread Justin Lebar via cfe-commits
jlebar accepted this revision.
jlebar added a comment.
This revision is now accepted and ready to land.

Yay, this is great.



Comment at: test/SemaCUDA/target_attr_inheritance.cu:4
@@ +3,3 @@
+// REQUIRES: x86-registered-target
+// REQUIRES: nvptx-registered-target
+

Other reviewers have pointed out to me that we don't usually (ever?) need this. 
 I think these have to do with llvm's ability to generate code for our targets, 
but it's not relevant to clang here.


https://reviews.llvm.org/D24522



___
cfe-commits mailing list
cfe-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits