llvmbot wrote:

<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-clang

@llvm/pr-subscribers-clang-codegen

Author: Alexander Shaposhnikov (alexander-shaposhnikov)

<details>
<summary>Changes</summary>

Add support for the optional SPIR-V kernel attributes `reqd_work_group_size`, `work_group_size_hint`, `vec_type_hint`, and `intel_reqd_sub_group_size` on CUDA `__global__` functions when targeting SPIR-V.
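
For example, with this change a CUDA kernel compiled for a SPIR-V device triple can carry the OpenCL-style kernel attributes, and CodeGen lowers them to the usual kernel metadata (a minimal sketch mirroring the new tests; the kernel name is hypothetical):

```cuda
// Compile as CUDA device code for spirv32/spirv64, e.g. via the cc1 RUN
// lines in the test below. __global__ comes from the CUDA headers (the
// codegen test defines it as __attribute__((global)) directly).
__attribute__((reqd_work_group_size(128, 1, 1)))
__global__ void kernel() {}
// Expected IR shape (see the CHECK lines in the diff):
//   define spir_kernel void @... !reqd_work_group_size !N
//   !N = !{i32 128, i32 1, i32 1}
```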

Test plan:
ninja check-all

---
Full diff: https://github.com/llvm/llvm-project/pull/116589.diff


4 Files Affected:

- (modified) clang/lib/CodeGen/CodeGenFunction.cpp (+5-1) 
- (modified) clang/lib/Sema/SemaDeclAttr.cpp (+3-1) 
- (added) clang/test/CodeGenCUDASPIRV/spirv-attrs.cu (+28) 
- (added) clang/test/SemaCUDA/spirv-attrs.cu (+18) 


``````````diff
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 6a2f82f9e13906..ed7fdb6cb72aa6 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -635,7 +635,9 @@ void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
 
   CGM.GenKernelArgMetadata(Fn, FD, this);
 
-  if (!getLangOpts().OpenCL)
+  if (!(getLangOpts().OpenCL ||
+        (getLangOpts().CUDA &&
+         getContext().getTargetInfo().getTriple().isSPIRV())))
     return;
 
   if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
@@ -1022,6 +1024,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
   }
 
   if (FD && (getLangOpts().OpenCL ||
+             (getLangOpts().CUDA &&
+              getContext().getTargetInfo().getTriple().isSPIRV()) ||
              ((getLangOpts().HIP || getLangOpts().OffloadViaLLVM) &&
               getLangOpts().CUDAIsDevice))) {
     // Add metadata for a kernel function.
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 0f5baa1e1eb365..146d9c86e0715a 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -7368,7 +7368,9 @@ void Sema::ProcessDeclAttributeList(
   // good to have a way to specify "these attributes must appear as a group",
   // for these. Additionally, it would be good to have a way to specify "these
   // attribute must never appear as a group" for attributes like cold and hot.
-  if (!D->hasAttr<OpenCLKernelAttr>()) {
+  if (!(D->hasAttr<OpenCLKernelAttr>() ||
+        (D->hasAttr<CUDAGlobalAttr>() &&
+         Context.getTargetInfo().getTriple().isSPIRV()))) {
     // These attributes cannot be applied to a non-kernel function.
     if (const auto *A = D->getAttr<ReqdWorkGroupSizeAttr>()) {
       // FIXME: This emits a different error message than
diff --git a/clang/test/CodeGenCUDASPIRV/spirv-attrs.cu b/clang/test/CodeGenCUDASPIRV/spirv-attrs.cu
new file mode 100644
index 00000000000000..528d2cd60a3547
--- /dev/null
+++ b/clang/test/CodeGenCUDASPIRV/spirv-attrs.cu
@@ -0,0 +1,28 @@
+// RUN: %clang_cc1 -fcuda-is-device -triple spirv64 -o - -emit-llvm -x cuda %s | FileCheck %s
+// RUN: %clang_cc1 -fcuda-is-device -triple spirv32 -o - -emit-llvm -x cuda %s | FileCheck %s
+
+#define __global__ __attribute__((global))
+
+__attribute__((reqd_work_group_size(128, 1, 1)))
+__global__ void reqd_work_group_size_128_1_1() {}
+// CHECK: define spir_kernel void @_Z28reqd_work_group_size_128_1_1v() #[[ATTR:[0-9]+]] !reqd_work_group_size ![[SIZE_128:.*]]
+
+__attribute__((work_group_size_hint(2, 2, 2)))
+__global__ void work_group_size_hint_2_2_2() {}
+// CHECK: define spir_kernel void @_Z26work_group_size_hint_2_2_2v() #[[ATTR]] !work_group_size_hint ![[HINT_2:.*]]
+
+__attribute__((vec_type_hint(int)))
+__global__ void vec_type_hint_int() {}
+// CHECK: define spir_kernel void @_Z17vec_type_hint_intv() #[[ATTR]] !vec_type_hint ![[VEC_HINT:.*]]
+
+__attribute__((intel_reqd_sub_group_size(64)))
+__global__ void intel_reqd_sub_group_size_64() {}
+// CHECK: define spir_kernel void @_Z28intel_reqd_sub_group_size_64v() #[[ATTR]] !intel_reqd_sub_group_size ![[SUB_GROUP:.*]]
+
+// CHECK: attributes #[[ATTR]] = { convergent mustprogress noinline norecurse nounwind optnone {{.*}} }
+
+// CHECK: ![[SIZE_128]] = !{i32 128, i32 1, i32 1}
+// CHECK: ![[HINT_2]] = !{i32 2, i32 2, i32 2}
+// CHECK: ![[VEC_HINT]] = !{i32 undef, i32 1}
+// CHECK: ![[SUB_GROUP]] = !{i32 64}
+
diff --git a/clang/test/SemaCUDA/spirv-attrs.cu b/clang/test/SemaCUDA/spirv-attrs.cu
new file mode 100644
index 00000000000000..6539421423ee11
--- /dev/null
+++ b/clang/test/SemaCUDA/spirv-attrs.cu
@@ -0,0 +1,18 @@
+// expected-no-diagnostics
+
+// RUN: %clang_cc1 -triple spirv64 -aux-triple x86_64-unknown-linux-gnu \
+// RUN:   -fcuda-is-device -verify -fsyntax-only %s
+
+#include "Inputs/cuda.h"
+
+__attribute__((reqd_work_group_size(128, 1, 1)))
+__global__ void reqd_work_group_size_128_1_1() {}
+
+__attribute__((work_group_size_hint(2, 2, 2)))
+__global__ void work_group_size_hint_2_2_2() {}
+
+__attribute__((vec_type_hint(int)))
+__global__ void vec_type_hint_int() {}
+
+__attribute__((intel_reqd_sub_group_size(64)))
+__global__ void intel_reqd_sub_group_size_64() {}

``````````
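
Note on the Sema side: these attributes were previously diagnosed on any declaration without `OpenCLKernelAttr` ("cannot be applied to a non-kernel function"); the `SemaDeclAttr.cpp` change also exempts declarations carrying `CUDAGlobalAttr` when the target triple is SPIR-V. So a snippet like the following (a hypothetical example in the spirit of the new SemaCUDA test) now compiles cleanly:

```cuda
// Previously rejected outside OpenCL; accepted with -triple spirv64
// -fcuda-is-device after this patch.
__attribute__((intel_reqd_sub_group_size(64)))
__global__ void sub_group_kernel() {}
```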

</details>


https://github.com/llvm/llvm-project/pull/116589