llvmbot wrote:


@llvm/pr-subscribers-backend-amdgpu

Author: Sameer Sahasrabuddhe (ssahasra)

<details>
<summary>Changes</summary>

The existing "LDS DMA" builtins/intrinsics copy data from global/buffer pointer 
to LDS. These are now augmented with their ".async" version, where the compiler 
does not automatically track completion. The completion is now tracked using 
explicit mark/wait intrinsics, which must be inserted by the user. This makes 
it possible to write programs with efficient waits in software pipeline loops. 
The program can now wait for only the oldest outstanding operations to finish, 
while launching more operations for later use.

This change only adds the new builtins/intrinsics, which for now continue to behave exactly like their non-async counterparts. A later change will implement the actual mark/wait semantics in SIInsertWaitcnts.
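
For illustration, here is a minimal sketch (not part of this patch) of how the pieces are meant to compose, following the semantics described in the new `AMDGPUAsyncOperations.rst`; the kernel shape, sizes and offsets are made up:

```llvm
; Issue two batches of async global->LDS copies, then wait until at most one
; mark is still outstanding, i.e. the first batch is known to be complete.
; Note: with this patch alone the intrinsics still behave like their
; non-async counterparts; the mark/wait semantics land in a later change.
define amdgpu_kernel void @example(ptr addrspace(1) %src, ptr addrspace(3) %dst) {
entry:
  ; first batch (size=4, offset=0, aux=0); real code would target distinct LDS regions
  call void @llvm.amdgcn.global.load.async.lds(ptr addrspace(1) %src, ptr addrspace(3) %dst, i32 4, i32 0, i32 0)
  call void @llvm.amdgcn.asyncmark()

  ; second batch
  call void @llvm.amdgcn.global.load.async.lds(ptr addrspace(1) %src, ptr addrspace(3) %dst, i32 4, i32 0, i32 0)
  call void @llvm.amdgcn.asyncmark()

  ; at most one outstanding mark may remain: the first batch has completed
  call void @llvm.amdgcn.wait.asyncmark(i16 1)
  ret void
}

declare void @llvm.amdgcn.global.load.async.lds(ptr addrspace(1), ptr addrspace(3), i32 immarg, i32 immarg, i32 immarg)
declare void @llvm.amdgcn.asyncmark()
declare void @llvm.amdgcn.wait.asyncmark(i16)
```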

This is part of a stack split out from #173259

Fixes: SWDEV-521121

---

Patch is 96.51 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/180466.diff


29 Files Affected:

- (modified) clang/include/clang/Basic/BuiltinsAMDGPU.td (+4) 
- (modified) clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp (+5) 
- (modified) clang/lib/Sema/SemaAMDGPU.cpp (+5-1) 
- (modified) clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl (+12) 
- (modified) clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl (+12) 
- (modified) clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl (+4-2) 
- (added) llvm/docs/AMDGPUAsyncOperations.rst (+238) 
- (modified) llvm/docs/AMDGPUUsage.rst (+8) 
- (modified) llvm/docs/UserGuides.rst (+4) 
- (modified) llvm/include/llvm/IR/IntrinsicsAMDGPU.td (+15-8) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPU.td (+1-2) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp (+23) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp (+8-3) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp (+15-5) 
- (modified) llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp (+2) 
- (modified) llvm/lib/Target/AMDGPU/BUFInstructions.td (+13-11) 
- (modified) llvm/lib/Target/AMDGPU/FLATInstructions.td (+13-12) 
- (modified) llvm/lib/Target/AMDGPU/SIISelLowering.cpp (+38-5) 
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.td (+2) 
- (added) llvm/test/CodeGen/AMDGPU/async-buffer-loads.ll (+91) 
- (added) llvm/test/CodeGen/AMDGPU/asyncmark-pregfx12.ll (+444) 
- (modified) llvm/test/CodeGen/AMDGPU/hazard-flat-instruction-valu-check.mir (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir (+10-10) 
- (modified) llvm/test/CodeGen/AMDGPU/lds-dma-hazards.mir (+2-2) 
- (modified) llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir (+9-9) 
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.lds.err.ll (+24) 
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll (+21) 
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll (+37) 
- (modified) llvm/test/CodeGen/AMDGPU/sched.group.classification.mir (+4-4) 


``````````diff
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index 740d136f465c1..7eb6cece7c55b 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -190,7 +190,9 @@ def __builtin_amdgcn_raw_ptr_buffer_atomic_fmin_f64 : AMDGPUBuiltin<"double(doub
 def __builtin_amdgcn_raw_ptr_buffer_atomic_fmax_f64 : AMDGPUBuiltin<"double(double, __amdgpu_buffer_rsrc_t, int, int, _Constant int)", [], "atomic-fmin-fmax-global-f64">;
 
 def __builtin_amdgcn_raw_ptr_buffer_load_lds : AMDGPUBuiltin<"void(__amdgpu_buffer_rsrc_t, void address_space<3> *, _Constant unsigned int, int, int, _Constant int, _Constant int)", [], "vmem-to-lds-load-insts">;
+def __builtin_amdgcn_raw_ptr_buffer_load_async_lds : AMDGPUBuiltin<"void(__amdgpu_buffer_rsrc_t, void address_space<3> *, _Constant unsigned int, int, int, _Constant int, _Constant int)", [], "vmem-to-lds-load-insts">;
 def __builtin_amdgcn_struct_ptr_buffer_load_lds : AMDGPUBuiltin<"void(__amdgpu_buffer_rsrc_t, void address_space<3> *, _Constant unsigned int, int, int, int, _Constant int, _Constant int)", [], "vmem-to-lds-load-insts">;
+def __builtin_amdgcn_struct_ptr_buffer_load_async_lds : AMDGPUBuiltin<"void(__amdgpu_buffer_rsrc_t, void address_space<3> *, _Constant unsigned int, int, int, int, _Constant int, _Constant int)", [], "vmem-to-lds-load-insts">;
 
 //===----------------------------------------------------------------------===//
 // Ballot builtins.
@@ -288,7 +290,9 @@ def __builtin_amdgcn_global_atomic_fadd_v2bf16 : AMDGPUBuiltin<"_ExtVector<2, sh
 def __builtin_amdgcn_ds_atomic_fadd_v2bf16 : AMDGPUBuiltin<"_ExtVector<2, short>(_ExtVector<2, short> address_space<3> *, _ExtVector<2, short>)", [CustomTypeChecking], "atomic-ds-pk-add-16-insts">;
 def __builtin_amdgcn_ds_atomic_fadd_v2f16 : AMDGPUBuiltin<"_ExtVector<2, _Float16>(_ExtVector<2, _Float16> address_space<3> *, _ExtVector<2, _Float16>)", [CustomTypeChecking], "atomic-ds-pk-add-16-insts">;
 def __builtin_amdgcn_load_to_lds : AMDGPUBuiltin<"void(void *, void address_space<3> *, _Constant unsigned int, _Constant int, _Constant unsigned int)", [], "vmem-to-lds-load-insts">;
+def __builtin_amdgcn_load_async_to_lds : AMDGPUBuiltin<"void(void *, void address_space<3> *, _Constant unsigned int, _Constant int, _Constant unsigned int)", [], "vmem-to-lds-load-insts">;
 def __builtin_amdgcn_global_load_lds : AMDGPUBuiltin<"void(void address_space<1> *, void address_space<3> *, _Constant unsigned int, _Constant int, _Constant unsigned int)", [], "vmem-to-lds-load-insts">;
+def __builtin_amdgcn_global_load_async_lds : AMDGPUBuiltin<"void(void address_space<1> *, void address_space<3> *, _Constant unsigned int, _Constant int, _Constant unsigned int)", [], "vmem-to-lds-load-insts">;
 
 //===----------------------------------------------------------------------===//
 // Deep learning builtins.
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index a096ed27a788e..006ff1e1f658f 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -850,6 +850,11 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
     return emitBuiltinWithOneOverloadedType<5>(*this, E,
                                                Intrinsic::amdgcn_load_to_lds);
   }
+  case AMDGPU::BI__builtin_amdgcn_load_async_to_lds: {
+    // Should this have asan instrumentation?
+    return emitBuiltinWithOneOverloadedType<5>(
+        *this, E, Intrinsic::amdgcn_load_async_to_lds);
+  }
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_32x4B:
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_32x4B:
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_16x8B:
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index 4261e1849133f..9ec40bf06b744 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -38,9 +38,13 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
 
   switch (BuiltinID) {
   case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_lds:
+  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_async_lds:
   case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_lds:
+  case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_async_lds:
   case AMDGPU::BI__builtin_amdgcn_load_to_lds:
-  case AMDGPU::BI__builtin_amdgcn_global_load_lds: {
+  case AMDGPU::BI__builtin_amdgcn_load_async_to_lds:
+  case AMDGPU::BI__builtin_amdgcn_global_load_lds:
+  case AMDGPU::BI__builtin_amdgcn_global_load_async_lds: {
     constexpr const int SizeIdx = 2;
     llvm::APSInt Size;
     Expr *ArgExpr = TheCall->getArg(SizeIdx);
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl
index 62c8deb6e4a89..e7c81b000a8f0 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl
@@ -19,10 +19,14 @@ typedef unsigned char u8;
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 4, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.global.load.async.lds(ptr addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 4, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_global_load_lds_u32(global u32* src, local u32 *dst) {
   __builtin_amdgcn_global_load_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_global_load_async_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
 }
 
 // CHECK-LABEL: @test_global_load_lds_u16(
@@ -36,10 +40,14 @@ void test_global_load_lds_u32(global u32* src, local u32 *dst) {
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 2, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.global.load.async.lds(ptr addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 2, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_global_load_lds_u16(global u16* src, local u16 *dst) {
   __builtin_amdgcn_global_load_lds(src, dst, /*size=*/2, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_global_load_async_lds(src, dst, /*size=*/2, /*offset=*/0, /*aux=*/0);
 }
 
 // CHECK-LABEL: @test_global_load_lds_u8(
@@ -53,8 +61,12 @@ void test_global_load_lds_u16(global u16* src, local u16 *dst) {
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 1, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.global.load.async.lds(ptr addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 1, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_global_load_lds_u8(global u8* src, local u8 *dst) {
   __builtin_amdgcn_global_load_lds(src, dst, /*size=*/1, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_global_load_async_lds(src, dst, /*size=*/1, /*offset=*/0, /*aux=*/0);
 }
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
index 6cdedb33bdd80..cc944204446ae 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
@@ -19,10 +19,14 @@ typedef unsigned char u8;
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 4, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.async.to.lds.p1(ptr addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 4, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_load_to_lds_u32(global u32* src, local u32 *dst) {
   __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_load_async_to_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
 }
 
 // CHECK-LABEL: @test_load_to_lds_u16(
@@ -36,10 +40,14 @@ void test_load_to_lds_u32(global u32* src, local u32 *dst) {
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 2, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.async.to.lds.p1(ptr addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 2, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_load_to_lds_u16(global u16* src, local u16 *dst) {
   __builtin_amdgcn_load_to_lds(src, dst, /*size=*/2, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_load_async_to_lds(src, dst, /*size=*/2, /*offset=*/0, /*aux=*/0);
 }
 
 // CHECK-LABEL: @test_load_to_lds_u8(
@@ -53,8 +61,12 @@ void test_load_to_lds_u16(global u16* src, local u16 *dst) {
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) [[TMP0]], ptr addrspace(3) [[TMP1]], i32 1, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr [[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr [[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.async.to.lds.p1(ptr addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 1, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_load_to_lds_u8(global u8* src, local u8 *dst) {
   __builtin_amdgcn_load_to_lds(src, dst, /*size=*/1, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_load_async_to_lds(src, dst, /*size=*/1, /*offset=*/0, /*aux=*/0);
 }
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl
index 177165972b7a9..144cc7599bb5e 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl
@@ -5,17 +5,19 @@
 // CHECK-LABEL: @test_amdgcn_raw_ptr_buffer_load_lds(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    tail call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) [[RSRC:%.*]], ptr addrspace(3) [[LDS:%.*]], i32 1, i32 [[OFFSET:%.*]], i32 [[SOFFSET:%.*]], i32 2, i32 3)
+// CHECK-NEXT:    tail call void @llvm.amdgcn.raw.ptr.buffer.load.async.lds(ptr addrspace(8) [[RSRC]], ptr addrspace(3) [[LDS]], i32 1, i32 [[OFFSET]], i32 [[SOFFSET]], i32 2, i32 3)
 // CHECK-NEXT:    ret void
 //
 void test_amdgcn_raw_ptr_buffer_load_lds(__amdgpu_buffer_rsrc_t rsrc, __local void * lds, int offset, int soffset) {
     __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, lds, 1, offset, soffset, 2, 3);
+    __builtin_amdgcn_raw_ptr_buffer_load_async_lds(rsrc, lds, 1, offset, soffset, 2, 3);
 }
 
 // CHECK-LABEL: @test_amdgcn_struct_ptr_buffer_load_lds(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    tail call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) [[RSRC:%.*]], ptr addrspace(3) [[LDS:%.*]], i32 4, i32 [[VINDEX:%.*]], i32 [[VOFFSET:%.*]], i32 [[SOFFSET:%.*]], i32 2, i32 3)
+// CHECK-NEXT:    tail call void @llvm.amdgcn.struct.ptr.buffer.load.async.lds(ptr addrspace(8) [[RSRC:%.*]], ptr addrspace(3) [[LDS:%.*]], i32 4, i32 [[VINDEX:%.*]], i32 [[VOFFSET:%.*]], i32 [[SOFFSET:%.*]], i32 2, i32 3)
 // CHECK-NEXT:    ret void
 //
 void test_amdgcn_struct_ptr_buffer_load_lds(__amdgpu_buffer_rsrc_t rsrc, __local void * lds, int size, int vindex, int voffset, int soffset) {
-    __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, lds, 4, vindex, voffset, soffset, 2, 3);
+    __builtin_amdgcn_struct_ptr_buffer_load_async_lds(rsrc, lds, 4, vindex, voffset, soffset, 2, 3);
 }
diff --git a/llvm/docs/AMDGPUAsyncOperations.rst b/llvm/docs/AMDGPUAsyncOperations.rst
new file mode 100644
index 0000000000000..a55b4d94a5e7a
--- /dev/null
+++ b/llvm/docs/AMDGPUAsyncOperations.rst
@@ -0,0 +1,238 @@
+.. _amdgpu-async-operations:
+
+===============================
+ AMDGPU Asynchronous Operations
+===============================
+
+.. contents::
+   :local:
+
+Introduction
+============
+
+Asynchronous operations are memory transfers (usually between the global memory
+and LDS) that are completed independently at an unspecified scope. A thread that
+requests one or more asynchronous transfers can use *async marks* to track
+their completion. The thread waits for each mark to be *completed*, which
+indicates that requests initiated in program order before this mark have also
+completed.
+
+Operations
+==========
+
+Memory Accesses
+---------------
+
+LDS DMA Operations
+^^^^^^^^^^^^^^^^^^
+
+.. code-block:: llvm
+
+  ; "Legacy" LDS DMA operations
+  void @llvm.amdgcn.load.async.to.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.global.load.async.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.raw.buffer.load.async.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.raw.ptr.buffer.load.async.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.struct.buffer.load.async.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.struct.ptr.buffer.load.async.lds(ptr %src, ptr %dst)
+
+Request an async operation that copies the specified number of bytes from the
+global/buffer pointer ``%src`` to the LDS pointer ``%dst``.
+
+.. note::
+
+   The above listing is *merely representative*. The actual function signatures
+   are identical to their non-async variants, and supported only on the
+   corresponding architectures (GFX9 and GFX10).
+
+Async Mark Operations
+---------------------
+
+An *async mark* in the abstract machine tracks all the async operations that
+are program ordered before that mark. A mark M is said to be *completed*
+only when all async operations program ordered before M are reported by the
+implementation as having finished, and it is said to be *outstanding* otherwise.
+
+Thus we have the following sufficient condition:
+
+  An async operation X is *completed* at a program point P if there exists a
+  mark M such that X is program ordered before M, M is program ordered before
+  P, and M is completed. X is said to be *outstanding* at P otherwise.
+
+The abstract machine maintains a sequence of *async marks* during the
+execution of a function body, which excludes any marks produced by calls to
+other functions encountered in the currently executing function.
+
+
+``@llvm.amdgcn.asyncmark()``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When executed, inserts an async mark in the sequence associated with the
+currently executing function body.
+
+``@llvm.amdgcn.wait.asyncmark(i16 %N)``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Waits until there are at most N outstanding marks in the sequence associated
+with the currently executing function body.
+
+Memory Consistency Model
+========================
+
+Each asynchronous operation consists of a non-atomic read on the source and a
+non-atomic write on the destination. Async "LDS DMA" intrinsics result in async
+accesses that guarantee visibility relative to other memory operations as
+follows:
+
+  An asynchronous operation `A` program ordered before an overlapping memory
+  operation `X` happens-before `X` only if `A` is completed before `X`.
+
+  A memory operation `X` program ordered before an overlapping asynchronous
+  operation `A` happens-before `A`.
+
+.. note::
+
+   The *only if* in the above wording implies that unlike the default LLVM
+   memory model, certain program order edges are not automatically included in
+   ``happens-before``.
+
+Examples
+========
+
+Uneven blocks of async transfers
+--------------------------------
+
+.. code-block:: c++
+
+   void foo(global int *g, local int *l) {
+     // first block
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     asyncmark();
+
+     // second block; longer
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     asyncmark();
+
+     // third block; shorter
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     asyncmark();
+
+     // Wait for first block
+     wait.asyncmark(2);
+   }
+
+Software pipeline
+-----------------
+
+.. code-block:: c++
+
+   void foo(global int *g, local int *l) {
+     // first block
+     asyncmark();
+
+     // second block
+     asyncmark();
+
+     // third block
+     asyncmark();
+
+     for (;;) {
+       wait.asyncmark(2);
+       // use data
+
+       // next block
+       asyncmark();
+     }
+
+     // flush one block
+     wait.asyncmark(2);
+
+     // flush one more block
+     wait.asyncmark(1);
+
+     // flush last block
+     wait.asyncmark(0);
+   }
+
+Ordinary function call
+----------------------
+
+.. code-block:: c++
+
+   extern void bar(); // may or may not make async calls
+
+   void foo(global int *g, local int *l) {
+       // first block
+       asyncmark();
+
+       // second block
+       asyncmark();
+
+       // function call
+       bar();
+
+       // third block
+       asyncmark();
+
+       wait.asyncmark(1); // will wait for at least the second block, possibly including bar()
+       wait.asyncmark(0); // will wait for third block, including bar()
+   }
+
+Implementation notes
+====================
+
+[This section is informational.]
+
+Optimization
+------------
+
+The implementation may eliminate async mark/wait intrinsics in the following cases:
+
+1. An ``asyncmark`` operation which is not included in the wait count of a later
+   wait operation in the current function. In particular, an ``asyncmark`` which
+   is not post-dominated by any ``wait.asyncmark``.
+2. A ``wait.asyncmark`` whose wait count is more than the outstanding async
+   marks at that point. In particular, a ``wait.asyncmark`` that is not
+   dominated by any ``asyncmark``.
+
+In general, at a function call, if the caller uses sufficient waits to track
+its own async operations, the actions performed by the callee cannot affect
+correctness. But inlining such a call may result in redundant waits.
+
+.. code-block:: c++
+
+   void foo() {
+     asyncmark(); // A
+   }
+
+   void bar() {
+     asyncmark(); // B
+     asyncmark(); // C
+     foo();
+     wait.asyncmark(1);
+   }
+
+Before inlining, the ``wait.asyncmark`` waits for mark B to be completed.
+
+.. code-block:: c++
+
+   void foo() {
+   }
+
+   void bar() {
+     asyncmark(); // B
+     asyncmark(); // C
+     asyncmark(); // A from call to foo()
+     wait.asyncmark(1);
+   }
+
+After inlining, the asyncmark-wait now waits for mark C to complete, which is
+longer than necessary. Ideally, the optimizer should have eliminated mark A in
+the body of foo() itself.
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 1282dcf98727a..668476450cb2e 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AM...
[truncated]

``````````
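
The software-pipeline example from the new `AMDGPUAsyncOperations.rst`, expressed directly with the intrinsics as a rough LLVM IR sketch (the async copies and the data uses inside each block are elided):

```llvm
; Keep up to three blocks of async copies in flight. Each iteration waits
; until at most two marks are outstanding, so the oldest block's data is
; safe to use, then issues and marks the next block.
define amdgpu_kernel void @pipeline(i32 %n) {
entry:
  ; prologue: issue three blocks of async copies (elided), marking each one
  call void @llvm.amdgcn.asyncmark()
  call void @llvm.amdgcn.asyncmark()
  call void @llvm.amdgcn.asyncmark()
  br label %loop

loop:
  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  call void @llvm.amdgcn.wait.asyncmark(i16 2) ; oldest block is complete
  ; ... use the completed block's data, issue the next block (elided) ...
  call void @llvm.amdgcn.asyncmark()
  %i.next = add i32 %i, 1
  %cont = icmp slt i32 %i.next, %n
  br i1 %cont, label %loop, label %epilogue

epilogue:
  ; drain the remaining blocks, one wait per block
  call void @llvm.amdgcn.wait.asyncmark(i16 2)
  call void @llvm.amdgcn.wait.asyncmark(i16 1)
  call void @llvm.amdgcn.wait.asyncmark(i16 0)
  ret void
}

declare void @llvm.amdgcn.asyncmark()
declare void @llvm.amdgcn.wait.asyncmark(i16)
```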

</details>


https://github.com/llvm/llvm-project/pull/180466