https://github.com/ssahasra created 
https://github.com/llvm/llvm-project/pull/180466

The existing "LDS DMA" builtins/intrinsics copy data from global/buffer pointer 
to LDS. These are now augmented with their ".async" version, where the compiler 
does not automatically track completion. The completion is now tracked using 
explicit mark/wait intrinsics, which must be inserted by the user. This makes 
it possible to write programs with efficient waits in software pipeline loops. 
The program can now wait for only the oldest outstanding operations to finish, 
while launching more operations for later use.
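
A rough sketch of the intended usage, in the same pseudocode style as the 
examples added in llvm/docs/AMDGPUAsyncOperations.rst (the 
`asyncmark`/`wait.asyncmark` spellings and the buffer helpers below are 
illustrative only; their actual lowering lands in later changes of this stack):

```c++
// Pseudocode sketch of a double-buffered pipeline; buf0/buf1/buf_for() are
// hypothetical LDS tile buffers, and asyncmark()/wait.asyncmark() stand for
// the mark/wait intrinsics documented in AMDGPUAsyncOperations.rst.
async_load_to_lds(buf0, g);          // block 0
asyncmark();
async_load_to_lds(buf1, g);          // block 1
asyncmark();

for (int i = 0; i < n; ++i) {
  wait.asyncmark(1);                 // wait only for the oldest outstanding block
  // ... compute on the tile that is now resident in LDS ...
  async_load_to_lds(buf_for(i), g);  // launch the next block for a later iteration
  asyncmark();
}
wait.asyncmark(0);                   // drain all remaining blocks before exiting
```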

This change only adds the new names of the builtins/intrinsics, which continue 
to behave exactly like their non-async counterparts. A later change will 
implement the actual mark/wait semantics in SIInsertWaitcnts.

This is part of a stack split out from #173259

Fixes: SWDEV-521121

From 134e9b1e034e7b2061b56365c9382bff95aba0da Mon Sep 17 00:00:00 2001
From: Sameer Sahasrabuddhe <[email protected]>
Date: Mon, 9 Feb 2026 10:06:15 +0530
Subject: [PATCH] [AMDGPU] Asynchronous loads from global/buffer to LDS on
 pre-GFX12

The existing "LDS DMA" builtins/intrinsics copy data from global/buffer pointer
to LDS. These are now augmented with their ".async" version, where the compiler
does not automatically track completion. The completion is now tracked using
explicit mark/wait intrinsics, which must be inserted by the user. This makes it
possible to write programs with efficient waits in software pipeline loops. The
program can now wait for only the oldest outstanding operations to finish, while
launching more operations for later use.

This change only contains the new names of the builtins/intrinsics, which
continue to behave exactly like their non-async counterparts. A later change
will implement the actual mark/wait semantics in SIInsertWaitcnts.

Fixes: SWDEV-521121
---
 clang/include/clang/Basic/BuiltinsAMDGPU.td   |   4 +
 clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp   |   5 +
 clang/lib/Sema/SemaAMDGPU.cpp                 |   6 +-
 .../builtins-amdgcn-global-load-lds.cl        |  12 +
 .../builtins-amdgcn-load-to-lds.cl            |  12 +
 .../builtins-amdgcn-raw-buffer-load-lds.cl    |   6 +-
 llvm/docs/AMDGPUAsyncOperations.rst           | 238 ++++++++++
 llvm/docs/AMDGPUUsage.rst                     |   8 +
 llvm/docs/UserGuides.rst                      |   4 +
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  23 +-
 llvm/lib/Target/AMDGPU/AMDGPU.td              |   3 +-
 .../AMDGPU/AMDGPUInstructionSelector.cpp      |  23 +
 .../AMDGPU/AMDGPULowerBufferFatPointers.cpp   |  11 +-
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  |  20 +-
 .../AMDGPU/AsmParser/AMDGPUAsmParser.cpp      |   2 +
 llvm/lib/Target/AMDGPU/BUFInstructions.td     |  24 +-
 llvm/lib/Target/AMDGPU/FLATInstructions.td    |  25 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  43 +-
 llvm/lib/Target/AMDGPU/SIInstrInfo.td         |   2 +
 .../test/CodeGen/AMDGPU/async-buffer-loads.ll |  91 ++++
 .../test/CodeGen/AMDGPU/asyncmark-pregfx12.ll | 444 ++++++++++++++++++
 .../hazard-flat-instruction-valu-check.mir    |   4 +-
 .../AMDGPU/insert-waitcnts-fence-soft.mir     |  20 +-
 llvm/test/CodeGen/AMDGPU/lds-dma-hazards.mir  |   4 +-
 llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir  |  18 +-
 .../AMDGPU/llvm.amdgcn.buffer.load.lds.err.ll |  24 +
 .../AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll  |  21 +
 .../CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll |  37 ++
 .../AMDGPU/sched.group.classification.mir     |   8 +-
 29 files changed, 1066 insertions(+), 76 deletions(-)
 create mode 100644 llvm/docs/AMDGPUAsyncOperations.rst
 create mode 100644 llvm/test/CodeGen/AMDGPU/async-buffer-loads.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/asyncmark-pregfx12.ll

diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td 
b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index 740d136f465c1..7eb6cece7c55b 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -190,7 +190,9 @@ def __builtin_amdgcn_raw_ptr_buffer_atomic_fmin_f64 : 
AMDGPUBuiltin<"double(doub
 def __builtin_amdgcn_raw_ptr_buffer_atomic_fmax_f64 : 
AMDGPUBuiltin<"double(double, __amdgpu_buffer_rsrc_t, int, int, _Constant 
int)", [], "atomic-fmin-fmax-global-f64">;
 
 def __builtin_amdgcn_raw_ptr_buffer_load_lds : 
AMDGPUBuiltin<"void(__amdgpu_buffer_rsrc_t, void address_space<3> *, _Constant 
unsigned int, int, int, _Constant int, _Constant int)", [], 
"vmem-to-lds-load-insts">;
+def __builtin_amdgcn_raw_ptr_buffer_load_async_lds : 
AMDGPUBuiltin<"void(__amdgpu_buffer_rsrc_t, void address_space<3> *, _Constant 
unsigned int, int, int, _Constant int, _Constant int)", [], 
"vmem-to-lds-load-insts">;
 def __builtin_amdgcn_struct_ptr_buffer_load_lds : 
AMDGPUBuiltin<"void(__amdgpu_buffer_rsrc_t, void address_space<3> *, _Constant 
unsigned int, int, int, int, _Constant int, _Constant int)", [], 
"vmem-to-lds-load-insts">;
+def __builtin_amdgcn_struct_ptr_buffer_load_async_lds : 
AMDGPUBuiltin<"void(__amdgpu_buffer_rsrc_t, void address_space<3> *, _Constant 
unsigned int, int, int, int, _Constant int, _Constant int)", [], 
"vmem-to-lds-load-insts">;
 
 
//===----------------------------------------------------------------------===//
 // Ballot builtins.
@@ -288,7 +290,9 @@ def __builtin_amdgcn_global_atomic_fadd_v2bf16 : 
AMDGPUBuiltin<"_ExtVector<2, sh
 def __builtin_amdgcn_ds_atomic_fadd_v2bf16 : AMDGPUBuiltin<"_ExtVector<2, 
short>(_ExtVector<2, short> address_space<3> *, _ExtVector<2, short>)", 
[CustomTypeChecking], "atomic-ds-pk-add-16-insts">;
 def __builtin_amdgcn_ds_atomic_fadd_v2f16 : AMDGPUBuiltin<"_ExtVector<2, 
_Float16>(_ExtVector<2, _Float16> address_space<3> *, _ExtVector<2, 
_Float16>)", [CustomTypeChecking], "atomic-ds-pk-add-16-insts">;
 def __builtin_amdgcn_load_to_lds : AMDGPUBuiltin<"void(void *, void 
address_space<3> *, _Constant unsigned int, _Constant int, _Constant unsigned 
int)", [], "vmem-to-lds-load-insts">;
+def __builtin_amdgcn_load_async_to_lds : AMDGPUBuiltin<"void(void *, void 
address_space<3> *, _Constant unsigned int, _Constant int, _Constant unsigned 
int)", [], "vmem-to-lds-load-insts">;
 def __builtin_amdgcn_global_load_lds : AMDGPUBuiltin<"void(void 
address_space<1> *, void address_space<3> *, _Constant unsigned int, _Constant 
int, _Constant unsigned int)", [], "vmem-to-lds-load-insts">;
+def __builtin_amdgcn_global_load_async_lds : AMDGPUBuiltin<"void(void 
address_space<1> *, void address_space<3> *, _Constant unsigned int, _Constant 
int, _Constant unsigned int)", [], "vmem-to-lds-load-insts">;
 
 
//===----------------------------------------------------------------------===//
 // Deep learning builtins.
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp 
b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index a096ed27a788e..006ff1e1f658f 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -850,6 +850,11 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned 
BuiltinID,
     return emitBuiltinWithOneOverloadedType<5>(*this, E,
                                                Intrinsic::amdgcn_load_to_lds);
   }
+  case AMDGPU::BI__builtin_amdgcn_load_async_to_lds: {
+    // Should this have asan instrumentation?
+    return emitBuiltinWithOneOverloadedType<5>(
+        *this, E, Intrinsic::amdgcn_load_async_to_lds);
+  }
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_32x4B:
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_32x4B:
   case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_16x8B:
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index 4261e1849133f..9ec40bf06b744 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -38,9 +38,13 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned 
BuiltinID,
 
   switch (BuiltinID) {
   case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_lds:
+  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_async_lds:
   case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_lds:
+  case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_async_lds:
   case AMDGPU::BI__builtin_amdgcn_load_to_lds:
-  case AMDGPU::BI__builtin_amdgcn_global_load_lds: {
+  case AMDGPU::BI__builtin_amdgcn_load_async_to_lds:
+  case AMDGPU::BI__builtin_amdgcn_global_load_lds:
+  case AMDGPU::BI__builtin_amdgcn_global_load_async_lds: {
     constexpr const int SizeIdx = 2;
     llvm::APSInt Size;
     Expr *ArgExpr = TheCall->getArg(SizeIdx);
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl 
b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl
index 62c8deb6e4a89..e7c81b000a8f0 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-global-load-lds.cl
@@ -19,10 +19,14 @@ typedef unsigned char u8;
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 4, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.global.load.async.lds(ptr 
addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 4, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_global_load_lds_u32(global u32* src, local u32 *dst) {
   __builtin_amdgcn_global_load_lds(src, dst, /*size=*/4, /*offset=*/0, 
/*aux=*/0);
+  __builtin_amdgcn_global_load_async_lds(src, dst, /*size=*/4, /*offset=*/0, 
/*aux=*/0);
 }
 
 // CHECK-LABEL: @test_global_load_lds_u16(
@@ -36,10 +40,14 @@ void test_global_load_lds_u32(global u32* src, local u32 
*dst) {
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 2, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.global.load.async.lds(ptr 
addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 2, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_global_load_lds_u16(global u16* src, local u16 *dst) {
   __builtin_amdgcn_global_load_lds(src, dst, /*size=*/2, /*offset=*/0, 
/*aux=*/0);
+  __builtin_amdgcn_global_load_async_lds(src, dst, /*size=*/2, /*offset=*/0, 
/*aux=*/0);
 }
 
 // CHECK-LABEL: @test_global_load_lds_u8(
@@ -53,8 +61,12 @@ void test_global_load_lds_u16(global u16* src, local u16 
*dst) {
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 1, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.global.load.async.lds(ptr 
addrspace(1) [[TMP2]], ptr addrspace(3) [[TMP3]], i32 1, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_global_load_lds_u8(global u8* src, local u8 *dst) {
   __builtin_amdgcn_global_load_lds(src, dst, /*size=*/1, /*offset=*/0, 
/*aux=*/0);
+  __builtin_amdgcn_global_load_async_lds(src, dst, /*size=*/1, /*offset=*/0, 
/*aux=*/0);
 }
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl 
b/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
index 6cdedb33bdd80..cc944204446ae 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-load-to-lds.cl
@@ -19,10 +19,14 @@ typedef unsigned char u8;
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 4, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.async.to.lds.p1(ptr addrspace(1) 
[[TMP2]], ptr addrspace(3) [[TMP3]], i32 4, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_load_to_lds_u32(global u32* src, local u32 *dst) {
   __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_load_async_to_lds(src, dst, /*size=*/4, /*offset=*/0, 
/*aux=*/0);
 }
 
 // CHECK-LABEL: @test_load_to_lds_u16(
@@ -36,10 +40,14 @@ void test_load_to_lds_u32(global u32* src, local u32 *dst) {
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 2, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.async.to.lds.p1(ptr addrspace(1) 
[[TMP2]], ptr addrspace(3) [[TMP3]], i32 2, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_load_to_lds_u16(global u16* src, local u16 *dst) {
   __builtin_amdgcn_load_to_lds(src, dst, /*size=*/2, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_load_async_to_lds(src, dst, /*size=*/2, /*offset=*/0, 
/*aux=*/0);
 }
 
 // CHECK-LABEL: @test_load_to_lds_u8(
@@ -53,8 +61,12 @@ void test_load_to_lds_u16(global u16* src, local u16 *dst) {
 // CHECK-NEXT:    [[TMP0:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
 // CHECK-NEXT:    [[TMP1:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
 // CHECK-NEXT:    call void @llvm.amdgcn.load.to.lds.p1(ptr addrspace(1) 
[[TMP0]], ptr addrspace(3) [[TMP1]], i32 1, i32 0, i32 0)
+// CHECK-NEXT:    [[TMP2:%.*]] = load ptr addrspace(1), ptr 
[[SRC_ADDR_ASCAST]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load ptr addrspace(3), ptr 
[[DST_ADDR_ASCAST]], align 4
+// CHECK-NEXT:    call void @llvm.amdgcn.load.async.to.lds.p1(ptr addrspace(1) 
[[TMP2]], ptr addrspace(3) [[TMP3]], i32 1, i32 0, i32 0)
 // CHECK-NEXT:    ret void
 //
 void test_load_to_lds_u8(global u8* src, local u8 *dst) {
   __builtin_amdgcn_load_to_lds(src, dst, /*size=*/1, /*offset=*/0, /*aux=*/0);
+  __builtin_amdgcn_load_async_to_lds(src, dst, /*size=*/1, /*offset=*/0, 
/*aux=*/0);
 }
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl 
b/clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl
index 177165972b7a9..144cc7599bb5e 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-raw-buffer-load-lds.cl
@@ -5,17 +5,19 @@
 // CHECK-LABEL: @test_amdgcn_raw_ptr_buffer_load_lds(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    tail call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr 
addrspace(8) [[RSRC:%.*]], ptr addrspace(3) [[LDS:%.*]], i32 1, i32 
[[OFFSET:%.*]], i32 [[SOFFSET:%.*]], i32 2, i32 3)
+// CHECK-NEXT:    tail call void 
@llvm.amdgcn.raw.ptr.buffer.load.async.lds(ptr addrspace(8) [[RSRC]], ptr 
addrspace(3) [[LDS]], i32 1, i32 [[OFFSET]], i32 [[SOFFSET]], i32 2, i32 3)
 // CHECK-NEXT:    ret void
 //
 void test_amdgcn_raw_ptr_buffer_load_lds(__amdgpu_buffer_rsrc_t rsrc, __local 
void * lds, int offset, int soffset) {
     __builtin_amdgcn_raw_ptr_buffer_load_lds(rsrc, lds, 1, offset, soffset, 2, 
3);
+    __builtin_amdgcn_raw_ptr_buffer_load_async_lds(rsrc, lds, 1, offset, 
soffset, 2, 3);
 }
 
 // CHECK-LABEL: @test_amdgcn_struct_ptr_buffer_load_lds(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    tail call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr 
addrspace(8) [[RSRC:%.*]], ptr addrspace(3) [[LDS:%.*]], i32 4, i32 
[[VINDEX:%.*]], i32 [[VOFFSET:%.*]], i32 [[SOFFSET:%.*]], i32 2, i32 3)
+// CHECK-NEXT:    tail call void 
@llvm.amdgcn.struct.ptr.buffer.load.async.lds(ptr addrspace(8) [[RSRC:%.*]], 
ptr addrspace(3) [[LDS:%.*]], i32 4, i32 [[VINDEX:%.*]], i32 [[VOFFSET:%.*]], 
i32 [[SOFFSET:%.*]], i32 2, i32 3)
 // CHECK-NEXT:    ret void
 //
 void test_amdgcn_struct_ptr_buffer_load_lds(__amdgpu_buffer_rsrc_t rsrc, 
__local void * lds, int size, int vindex, int voffset, int soffset) {
-    __builtin_amdgcn_struct_ptr_buffer_load_lds(rsrc, lds, 4, vindex, voffset, 
soffset, 2, 3);
+    __builtin_amdgcn_struct_ptr_buffer_load_async_lds(rsrc, lds, 4, vindex, 
voffset, soffset, 2, 3);
 }
diff --git a/llvm/docs/AMDGPUAsyncOperations.rst 
b/llvm/docs/AMDGPUAsyncOperations.rst
new file mode 100644
index 0000000000000..a55b4d94a5e7a
--- /dev/null
+++ b/llvm/docs/AMDGPUAsyncOperations.rst
@@ -0,0 +1,238 @@
+.. _amdgpu-async-operations:
+
+===============================
+ AMDGPU Asynchronous Operations
+===============================
+
+.. contents::
+   :local:
+
+Introduction
+============
+
+Asynchronous operations are memory transfers (usually between global memory
+and LDS) that complete independently at an unspecified scope. A thread that
+requests one or more asynchronous transfers can use *async marks* to track
+their completion. The thread waits for each mark to be *completed*, which
+indicates that requests initiated in program order before this mark have also
+completed.
+
+Operations
+==========
+
+Memory Accesses
+---------------
+
+LDS DMA Operations
+^^^^^^^^^^^^^^^^^^
+
+.. code-block:: llvm
+
+  ; "Legacy" LDS DMA operations
+  void @llvm.amdgcn.load.async.to.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.global.load.async.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.raw.buffer.load.async.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.raw.ptr.buffer.load.async.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.struct.buffer.load.async.lds(ptr %src, ptr %dst)
+  void @llvm.amdgcn.struct.ptr.buffer.load.async.lds(ptr %src, ptr %dst)
+
+Request an async operation that copies the specified number of bytes from the
+global/buffer pointer ``%src`` to the LDS pointer ``%dst``.
+
+.. note::
+
+   The above listing is *merely representative*. The actual function signatures
+   are identical to those of the non-async variants, and the intrinsics are
+   supported only on the corresponding architectures (GFX9 and GFX10).
+
+Async Mark Operations
+---------------------
+
+An *async mark* in the abstract machine tracks all the async operations that
+are program ordered before that mark. A mark M is said to be *completed*
+only when all async operations program ordered before M are reported by the
+implementation as having finished, and it is said to be *outstanding* 
otherwise.
+
+Thus we have the following sufficient condition:
+
+  An async operation X is *completed* at a program point P if there exists a
+  mark M such that X is program ordered before M, M is program ordered before
+  P, and M is completed. X is said to be *outstanding* at P otherwise.
+
+The abstract machine maintains a sequence of *async marks* during the
+execution of a function body; this sequence excludes any marks produced by
+calls to other functions encountered in the currently executing function.
+
+
+``@llvm.amdgcn.asyncmark()``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When executed, inserts an async mark in the sequence associated with the
+currently executing function body.
+
+``@llvm.amdgcn.wait.asyncmark(i16 %N)``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Waits until there are at most N outstanding marks in the sequence associated
+with the currently executing function body.
+
+Memory Consistency Model
+========================
+
+Each asynchronous operation consists of a non-atomic read on the source and a
+non-atomic write on the destination. Async "LDS DMA" intrinsics result in async
+accesses that guarantee visibility relative to other memory operations as
+follows:
+
+  An asynchronous operation `A` program ordered before an overlapping memory
+  operation `X` happens-before `X` only if `A` is completed before `X`.
+
+  A memory operation `X` program ordered before an overlapping asynchronous
+  operation `A` happens-before `A`.
+
+.. note::
+
+   The *only if* in the above wording implies that, unlike the default LLVM
+   memory model, certain program order edges are not automatically included in
+   ``happens-before``.
+
+Examples
+========
+
+Uneven blocks of async transfers
+--------------------------------
+
+.. code-block:: c++
+
+   void foo(global int *g, local int *l) {
+     // first block
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     asyncmark();
+
+     // second block; longer
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     asyncmark();
+
+     // third block; shorter
+     async_load_to_lds(l, g);
+     async_load_to_lds(l, g);
+     asyncmark();
+
+     // Wait for first block
+     wait.asyncmark(2);
+   }
+
+Software pipeline
+-----------------
+
+.. code-block:: c++
+
+   void foo(global int *g, local int *l) {
+     // first block
+     asyncmark();
+
+     // second block
+     asyncmark();
+
+     // third block
+     asyncmark();
+
+     for (;;) {
+       wait.asyncmark(2);
+       // use data
+
+       // next block
+       asyncmark();
+     }
+
+     // flush one block
+     wait.asyncmark(2);
+
+     // flush one more block
+     wait.asyncmark(1);
+
+     // flush last block
+     wait.asyncmark(0);
+   }
+
+Ordinary function call
+----------------------
+
+.. code-block:: c++
+
+   extern void bar(); // may or may not make async calls
+
+   void foo(global int *g, local int *l) {
+       // first block
+       asyncmark();
+
+       // second block
+       asyncmark();
+
+       // function call
+       bar();
+
+       // third block
+       asyncmark();
+
+       wait.asyncmark(1); // will wait for at least the second block, possibly 
including bar()
+       wait.asyncmark(0); // will wait for third block, including bar()
+   }
+
+Implementation notes
+====================
+
+[This section is informational.]
+
+Optimization
+------------
+
+The implementation may eliminate async mark/wait intrinsics in the following
+cases:
+
+1. An ``asyncmark`` operation which is not included in the wait count of a
+   later wait operation in the current function. In particular, an
+   ``asyncmark`` which is not post-dominated by any ``wait.asyncmark``.
+2. A ``wait.asyncmark`` whose wait count is greater than the number of
+   outstanding async marks at that point. In particular, a ``wait.asyncmark``
+   that is not dominated by any ``asyncmark``.
+
+In general, at a function call, if the caller uses sufficient waits to track
+its own async operations, the actions performed by the callee cannot affect
+correctness. But inlining such a call may result in redundant waits.
+
+.. code-block:: c++
+
+   void foo() {
+     asyncmark(); // A
+   }
+
+   void bar() {
+     asyncmark(); // B
+     asyncmark(); // C
+     foo();
+     wait.asyncmark(1);
+   }
+
+Before inlining, the ``wait.asyncmark`` waits for mark B to be completed.
+
+.. code-block:: c++
+
+   void foo() {
+   }
+
+   void bar() {
+     asyncmark(); // B
+     asyncmark(); // C
+     asyncmark(); // A from call to foo()
+     wait.asyncmark(1);
+   }
+
+After inlining, the ``wait.asyncmark`` now waits for mark C to complete, which
+is a longer wait than necessary. Ideally, the optimizer should have eliminated
+mark A in the body of foo() itself.
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 1282dcf98727a..668476450cb2e 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -7035,6 +7035,14 @@ cache modifiers. They cannot be performed atomically. 
They implement volatile
 (via aux/cpol bit 31) and nontemporal (via metadata) as if they were loads
 from the global address space.
 
+The LDS DMA instructions are synchronous by default, which means that the
+compiler automatically ensures that the corresponding operation has completed
+before its side-effects are used. The :ref:`asynchronous
+versions<amdgpu-async-operations>` of these instructions perform the same
+operation, but without automatic tracking in the compiler; the user must
+explicitly track the completion of these instructions before using their
+side-effects.
+
 Private address space uses ``buffer_load/store`` using the scratch V#
 (GFX6-GFX8), or ``scratch_load/store`` (GFX9-GFX11). Since only a single thread
 is accessing the memory, atomic memory orderings are not meaningful, and all
diff --git a/llvm/docs/UserGuides.rst b/llvm/docs/UserGuides.rst
index 10d7fef904d2d..a712d4eb4c13e 100644
--- a/llvm/docs/UserGuides.rst
+++ b/llvm/docs/UserGuides.rst
@@ -18,6 +18,7 @@ intermediate LLVM representation.
    AdvancedBuilds
    AliasAnalysis
    AMDGPUUsage
+   AMDGPUAsyncOperations
    Benchmarking
    BigEndianNEON
    BuildingADistribution
@@ -287,6 +288,9 @@ Additional Topics
 :doc:`AMDGPUUsage`
    This document describes using the AMDGPU backend to compile GPU kernels.
 
+:doc:`AMDGPUAsyncOperations`
+   Builtins for invoking asynchronous data transfer operations on AMD GPUs.
+
 :doc:`AMDGPUDwarfExtensionsForHeterogeneousDebugging`
    This document describes DWARF extensions to support heterogeneous debugging
    for targets such as the AMDGPU backend.
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td 
b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index a8eba9ed126b7..ec887ab0bdfad 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1998,9 +1998,9 @@ class AMDGPURawBufferLoadLDS : Intrinsic <
   [IntrWillReturn, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>, 
ImmArg<ArgIndex<5>>,
    ImmArg<ArgIndex<6>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, 
AMDGPURsrcIntrinsic<0>;
 def int_amdgcn_raw_buffer_load_lds : AMDGPURawBufferLoadLDS;
+def int_amdgcn_raw_buffer_load_async_lds : AMDGPURawBufferLoadLDS;
 
 class AMDGPURawPtrBufferLoadLDS :
-  ClangBuiltin<"__builtin_amdgcn_raw_ptr_buffer_load_lds">,
   Intrinsic <
   [],
   [AMDGPUBufferRsrcTy,        // rsrc(SGPR)
@@ -2021,7 +2021,11 @@ class AMDGPURawPtrBufferLoadLDS :
    WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
    ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>,
    ImmArg<ArgIndex<6>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, 
AMDGPURsrcIntrinsic<0>;
-def int_amdgcn_raw_ptr_buffer_load_lds : AMDGPURawPtrBufferLoadLDS;
+def int_amdgcn_raw_ptr_buffer_load_lds : AMDGPURawPtrBufferLoadLDS,
+      ClangBuiltin<"__builtin_amdgcn_raw_ptr_buffer_load_lds">;
+def int_amdgcn_raw_ptr_buffer_load_async_lds : AMDGPURawPtrBufferLoadLDS,
+      ClangBuiltin<"__builtin_amdgcn_raw_ptr_buffer_load_async_lds">;
+
 
 class AMDGPUStructBufferLoadLDS : Intrinsic <
   [],
@@ -2042,9 +2046,9 @@ class AMDGPUStructBufferLoadLDS : Intrinsic <
   [IntrWillReturn, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>, 
ImmArg<ArgIndex<6>>,
    ImmArg<ArgIndex<7>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, 
AMDGPURsrcIntrinsic<0>;
 def int_amdgcn_struct_buffer_load_lds : AMDGPUStructBufferLoadLDS;
+def int_amdgcn_struct_buffer_load_async_lds : AMDGPUStructBufferLoadLDS;
 
 class AMDGPUStructPtrBufferLoadLDS :
-  ClangBuiltin<"__builtin_amdgcn_struct_ptr_buffer_load_lds">,
   Intrinsic <
   [],
   [AMDGPUBufferRsrcTy,        // rsrc(SGPR)
@@ -2066,7 +2070,10 @@ class AMDGPUStructPtrBufferLoadLDS :
    WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
    ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<6>>,
    ImmArg<ArgIndex<7>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, 
AMDGPURsrcIntrinsic<0>;
-def int_amdgcn_struct_ptr_buffer_load_lds : AMDGPUStructPtrBufferLoadLDS;
+def int_amdgcn_struct_ptr_buffer_load_lds : AMDGPUStructPtrBufferLoadLDS,
+  ClangBuiltin<"__builtin_amdgcn_struct_ptr_buffer_load_lds">;
+def int_amdgcn_struct_ptr_buffer_load_async_lds : AMDGPUStructPtrBufferLoadLDS,
+  ClangBuiltin<"__builtin_amdgcn_struct_ptr_buffer_load_async_lds">;
 
 def int_amdgcn_s_buffer_prefetch_data : DefaultAttrsIntrinsic <
   [],
@@ -2794,10 +2801,9 @@ class AMDGPULoadToLDS :
      ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, 
IntrNoCallback, IntrNoFree],
      "", [SDNPMemOperand]>;
 def int_amdgcn_load_to_lds : AMDGPULoadToLDS;
+def int_amdgcn_load_async_to_lds : AMDGPULoadToLDS;
 
-class AMDGPUGlobalLoadLDS
-    : ClangBuiltin<"__builtin_amdgcn_global_load_lds">,
-      Intrinsic<
+class AMDGPUGlobalLoadLDS : Intrinsic<
           [],
           [LLVMQualPointerType<1>, // Base global pointer to load from
            LLVMQualPointerType<3>, // LDS base pointer to store to
@@ -2813,7 +2819,8 @@ class AMDGPUGlobalLoadLDS
            ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>,
            IntrNoCallback, IntrNoFree],
           "", [SDNPMemOperand]>;
-def int_amdgcn_global_load_lds : AMDGPUGlobalLoadLDS;
+def int_amdgcn_global_load_lds : AMDGPUGlobalLoadLDS, 
ClangBuiltin<"__builtin_amdgcn_global_load_lds">;
+def int_amdgcn_global_load_async_lds : AMDGPUGlobalLoadLDS, 
ClangBuiltin<"__builtin_amdgcn_global_load_async_lds">;
 
 // This is IntrHasSideEffects because it reads from a volatile hardware 
register.
 def int_amdgcn_pops_exiting_wave_id :
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index accaeda1cb239..e02faf84a5631 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -1266,8 +1266,7 @@ defm PkAddMinMaxInsts : 
AMDGPUSubtargetFeature<"pk-add-min-max-insts",
 
 defm VMemToLDSLoad : AMDGPUSubtargetFeature<"vmem-to-lds-load-insts",
   "The platform has memory to lds instructions (global_load w/lds bit set, 
buffer_load"
-  "w/lds bit set or global_load_lds. This does not include scratch_load_lds.",
-  /*GenPredicate=*/0
+  "w/lds bit set or global_load_lds. This does not include scratch_load_lds."
 >;
 
 defm LdsBarrierArriveAtomic : 
AMDGPUSubtargetFeature<"lds-barrier-arrive-atomic",
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index b96c2ef70dd83..f2e5be546a11d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -2376,16 +2376,22 @@ bool 
AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
   case Intrinsic::amdgcn_init_whole_wave:
     return selectInitWholeWave(I);
   case Intrinsic::amdgcn_raw_buffer_load_lds:
+  case Intrinsic::amdgcn_raw_buffer_load_async_lds:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_lds:
+  case Intrinsic::amdgcn_raw_ptr_buffer_load_async_lds:
   case Intrinsic::amdgcn_struct_buffer_load_lds:
+  case Intrinsic::amdgcn_struct_buffer_load_async_lds:
   case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
+  case Intrinsic::amdgcn_struct_ptr_buffer_load_async_lds:
     return selectBufferLoadLds(I);
   // Until we can store both the address space of the global and the LDS
   // arguments by having tto MachineMemOperands on an intrinsic, we just trust
   // that the argument is a global pointer (buffer pointers have been handled 
by
   // a LLVM IR-level lowering).
   case Intrinsic::amdgcn_load_to_lds:
+  case Intrinsic::amdgcn_load_async_to_lds:
   case Intrinsic::amdgcn_global_load_lds:
+  case Intrinsic::amdgcn_global_load_async_lds:
     return selectGlobalLoadLds(I);
   case Intrinsic::amdgcn_exp_compr:
     if (!STI.hasCompressedExport()) {
@@ -3432,11 +3438,25 @@ bool 
AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
   return true;
 }
 
+static bool isAsyncLDSDMA(Intrinsic::ID Intr) {
+  switch (Intr) {
+  case Intrinsic::amdgcn_raw_buffer_load_async_lds:
+  case Intrinsic::amdgcn_raw_ptr_buffer_load_async_lds:
+  case Intrinsic::amdgcn_struct_buffer_load_async_lds:
+  case Intrinsic::amdgcn_struct_ptr_buffer_load_async_lds:
+  case Intrinsic::amdgcn_load_async_to_lds:
+  case Intrinsic::amdgcn_global_load_async_lds:
+    return true;
+  }
+  return false;
+}
+
 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
   if (!Subtarget->hasVMemToLDSLoad())
     return false;
   unsigned Opc;
   unsigned Size = MI.getOperand(3).getImm();
+  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
 
   // The struct intrinsic variants add one additional operand over raw.
   const bool HasVIndex = MI.getNumOperands() == 9;
@@ -3526,6 +3546,7 @@ bool 
AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
       Aux & (IsGFX12Plus ? AMDGPU::CPol::SWZ : AMDGPU::CPol::SWZ_pregfx12)
           ? 1
           : 0); // swz
+  MIB.addImm(isAsyncLDSDMA(IntrinsicID));
 
   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
   // Don't set the offset value here because the pointer points to the base of
@@ -3648,6 +3669,7 @@ bool 
AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{
 
   unsigned Opc;
   unsigned Size = MI.getOperand(3).getImm();
+  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
 
   switch (Size) {
   default:
@@ -3718,6 +3740,7 @@ bool 
AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{
 
   unsigned Aux = MI.getOperand(5).getImm();
   MIB.addImm(Aux & ~AMDGPU::CPol::VIRTUAL_BITS); // cpol
+  MIB.addImm(isAsyncLDSDMA(IntrinsicID));
 
   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index c4762602fa169..05e97d2fc7508 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -2208,6 +2208,7 @@ static bool isRemovablePointerIntrinsic(Intrinsic::ID 
IID) {
   case Intrinsic::memset_inline:
   case Intrinsic::experimental_memset_pattern:
   case Intrinsic::amdgcn_load_to_lds:
+  case Intrinsic::amdgcn_load_async_to_lds:
     return true;
   }
 }
@@ -2296,7 +2297,8 @@ PtrParts 
SplitPtrStructs::visitIntrinsicInst(IntrinsicInst &I) {
     SplitUsers.insert(&I);
     return {NewRsrc, Off};
   }
-  case Intrinsic::amdgcn_load_to_lds: {
+  case Intrinsic::amdgcn_load_to_lds:
+  case Intrinsic::amdgcn_load_async_to_lds: {
     Value *Ptr = I.getArgOperand(0);
     if (!isSplitFatPtr(Ptr->getType()))
       return {nullptr, nullptr};
@@ -2307,9 +2309,12 @@ PtrParts 
SplitPtrStructs::visitIntrinsicInst(IntrinsicInst &I) {
     Value *ImmOff = I.getArgOperand(3);
     Value *Aux = I.getArgOperand(4);
     Value *SOffset = IRB.getInt32(0);
+    Intrinsic::ID NewIntr =
+        IID == Intrinsic::amdgcn_load_to_lds
+            ? Intrinsic::amdgcn_raw_ptr_buffer_load_lds
+            : Intrinsic::amdgcn_raw_ptr_buffer_load_async_lds;
     Instruction *NewLoad = IRB.CreateIntrinsic(
-        Intrinsic::amdgcn_raw_ptr_buffer_load_lds, {},
-        {Rsrc, LDSPtr, LoadSize, Off, SOffset, ImmOff, Aux});
+        NewIntr, {}, {Rsrc, LDSPtr, LoadSize, Off, SOffset, ImmOff, Aux});
     copyMetadata(NewLoad, &I);
     SplitUsers.insert(&I);
     I.replaceAllUsesWith(NewLoad);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp 
b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 62a252add0091..e56168978427f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3299,7 +3299,9 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
       constrainOpWithReadfirstlane(B, MI, 1); // M0
       return;
     case Intrinsic::amdgcn_raw_buffer_load_lds:
-    case Intrinsic::amdgcn_raw_ptr_buffer_load_lds: {
+    case Intrinsic::amdgcn_raw_buffer_load_async_lds:
+    case Intrinsic::amdgcn_raw_ptr_buffer_load_lds:
+    case Intrinsic::amdgcn_raw_ptr_buffer_load_async_lds: {
       applyDefaultMapping(OpdMapper);
       constrainOpWithReadfirstlane(B, MI, 1); // rsrc
       constrainOpWithReadfirstlane(B, MI, 2); // M0
@@ -3307,7 +3309,9 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
       return;
     }
     case Intrinsic::amdgcn_struct_buffer_load_lds:
-    case Intrinsic::amdgcn_struct_ptr_buffer_load_lds: {
+    case Intrinsic::amdgcn_struct_buffer_load_async_lds:
+    case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
+    case Intrinsic::amdgcn_struct_ptr_buffer_load_async_lds: {
       applyDefaultMapping(OpdMapper);
       constrainOpWithReadfirstlane(B, MI, 1); // rsrc
       constrainOpWithReadfirstlane(B, MI, 2); // M0
@@ -3323,7 +3327,9 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
       return;
     }
     case Intrinsic::amdgcn_load_to_lds:
-    case Intrinsic::amdgcn_global_load_lds: {
+    case Intrinsic::amdgcn_load_async_to_lds:
+    case Intrinsic::amdgcn_global_load_lds:
+    case Intrinsic::amdgcn_global_load_async_lds: {
       applyDefaultMapping(OpdMapper);
       constrainOpWithReadfirstlane(B, MI, 2);
       return;
@@ -5449,7 +5455,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const 
MachineInstr &MI) const {
       break;
     }
     case Intrinsic::amdgcn_raw_buffer_load_lds:
-    case Intrinsic::amdgcn_raw_ptr_buffer_load_lds: {
+    case Intrinsic::amdgcn_raw_buffer_load_async_lds:
+    case Intrinsic::amdgcn_raw_ptr_buffer_load_lds:
+    case Intrinsic::amdgcn_raw_ptr_buffer_load_async_lds: {
       OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
       OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
       OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
@@ -5482,7 +5490,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const 
MachineInstr &MI) const {
       break;
     }
     case Intrinsic::amdgcn_struct_buffer_load_lds:
-    case Intrinsic::amdgcn_struct_ptr_buffer_load_lds: {
+    case Intrinsic::amdgcn_struct_buffer_load_async_lds:
+    case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
+    case Intrinsic::amdgcn_struct_ptr_buffer_load_async_lds: {
       OpdsMapping[1] = getSGPROpMapping(MI.getOperand(1).getReg(), MRI, *TRI);
       OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
       OpdsMapping[4] = getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp 
b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 08a23142b2cc2..c97509047c0c3 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -126,6 +126,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
     ImmTySMEMOffsetMod,
     ImmTyCPol,
     ImmTyTFE,
+    ImmTyIsAsync,
     ImmTyD16,
     ImmTyClamp,
     ImmTyOModSI,
@@ -1119,6 +1120,7 @@ class AMDGPUOperand : public MCParsedAsmOperand {
     case ImmTyIndexKey16bit: OS << "index_key"; break;
     case ImmTyIndexKey32bit: OS << "index_key"; break;
     case ImmTyTFE: OS << "TFE"; break;
+    case ImmTyIsAsync: OS << "IsAsync"; break;
     case ImmTyD16: OS << "D16"; break;
     case ImmTyFORMAT: OS << "FORMAT"; break;
     case ImmTyClamp: OS << "Clamp"; break;
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td 
b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index fde67e9e2d83b..cc716d5303cdd 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -416,14 +416,15 @@ class getBUFVDataRegisterOperandForOp<RegisterOperand Op, 
bit isTFE> {
   RegisterOperand ret = getBUFVDataRegisterOperand<Size, isTFE>.ret;
 }
 
-class getMUBUFInsDA<list<RegisterOperand> vdataList,
-                    list<RegisterClassLike> vaddrList, bit isTFE, bit 
hasRestrictedSOffset, bit isTrue16 = false> {
+class getMUBUFInsDA<list<RegisterOperand> vdataList, list<RegisterClassLike> 
vaddrList,
+                    bit isTFE, bit hasRestrictedSOffset, bit isTrue16, bit 
isLds> {
   RegisterOperand vdataClass = !if(!empty(vdataList), ?, !head(vdataList));
   RegisterClassLike vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList));
   RegisterOperand vdata_op = 
getBUFVDataRegisterOperand<!cast<SIRegisterClassLike>(vdataClass.RegClass).Size,
 isTFE, isTrue16>.ret;
 
   dag SOffset = !if(hasRestrictedSOffset, (ins SReg_32:$soffset), (ins 
SCSrc_b32:$soffset));
-  dag NonVaddrInputs = !con((ins SReg_128_XNULL:$srsrc), SOffset, (ins 
Offset:$offset, CPol_0:$cpol, i1imm_0:$swz));
+  dag IsAsyncOpnd = !if(isLds, (ins i1imm_0:$IsAsync), (ins));
+  dag NonVaddrInputs = !con((ins SReg_128_XNULL:$srsrc), SOffset, (ins 
Offset:$offset, CPol_0:$cpol, i1imm_0:$swz), IsAsyncOpnd);
 
   dag Inputs = !if(!empty(vaddrList), NonVaddrInputs, !con((ins 
vaddrClass:$vaddr), NonVaddrInputs));
   dag ret = !if(!empty(vdataList), Inputs, !con((ins vdata_op:$vdata), 
Inputs));
@@ -448,13 +449,14 @@ class getMUBUFElements<ValueType vt> {
     );
 }
 
-class getMUBUFIns<int addrKind, list<RegisterOperand> vdataList, bit isTFE, 
bit hasRestrictedSOffset, bit isTrue16 = false> {
+class getMUBUFIns<int addrKind, list<RegisterOperand> vdataList, bit isTFE,
+                  bit hasRestrictedSOffset, bit isTrue16, bit isLds> {
   dag ret =
-    !if(!eq(addrKind, BUFAddrKind.Offset), getMUBUFInsDA<vdataList, [], isTFE, 
hasRestrictedSOffset, isTrue16>.ret,
-    !if(!eq(addrKind, BUFAddrKind.OffEn),  getMUBUFInsDA<vdataList, [VGPR_32], 
isTFE, hasRestrictedSOffset, isTrue16>.ret,
-    !if(!eq(addrKind, BUFAddrKind.IdxEn),  getMUBUFInsDA<vdataList, [VGPR_32], 
isTFE, hasRestrictedSOffset, isTrue16>.ret,
-    !if(!eq(addrKind, BUFAddrKind.BothEn), getMUBUFInsDA<vdataList, 
[VReg_64_AlignTarget], isTFE, hasRestrictedSOffset, isTrue16>.ret,
-    !if(!eq(addrKind, BUFAddrKind.Addr64), getMUBUFInsDA<vdataList, 
[VReg_64_AlignTarget], isTFE, hasRestrictedSOffset, isTrue16>.ret,
+    !if(!eq(addrKind, BUFAddrKind.Offset), getMUBUFInsDA<vdataList, [], isTFE, 
hasRestrictedSOffset, isTrue16, isLds>.ret,
+    !if(!eq(addrKind, BUFAddrKind.OffEn),  getMUBUFInsDA<vdataList, [VGPR_32], 
isTFE, hasRestrictedSOffset, isTrue16, isLds>.ret,
+    !if(!eq(addrKind, BUFAddrKind.IdxEn),  getMUBUFInsDA<vdataList, [VGPR_32], 
isTFE, hasRestrictedSOffset, isTrue16, isLds>.ret,
+    !if(!eq(addrKind, BUFAddrKind.BothEn), getMUBUFInsDA<vdataList, 
[VReg_64_AlignTarget], isTFE, hasRestrictedSOffset, isTrue16, isLds>.ret,
+    !if(!eq(addrKind, BUFAddrKind.Addr64), getMUBUFInsDA<vdataList, 
[VReg_64_AlignTarget], isTFE, hasRestrictedSOffset, isTrue16, isLds>.ret,
     (ins))))));
 }
 
@@ -499,7 +501,7 @@ class MUBUF_Load_Pseudo <string opName,
                          RegisterOperand vdata_op = 
getBUFVDataRegisterOperand<vdata_vt.Size, isTFE>.ret>
   : MUBUF_Pseudo<opName,
                  !if(!or(isLds, isLdsOpc), (outs), (outs vdata_op:$vdata)),
-                 !con(getMUBUFIns<addrKind, [], isTFE, 
hasRestrictedSOffset>.ret,
+                 !con(getMUBUFIns<addrKind, [], isTFE, hasRestrictedSOffset, 
0, isLds>.ret,
                       !if(HasTiedDest, (ins vdata_op:$vdata_in), (ins))),
                  getMUBUFAsmOps<addrKind, !or(isLds, isLdsOpc), isLds, 
isTFE>.ret,
                  pattern>,
@@ -643,7 +645,7 @@ class MUBUF_Store_Pseudo <string opName,
                           bit isTrue16 = false>
   : MUBUF_Pseudo<opName,
                  (outs),
-                 getMUBUFIns<addrKind, [getVregSrcForVT<store_vt, isTrue16, 
0>.ret], isTFE, hasRestrictedSOffset, isTrue16>.ret,
+                 getMUBUFIns<addrKind, [getVregSrcForVT<store_vt, isTrue16, 
0>.ret], isTFE, hasRestrictedSOffset, isTrue16, 0>.ret,
                  getMUBUFAsmOps<addrKind, 0, 0, isTFE>.ret,
                  pattern>,
     MUBUF_SetupAddr<addrKind> {
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td 
b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 3ad15ae28e51e..2ac24505e1dc2 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -390,13 +390,14 @@ multiclass FLAT_Global_Store_Pseudo_t16<string opName> {
 
 // Async loads, introduced in gfx1250, will store directly
 // to a DS address in vdst (they will not use M0 for DS addess).
-class FLAT_Global_Load_LDS_Pseudo <string opName, bit EnableSaddr = 0, bit 
IsAsync = 0> : FLAT_Pseudo<
+class FLAT_Global_Load_LDS_Pseudo <string opName, bit EnableSaddr = 0, bit 
IsAsync = 0, bit IsLegacyLDSDMA = 0> : FLAT_Pseudo<
   opName,
   (outs ),
   !con(
        !if(IsAsync, (ins VGPR_32:$vdst), (ins)),
        !if(EnableSaddr, (ins SReg_64:$saddr, VGPR_32:$vaddr), (ins 
VReg_64_AlignTarget:$vaddr)),
-       (ins flat_offset:$offset, CPol_0:$cpol)),
+       (ins flat_offset:$offset, CPol_0:$cpol),
+       !if(IsLegacyLDSDMA, (ins i1imm_0:$IsAsync), (ins))),
   !if(IsAsync, " $vdst,", "")#" $vaddr"#!if(EnableSaddr, ", $saddr", ", 
off")#"$offset$cpol"> {
   let LGKM_CNT = 0;
   let VM_CNT = !not(IsAsync);
@@ -416,10 +417,10 @@ class FLAT_Global_Load_LDS_Pseudo <string opName, bit 
EnableSaddr = 0, bit IsAsy
   let SchedRW = [WriteVMEM, WriteLDS];
 }
 
-multiclass FLAT_Global_Load_LDS_Pseudo<string opName, bit IsAsync = 0> {
-  def ""     : FLAT_Global_Load_LDS_Pseudo<opName, 0, IsAsync>,
+multiclass FLAT_Global_Load_LDS_Pseudo<string opName, bit IsAsync = 0, bit 
IsLegacyLDSDMA = 0> {
+  def ""     : FLAT_Global_Load_LDS_Pseudo<opName, 0, IsAsync, IsLegacyLDSDMA>,
     GlobalSaddrTable<0, opName>;
-  def _SADDR : FLAT_Global_Load_LDS_Pseudo<opName, 1, IsAsync>,
+  def _SADDR : FLAT_Global_Load_LDS_Pseudo<opName, 1, IsAsync, IsLegacyLDSDMA>,
     GlobalSaddrTable<1, opName>;
 }
 
@@ -1212,15 +1213,15 @@ let SubtargetPredicate = HasGFX10_BEncoding in {
                                 VGPROp_32, i32>;
 }
 
-defm GLOBAL_LOAD_LDS_UBYTE  : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_ubyte">;
-defm GLOBAL_LOAD_LDS_SBYTE  : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_sbyte">;
-defm GLOBAL_LOAD_LDS_USHORT : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_ushort">;
-defm GLOBAL_LOAD_LDS_SSHORT : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_sshort">;
-defm GLOBAL_LOAD_LDS_DWORD  : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_dword">;
+defm GLOBAL_LOAD_LDS_UBYTE  : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_ubyte", 0, 1>;
+defm GLOBAL_LOAD_LDS_SBYTE  : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_sbyte", 0, 1>;
+defm GLOBAL_LOAD_LDS_USHORT : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_ushort", 0, 1>;
+defm GLOBAL_LOAD_LDS_SSHORT : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_sshort", 0, 1>;
+defm GLOBAL_LOAD_LDS_DWORD  : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_dword", 0, 1>;
 
 let SubtargetPredicate = HasGFX950Insts in {
-defm GLOBAL_LOAD_LDS_DWORDX3 : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_dwordx3">;
-defm GLOBAL_LOAD_LDS_DWORDX4 : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_dwordx4">;
+defm GLOBAL_LOAD_LDS_DWORDX3 : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_dwordx3", 0, 1>;
+defm GLOBAL_LOAD_LDS_DWORDX4 : FLAT_Global_Load_LDS_Pseudo 
<"global_load_lds_dwordx4", 0, 1>;
 }
 
 let SubtargetPredicate = isGFX12PlusNot12_50 in
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp 
b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7d2abfe8f17df..ab6535614e6bf 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1448,9 +1448,13 @@ void 
SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
         }
         break;
       case Intrinsic::amdgcn_raw_buffer_load_lds:
+      case Intrinsic::amdgcn_raw_buffer_load_async_lds:
       case Intrinsic::amdgcn_raw_ptr_buffer_load_lds:
+      case Intrinsic::amdgcn_raw_ptr_buffer_load_async_lds:
       case Intrinsic::amdgcn_struct_buffer_load_lds:
-      case Intrinsic::amdgcn_struct_ptr_buffer_load_lds: {
+      case Intrinsic::amdgcn_struct_buffer_load_async_lds:
+      case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
+      case Intrinsic::amdgcn_struct_ptr_buffer_load_async_lds: {
         unsigned Width = 
cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
 
         // Entry 0: Load from buffer.
@@ -1698,7 +1702,9 @@ void 
SITargetLowering::getTgtMemIntrinsic(SmallVectorImpl<IntrinsicInfo> &Infos,
     return;
   }
   case Intrinsic::amdgcn_load_to_lds:
-  case Intrinsic::amdgcn_global_load_lds: {
+  case Intrinsic::amdgcn_load_async_to_lds:
+  case Intrinsic::amdgcn_global_load_lds:
+  case Intrinsic::amdgcn_global_load_async_lds: {
     unsigned Width = cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
     auto *Aux = cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1));
     bool IsVolatile = Aux->getZExtValue() & AMDGPU::CPol::VOLATILE;
@@ -1821,7 +1827,9 @@ bool SITargetLowering::getAddrModeArguments(const 
IntrinsicInst *II,
     Ptr = II->getArgOperand(0);
     break;
   case Intrinsic::amdgcn_load_to_lds:
+  case Intrinsic::amdgcn_load_async_to_lds:
   case Intrinsic::amdgcn_global_load_lds:
+  case Intrinsic::amdgcn_global_load_async_lds:
   case Intrinsic::amdgcn_global_load_async_to_lds_b8:
   case Intrinsic::amdgcn_global_load_async_to_lds_b32:
   case Intrinsic::amdgcn_global_load_async_to_lds_b64:
@@ -11403,6 +11411,19 @@ SDValue SITargetLowering::handleD16VData(SDValue 
VData, SelectionDAG &DAG,
   return VData;
 }
 
+static bool isAsyncLDSDMA(Intrinsic::ID Intr) {
+  switch (Intr) {
+  case Intrinsic::amdgcn_raw_buffer_load_async_lds:
+  case Intrinsic::amdgcn_raw_ptr_buffer_load_async_lds:
+  case Intrinsic::amdgcn_struct_buffer_load_async_lds:
+  case Intrinsic::amdgcn_struct_ptr_buffer_load_async_lds:
+  case Intrinsic::amdgcn_load_async_to_lds:
+  case Intrinsic::amdgcn_global_load_async_lds:
+    return true;
+  }
+  return false;
+}
+
 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {
   SDLoc DL(Op);
@@ -11598,15 +11619,21 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue 
Op,
                                    M->getMemoryVT(), M->getMemOperand());
   }
   case Intrinsic::amdgcn_raw_buffer_load_lds:
+  case Intrinsic::amdgcn_raw_buffer_load_async_lds:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_lds:
+  case Intrinsic::amdgcn_raw_ptr_buffer_load_async_lds:
   case Intrinsic::amdgcn_struct_buffer_load_lds:
-  case Intrinsic::amdgcn_struct_ptr_buffer_load_lds: {
+  case Intrinsic::amdgcn_struct_buffer_load_async_lds:
+  case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
+  case Intrinsic::amdgcn_struct_ptr_buffer_load_async_lds: {
     if (!Subtarget->hasVMemToLDSLoad())
       return SDValue();
     unsigned Opc;
     bool HasVIndex =
         IntrinsicID == Intrinsic::amdgcn_struct_buffer_load_lds ||
-        IntrinsicID == Intrinsic::amdgcn_struct_ptr_buffer_load_lds;
+        IntrinsicID == Intrinsic::amdgcn_struct_buffer_load_async_lds ||
+        IntrinsicID == Intrinsic::amdgcn_struct_ptr_buffer_load_lds ||
+        IntrinsicID == Intrinsic::amdgcn_struct_ptr_buffer_load_async_lds;
     unsigned OpOffset = HasVIndex ? 1 : 0;
     SDValue VOffset = Op.getOperand(5 + OpOffset);
     bool HasVOffset = !isNullConstant(VOffset);
@@ -11678,6 +11705,8 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue 
Op,
             ? 1
             : 0,
         DL, MVT::i8));                                           // swz
+    Ops.push_back(
+        DAG.getTargetConstant(isAsyncLDSDMA(IntrinsicID), DL, MVT::i8));
     Ops.push_back(M0Val.getValue(0));                            // Chain
     Ops.push_back(M0Val.getValue(1));                            // Glue
 
@@ -11691,7 +11720,9 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue 
Op,
   // for "trust me" that the remaining cases are global pointers until
   // such time as we can put two mem operands on an intrinsic.
   case Intrinsic::amdgcn_load_to_lds:
-  case Intrinsic::amdgcn_global_load_lds: {
+  case Intrinsic::amdgcn_load_async_to_lds:
+  case Intrinsic::amdgcn_global_load_lds:
+  case Intrinsic::amdgcn_global_load_async_lds: {
     if (!Subtarget->hasVMemToLDSLoad())
       return SDValue();
 
@@ -11760,6 +11791,8 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue 
Op,
     unsigned Aux = Op.getConstantOperandVal(6);
     Ops.push_back(DAG.getTargetConstant(Aux & ~AMDGPU::CPol::VIRTUAL_BITS, DL,
                                         MVT::i32)); // CPol
+    Ops.push_back(
+        DAG.getTargetConstant(isAsyncLDSDMA(IntrinsicID), DL, MVT::i8));
 
     Ops.push_back(M0Val.getValue(0)); // Chain
     Ops.push_back(M0Val.getValue(1)); // Glue
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td 
b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 8f2448730c099..81c9633635eed 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -1265,6 +1265,8 @@ def CPol_NonGLC : ValuePredicatedOperand<CPol, 
"!(Op.getImm() & CPol::GLC)", 1>;
 def CPol_GLC_WithDefault : DefaultOperand<CPol_GLC, !shl(1, CPolBit.GLC)>;
 def CPol_NonGLC_WithDefault : DefaultOperand<CPol_NonGLC, 0>;
 
+def IsAsync : NamedBitOperand<"isasync">;
+
 def TFE : NamedBitOperand<"tfe">;
 def UNorm : NamedBitOperand<"unorm">;
 def DA : NamedBitOperand<"da">;
diff --git a/llvm/test/CodeGen/AMDGPU/async-buffer-loads.ll 
b/llvm/test/CodeGen/AMDGPU/async-buffer-loads.ll
new file mode 100644
index 0000000000000..93b51ff83deb8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/async-buffer-loads.ll
@@ -0,0 +1,91 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 
UTC_ARGS: --version 6
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s
+
+define float @raw.buffer.load(<4 x i32> inreg %rsrc, ptr addrspace(3) inreg 
%lds) {
+; CHECK-LABEL: raw.buffer.load:
+; CHECK:       ; %bb.0: ; %main_body
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_mov_b32 m0, s20
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    buffer_load_dword off, s[16:19], 0 lds
+; CHECK-NEXT:    buffer_load_dword off, s[16:19], 0 offset:4 glc lds
+; CHECK-NEXT:    buffer_load_dword off, s[16:19], 0 offset:8 slc lds
+; CHECK-NEXT:    v_mov_b32_e32 v0, s20
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    ds_read_b32 v0, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+main_body:
+  call void @llvm.amdgcn.raw.buffer.load.async.lds(<4 x i32> %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0)
+  call void @llvm.amdgcn.raw.buffer.load.async.lds(<4 x i32> %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0, i32 4, i32 1)
+  call void @llvm.amdgcn.raw.buffer.load.async.lds(<4 x i32> %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0, i32 8, i32 2)
+  %res = load float, ptr addrspace(3) %lds
+  ret float %res
+}
+
+define float @raw.ptr.buffer.load(ptr addrspace(8) inreg %rsrc, ptr 
addrspace(3) inreg %lds) {
+; CHECK-LABEL: raw.ptr.buffer.load:
+; CHECK:       ; %bb.0: ; %main_body
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_mov_b32 m0, s20
+; CHECK-NEXT:    s_nop 0
+; CHECK-NEXT:    buffer_load_dword off, s[16:19], 0 lds
+; CHECK-NEXT:    buffer_load_dword off, s[16:19], 0 offset:4 glc lds
+; CHECK-NEXT:    buffer_load_dword off, s[16:19], 0 offset:8 slc lds
+; CHECK-NEXT:    v_mov_b32_e32 v0, s20
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    ds_read_b32 v0, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+main_body:
+  call void @llvm.amdgcn.raw.ptr.buffer.load.async.lds(ptr addrspace(8) %rsrc, 
ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0)
+  call void @llvm.amdgcn.raw.ptr.buffer.load.async.lds(ptr addrspace(8) %rsrc, 
ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 4, i32 1)
+  call void @llvm.amdgcn.raw.ptr.buffer.load.async.lds(ptr addrspace(8) %rsrc, 
ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 8, i32 2)
+  %res = load float, ptr addrspace(3) %lds
+  ret float %res
+}
+
+define float @struct.buffer.load(<4 x i32> inreg %rsrc, ptr addrspace(3) inreg 
%lds) {
+; CHECK-LABEL: struct.buffer.load:
+; CHECK:       ; %bb.0: ; %main_body
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_mov_b32 m0, s20
+; CHECK-NEXT:    v_mov_b32_e32 v0, 8
+; CHECK-NEXT:    buffer_load_dword v0, s[16:19], 0 idxen lds
+; CHECK-NEXT:    buffer_load_dword v0, s[16:19], 0 idxen offset:4 glc lds
+; CHECK-NEXT:    buffer_load_dword v0, s[16:19], 0 idxen offset:8 slc lds
+; CHECK-NEXT:    v_mov_b32_e32 v0, s20
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    ds_read_b32 v0, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+main_body:
+  call void @llvm.amdgcn.struct.buffer.load.async.lds(<4 x i32> %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 8, i32 0, i32 0, i32 0, i32 0)
+  call void @llvm.amdgcn.struct.buffer.load.async.lds(<4 x i32> %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 8, i32 0, i32 0, i32 4, i32 1)
+  call void @llvm.amdgcn.struct.buffer.load.async.lds(<4 x i32> %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 8, i32 0, i32 0, i32 8, i32 2)
+  %res = load float, ptr addrspace(3) %lds
+  ret float %res
+}
+
+define float @struct.ptr.buffer.load(ptr addrspace(8) inreg %rsrc, ptr 
addrspace(3) inreg %lds) {
+; CHECK-LABEL: struct.ptr.buffer.load:
+; CHECK:       ; %bb.0: ; %main_body
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_mov_b32 m0, s20
+; CHECK-NEXT:    v_mov_b32_e32 v0, 8
+; CHECK-NEXT:    buffer_load_dword v0, s[16:19], 0 idxen lds
+; CHECK-NEXT:    buffer_load_dword v0, s[16:19], 0 idxen offset:4 glc lds
+; CHECK-NEXT:    buffer_load_dword v0, s[16:19], 0 idxen offset:8 slc lds
+; CHECK-NEXT:    v_mov_b32_e32 v0, s20
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    ds_read_b32 v0, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+main_body:
+  call void @llvm.amdgcn.struct.ptr.buffer.load.async.lds(ptr addrspace(8) 
%rsrc, ptr addrspace(3) %lds, i32 4, i32 8, i32 0, i32 0, i32 0, i32 0)
+  call void @llvm.amdgcn.struct.ptr.buffer.load.async.lds(ptr addrspace(8) 
%rsrc, ptr addrspace(3) %lds, i32 4, i32 8, i32 0, i32 0, i32 4, i32 1)
+  call void @llvm.amdgcn.struct.ptr.buffer.load.async.lds(ptr addrspace(8) 
%rsrc, ptr addrspace(3) %lds, i32 4, i32 8, i32 0, i32 0, i32 8, i32 2)
+  %res = load float, ptr addrspace(3) %lds
+  ret float %res
+}
diff --git a/llvm/test/CodeGen/AMDGPU/asyncmark-pregfx12.ll 
b/llvm/test/CodeGen/AMDGPU/asyncmark-pregfx12.ll
new file mode 100644
index 0000000000000..f78cd0b959a02
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/asyncmark-pregfx12.ll
@@ -0,0 +1,444 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 
UTC_ARGS: --version 6
+; RUN: sed 's/.ASYNC/.async/' %s | llc -march=amdgcn -mcpu=gfx900 -o - | 
FileCheck %s -check-prefixes=WITHASYNC
+; RUN: sed 's/.ASYNC//' %s       | llc -march=amdgcn -mcpu=gfx900 -o - | 
FileCheck %s -check-prefixes=WITHOUT
+
+; Test async operations with global_load_lds and global loads.
+; This version uses wave barriers to enforce program order so that unrelated
+; vmem instructions do not get reordered before reaching this point.
+
+define void @interleaved_global_and_dma(ptr addrspace(1) %foo, ptr 
addrspace(3) %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out) {
+; WITHASYNC-LABEL: interleaved_global_and_dma:
+; WITHASYNC:       ; %bb.0: ; %entry
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WITHASYNC-NEXT:    global_load_dword v7, v[3:4], off
+; WITHASYNC-NEXT:    global_load_dword v8, v[0:1], off
+; WITHASYNC-NEXT:    v_readfirstlane_b32 s4, v2
+; WITHASYNC-NEXT:    ; wave barrier
+; WITHASYNC-NEXT:    s_mov_b32 m0, s4
+; WITHASYNC-NEXT:    global_load_dword v0, v[0:1], off
+; WITHASYNC-NEXT:    s_nop 0
+; WITHASYNC-NEXT:    global_load_dword v[3:4], off lds
+; WITHASYNC-NEXT:    ; wave barrier
+; WITHASYNC-NEXT:    global_load_dword v[3:4], off lds
+; WITHASYNC-NEXT:    ; wave barrier
+; WITHASYNC-NEXT:    global_load_dword v1, v[3:4], off
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(1)
+; WITHASYNC-NEXT:    ds_read_b32 v2, v2
+; WITHASYNC-NEXT:    v_add_u32_e32 v3, v8, v7
+; WITHASYNC-NEXT:    s_waitcnt lgkmcnt(0)
+; WITHASYNC-NEXT:    v_add3_u32 v0, v3, v2, v0
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0)
+; WITHASYNC-NEXT:    v_add3_u32 v0, v0, v1, v2
+; WITHASYNC-NEXT:    global_store_dword v[5:6], v0, off
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0)
+; WITHASYNC-NEXT:    s_setpc_b64 s[30:31]
+;
+; WITHOUT-LABEL: interleaved_global_and_dma:
+; WITHOUT:       ; %bb.0: ; %entry
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WITHOUT-NEXT:    global_load_dword v7, v[3:4], off
+; WITHOUT-NEXT:    global_load_dword v8, v[0:1], off
+; WITHOUT-NEXT:    v_readfirstlane_b32 s4, v2
+; WITHOUT-NEXT:    ; wave barrier
+; WITHOUT-NEXT:    s_mov_b32 m0, s4
+; WITHOUT-NEXT:    global_load_dword v0, v[0:1], off
+; WITHOUT-NEXT:    s_nop 0
+; WITHOUT-NEXT:    global_load_dword v[3:4], off lds
+; WITHOUT-NEXT:    ; wave barrier
+; WITHOUT-NEXT:    global_load_dword v[3:4], off lds
+; WITHOUT-NEXT:    ; wave barrier
+; WITHOUT-NEXT:    global_load_dword v1, v[3:4], off
+; WITHOUT-NEXT:    s_waitcnt vmcnt(1)
+; WITHOUT-NEXT:    ds_read_b32 v2, v2
+; WITHOUT-NEXT:    v_add_u32_e32 v3, v8, v7
+; WITHOUT-NEXT:    s_waitcnt lgkmcnt(0)
+; WITHOUT-NEXT:    v_add3_u32 v0, v3, v2, v0
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0)
+; WITHOUT-NEXT:    v_add3_u32 v0, v0, v1, v2
+; WITHOUT-NEXT:    global_store_dword v[5:6], v0, off
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0)
+; WITHOUT-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  ; First batch: global load, global load, async global-to-LDS
+  %bar_v11 = load i32, ptr addrspace(1) %bar
+  %foo_v1 = load i32, ptr addrspace(1) %foo
+  call void @llvm.amdgcn.wave.barrier()
+  call void @llvm.amdgcn.global.load.ASYNC.lds(ptr addrspace(1) %bar, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0)
+
+  ; Second batch: global load, async global-to-LDS, global load
+  %foo_v2 = load i32, ptr addrspace(1) %foo
+  call void @llvm.amdgcn.wave.barrier()
+  call void @llvm.amdgcn.global.load.ASYNC.lds(ptr addrspace(1) %bar, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0)
+  call void @llvm.amdgcn.wave.barrier()
+  %bar_v12 = load i32, ptr addrspace(1) %bar
+
+  ; Wait for the first async mark and read from LDS.
+  ; This results in vmcnt(3), corresponding to the second batch.
+  %lds_val21 = load i32, ptr addrspace(3) %lds
+
+  ; Wait for the next LDS DMA.
+  ; This results in vmcnt(1), corresponding to %bar_v12. Could have been combined with the lgkmcnt(1) for %lds_val21.
+  %lds_val22 = load i32, ptr addrspace(3) %lds
+  %sum1 = add i32 %foo_v1, %bar_v11
+  %sum2 = add i32 %sum1, %lds_val21
+  %sum3 = add i32 %sum2, %foo_v2
+  ; Finally, a vmcnt(0) for %bar_v12, which was not included in the async mark that followed it.
+  %sum4 = add i32 %sum3, %bar_v12
+  %sum5 = add i32 %sum4, %lds_val22
+  store i32 %sum5, ptr addrspace(1) %out
+
+  ret void
+}
+
+define void @interleaved_buffer_and_dma(ptr addrspace(8) inreg %buf, ptr 
addrspace(1) %foo, ptr addrspace(3) inreg %lds, ptr addrspace(1) %bar, ptr 
addrspace(1) %out) {
+; WITHASYNC-LABEL: interleaved_buffer_and_dma:
+; WITHASYNC:       ; %bb.0: ; %entry
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WITHASYNC-NEXT:    global_load_dword v6, v[2:3], off
+; WITHASYNC-NEXT:    global_load_dword v7, v[0:1], off
+; WITHASYNC-NEXT:    s_mov_b32 m0, s20
+; WITHASYNC-NEXT:    ; wave barrier
+; WITHASYNC-NEXT:    v_mov_b32_e32 v8, 0x54
+; WITHASYNC-NEXT:    global_load_dword v0, v[0:1], off
+; WITHASYNC-NEXT:    v_mov_b32_e32 v1, 0x58
+; WITHASYNC-NEXT:    buffer_load_dword v8, s[16:19], 0 offen lds
+; WITHASYNC-NEXT:    ; wave barrier
+; WITHASYNC-NEXT:    buffer_load_dword v1, s[16:19], 0 offen lds
+; WITHASYNC-NEXT:    ; wave barrier
+; WITHASYNC-NEXT:    global_load_dword v1, v[2:3], off
+; WITHASYNC-NEXT:    v_mov_b32_e32 v2, s20
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(1)
+; WITHASYNC-NEXT:    ds_read_b32 v2, v2
+; WITHASYNC-NEXT:    v_add_u32_e32 v3, v7, v6
+; WITHASYNC-NEXT:    s_waitcnt lgkmcnt(0)
+; WITHASYNC-NEXT:    v_add3_u32 v0, v3, v2, v0
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0)
+; WITHASYNC-NEXT:    v_add3_u32 v0, v0, v1, v2
+; WITHASYNC-NEXT:    global_store_dword v[4:5], v0, off
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0)
+; WITHASYNC-NEXT:    s_setpc_b64 s[30:31]
+;
+; WITHOUT-LABEL: interleaved_buffer_and_dma:
+; WITHOUT:       ; %bb.0: ; %entry
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WITHOUT-NEXT:    global_load_dword v6, v[2:3], off
+; WITHOUT-NEXT:    global_load_dword v7, v[0:1], off
+; WITHOUT-NEXT:    s_mov_b32 m0, s20
+; WITHOUT-NEXT:    ; wave barrier
+; WITHOUT-NEXT:    v_mov_b32_e32 v8, 0x54
+; WITHOUT-NEXT:    global_load_dword v0, v[0:1], off
+; WITHOUT-NEXT:    v_mov_b32_e32 v1, 0x58
+; WITHOUT-NEXT:    buffer_load_dword v8, s[16:19], 0 offen lds
+; WITHOUT-NEXT:    ; wave barrier
+; WITHOUT-NEXT:    buffer_load_dword v1, s[16:19], 0 offen lds
+; WITHOUT-NEXT:    ; wave barrier
+; WITHOUT-NEXT:    global_load_dword v1, v[2:3], off
+; WITHOUT-NEXT:    v_mov_b32_e32 v2, s20
+; WITHOUT-NEXT:    s_waitcnt vmcnt(1)
+; WITHOUT-NEXT:    ds_read_b32 v2, v2
+; WITHOUT-NEXT:    v_add_u32_e32 v3, v7, v6
+; WITHOUT-NEXT:    s_waitcnt lgkmcnt(0)
+; WITHOUT-NEXT:    v_add3_u32 v0, v3, v2, v0
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0)
+; WITHOUT-NEXT:    v_add3_u32 v0, v0, v1, v2
+; WITHOUT-NEXT:    global_store_dword v[4:5], v0, off
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0)
+; WITHOUT-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  ; First batch: global load, global load, async global-to-LDS.
+  %bar_v11 = load i32, ptr addrspace(1) %bar
+  %foo_v1 = load i32, ptr addrspace(1) %foo
+  call void @llvm.amdgcn.wave.barrier()
+  call void @llvm.amdgcn.raw.ptr.buffer.load.ASYNC.lds(ptr addrspace(8) %buf, 
ptr addrspace(3) %lds, i32 4, i32 84, i32 0, i32 0, i32 0)
+
+  ; Second batch: global load, async global-to-LDS, global load.
+  %foo_v2 = load i32, ptr addrspace(1) %foo
+  call void @llvm.amdgcn.wave.barrier()
+  call void @llvm.amdgcn.raw.ptr.buffer.load.ASYNC.lds(ptr addrspace(8) %buf, 
ptr addrspace(3) %lds, i32 4, i32 88, i32 0, i32 0, i32 0)
+  call void @llvm.amdgcn.wave.barrier()
+  %bar_v12 = load i32, ptr addrspace(1) %bar
+
+  ; Wait for the first async mark and read from LDS.
+  ; This results in vmcnt(3), corresponding to the second batch.
+  %lds_val21 = load i32, ptr addrspace(3) %lds
+
+  ; Wait for the next LDS DMA.
+  ; This results in vmcnt(1) because the last global load is not async.
+  %lds_val22 = load i32, ptr addrspace(3) %lds
+  %sum1 = add i32 %foo_v1, %bar_v11
+  %sum2 = add i32 %sum1, %lds_val21
+  %sum3 = add i32 %sum2, %foo_v2
+  %sum4 = add i32 %sum3, %bar_v12
+  %sum5 = add i32 %sum4, %lds_val22
+  store i32 %sum5, ptr addrspace(1) %out
+
+  ret void
+}
+
+; A perfect loop that is unlikely to exist in real life. It uses only async LDS
+; DMA operations, and results in vmcnt waits that exactly match the stream of
+; those outstanding operations.
+
+define void @test_pipelined_loop(ptr addrspace(1) %foo, ptr addrspace(3) %lds, 
ptr addrspace(1) %bar, ptr addrspace(1) %out, i32 %n) {
+; WITHASYNC-LABEL: test_pipelined_loop:
+; WITHASYNC:       ; %bb.0: ; %prolog
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WITHASYNC-NEXT:    v_readfirstlane_b32 s4, v2
+; WITHASYNC-NEXT:    s_mov_b32 m0, s4
+; WITHASYNC-NEXT:    s_nop 0
+; WITHASYNC-NEXT:    global_load_dword v[0:1], off lds
+; WITHASYNC-NEXT:    global_load_dword v[0:1], off lds
+; WITHASYNC-NEXT:    v_mov_b32_e32 v5, 0
+; WITHASYNC-NEXT:    s_mov_b32 s6, 2
+; WITHASYNC-NEXT:    s_mov_b64 s[4:5], 0
+; WITHASYNC-NEXT:  .LBB2_1: ; %loop_body
+; WITHASYNC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; WITHASYNC-NEXT:    v_readfirstlane_b32 s7, v2
+; WITHASYNC-NEXT:    s_mov_b32 m0, s7
+; WITHASYNC-NEXT:    s_add_i32 s6, s6, 1
+; WITHASYNC-NEXT:    global_load_dword v[0:1], off lds
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0)
+; WITHASYNC-NEXT:    ds_read_b32 v6, v2
+; WITHASYNC-NEXT:    v_cmp_ge_i32_e32 vcc, s6, v7
+; WITHASYNC-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; WITHASYNC-NEXT:    s_waitcnt lgkmcnt(0)
+; WITHASYNC-NEXT:    v_add_u32_e32 v5, v5, v6
+; WITHASYNC-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; WITHASYNC-NEXT:    s_cbranch_execnz .LBB2_1
+; WITHASYNC-NEXT:  ; %bb.2: ; %epilog
+; WITHASYNC-NEXT:    s_or_b64 exec, exec, s[4:5]
+; WITHASYNC-NEXT:    v_add_u32_e32 v0, v5, v6
+; WITHASYNC-NEXT:    global_store_dword v[3:4], v0, off
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0)
+; WITHASYNC-NEXT:    s_setpc_b64 s[30:31]
+;
+; WITHOUT-LABEL: test_pipelined_loop:
+; WITHOUT:       ; %bb.0: ; %prolog
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WITHOUT-NEXT:    v_readfirstlane_b32 s4, v2
+; WITHOUT-NEXT:    s_mov_b32 m0, s4
+; WITHOUT-NEXT:    s_nop 0
+; WITHOUT-NEXT:    global_load_dword v[0:1], off lds
+; WITHOUT-NEXT:    global_load_dword v[0:1], off lds
+; WITHOUT-NEXT:    v_mov_b32_e32 v5, 0
+; WITHOUT-NEXT:    s_mov_b32 s6, 2
+; WITHOUT-NEXT:    s_mov_b64 s[4:5], 0
+; WITHOUT-NEXT:  .LBB2_1: ; %loop_body
+; WITHOUT-NEXT:    ; =>This Inner Loop Header: Depth=1
+; WITHOUT-NEXT:    v_readfirstlane_b32 s7, v2
+; WITHOUT-NEXT:    s_mov_b32 m0, s7
+; WITHOUT-NEXT:    s_add_i32 s6, s6, 1
+; WITHOUT-NEXT:    global_load_dword v[0:1], off lds
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0)
+; WITHOUT-NEXT:    ds_read_b32 v6, v2
+; WITHOUT-NEXT:    v_cmp_ge_i32_e32 vcc, s6, v7
+; WITHOUT-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; WITHOUT-NEXT:    s_waitcnt lgkmcnt(0)
+; WITHOUT-NEXT:    v_add_u32_e32 v5, v5, v6
+; WITHOUT-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; WITHOUT-NEXT:    s_cbranch_execnz .LBB2_1
+; WITHOUT-NEXT:  ; %bb.2: ; %epilog
+; WITHOUT-NEXT:    s_or_b64 exec, exec, s[4:5]
+; WITHOUT-NEXT:    v_add_u32_e32 v0, v5, v6
+; WITHOUT-NEXT:    global_store_dword v[3:4], v0, off
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0)
+; WITHOUT-NEXT:    s_setpc_b64 s[30:31]
+prolog:
+  ; Load first iteration
+  call void @llvm.amdgcn.global.load.ASYNC.lds(ptr addrspace(1) %foo, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0)
+
+  ; Load second iteration
+  call void @llvm.amdgcn.global.load.ASYNC.lds(ptr addrspace(1) %foo, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0)
+
+  br label %loop_body
+
+loop_body:
+  %i = phi i32 [ 2, %prolog ], [ %i.next, %loop_body ]
+  %sum = phi i32 [ 0, %prolog ], [ %sum_i, %loop_body ]
+
+  ; Load next iteration
+  call void @llvm.amdgcn.global.load.ASYNC.lds(ptr addrspace(1) %foo, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0)
+
+  ; Wait for iteration i-2 and process
+  %lds_idx = sub i32 %i, 2
+  %lds_val = load i32, ptr addrspace(3) %lds
+
+  %sum_i = add i32 %sum, %lds_val
+
+  %i.next = add i32 %i, 1
+  %cmp = icmp slt i32 %i.next, %n
+  br i1 %cmp, label %loop_body, label %epilog
+
+epilog:
+  ; Process remaining iterations
+  %lds_val_n_2 = load i32, ptr addrspace(3) %lds
+  %sum_e2 = add i32 %sum_i, %lds_val_n_2
+
+  %lds_val_n_1 = load i32, ptr addrspace(3) %lds
+  %sum_e1 = add i32 %sum_e2, %lds_val_n_1
+  store i32 %sum_e2, ptr addrspace(1) %bar
+
+  ret void
+}
+
+; Software pipelined loop with async global-to-LDS and global loads
+
+define void @test_pipelined_loop_with_global(ptr addrspace(1) %foo, ptr 
addrspace(3) %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out, i32 %n) {
+; WITHASYNC-LABEL: test_pipelined_loop_with_global:
+; WITHASYNC:       ; %bb.0: ; %prolog
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WITHASYNC-NEXT:    v_readfirstlane_b32 s4, v2
+; WITHASYNC-NEXT:    global_load_dword v10, v[0:1], off
+; WITHASYNC-NEXT:    global_load_dword v8, v[0:1], off
+; WITHASYNC-NEXT:    global_load_dword v14, v[3:4], off
+; WITHASYNC-NEXT:    global_load_dword v9, v[3:4], off
+; WITHASYNC-NEXT:    s_mov_b32 m0, s4
+; WITHASYNC-NEXT:    s_mov_b32 s6, 2
+; WITHASYNC-NEXT:    global_load_dword v[0:1], off lds
+; WITHASYNC-NEXT:    global_load_dword v[0:1], off lds
+; WITHASYNC-NEXT:    s_mov_b64 s[4:5], 0
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(4)
+; WITHASYNC-NEXT:    v_mov_b32_e32 v13, v8
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(2)
+; WITHASYNC-NEXT:    v_mov_b32_e32 v15, v9
+; WITHASYNC-NEXT:  .LBB3_1: ; %loop_body
+; WITHASYNC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; WITHASYNC-NEXT:    v_readfirstlane_b32 s7, v2
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(1)
+; WITHASYNC-NEXT:    v_mov_b32_e32 v12, v15
+; WITHASYNC-NEXT:    v_mov_b32_e32 v11, v13
+; WITHASYNC-NEXT:    global_load_dword v13, v[0:1], off
+; WITHASYNC-NEXT:    global_load_dword v15, v[3:4], off
+; WITHASYNC-NEXT:    s_mov_b32 m0, s7
+; WITHASYNC-NEXT:    s_add_i32 s6, s6, 1
+; WITHASYNC-NEXT:    global_load_dword v[0:1], off lds
+; WITHASYNC-NEXT:    v_cmp_ge_i32_e32 vcc, s6, v7
+; WITHASYNC-NEXT:    v_mov_b32_e32 v16, v14
+; WITHASYNC-NEXT:    v_mov_b32_e32 v17, v10
+; WITHASYNC-NEXT:    v_mov_b32_e32 v10, v8
+; WITHASYNC-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; WITHASYNC-NEXT:    v_mov_b32_e32 v14, v9
+; WITHASYNC-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; WITHASYNC-NEXT:    s_cbranch_execnz .LBB3_1
+; WITHASYNC-NEXT:  ; %bb.2: ; %epilog
+; WITHASYNC-NEXT:    s_or_b64 exec, exec, s[4:5]
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0)
+; WITHASYNC-NEXT:    ds_read_b32 v0, v2
+; WITHASYNC-NEXT:    v_add_u32_e32 v1, v17, v16
+; WITHASYNC-NEXT:    v_add_u32_e32 v2, v13, v15
+; WITHASYNC-NEXT:    s_waitcnt lgkmcnt(0)
+; WITHASYNC-NEXT:    v_add3_u32 v1, v1, v0, v12
+; WITHASYNC-NEXT:    v_add3_u32 v1, v11, v1, v0
+; WITHASYNC-NEXT:    v_add3_u32 v0, v2, v0, v1
+; WITHASYNC-NEXT:    global_store_dword v[5:6], v0, off
+; WITHASYNC-NEXT:    s_waitcnt vmcnt(0)
+; WITHASYNC-NEXT:    s_setpc_b64 s[30:31]
+;
+; WITHOUT-LABEL: test_pipelined_loop_with_global:
+; WITHOUT:       ; %bb.0: ; %prolog
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WITHOUT-NEXT:    v_readfirstlane_b32 s4, v2
+; WITHOUT-NEXT:    global_load_dword v10, v[0:1], off
+; WITHOUT-NEXT:    global_load_dword v8, v[0:1], off
+; WITHOUT-NEXT:    global_load_dword v14, v[3:4], off
+; WITHOUT-NEXT:    global_load_dword v9, v[3:4], off
+; WITHOUT-NEXT:    s_mov_b32 m0, s4
+; WITHOUT-NEXT:    s_mov_b32 s6, 2
+; WITHOUT-NEXT:    global_load_dword v[0:1], off lds
+; WITHOUT-NEXT:    global_load_dword v[0:1], off lds
+; WITHOUT-NEXT:    s_mov_b64 s[4:5], 0
+; WITHOUT-NEXT:    s_waitcnt vmcnt(4)
+; WITHOUT-NEXT:    v_mov_b32_e32 v13, v8
+; WITHOUT-NEXT:    s_waitcnt vmcnt(2)
+; WITHOUT-NEXT:    v_mov_b32_e32 v15, v9
+; WITHOUT-NEXT:  .LBB3_1: ; %loop_body
+; WITHOUT-NEXT:    ; =>This Inner Loop Header: Depth=1
+; WITHOUT-NEXT:    v_readfirstlane_b32 s7, v2
+; WITHOUT-NEXT:    s_waitcnt vmcnt(1)
+; WITHOUT-NEXT:    v_mov_b32_e32 v12, v15
+; WITHOUT-NEXT:    v_mov_b32_e32 v11, v13
+; WITHOUT-NEXT:    global_load_dword v13, v[0:1], off
+; WITHOUT-NEXT:    global_load_dword v15, v[3:4], off
+; WITHOUT-NEXT:    s_mov_b32 m0, s7
+; WITHOUT-NEXT:    s_add_i32 s6, s6, 1
+; WITHOUT-NEXT:    global_load_dword v[0:1], off lds
+; WITHOUT-NEXT:    v_cmp_ge_i32_e32 vcc, s6, v7
+; WITHOUT-NEXT:    v_mov_b32_e32 v16, v14
+; WITHOUT-NEXT:    v_mov_b32_e32 v17, v10
+; WITHOUT-NEXT:    v_mov_b32_e32 v10, v8
+; WITHOUT-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; WITHOUT-NEXT:    v_mov_b32_e32 v14, v9
+; WITHOUT-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; WITHOUT-NEXT:    s_cbranch_execnz .LBB3_1
+; WITHOUT-NEXT:  ; %bb.2: ; %epilog
+; WITHOUT-NEXT:    s_or_b64 exec, exec, s[4:5]
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0)
+; WITHOUT-NEXT:    ds_read_b32 v0, v2
+; WITHOUT-NEXT:    v_add_u32_e32 v1, v17, v16
+; WITHOUT-NEXT:    v_add_u32_e32 v2, v13, v15
+; WITHOUT-NEXT:    s_waitcnt lgkmcnt(0)
+; WITHOUT-NEXT:    v_add3_u32 v1, v1, v0, v12
+; WITHOUT-NEXT:    v_add3_u32 v1, v11, v1, v0
+; WITHOUT-NEXT:    v_add3_u32 v0, v2, v0, v1
+; WITHOUT-NEXT:    global_store_dword v[5:6], v0, off
+; WITHOUT-NEXT:    s_waitcnt vmcnt(0)
+; WITHOUT-NEXT:    s_setpc_b64 s[30:31]
+prolog:
+  ; Load first iteration
+  %v0 = load i32, ptr addrspace(1) %foo
+  %g0 = load i32, ptr addrspace(1) %bar
+  call void @llvm.amdgcn.global.load.ASYNC.lds(ptr addrspace(1) %foo, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0)
+
+  ; Load second iteration
+  %v1 = load i32, ptr addrspace(1) %foo
+  %g1 = load i32, ptr addrspace(1) %bar
+  call void @llvm.amdgcn.global.load.ASYNC.lds(ptr addrspace(1) %foo, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0)
+
+  br label %loop_body
+
+  ; The vmcnt at the end of the prolog and at the start of the loop header 
seems
+  ; to be a result of the PHI nodes whose inputs are global loads. It is
+  ; stricter than necessary, to the point that the pipelined loop now has at
+  ; most two outstanding async ops instead of three. We could, in principle,
+  ; further relax the wait by introducing async global loads (not LDS DMA) in a
+  ; similar way.
+
+loop_body:
+  %i = phi i32 [ 2, %prolog ], [ %i.next, %loop_body ]
+  %prev_sum = phi i32 [ 0, %prolog ], [ %sum, %loop_body ]
+  %prev_v = phi i32 [ %v0, %prolog ], [ %v1, %loop_body ]
+  %prev_g = phi i32 [ %g0, %prolog ], [ %g1, %loop_body ]
+  %v1_phi = phi i32 [ %v1, %prolog ], [ %cur_v, %loop_body ]
+  %g1_phi = phi i32 [ %g1, %prolog ], [ %cur_g, %loop_body ]
+
+  ; Load next iteration
+  %cur_v = load i32, ptr addrspace(1) %foo
+  %cur_g = load i32, ptr addrspace(1) %bar
+  call void @llvm.amdgcn.global.load.ASYNC.lds(ptr addrspace(1) %foo, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0)
+
+  ; Wait for iteration i-2 and process
+  %lds_idx = sub i32 %i, 2
+  %lds_val = load i32, ptr addrspace(3) %lds
+
+  %sum1 = add i32 %prev_v, %prev_g
+  %sum = add i32 %sum1, %lds_val
+
+  %i.next = add i32 %i, 1
+  %cmp = icmp slt i32 %i.next, %n
+  br i1 %cmp, label %loop_body, label %epilog
+
+epilog:
+  ; Process remaining iterations
+  %lds_val_n_2 = load i32, ptr addrspace(3) %lds
+  %sum_e0 = add i32 %sum, %g1_phi
+  %sum_e1 = add i32 %v1_phi, %sum_e0
+  %sum_e2 = add i32 %sum_e1, %lds_val_n_2
+
+  %lds_val_n_1 = load i32, ptr addrspace(3) %lds
+  %sum_e3 = add i32 %cur_v, %cur_g
+  %sum_e4 = add i32 %sum_e3, %lds_val_n_1
+  %sum_e5 = add i32 %sum_e4, %sum_e2
+  store i32 %sum_e5, ptr addrspace(1) %out
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/hazard-flat-instruction-valu-check.mir 
b/llvm/test/CodeGen/AMDGPU/hazard-flat-instruction-valu-check.mir
index 1b2fb6ca1cdb7..a43b221a25fa2 100644
--- a/llvm/test/CodeGen/AMDGPU/hazard-flat-instruction-valu-check.mir
+++ b/llvm/test/CodeGen/AMDGPU/hazard-flat-instruction-valu-check.mir
@@ -12,10 +12,10 @@ body:             |
     ; GCN-LABEL: name: test_flat_valu_hazard
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN-NEXT: {{  $}}
-    ; GCN-NEXT: GLOBAL_LOAD_LDS_DWORD_SADDR killed $sgpr0_sgpr1, killed 
$vgpr0, 32, 2, implicit $m0, implicit $exec
+    ; GCN-NEXT: GLOBAL_LOAD_LDS_DWORD_SADDR killed $sgpr0_sgpr1, killed 
$vgpr0, 32, 2, 0, implicit $m0, implicit $exec
     ; GCN-NEXT: $vgpr0 = V_MOV_B32_e32 killed $vgpr1, implicit $exec, implicit 
$exec
     ; GCN-NEXT: FLAT_STORE_DWORDX2 killed renamable $vgpr2_vgpr3, killed 
renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
-    GLOBAL_LOAD_LDS_DWORD_SADDR killed $sgpr0_sgpr1, killed $vgpr0, 32, 2, 
implicit $m0, implicit $exec
+    GLOBAL_LOAD_LDS_DWORD_SADDR killed $sgpr0_sgpr1, killed $vgpr0, 32, 2, 0, 
implicit $m0, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $vgpr1, implicit $exec, implicit $exec
     FLAT_STORE_DWORDX2 killed renamable $vgpr2_vgpr3, killed renamable 
$vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir 
b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir
index 675a1c94bc435..ca622b739a1b8 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-fence-soft.mir
@@ -10,12 +10,12 @@ body:             |
     ; GCN-LABEL: name: dma_then_fence
     ; GCN: S_WAITCNT 0
     ; GCN-NEXT: $m0 = S_MOV_B32 0
-    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
+    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
     ; GCN-NEXT: S_WAITCNT 3952
     ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, 
implicit $exec
     ; GCN-NEXT: S_ENDPGM 0
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
     S_WAITCNT_lds_direct
     $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
     S_ENDPGM 0
@@ -31,13 +31,13 @@ body:             |
     ; GCN-LABEL: name: dma_then_global_load
     ; GCN: S_WAITCNT 0
     ; GCN-NEXT: $m0 = S_MOV_B32 0
-    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
+    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
     ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
     ; GCN-NEXT: S_WAITCNT 3953
     ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, 
implicit $exec
     ; GCN-NEXT: S_ENDPGM 0
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
     $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
     S_WAITCNT_lds_direct
     $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
@@ -71,12 +71,12 @@ body:             |
   bb.0:
     ; GCN-LABEL: name: dma_then_system_fence
     ; GCN: S_WAITCNT 0
-    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
+    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
     ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
     ; GCN-NEXT: S_WAITCNT 3953
     ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, 
implicit $exec
     ; GCN-NEXT: S_ENDPGM 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
     $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
     S_WAITCNT_lds_direct
     $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
@@ -93,13 +93,13 @@ body:             |
     ; GCN-LABEL: name: merge_with_prev_wait
     ; GCN: S_WAITCNT 0
     ; GCN-NEXT: $m0 = S_MOV_B32 0
-    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
+    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
     ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
     ; GCN-NEXT: S_WAITCNT 3952
     ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, 
implicit $exec
     ; GCN-NEXT: S_ENDPGM 0
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
     $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
     S_WAITCNT 3952
     S_WAITCNT_lds_direct
@@ -117,13 +117,13 @@ body:             |
     ; GCN-LABEL: name: merge_with_next_wait
     ; GCN: S_WAITCNT 0
     ; GCN-NEXT: $m0 = S_MOV_B32 0
-    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
+    ; GCN-NEXT: BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison` + 4, addrspace 1), (store (s32) into `ptr addrspace(3) 
poison` + 4, addrspace 3)
     ; GCN-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
     ; GCN-NEXT: S_WAITCNT 3952
     ; GCN-NEXT: $vgpr1 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, 
implicit $exec
     ; GCN-NEXT: S_ENDPGM 0
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
     $vgpr2 = GLOBAL_LOAD_DWORD $vgpr4_vgpr5, 0, 0, implicit $exec
     S_WAITCNT_lds_direct
     S_WAITCNT 3952
diff --git a/llvm/test/CodeGen/AMDGPU/lds-dma-hazards.mir 
b/llvm/test/CodeGen/AMDGPU/lds-dma-hazards.mir
index 4fe0ec45048ce..31b56a67e0464 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-dma-hazards.mir
+++ b/llvm/test/CodeGen/AMDGPU/lds-dma-hazards.mir
@@ -9,7 +9,7 @@ name: buffer_load_dword_lds
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_ADDR64 $vgpr0_vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 
0, 0, implicit $exec, implicit $m0
+    BUFFER_LOAD_DWORD_LDS_ADDR64 $vgpr0_vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 
0, 0, 0, implicit $exec, implicit $m0
 ...
 
 # GCN-LABEL: name: buffer_store_lds_dword
@@ -33,7 +33,7 @@ name: global_load_lds_dword
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    GLOBAL_LOAD_LDS_DWORD $vgpr2_vgpr3, 0, 0, implicit $exec, implicit $m0
+    GLOBAL_LOAD_LDS_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $m0
 ...
 
 # GCN-LABEL: name: scratch_load_lds_dword
diff --git a/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir 
b/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
index 0e64d0430668e..0546e2fa2db6a 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
@@ -10,7 +10,7 @@ name: buffer_load_dword_lds_ds_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
     $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: 
(load (s32) from `ptr addrspace(3) poison`)
     S_ENDPGM 0
 
@@ -27,7 +27,7 @@ name: buffer_load_dword_lds_vmcnt_1
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison`), (store (s32) into `ptr addrspace(3) poison`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison`), (store (s32) into `ptr addrspace(3) poison`)
     $vgpr10 = BUFFER_LOAD_DWORD_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 
$sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr 
addrspace(1) poison`)
     $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: 
(load (s32) from `ptr addrspace(3) poison`)
     S_ENDPGM 0
@@ -44,7 +44,7 @@ name: buffer_load_dword_lds_flat_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison`), (store (s32) into `ptr addrspace(3) poison`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison`), (store (s32) into `ptr addrspace(3) poison`)
     $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit 
$flat_scr :: (load (s32) from `ptr poison`)
 
     S_ENDPGM 0
@@ -61,7 +61,7 @@ name: global_load_lds_dword_ds_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    GLOBAL_LOAD_LDS_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $m0 :: 
(load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr 
addrspace(3) poison` + 4)
+    GLOBAL_LOAD_LDS_DWORD $vgpr0_vgpr1, 4, 0, 0, implicit $exec, implicit $m0 
:: (load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr 
addrspace(3) poison` + 4)
     $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: 
(load (s32) from `ptr addrspace(3) poison`)
     S_ENDPGM 0
 
@@ -80,7 +80,7 @@ body:             |
   bb.0:
     $m0 = S_MOV_B32 0
     $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: 
(load (s32) from `ptr addrspace(3) poison`)
-    GLOBAL_LOAD_LDS_DWORD $vgpr2_vgpr3, 4, 0, implicit $exec, implicit $m0 :: 
(load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr 
addrspace(3) poison` + 4)
+    GLOBAL_LOAD_LDS_DWORD $vgpr2_vgpr3, 4, 0, 0, implicit $exec, implicit $m0 
:: (load (s32) from `ptr addrspace(1) poison` + 4), (store (s32) into `ptr 
addrspace(3) poison` + 4)
     $vgpr4 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
     S_ENDPGM 0
 
@@ -100,7 +100,7 @@ body:             |
   bb.0:
     $m0 = S_MOV_B32 0
     $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: 
(load (s32) from `ptr addrspace(3) poison`)
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison`), (store (s32) into `ptr addrspace(3) poison`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison`), (store (s32) into `ptr addrspace(3) poison`)
     $vgpr4 = V_ADD_U32_e32 $vgpr0, $vgpr0, implicit $exec
     S_ENDPGM 0
 
@@ -149,9 +149,9 @@ name: series_of_buffer_load_dword_lds_ds_read
 body:             |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison`), (store (s32) into `ptr addrspace(3) poison`)
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 
0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 8), (store (s32) into `ptr addrspace(3) poison` + 8)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison`), (store (s32) into `ptr addrspace(3) poison`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 4), (store (s32) into `ptr addrspace(3) poison` + 4)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 
0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) 
poison` + 8), (store (s32) into `ptr addrspace(3) poison` + 8)
     $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: 
(load (s32) from `ptr addrspace(3) poison`)
     S_ENDPGM 0
 
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.lds.err.ll 
b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.lds.err.ll
index 770283563001f..f9cfc1f487327 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.lds.err.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.buffer.load.lds.err.ll
@@ -18,20 +18,44 @@ define amdgpu_ps void @buffer_load_lds(<4 x i32> inreg 
%rsrc, ptr addrspace(3) i
   ret void
 }
 
+;--- struct.async.ll
+define amdgpu_ps void @buffer_load_lds(<4 x i32> inreg %rsrc, ptr addrspace(3) 
inreg %lds) {
+  call void @llvm.amdgcn.struct.buffer.load.async.lds(<4 x i32> %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0)
+  ret void
+}
+
 ;--- struct.ptr.ll
 define amdgpu_ps void @buffer_load_lds(ptr addrspace(8) inreg %rsrc, ptr 
addrspace(3) inreg %lds) {
   call void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, 
ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0)
   ret void
 }
 
+;--- struct.ptr.async.ll
+define amdgpu_ps void @buffer_load_lds(ptr addrspace(8) inreg %rsrc, ptr 
addrspace(3) inreg %lds) {
+  call void @llvm.amdgcn.struct.ptr.buffer.load.async.lds(ptr addrspace(8) 
%rsrc, ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0)
+  ret void
+}
+
 ;--- raw.ll
 define amdgpu_ps void @buffer_load_lds(<4 x i32> inreg %rsrc, ptr addrspace(3) 
inreg %lds) {
   call void @llvm.amdgcn.raw.buffer.load.lds(<4 x i32> %rsrc, ptr addrspace(3) 
%lds, i32 4, i32 0, i32 0, i32 0, i32 0)
   ret void
 }
 
+;--- raw.async.ll
+define amdgpu_ps void @buffer_load_lds(<4 x i32> inreg %rsrc, ptr addrspace(3) 
inreg %lds) {
+  call void @llvm.amdgcn.raw.buffer.load.async.lds(<4 x i32> %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0)
+  ret void
+}
+
 ;--- raw.ptr.ll
 define amdgpu_ps void @buffer_load_lds(ptr addrspace(8) inreg %rsrc, ptr 
addrspace(3) inreg %lds) {
   call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %rsrc, ptr 
addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0)
   ret void
 }
+
+;--- raw.ptr.async.ll
+define amdgpu_ps void @buffer_load_lds(ptr addrspace(8) inreg %rsrc, ptr 
addrspace(3) inreg %lds) {
+  call void @llvm.amdgcn.raw.ptr.buffer.load.async.lds(ptr addrspace(8) %rsrc, 
ptr addrspace(3) %lds, i32 4, i32 0, i32 0, i32 0, i32 0)
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll 
b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
index eb99c89e53ed9..b45dd459f262e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.gfx950.ll
@@ -43,6 +43,27 @@ define amdgpu_ps void 
@global_load_lds_dwordx3_vaddr_saddr(ptr addrspace(1) noca
   ret void
 }
 
+define amdgpu_ps void @buffer_load_lds_async(ptr addrspace(7) nocapture inreg 
%gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX950-SDAG-LABEL: buffer_load_lds_async:
+; GFX950-SDAG:       ; %bb.0:
+; GFX950-SDAG-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX950-SDAG-NEXT:    s_mov_b32 m0, s5
+; GFX950-SDAG-NEXT:    s_nop 0
+; GFX950-SDAG-NEXT:    buffer_load_dwordx3 v0, s[0:3], 0 offen offset:16 sc0 
lds
+; GFX950-SDAG-NEXT:    s_endpgm
+;
+; GFX950-GISEL-LABEL: buffer_load_lds_async:
+; GFX950-GISEL:       ; %bb.0:
+; GFX950-GISEL-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX950-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX950-GISEL-NEXT:    s_nop 0
+; GFX950-GISEL-NEXT:    buffer_load_dwordx3 v0, s[0:3], 0 offen offset:16 sc0 
lds
+; GFX950-GISEL-NEXT:    s_endpgm
+  %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
+  call void @llvm.amdgcn.load.async.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 12, i32 16, i32 1)
+  ret void
+}
+
 define amdgpu_ps void @buffer_load_lds_dwordx3_vaddr_saddr(ptr addrspace(7) 
nocapture inreg %gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
 ; GFX950-LABEL: buffer_load_lds_dwordx3_vaddr_saddr:
 ; GFX950:       ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll 
b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll
index f66ad928d261d..f1425db821fdc 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.load.to.lds.ll
@@ -43,6 +43,43 @@ main_body:
   ret void
 }
 
+define amdgpu_ps void @buffer_load_async_lds(ptr addrspace(7) nocapture inreg 
%gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
+; GFX90A-LABEL: buffer_load_async_lds:
+; GFX90A:       ; %bb.0: ; %main_body
+; GFX90A-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX90A-NEXT:    s_mov_b32 m0, s5
+; GFX90A-NEXT:    s_nop 0
+; GFX90A-NEXT:    buffer_load_dword v0, s[0:3], 0 offen offset:16 glc lds
+; GFX90A-NEXT:    s_endpgm
+;
+; GFX942-LABEL: buffer_load_async_lds:
+; GFX942:       ; %bb.0: ; %main_body
+; GFX942-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX942-NEXT:    s_mov_b32 m0, s5
+; GFX942-NEXT:    s_nop 0
+; GFX942-NEXT:    buffer_load_dword v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX942-NEXT:    s_endpgm
+;
+; GFX10-LABEL: buffer_load_async_lds:
+; GFX10:       ; %bb.0: ; %main_body
+; GFX10-NEXT:    v_add_nc_u32_e32 v0, s4, v0
+; GFX10-NEXT:    s_mov_b32 m0, s5
+; GFX10-NEXT:    buffer_load_dword v0, s[0:3], 0 offen offset:16 glc lds
+; GFX10-NEXT:    s_endpgm
+;
+; GFX942-GISEL-LABEL: buffer_load_async_lds:
+; GFX942-GISEL:       ; %bb.0: ; %main_body
+; GFX942-GISEL-NEXT:    v_add_u32_e32 v0, s4, v0
+; GFX942-GISEL-NEXT:    s_mov_b32 m0, s5
+; GFX942-GISEL-NEXT:    s_nop 0
+; GFX942-GISEL-NEXT:    buffer_load_dword v0, s[0:3], 0 offen offset:16 sc0 lds
+; GFX942-GISEL-NEXT:    s_endpgm
+main_body:
+  %gptr.off = getelementptr i8, ptr addrspace(7) %gptr, i32 %off
+  call void @llvm.amdgcn.load.async.to.lds.p7(ptr addrspace(7) %gptr.off, ptr 
addrspace(3) %lptr, i32 4, i32 16, i32 1)
+  ret void
+}
+
 define amdgpu_ps void @buffer_load_lds_dword_vaddr_saddr(ptr addrspace(7) 
nocapture inreg %gptr, i32 %off, ptr addrspace(3) nocapture inreg %lptr) {
 ; GFX90A-LABEL: buffer_load_lds_dword_vaddr_saddr:
 ; GFX90A:       ; %bb.0: ; %main_body
diff --git a/llvm/test/CodeGen/AMDGPU/sched.group.classification.mir 
b/llvm/test/CodeGen/AMDGPU/sched.group.classification.mir
index a4aad574aaaf4..876a58f25e668 100644
--- a/llvm/test/CodeGen/AMDGPU/sched.group.classification.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched.group.classification.mir
@@ -18,11 +18,11 @@ body: |
     ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[DEF2]], 
[[DEF3]], implicit $exec
     ; CHECK-NEXT: [[V_ADD_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 
[[DEF3]], [[V_ADD_U32_e32_]], implicit $exec
     ; CHECK-NEXT: $m0 = S_MOV_B32 0
-    ; CHECK-NEXT: BUFFER_LOAD_DWORDX4_LDS_OFFEN [[DEF]], [[DEF1]], 0, 0, 0, 0, 
implicit $exec, implicit $m0
+    ; CHECK-NEXT: BUFFER_LOAD_DWORDX4_LDS_OFFEN [[DEF]], [[DEF1]], 0, 0, 0, 0, 
0, implicit $exec, implicit $m0
     ; CHECK-NEXT: [[V_ADD_U32_e32_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 
[[V_ADD_U32_e32_]], [[V_ADD_U32_e32_1]], implicit $exec
     ; CHECK-NEXT: [[V_ADD_U32_e32_3:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 
[[V_ADD_U32_e32_1]], [[V_ADD_U32_e32_2]], implicit $exec
     ; CHECK-NEXT: $m0 = S_MOV_B32 1
-    ; CHECK-NEXT: BUFFER_LOAD_DWORDX4_LDS_OFFEN [[DEF]], [[DEF1]], 0, 0, 0, 0, 
implicit $exec, implicit $m0
+    ; CHECK-NEXT: BUFFER_LOAD_DWORDX4_LDS_OFFEN [[DEF]], [[DEF1]], 0, 0, 0, 0, 
0, implicit $exec, implicit $m0
     ; CHECK-NEXT: [[V_ADD_U32_e32_4:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 
[[V_ADD_U32_e32_2]], [[V_ADD_U32_e32_3]], implicit $exec
     ; CHECK-NEXT: [[V_ADD_U32_e32_5:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 
[[V_ADD_U32_e32_3]], [[V_ADD_U32_e32_4]], implicit $exec
     ; CHECK-NEXT: [[V_ADD_U32_e32_6:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 
[[V_ADD_U32_e32_4]], [[V_ADD_U32_e32_5]], implicit $exec
@@ -41,9 +41,9 @@ body: |
     %4:vgpr_32 = V_ADD_U32_e32 %2, %3, implicit $exec
     %5:vgpr_32 = V_ADD_U32_e32 %3, %4, implicit $exec
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORDX4_LDS_OFFEN %0, %1, 0, 0, 0, 0, implicit $exec, implicit 
$m0
+    BUFFER_LOAD_DWORDX4_LDS_OFFEN %0, %1, 0, 0, 0, 0, 0, implicit $exec, 
implicit $m0
     $m0 = S_MOV_B32 1
-    BUFFER_LOAD_DWORDX4_LDS_OFFEN %0, %1, 0, 0, 0, 0, implicit $exec, implicit 
$m0
+    BUFFER_LOAD_DWORDX4_LDS_OFFEN %0, %1, 0, 0, 0, 0, 0, implicit $exec, 
implicit $m0
     %6:vgpr_32 = V_ADD_U32_e32 %4, %5, implicit $exec
     %7:vgpr_32 = V_ADD_U32_e32 %5, %6, implicit $exec
     %8:vgpr_32 = V_ADD_U32_e32 %6, %7, implicit $exec
