https://github.com/ssahasra created https://github.com/llvm/llvm-project/pull/173259
Asynchronous operations are memory transfers (usually between the global memory and LDS) that are completed independently at an unspecified scope. A thread that requests one or more asynchronous transfers can use *async markers* to track their completion. The thread waits for each marker to be *completed*, which indicates that requests initiated in program order before this marker have also completed. For now, we implement asyncmark/wait operations on pre-GFX12 architectures that support "LDS DMA" operations. These "legacy" operations are now extended to accept an optional `ASYNC` parameter as a bit in the auxiliary argument. When set, it indicates that the compiler should not automatically track the completion of this operation. Future work will extend support to GFX12Plus architectures that support "true" async operations. Co-authored-by: Ryan Mitchell <[email protected]> Fixes: SWDEV-521121 >From f3375205f0a0c5985b084ce88ae9d359c2298121 Mon Sep 17 00:00:00 2001 From: Sameer Sahasrabuddhe <[email protected]> Date: Mon, 22 Dec 2025 17:33:06 +0530 Subject: [PATCH] [AMDGPU] Introduce asyncmark/wait intrinsics Asynchronous operations are memory transfers (usually between the global memory and LDS) that are completed independently at an unspecified scope. A thread that requests one or more asynchronous transfers can use *async markers* to track their completion. The thread waits for each marker to be *completed*, which indicates that requests initiated in program order before this marker have also completed. For now, we implement asyncmark/wait operations on pre-GFX12 architectures that support "LDS DMA" operations. These "legacy" operations are now extended to accept an optional `ASYNC` parameter as a bit in the auxiliary argument. When set, it indicates that the compiler should not automatically track the completion of this operation. Future work will extend support to GFX12Plus architectures that support "true" async operations. 
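For reference, a minimal IR sketch of the intended usage (mirroring the
new codegen tests; the aux value u0x20 sets the pre-GFX12 ASYNC bit):

  call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %src, ptr addrspace(3) %dst, i32 4, i32 0, i32 u0x20)
  call void @llvm.amdgcn.asyncmark()
  call void @llvm.amdgcn.wait.asyncmark(i16 0) ; wait on the most recent marker
  %v = load i32, ptr addrspace(3) %dst         ; transferred data is now visible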
Co-authored-by: Ryan Mitchell <[email protected]> Fixes: SWDEV-521121 --- clang/include/clang/Basic/BuiltinsAMDGPU.def | 8 + .../builtins-amdgcn-asyncmark-errs.cl | 7 + .../builtins-amdgcn-asyncmark.cl | 16 + llvm/docs/AMDGPUAsyncOperations.rst | 180 ++++ llvm/docs/AMDGPUUsage.rst | 12 +- llvm/docs/UserGuides.rst | 4 + llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 9 + .../AMDGPU/AMDGPUInstructionSelector.cpp | 6 + llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp | 16 +- llvm/lib/Target/AMDGPU/SIDefines.h | 7 +- llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 323 ++++++- llvm/lib/Target/AMDGPU/SIInstrInfo.h | 4 + llvm/lib/Target/AMDGPU/SOPInstructions.td | 13 +- llvm/test/CodeGen/AMDGPU/async-mark-err.ll | 10 + .../CodeGen/AMDGPU/async-mark-pregfx12.ll | 898 ++++++++++++++++++ 15 files changed, 1490 insertions(+), 23 deletions(-) create mode 100644 clang/test/CodeGenOpenCL/builtins-amdgcn-asyncmark-errs.cl create mode 100644 clang/test/CodeGenOpenCL/builtins-amdgcn-asyncmark.cl create mode 100644 llvm/docs/AMDGPUAsyncOperations.rst create mode 100644 llvm/test/CodeGen/AMDGPU/async-mark-err.ll create mode 100644 llvm/test/CodeGen/AMDGPU/async-mark-pregfx12.ll diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def index 88b306462a92c..972e1580912dc 100644 --- a/clang/include/clang/Basic/BuiltinsAMDGPU.def +++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def @@ -538,6 +538,14 @@ TARGET_BUILTIN(__builtin_amdgcn_ashr_pk_u8_i32, "UsUiUiUi", "nc", "ashr-pk-insts TARGET_BUILTIN(__builtin_amdgcn_cvt_scalef32_2xpk16_fp6_f32, "V6UiV16fV16ff", "nc", "gfx950-insts") TARGET_BUILTIN(__builtin_amdgcn_cvt_scalef32_2xpk16_bf6_f32, "V6UiV16fV16ff", "nc", "gfx950-insts") +//===----------------------------------------------------------------------===// +// Async mark builtins. +//===----------------------------------------------------------------------===// + +// FIXME: Not supported on GFX12 yet. Will need a new feature when we do. +TARGET_BUILTIN(__builtin_amdgcn_asyncmark, "v", "n", "vmem-to-lds-load-insts") +TARGET_BUILTIN(__builtin_amdgcn_wait_asyncmark, "vIs", "n", "vmem-to-lds-load-insts") + //===----------------------------------------------------------------------===// // GFX12+ only builtins. 
//===----------------------------------------------------------------------===//
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-asyncmark-errs.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-asyncmark-errs.cl
new file mode 100644
index 0000000000000..7d4a141fbde6e
--- /dev/null
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-asyncmark-errs.cl
@@ -0,0 +1,7 @@
+// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -O0 -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -target-cpu gfx1250 -verify -S -o - %s
+
+void test_feature() {
+  __builtin_amdgcn_asyncmark(); // expected-error{{'__builtin_amdgcn_asyncmark' needs target feature vmem-to-lds-load-insts}}
+  __builtin_amdgcn_wait_asyncmark(0); // expected-error{{'__builtin_amdgcn_wait_asyncmark' needs target feature vmem-to-lds-load-insts}}
+}
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-asyncmark.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-asyncmark.cl
new file mode 100644
index 0000000000000..976ae3cea5d6d
--- /dev/null
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-asyncmark.cl
@@ -0,0 +1,16 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown -target-cpu gfx900 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown -target-cpu gfx942 -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -cl-std=CL2.0 -O0 -triple amdgcn-unknown-unknown -target-cpu gfx1010 -emit-llvm -o - %s | FileCheck %s
+// REQUIRES: amdgpu-registered-target
+
+// CHECK-LABEL: @test_invocation(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    call void @llvm.amdgcn.asyncmark()
+// CHECK-NEXT:    call void @llvm.amdgcn.wait.asyncmark(i16 0)
+// CHECK-NEXT:    ret void
+//
+void test_invocation() {
+  __builtin_amdgcn_asyncmark();
+  __builtin_amdgcn_wait_asyncmark(0);
+}
diff --git a/llvm/docs/AMDGPUAsyncOperations.rst b/llvm/docs/AMDGPUAsyncOperations.rst
new file mode 100644
index 0000000000000..006c59d53294c
--- /dev/null
+++ b/llvm/docs/AMDGPUAsyncOperations.rst
@@ -0,0 +1,180 @@
+===============================
+ AMDGPU Asynchronous Operations
+===============================
+
+.. contents::
+   :local:
+
+Introduction
+============
+
+Asynchronous operations are memory transfers (usually between the global memory
+and LDS) that are completed independently at an unspecified scope. A thread that
+requests one or more asynchronous transfers can use *async markers* to track
+their completion. The thread waits for each marker to be *completed*, which
+indicates that requests initiated in program order before this marker have also
+completed.
+
+Operations
+==========
+
+``async_load_to_lds``
+---------------------
+
+.. code-block:: llvm
+
+   ; Legacy "LDS DMA" operations
+   void @llvm.amdgcn.load.to.lds(ptr %src, ptr %dst, ASYNC)
+   void @llvm.amdgcn.global.load.lds(ptr %src, ptr %dst, ASYNC)
+   void @llvm.amdgcn.raw.buffer.load.lds(ptr %src, ptr %dst, ASYNC)
+   void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr %src, ptr %dst, ASYNC)
+   void @llvm.amdgcn.struct.buffer.load.lds(ptr %src, ptr %dst, ASYNC)
+   void @llvm.amdgcn.struct.ptr.buffer.load.lds(ptr %src, ptr %dst, ASYNC)
+
+Requests an async operation that copies the specified number of bytes from the
+global/buffer pointer ``%src`` to the LDS pointer ``%dst``.
+
+The optional parameter ``ASYNC`` is a bit in the auxiliary argument to those
+intrinsics, as documented in :ref:`LDS DMA operations<amdgpu-lds-dma-bits>`.
+When set, it indicates that the compiler should not automatically track the
+completion of this operation.
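+
+For illustration, a request with the ``ASYNC`` bit set looks like the following
+sketch (the call signature and the aux value ``u0x20`` mirror the new codegen
+tests in ``llvm/test/CodeGen/AMDGPU/async-mark-pregfx12.ll``):
+
+.. code-block:: llvm
+
+   ; copy 4 bytes from global %src to LDS %dst; completion is tracked
+   ; explicitly through async markers instead of an implicit vmcnt wait
+   call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %src,
+                                          ptr addrspace(3) %dst,
+                                          i32 4, i32 0, i32 u0x20)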
+
+``@llvm.amdgcn.asyncmark()``
+----------------------------
+
+Creates an *async marker* to track all the async operations that are program
+ordered before this call. A marker M is said to be *completed* only when all
+async operations program ordered before M are reported by the implementation as
+having finished, and it is said to be *outstanding* otherwise.
+
+Thus we have the following sufficient condition:
+
+  An async operation X is *completed* at a program point P if there exists a
+  marker M such that X is program ordered before M, M is program ordered before
+  P, and M is completed. X is said to be *outstanding* at P otherwise.
+
+``@llvm.amdgcn.wait.asyncmark(i16 %N)``
+---------------------------------------
+
+Waits until the ``N+1``-th predecessor marker M in program order before this
+call is completed, if M exists.
+
+``N`` is an unsigned integer; the ``N+1``-th predecessor marker of point X is a
+marker M such that there are ``N`` markers in program order from M to X, not
+including M.
+
+Memory Consistency Model
+========================
+
+Each asynchronous operation consists of a non-atomic read on the source and a
+non-atomic write on the destination. Legacy "LDS DMA" intrinsics result in async
+accesses that guarantee visibility relative to other memory operations as
+follows:
+
+  The side-effects of an asynchronous operation ``A`` program ordered before any
+  memory operation ``X`` are visible to ``X`` if ``A`` is completed before ``X``.
+
+  The side-effects of any memory operation ``X`` program ordered before an
+  asynchronous operation ``A`` are visible to ``A``.
+
+Function calls in LLVM
+======================
+
+The underlying abstract machine does not implicitly track the completion of
+async operations while entering or returning from a function call.
+
+.. note::
+
+   As long as the caller uses sufficient waits to track its own async
+   operations, the actions performed by the callee cannot affect correctness.
+   But the resulting implementation may contain redundant waits, which can be
+   improved by setting the attribute to a value other than ``async:unknown``.
+
+Examples
+========
+
+Uneven blocks of async transfers
+--------------------------------
+
+.. code-block:: c++
+
+   void foo(global int *g, local int *l) {
+     // first block
+     async_load_to_lds(g, l);
+     async_load_to_lds(g, l);
+     async_load_to_lds(g, l);
+     asyncmark();
+
+     // second block; longer
+     async_load_to_lds(g, l);
+     async_load_to_lds(g, l);
+     async_load_to_lds(g, l);
+     async_load_to_lds(g, l);
+     async_load_to_lds(g, l);
+     asyncmark();
+
+     // third block; shorter
+     async_load_to_lds(g, l);
+     async_load_to_lds(g, l);
+     asyncmark();
+
+     // Wait for first block
+     wait.asyncmark(2);
+   }
+
+Software pipeline
+-----------------
+
+.. code-block:: c++
+
+   void foo(global int *g, local int *l) {
+     // first block
+     asyncmark();
+
+     // second block
+     asyncmark();
+
+     // third block
+     asyncmark();
+
+     for (;;) {
+       wait.asyncmark(2);
+       // use data
+
+       // next block
+       asyncmark();
+     }
+
+     // flush one block
+     wait.asyncmark(2);
+
+     // flush one more block
+     wait.asyncmark(1);
+
+     // flush last block
+     wait.asyncmark(0);
+   }
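+
+Asyncmark at the IR level
+-------------------------
+
+The same bookkeeping written directly against the intrinsics, as a sketch that
+mirrors the new codegen tests (``u0x20`` sets the pre-GFX12 ``ASYNC`` aux bit,
+and ``i16 1`` waits on the second-most-recent marker):
+
+.. code-block:: llvm
+
+   ; first block
+   call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %g1, ptr addrspace(3) %l1, i32 4, i32 0, i32 u0x20)
+   call void @llvm.amdgcn.asyncmark()
+
+   ; second block
+   call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %g2, ptr addrspace(3) %l2, i32 4, i32 0, i32 u0x20)
+   call void @llvm.amdgcn.asyncmark()
+
+   ; wait until the first block's marker is completed, then read its data
+   call void @llvm.amdgcn.wait.asyncmark(i16 1)
+   %v1 = load i32, ptr addrspace(3) %l1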
+
+Ordinary function call
+----------------------
+
+.. code-block:: c++
+
+   extern void bar(); // may or may not make async calls
+
+   void foo(global int *g, local int *l) {
+     // first block
+     asyncmark();
+
+     // second block
+     asyncmark();
+
+     // function call
+     bar();
+
+     // third block
+     asyncmark();
+
+     wait.asyncmark(1); // will wait for at least the second block, possibly including bar()
+     wait.asyncmark(0); // will wait for the third block, including bar()
+   }
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 7ecf1c1124894..691f4c8017a2f 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -6594,12 +6594,18 @@ operations.
 ``buffer/global/flat_load/store/atomic`` instructions to global memory are
 termed vector memory operations.
 
+.. _amdgpu-lds-dma-bits:
+
 ``global_load_lds`` or ``buffer/global_load`` instructions with the `lds` flag
 are LDS DMA loads. They interact with caches as if the loaded data were
 being loaded to registers and not to LDS, and so therefore support the same
-cache modifiers. They cannot be performed atomically. They implement volatile
-(via aux/cpol bit 31) and nontemporal (via metadata) as if they were loads
-from the global address space.
+cache modifiers. They cannot be performed atomically. They can be performed with
+asynchronous, volatile and nontemporal semantics as if they were loads
+from the global address space:
+
+- asynchronous: aux bit u0x20
+- volatile: aux bit u0x80000000 (bit 31)
+- nontemporal: metadata
 
 Private address space uses ``buffer_load/store`` using the scratch V#
 (GFX6-GFX8), or ``scratch_load/store`` (GFX9-GFX11). Since only a single thread
diff --git a/llvm/docs/UserGuides.rst b/llvm/docs/UserGuides.rst
index d3ca2f69016c1..6b34cc5632d40 100644
--- a/llvm/docs/UserGuides.rst
+++ b/llvm/docs/UserGuides.rst
@@ -18,6 +18,7 @@ intermediate LLVM representation.
    AdvancedBuilds
    AliasAnalysis
    AMDGPUUsage
+   AMDGPUAsyncOperations
    Benchmarking
    BigEndianNEON
    BuildingADistribution
@@ -283,6 +284,9 @@ Additional Topics
 :doc:`AMDGPUUsage`
    This document describes using the AMDGPU backend to compile GPU kernels.
 
+:doc:`AMDGPUAsyncOperations`
+   Builtins for invoking asynchronous data transfer operations on AMD GPUs.
+
 :doc:`AMDGPUDwarfExtensionsForHeterogeneousDebugging`
    This document describes DWARF extensions to support heterogeneous
    debugging for targets such as the AMDGPU backend.
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 19d5f24c5d5e0..ea47fe83ea9ca 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -2855,6 +2855,15 @@ def int_amdgcn_global_load_lds : AMDGPUGlobalLoadLDS;
 def int_amdgcn_pops_exiting_wave_id :
   DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrHasSideEffects]>;
 
+// Sets a marker in the stream of async requests. Modelled as IntrNoMem plus
+// IntrHasSideEffects.
+def int_amdgcn_asyncmark : ClangBuiltin<"__builtin_amdgcn_asyncmark">,
+  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
+
+// Waits until the (N+1)-th previous marker is completed, if it exists.
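+// For example, N = 0 waits on the most recent marker and N = 1 on the one
+// before it.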
+def int_amdgcn_wait_asyncmark : + ClangBuiltin<"__builtin_amdgcn_wait_asyncmark">, + Intrinsic<[], [llvm_i16_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>; + //===----------------------------------------------------------------------===// // GFX10 Intrinsics //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp index 5dc7c8327102e..334ba33c20264 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -2375,6 +2375,12 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS( case Intrinsic::amdgcn_load_to_lds: case Intrinsic::amdgcn_global_load_lds: return selectGlobalLoadLds(I); + case Intrinsic::amdgcn_asyncmark: + case Intrinsic::amdgcn_wait_asyncmark: + // FIXME: Not supported on GFX12 yet. Will need a new feature when we do. + if (!Subtarget->hasVMemToLDSLoad()) + return false; + break; case Intrinsic::amdgcn_exp_compr: if (!STI.hasCompressedExport()) { Function &F = I.getMF()->getFunction(); diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp index bf9b4297bd435..75da5acbc57c7 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp @@ -347,7 +347,7 @@ void AMDGPUAsmPrinter::emitInstruction(const MachineInstr *MI) { } } else { // We don't want these pseudo instructions encoded. They are - // placeholder terminator instructions and should only be printed as + // placeholder instructions and should only be printed as // comments. if (MI->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) { if (isVerbose()) @@ -361,6 +361,20 @@ void AMDGPUAsmPrinter::emitInstruction(const MachineInstr *MI) { return; } + if (MI->getOpcode() == AMDGPU::S_ASYNCMARK) { + if (isVerbose()) + OutStreamer->emitRawComment(" s_asyncmark"); + return; + } + + if (MI->getOpcode() == AMDGPU::S_WAIT_ASYNCMARK) { + if (isVerbose()) { + OutStreamer->emitRawComment(" s_wait_asyncmark(" + + Twine(MI->getOperand(0).getImm()) + ")"); + } + return; + } + if (MI->getOpcode() == AMDGPU::SCHED_BARRIER) { if (isVerbose()) { std::string HexString; diff --git a/llvm/lib/Target/AMDGPU/SIDefines.h b/llvm/lib/Target/AMDGPU/SIDefines.h index 0d206aba33543..6504b0fdae190 100644 --- a/llvm/lib/Target/AMDGPU/SIDefines.h +++ b/llvm/lib/Target/AMDGPU/SIDefines.h @@ -368,12 +368,15 @@ enum CPol { GLC = 1, SLC = 2, DLC = 4, + SWZ_pregfx12 = 8, SCC = 16, + ASYNC_pregfx12 = 32, + SC0 = GLC, SC1 = SCC, NT = SLC, - ALL_pregfx12 = GLC | SLC | DLC | SCC, - SWZ_pregfx12 = 8, + // Bits that should survive in MIR + ALL_pregfx12 = GLC | SLC | DLC | SCC | ASYNC_pregfx12, // Below are GFX12+ cache policy bits diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp index e21583ae0876f..2333b2fc4d460 100644 --- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp +++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp @@ -457,6 +457,9 @@ class SIInsertWaitcnts { // message. 
DenseSet<MachineInstr *> ReleaseVGPRInsts; + // Track legacy async instructions to later remove CPol::ASYNC_pregfx12 + SmallVector<MachineInstr *, 32> InstsWithAsyncCpolBit; + HardwareLimits Limits; public: @@ -566,6 +569,35 @@ class SIInsertWaitcnts { return VmemReadMapping[getVmemType(Inst)]; } + bool hasCPolAsyncBit(const MachineInstr &MI) const { + const MachineOperand *CPol = TII->getNamedOperand(MI, AMDGPU::OpName::cpol); + if (!CPol || !CPol->isImm()) + return false; + return CPol->getImm() & AMDGPU::CPol::ASYNC_pregfx12; + } + + // FIXME: For GFX1250, this should also check for usesASYNC_CNT + bool isAsync(const MachineInstr &MI) const { + if (!SIInstrInfo::isLDSDMA(MI)) + return false; + if (SIInstrInfo::usesASYNC_CNT(MI)) { + return true; + } + return hasCPolAsyncBit(MI); + } + + bool isNonAsyncLdsDmaWrite(const MachineInstr &MI) const { + if (!SIInstrInfo::mayWriteLDSThroughDMA(MI)) + return false; + return !isAsync(MI); + } + + bool isAsyncLdsDmaWrite(const MachineInstr &MI) const { + if (!SIInstrInfo::mayWriteLDSThroughDMA(MI)) + return false; + return isAsync(MI); + } + bool isVmemAccess(const MachineInstr &MI) const; bool generateWaitcntInstBefore(MachineInstr &MI, WaitcntBrackets &ScoreBrackets, @@ -653,6 +685,11 @@ class WaitcntBrackets { return It != VMem.end() ? It->second.Scores[T] : 0; } + unsigned getClampedWait(InstCounterType T, unsigned ScoreToWait) const { + return std::min(getScoreUB(T) - ScoreToWait, + Context->getWaitCountMax(T) - 1); + } + bool merge(const WaitcntBrackets &Other); bool counterOutOfOrder(InstCounterType T) const; @@ -666,11 +703,13 @@ class WaitcntBrackets { AMDGPU::Waitcnt &Wait) const; void determineWaitForLDSDMA(InstCounterType T, VMEMID TID, AMDGPU::Waitcnt &Wait) const; + AMDGPU::Waitcnt determineAsyncWait(unsigned N); void tryClearSCCWriteEvent(MachineInstr *Inst); void applyWaitcnt(const AMDGPU::Waitcnt &Wait); void applyWaitcnt(InstCounterType T, unsigned Count); void updateByEvent(WaitEventType E, MachineInstr &MI); + void recordAsyncMark(MachineInstr &MI); unsigned hasPendingEvent() const { return PendingEvents; } unsigned hasPendingEvent(WaitEventType E) const { @@ -704,10 +743,7 @@ class WaitcntBrackets { return LastGDS > ScoreLBs[DS_CNT] && LastGDS <= ScoreUBs[DS_CNT]; } - unsigned getPendingGDSWait() const { - return std::min(getScoreUB(DS_CNT) - LastGDS, - Context->getWaitCountMax(DS_CNT) - 1); - } + unsigned getPendingGDSWait() const { return getClampedWait(DS_CNT, LastGDS); } void setPendingGDS() { LastGDS = ScoreUBs[DS_CNT]; } @@ -766,6 +802,9 @@ class WaitcntBrackets { static bool mergeScore(const MergeInfo &M, unsigned &Score, unsigned OtherScore); + bool mergeAsyncMarkers( + const MergeInfo MergeInfos[NUM_INST_CNTS], + const SmallVectorImpl<std::array<unsigned, NUM_INST_CNTS>> &OtherMarkers); iterator_range<MCRegUnitIterator> regunits(MCPhysReg Reg) const { assert(Reg != AMDGPU::SCC && "Shouldn't be used on SCC"); @@ -817,6 +856,8 @@ class WaitcntBrackets { void setScoreByOperand(const MachineOperand &Op, InstCounterType CntTy, unsigned Val); + InstCounterType getAsyncCounterType() const { return LOAD_CNT; } + const SIInsertWaitcnts *Context; unsigned ScoreLBs[NUM_INST_CNTS] = {0}; @@ -872,6 +913,11 @@ class WaitcntBrackets { // Store representative LDS DMA operations. The only useful info here is // alias info. One store is kept per unique AAInfo. 
SmallVector<const MachineInstr *> LDSDMAStores; + + SmallVector<std::array<unsigned, NUM_INST_CNTS>> AsyncMarkers; + // Track the upper bound score for async operations that are not part of a + // marker yet. Initialized to all zeros. + std::array<unsigned, NUM_INST_CNTS> AsyncScore{}; }; class SIInsertWaitcntsLegacy : public MachineFunctionPass { @@ -1063,7 +1109,7 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) { setScoreByOperand(Op, T, CurrScore); } if (Inst.mayStore() && - (TII->isDS(Inst) || TII->mayWriteLDSThroughDMA(Inst))) { + (TII->isDS(Inst) || (Context->isNonAsyncLdsDmaWrite(Inst)))) { // MUBUF and FLAT LDS DMA operations need a wait on vmcnt before LDS // written can be accessed. A load from LDS to VMEM does not need a wait. // @@ -1107,6 +1153,12 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) { setVMemScore(LDSDMA_BEGIN + Slot, T, CurrScore); } + if (Context->isAsyncLdsDmaWrite(Inst) && T == LOAD_CNT) { + // FIXME: Not supported on GFX12 yet. Will need a new feature when we do. + assert(!SIInstrInfo::usesASYNC_CNT(Inst)); + AsyncScore[T] = CurrScore; + } + if (SIInstrInfo::isSBarrierSCCWrite(Inst.getOpcode())) { setRegScore(AMDGPU::SCC, T, CurrScore); PendingSCCWrite = &Inst; @@ -1114,6 +1166,18 @@ void WaitcntBrackets::updateByEvent(WaitEventType E, MachineInstr &Inst) { } } +void WaitcntBrackets::recordAsyncMark(MachineInstr &Inst) { + AsyncMarkers.emplace_back(AsyncScore); + AsyncScore = {}; + LLVM_DEBUG({ + dbgs() << "recordAsyncMark:\n" << Inst; + for (const auto &Marker : AsyncMarkers) { + llvm::interleaveComma(Marker, dbgs()); + dbgs() << '\n'; + } + }); +} + void WaitcntBrackets::print(raw_ostream &OS) const { const GCNSubtarget *ST = Context->ST; @@ -1207,6 +1271,58 @@ void WaitcntBrackets::print(raw_ostream &OS) const { } OS << '\n'; + OS << "Async score: "; + if (!AsyncScore.size()) { + OS << "none"; + } + llvm::interleaveComma(AsyncScore, OS); + OS << '\n'; + + OS << "Async markers:"; + if (!AsyncMarkers.size()) { + OS << " none"; + } + for (const auto &Marker : AsyncMarkers) { + OS << '\n'; + for (auto T : inst_counter_types()) { + unsigned MarkedScore = Marker[T]; + switch (T) { + case LOAD_CNT: + OS << " " << (ST->hasExtendedWaitCounts() ? "LOAD" : "VM") + << "_CNT: " << MarkedScore; + break; + case DS_CNT: + OS << " " << (ST->hasExtendedWaitCounts() ? "DS" : "LGKM") + << "_CNT: " << MarkedScore; + break; + case EXP_CNT: + OS << " EXP_CNT: " << MarkedScore; + break; + case STORE_CNT: + OS << " " << (ST->hasExtendedWaitCounts() ? "STORE" : "VS") + << "_CNT: " << MarkedScore; + break; + case SAMPLE_CNT: + OS << " SAMPLE_CNT: " << MarkedScore; + break; + case BVH_CNT: + OS << " BVH_CNT: " << MarkedScore; + break; + case KM_CNT: + OS << " KM_CNT: " << MarkedScore; + break; + case X_CNT: + OS << " X_CNT: " << MarkedScore; + break; + default: + OS << " UNKNOWN: " << MarkedScore; + break; + } + } + } + + OS << "\n"; + OS << '\n'; } @@ -1265,11 +1381,57 @@ void WaitcntBrackets::determineWaitForScore(InstCounterType T, } else { // If a counter has been maxed out avoid overflow by waiting for // MAX(CounterType) - 1 instead. - unsigned NeededWait = - std::min(UB - ScoreToWait, Context->getWaitCountMax(T) - 1); + unsigned NeededWait = getClampedWait(T, ScoreToWait); + addWait(Wait, T, NeededWait); + } + } +} + +AMDGPU::Waitcnt WaitcntBrackets::determineAsyncWait(unsigned N) { + LLVM_DEBUG({ + dbgs() << "Need " << N << " async markers. 
Found " << AsyncMarkers.size() + << ":\n"; + for (const auto &Marker : AsyncMarkers) { + llvm::interleaveComma(Marker, dbgs()); + dbgs() << '\n'; + } + }); + + AMDGPU::Waitcnt Wait; + if (AsyncMarkers.size() <= N) { + LLVM_DEBUG(dbgs() << "No additional wait for async marker.\n"); + return Wait; + } + + size_t MarkerIndex = AsyncMarkers.size() - N - 1; + const auto &RequiredMarker = AsyncMarkers[MarkerIndex]; + for (InstCounterType T : inst_counter_types()) { + unsigned ScoreToWait = RequiredMarker[T]; + if (ScoreToWait == 0) { + continue; + } + unsigned LB = getScoreLB(T); + unsigned UB = getScoreUB(T); + if (ScoreToWait > LB && ScoreToWait <= UB) { + unsigned NeededWait = getClampedWait(T, ScoreToWait); + LLVM_DEBUG(dbgs() << "Score to wait: " << ScoreToWait + << " Needed wait: " << NeededWait << '\n'); addWait(Wait, T, NeededWait); } } + + // Immediately remove the waited marker and all older ones + // This happens BEFORE the wait is actually inserted, which is fine + // because we've already extracted the wait requirements + LLVM_DEBUG({ + dbgs() << "Removing " << (MarkerIndex + 1) + << " async markers after determining wait\n"; + }); + AsyncMarkers.erase(AsyncMarkers.begin(), + AsyncMarkers.begin() + MarkerIndex + 1); + + LLVM_DEBUG(dbgs() << "Waits to add: " << Wait); + return Wait; } void WaitcntBrackets::determineWaitForPhysReg(InstCounterType T, MCPhysReg Reg, @@ -1522,6 +1684,11 @@ bool WaitcntGeneratorPreGFX12::applyPreexistingWaitcnt( // possibility in an articial MIR test since such a situation cannot be // recreated by running the memory legalizer. II.eraseFromParent(); + } else if (Opcode == AMDGPU::S_WAIT_ASYNCMARK) { + unsigned N = II.getOperand(0).getImm(); + LLVM_DEBUG(dbgs() << "Processing S_WAIT_ASYNCMARK: " << II << '\n';); + AMDGPU::Waitcnt OldWait = ScoreBrackets.determineAsyncWait(N); + Wait = Wait.combined(OldWait); } else { assert(Opcode == AMDGPU::S_WAITCNT_VSCNT); assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL); @@ -1698,6 +1865,8 @@ bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt( // LDS, so no work required here yet. II.eraseFromParent(); continue; + } else if (Opcode == AMDGPU::S_WAIT_ASYNCMARK) { + reportFatalUsageError("S_WAIT_ASYNCMARK is not ready for GFX12 yet"); } else { std::optional<InstCounterType> CT = counterTypeForInstr(Opcode); assert(CT.has_value()); @@ -1720,6 +1889,7 @@ bool WaitcntGeneratorGFX12Plus::applyPreexistingWaitcnt( // Save the pre combine waitcnt in order to make xcnt checks. AMDGPU::Waitcnt PreCombine = Wait; + if (CombinedLoadDsCntInstr) { // Only keep an S_WAIT_LOADCNT_DSCNT if both counters actually need // to be waited for. 
Otherwise, let the instruction be deleted so @@ -1938,6 +2108,7 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, WaitcntBrackets &ScoreBrackets, MachineInstr *OldWaitcntInstr, bool FlushVmCnt) { + LLVM_DEBUG(dbgs() << "GenerateWaitcntInstBefore: "; MI.print(dbgs());); setForceEmitWaitcnt(); assert(!MI.isMetaInstruction()); @@ -2213,8 +2384,10 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI, if (ForceEmitZeroLoadFlag && Wait.LoadCnt != ~0u) Wait.LoadCnt = 0; - return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), ScoreBrackets, - OldWaitcntInstr); + bool Changed = generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), + ScoreBrackets, OldWaitcntInstr); + + return Changed; } bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait, @@ -2439,6 +2612,68 @@ bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score, return OtherShifted > MyShifted; } +bool WaitcntBrackets::mergeAsyncMarkers( + const MergeInfo MergeInfos[NUM_INST_CNTS], + const SmallVectorImpl<std::array<unsigned, NUM_INST_CNTS>> &OtherMarkers) { + bool StrictDom = false; + + LLVM_DEBUG(dbgs() << "Merging async markers ..."); + // Early exit: both empty + if (AsyncMarkers.empty() && OtherMarkers.empty()) { + LLVM_DEBUG(dbgs() << " nothing to merge\n"); + return false; + } + LLVM_DEBUG(dbgs() << '\n'); + + // Determine maximum length needed after merging + size_t MaxSize = std::max(AsyncMarkers.size(), OtherMarkers.size()); + + // Pad with zero-filled markers if our list is shorter. + // Zero represents "no pending async operations at this checkpoint" + // and acts as the identity element for max() during merging + std::array<unsigned, NUM_INST_CNTS> ZeroMarker{}; + AsyncMarkers.resize(MaxSize, ZeroMarker); + + LLVM_DEBUG({ + dbgs() << "Before merge:\n"; + for (const auto &Marker : AsyncMarkers) { + llvm::interleaveComma(Marker, dbgs()); + dbgs() << '\n'; + } + }); + + LLVM_DEBUG({ + dbgs() << "Other markers:\n"; + for (const auto &Marker : OtherMarkers) { + llvm::interleaveComma(Marker, dbgs()); + dbgs() << '\n'; + } + }); + + // Merge element-wise using the existing mergeScore function + // Use the appropriate MergeInfo for each counter type + for (size_t Idx = 0; Idx < MaxSize; ++Idx) { + for (auto T : inst_counter_types(Context->MaxCounter)) { + // Get the score from OtherMarkers, using 0 if index is out of bounds + unsigned OtherScore = + (Idx < OtherMarkers.size()) ? OtherMarkers[Idx][T] : 0; + + // Merge using the counter-specific MergeInfo + StrictDom |= mergeScore(MergeInfos[T], AsyncMarkers[Idx][T], OtherScore); + } + } + + LLVM_DEBUG({ + dbgs() << "After merge:\n"; + for (const auto &Marker : AsyncMarkers) { + llvm::interleaveComma(Marker, dbgs()); + dbgs() << '\n'; + } + }); + + return StrictDom; +} + /// Merge the pending events and associater score brackets of \p Other into /// this brackets status. 
/// @@ -2454,6 +2689,9 @@ bool WaitcntBrackets::merge(const WaitcntBrackets &Other) { for (auto K : Other.SGPRs.keys()) SGPRs.try_emplace(K); + // Array to store MergeInfo for each counter type + MergeInfo MergeInfos[NUM_INST_CNTS]; + for (auto T : inst_counter_types(Context->MaxCounter)) { // Merge event flags for this counter const unsigned *WaitEventMaskForInst = Context->WaitEventMaskForInst; @@ -2470,7 +2708,7 @@ bool WaitcntBrackets::merge(const WaitcntBrackets &Other) { if (NewUB < ScoreLBs[T]) report_fatal_error("waitcnt score overflow"); - MergeInfo M; + MergeInfo &M = MergeInfos[T]; M.OldLB = ScoreLBs[T]; M.OtherLB = Other.ScoreLBs[T]; M.MyShift = NewUB - ScoreUBs[T]; @@ -2517,6 +2755,11 @@ bool WaitcntBrackets::merge(const WaitcntBrackets &Other) { } } + StrictDom |= mergeAsyncMarkers(MergeInfos, Other.AsyncMarkers); + for (auto T : inst_counter_types(Context->MaxCounter)) { + StrictDom |= mergeScore(MergeInfos[T], AsyncScore[T], Other.AsyncScore[T]); + } + purgeEmptyTrackingData(); return StrictDom; } @@ -2529,6 +2772,7 @@ static bool isWaitInstr(MachineInstr &Inst) { Opcode == AMDGPU::S_WAIT_LOADCNT_DSCNT || Opcode == AMDGPU::S_WAIT_STORECNT_DSCNT || Opcode == AMDGPU::S_WAITCNT_lds_direct || + Opcode == AMDGPU::S_WAIT_ASYNCMARK || counterTypeForInstr(Opcode).has_value(); } @@ -2579,6 +2823,19 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF, continue; } + if (Inst.getOpcode() == AMDGPU::S_ASYNCMARK) { + // FIXME: Not supported on GFX12 yet. Will need a new feature when we do. + assert(ST->getGeneration() < AMDGPUSubtarget::GFX12); + ScoreBrackets.recordAsyncMark(Inst); + ++Iter; + continue; + } + + // Since most instructions don't have an Aux/CPol argument, it's faster to + // first filter out anything that is not an LDS DMA writes. 
+ if (SIInstrInfo::mayWriteLDSThroughDMA(Inst) && hasCPolAsyncBit(Inst)) + InstsWithAsyncCpolBit.push_back(&Inst); + bool FlushVmCnt = Block.getFirstTerminator() == Inst && isPreheaderToFlush(Block, ScoreBrackets); @@ -2937,7 +3194,7 @@ bool SIInsertWaitcnts::run(MachineFunction &MF) { if (!SuccBI.Incoming) { SuccBI.Dirty = true; if (SuccBII <= BII) { - LLVM_DEBUG(dbgs() << "repeat on backedge\n"); + LLVM_DEBUG(dbgs() << "Repeat on backedge without merge\n"); Repeat = true; } if (!MoveBracketsToSucc) { @@ -2945,11 +3202,20 @@ bool SIInsertWaitcnts::run(MachineFunction &MF) { } else { SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets); } - } else if (SuccBI.Incoming->merge(*Brackets)) { - SuccBI.Dirty = true; - if (SuccBII <= BII) { - LLVM_DEBUG(dbgs() << "repeat on backedge\n"); - Repeat = true; + } else { + LLVM_DEBUG({ + dbgs() << "Try to merge "; + MBB->printName(dbgs()); + dbgs() << " into "; + Succ->printName(dbgs()); + dbgs() << '\n'; + }); + if (SuccBI.Incoming->merge(*Brackets)) { + SuccBI.Dirty = true; + if (SuccBII <= BII) { + LLVM_DEBUG(dbgs() << "Repeat on backedge with merge\n"); + Repeat = true; + } } } } @@ -3037,6 +3303,31 @@ bool SIInsertWaitcnts::run(MachineFunction &MF) { } } } + + // Remove the ASYNC bit from all tracked LDS DMA instructions + if (!InstsWithAsyncCpolBit.empty()) { + LLVM_DEBUG(dbgs() << "*** Clearing ASYNC bit from " + << InstsWithAsyncCpolBit.size() + << " LDS DMA instructions\n"); + + // Remove duplicates if any + llvm::sort(InstsWithAsyncCpolBit); + InstsWithAsyncCpolBit.erase(llvm::unique(InstsWithAsyncCpolBit), + InstsWithAsyncCpolBit.end()); + + for (MachineInstr *MI : InstsWithAsyncCpolBit) { + if (MachineOperand *CPol = + TII->getNamedOperand(*MI, AMDGPU::OpName::cpol)) { + unsigned CPolBits = CPol->getImm(); + assert(CPolBits & AMDGPU::CPol::ASYNC_pregfx12); + LLVM_DEBUG(dbgs() << " Clearing ASYNC bit from: " << *MI); + CPol->setImm(CPolBits & ~AMDGPU::CPol::ASYNC_pregfx12); + Modified = true; + } + } + InstsWithAsyncCpolBit.clear(); + } + ReleaseVGPRInsts.clear(); PreheadersToFlush.clear(); SLoadAddresses.clear(); diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h index 2839011a5be8f..b668342cc4018 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h @@ -1015,6 +1015,10 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo { return MI.getDesc().TSFlags & SIInstrFlags::VM_CNT; } + static bool usesASYNC_CNT(const MachineInstr &MI) { + return MI.getDesc().TSFlags & SIInstrFlags::ASYNC_CNT; + } + static bool usesLGKM_CNT(const MachineInstr &MI) { return MI.getDesc().TSFlags & SIInstrFlags::LGKM_CNT; } diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td index 99b352bdf6765..02457399803b3 100644 --- a/llvm/lib/Target/AMDGPU/SOPInstructions.td +++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td @@ -1688,11 +1688,22 @@ let SubtargetPredicate = HasWaitXcnt in { // Represents the point at which a wave must wait for all outstanding direct loads to LDS. // Typically inserted by the memory legalizer and consumed by SIInsertWaitcnts. 
- def S_WAITCNT_lds_direct : SPseudoInstSI<(outs), (ins)> { let hasSideEffects = 0; } +// FIXME: Until support for GFX12Plus is ready, restrict these to only preGFX12 +let SubtargetPredicate = isGFX9GFX10GFX11 in { +def S_ASYNCMARK : SPseudoInstSI<(outs), (ins), + [(int_amdgcn_asyncmark)]> { + let maybeAtomic = 0; +} +def S_WAIT_ASYNCMARK : SOPP_Pseudo <"", (ins s16imm:$simm16), "$simm16", + [(int_amdgcn_wait_asyncmark timm:$simm16)]> { + let maybeAtomic = 0; +} +} + def S_SETHALT : SOPP_Pseudo <"s_sethalt" , (ins i32imm:$simm16), "$simm16", [(int_amdgcn_s_sethalt timm:$simm16)]>; def S_SETKILL : SOPP_Pseudo <"s_setkill" , (ins i16imm:$simm16), "$simm16"> { diff --git a/llvm/test/CodeGen/AMDGPU/async-mark-err.ll b/llvm/test/CodeGen/AMDGPU/async-mark-err.ll new file mode 100644 index 0000000000000..1df43f1ab374a --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/async-mark-err.ll @@ -0,0 +1,10 @@ +; RUN: not --crash llc -filetype=null -mtriple=amdgcn -mcpu=gfx1250 %s 2>&1 | FileCheck --ignore-case %s +; RUN: not llc -filetype=null -global-isel -mtriple=amdgcn -mcpu=gfx1250 %s 2>&1 | FileCheck --ignore-case %s +; +; CHECK: LLVM ERROR: Cannot select + +define amdgpu_ps void @async_err() { + call void @llvm.amdgcn.asyncmark() + call void @llvm.amdgcn.wait.asyncmark(i16 0) + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/async-mark-pregfx12.ll b/llvm/test/CodeGen/AMDGPU/async-mark-pregfx12.ll new file mode 100644 index 0000000000000..ee54e7362117e --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/async-mark-pregfx12.ll @@ -0,0 +1,898 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6 +; RUN: llc -march=amdgcn -mcpu=gfx900 < %s | FileCheck %s -check-prefixes=GFX900 +; RUN: llc -march=amdgcn -mcpu=gfx942 < %s | FileCheck %s -check-prefixes=GFX942 +; RUN: llc -march=amdgcn -mcpu=gfx1010 < %s | FileCheck %s -check-prefixes=GFX1010 + +; Test async mark/wait with global_load_lds and global loads +; This version uses wave barriers to enforce program order so that unrelated vmem +; instructions do not get reordered before reaching this point. 
+ +define void @interleaved_with_wave_barrier(ptr addrspace(1) %foo, ptr addrspace(3) %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out) { +; GFX900-LABEL: interleaved_with_wave_barrier: +; GFX900: ; %bb.0: ; %entry +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: s_movk_i32 s4, 0x54 +; GFX900-NEXT: v_add_u32_e32 v11, 0x54, v2 +; GFX900-NEXT: v_add_co_u32_e32 v7, vcc, s4, v0 +; GFX900-NEXT: v_readfirstlane_b32 s4, v11 +; GFX900-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v1, vcc +; GFX900-NEXT: s_mov_b32 m0, s4 +; GFX900-NEXT: global_load_dword v9, v[3:4], off offset:44 +; GFX900-NEXT: global_load_dword v10, v[0:1], off offset:4 +; GFX900-NEXT: ; wave barrier +; GFX900-NEXT: s_movk_i32 s4, 0x58 +; GFX900-NEXT: global_load_dword v[7:8], off glc lds +; GFX900-NEXT: v_add_u32_e32 v8, 0x58, v2 +; GFX900-NEXT: ; wave barrier +; GFX900-NEXT: ; s_asyncmark +; GFX900-NEXT: global_load_dword v7, v[0:1], off offset:8 +; GFX900-NEXT: v_add_co_u32_e32 v0, vcc, s4, v3 +; GFX900-NEXT: v_readfirstlane_b32 s4, v8 +; GFX900-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v4, vcc +; GFX900-NEXT: s_mov_b32 m0, s4 +; GFX900-NEXT: ; wave barrier +; GFX900-NEXT: s_nop 0 +; GFX900-NEXT: global_load_dword v[0:1], off glc slc lds +; GFX900-NEXT: ; wave barrier +; GFX900-NEXT: global_load_dword v0, v[3:4], off offset:48 +; GFX900-NEXT: ; s_asyncmark +; GFX900-NEXT: ; s_wait_asyncmark(1) +; GFX900-NEXT: s_waitcnt vmcnt(3) +; GFX900-NEXT: ds_read_b32 v1, v2 offset:84 +; GFX900-NEXT: ; s_wait_asyncmark(0) +; GFX900-NEXT: s_waitcnt vmcnt(1) +; GFX900-NEXT: ds_read_b32 v2, v2 offset:88 +; GFX900-NEXT: v_add_u32_e32 v3, v10, v9 +; GFX900-NEXT: s_waitcnt lgkmcnt(1) +; GFX900-NEXT: v_add3_u32 v1, v3, v1, v7 +; GFX900-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_add3_u32 v0, v1, v0, v2 +; GFX900-NEXT: global_store_dword v[5:6], v0, off +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX942-LABEL: interleaved_with_wave_barrier: +; GFX942: ; %bb.0: ; %entry +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_add_u32_e32 v11, 0x54, v2 +; GFX942-NEXT: s_mov_b64 s[0:1], 0x54 +; GFX942-NEXT: v_mov_b32_e32 v7, v6 +; GFX942-NEXT: v_mov_b32_e32 v9, v4 +; GFX942-NEXT: v_mov_b32_e32 v6, v5 +; GFX942-NEXT: v_lshl_add_u64 v[4:5], v[0:1], 0, s[0:1] +; GFX942-NEXT: v_readfirstlane_b32 s0, v11 +; GFX942-NEXT: v_mov_b32_e32 v8, v3 +; GFX942-NEXT: s_mov_b32 m0, s0 +; GFX942-NEXT: global_load_dword v3, v[8:9], off offset:44 +; GFX942-NEXT: global_load_dword v10, v[0:1], off offset:4 +; GFX942-NEXT: ; wave barrier +; GFX942-NEXT: s_mov_b64 s[0:1], 0x58 +; GFX942-NEXT: global_load_lds_dword v[4:5], off sc0 +; GFX942-NEXT: v_add_u32_e32 v5, 0x58, v2 +; GFX942-NEXT: ; wave barrier +; GFX942-NEXT: ; s_asyncmark +; GFX942-NEXT: global_load_dword v4, v[0:1], off offset:8 +; GFX942-NEXT: v_lshl_add_u64 v[0:1], v[8:9], 0, s[0:1] +; GFX942-NEXT: v_readfirstlane_b32 s0, v5 +; GFX942-NEXT: s_mov_b32 m0, s0 +; GFX942-NEXT: ; wave barrier +; GFX942-NEXT: s_waitcnt vmcnt(2) +; GFX942-NEXT: v_add_u32_e32 v3, v10, v3 +; GFX942-NEXT: global_load_lds_dword v[0:1], off sc0 nt +; GFX942-NEXT: ; wave barrier +; GFX942-NEXT: global_load_dword v0, v[8:9], off offset:48 +; GFX942-NEXT: ; s_asyncmark +; GFX942-NEXT: ; s_wait_asyncmark(1) +; GFX942-NEXT: s_waitcnt vmcnt(3) +; GFX942-NEXT: ds_read_b32 v1, v2 offset:84 +; GFX942-NEXT: ; s_wait_asyncmark(0) +; GFX942-NEXT: s_waitcnt vmcnt(1) +; GFX942-NEXT: ds_read_b32 v2, v2 offset:88 +; GFX942-NEXT: s_waitcnt lgkmcnt(1) +; GFX942-NEXT: v_add3_u32 v1, v3, 
v1, v4 +; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_add3_u32 v0, v1, v0, v2 +; GFX942-NEXT: global_store_dword v[6:7], v0, off +; GFX942-NEXT: s_waitcnt vmcnt(0) +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX1010-LABEL: interleaved_with_wave_barrier: +; GFX1010: ; %bb.0: ; %entry +; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1010-NEXT: v_add_nc_u32_e32 v7, 0x54, v2 +; GFX1010-NEXT: v_add_nc_u32_e32 v11, 0x58, v2 +; GFX1010-NEXT: global_load_dword v9, v[3:4], off offset:44 +; GFX1010-NEXT: global_load_dword v10, v[0:1], off offset:4 +; GFX1010-NEXT: ; wave barrier +; GFX1010-NEXT: v_readfirstlane_b32 s4, v7 +; GFX1010-NEXT: v_add_co_u32 v7, vcc_lo, 0x54, v0 +; GFX1010-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v1, vcc_lo +; GFX1010-NEXT: s_mov_b32 m0, s4 +; GFX1010-NEXT: v_readfirstlane_b32 s4, v11 +; GFX1010-NEXT: global_load_dword v[7:8], off glc lds +; GFX1010-NEXT: v_add_co_u32 v7, vcc_lo, 0x58, v3 +; GFX1010-NEXT: ; wave barrier +; GFX1010-NEXT: ; s_asyncmark +; GFX1010-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v4, vcc_lo +; GFX1010-NEXT: global_load_dword v0, v[0:1], off offset:8 +; GFX1010-NEXT: s_mov_b32 m0, s4 +; GFX1010-NEXT: ; wave barrier +; GFX1010-NEXT: global_load_dword v[7:8], off glc slc lds +; GFX1010-NEXT: ; wave barrier +; GFX1010-NEXT: global_load_dword v1, v[3:4], off offset:48 +; GFX1010-NEXT: ; s_asyncmark +; GFX1010-NEXT: ; s_wait_asyncmark(1) +; GFX1010-NEXT: s_waitcnt vmcnt(3) +; GFX1010-NEXT: ds_read_b32 v3, v2 offset:84 +; GFX1010-NEXT: ; s_wait_asyncmark(0) +; GFX1010-NEXT: s_waitcnt vmcnt(1) +; GFX1010-NEXT: ds_read_b32 v2, v2 offset:88 +; GFX1010-NEXT: v_add_nc_u32_e32 v4, v10, v9 +; GFX1010-NEXT: s_waitcnt lgkmcnt(1) +; GFX1010-NEXT: v_add3_u32 v0, v4, v3, v0 +; GFX1010-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX1010-NEXT: v_add3_u32 v0, v0, v1, v2 +; GFX1010-NEXT: global_store_dword v[5:6], v0, off +; GFX1010-NEXT: s_setpc_b64 s[30:31] +entry: + ; First batch: global load, global load, async global-to-LDS + %bar_gep11 = getelementptr i32, ptr addrspace(1) %bar, i32 11 + %bar_v11 = load i32, ptr addrspace(1) %bar_gep11 + %foo_gep1 = getelementptr i32, ptr addrspace(1) %foo, i32 1 + %foo_v1 = load i32, ptr addrspace(1) %foo_gep1 + %lds_gep21 = getelementptr i32, ptr addrspace(3) %lds, i32 21 + %bar_gep21 = getelementptr i32, ptr addrspace(1) %foo, i32 21 + call void @llvm.amdgcn.wave.barrier() + call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %bar_gep21, ptr addrspace(3) %lds_gep21, i32 4, i32 0, i32 u0x21) + call void @llvm.amdgcn.wave.barrier() + call void @llvm.amdgcn.asyncmark() + + ; Second batch: global load, async global-to-LDS, global load + %foo_gep2 = getelementptr i32, ptr addrspace(1) %foo, i32 2 + %foo_v2 = load i32, ptr addrspace(1) %foo_gep2 + %bar_gep22 = getelementptr i32, ptr addrspace(1) %bar, i32 22 + %lds_gep22 = getelementptr i32, ptr addrspace(3) %lds, i32 22 + call void @llvm.amdgcn.wave.barrier() + call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %bar_gep22, ptr addrspace(3) %lds_gep22, i32 4, i32 0, i32 u0x23) + call void @llvm.amdgcn.wave.barrier() + %bar_gep12 = getelementptr i32, ptr addrspace(1) %bar, i32 12 + %bar_v12 = load i32, ptr addrspace(1) %bar_gep12 + call void @llvm.amdgcn.asyncmark() + + ; Wait for first async mark and read from LDS + ; This results in vmcnt(3) corresponding to the second batch. 
+ call void @llvm.amdgcn.wait.asyncmark(i16 1) + %lds_val21 = load i32, ptr addrspace(3) %lds_gep21 + + ; Wait for the next lds dma + ; This results in vmcnt(1), corresponding to %bar_v12. Could have been combined with the lgkmcnt(1) for %lds_val21. + ; Notable that the asyncmark is sufficient to prevent the optimizer from coalescing the previous ds_read with the next one. + call void @llvm.amdgcn.wait.asyncmark(i16 0) + %lds_val22 = load i32, ptr addrspace(3) %lds_gep22 + %sum1 = add i32 %foo_v1, %bar_v11 + %sum2 = add i32 %sum1, %lds_val21 + %sum3 = add i32 %sum2, %foo_v2 + ; Finally a vmcnt(0) for %bar_v12, which was not included in the async mark that followed it. + %sum4 = add i32 %sum3, %bar_v12 + %sum5 = add i32 %sum4, %lds_val22 + store i32 %sum5, ptr addrspace(1) %out + + ret void +} + +define void @buffers_with_wave_barrier(ptr addrspace(8) inreg %buf, ptr addrspace(1) %foo, ptr addrspace(3) inreg %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out) { +; GFX900-LABEL: buffers_with_wave_barrier: +; GFX900: ; %bb.0: ; %entry +; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX900-NEXT: global_load_dword v6, v[2:3], off offset:44 +; GFX900-NEXT: global_load_dword v7, v[0:1], off offset:4 +; GFX900-NEXT: s_add_i32 m0, s20, 0x54 +; GFX900-NEXT: v_mov_b32_e32 v8, 0x54 +; GFX900-NEXT: ; wave barrier +; GFX900-NEXT: buffer_load_dword v8, s[16:19], 0 offen lds +; GFX900-NEXT: ; wave barrier +; GFX900-NEXT: ; s_asyncmark +; GFX900-NEXT: global_load_dword v0, v[0:1], off offset:8 +; GFX900-NEXT: s_add_i32 m0, s20, 0x58 +; GFX900-NEXT: v_mov_b32_e32 v1, 0x58 +; GFX900-NEXT: ; wave barrier +; GFX900-NEXT: buffer_load_dword v1, s[16:19], 0 offen lds +; GFX900-NEXT: ; wave barrier +; GFX900-NEXT: global_load_dword v1, v[2:3], off offset:48 +; GFX900-NEXT: v_mov_b32_e32 v2, s20 +; GFX900-NEXT: ; s_asyncmark +; GFX900-NEXT: ; s_wait_asyncmark(1) +; GFX900-NEXT: s_waitcnt vmcnt(3) +; GFX900-NEXT: ds_read_b32 v3, v2 offset:84 +; GFX900-NEXT: ; s_wait_asyncmark(0) +; GFX900-NEXT: s_waitcnt vmcnt(1) +; GFX900-NEXT: ds_read_b32 v2, v2 offset:88 +; GFX900-NEXT: v_add_u32_e32 v6, v7, v6 +; GFX900-NEXT: s_waitcnt lgkmcnt(1) +; GFX900-NEXT: v_add3_u32 v0, v6, v3, v0 +; GFX900-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX900-NEXT: v_add3_u32 v0, v0, v1, v2 +; GFX900-NEXT: global_store_dword v[4:5], v0, off +; GFX900-NEXT: s_waitcnt vmcnt(0) +; GFX900-NEXT: s_setpc_b64 s[30:31] +; +; GFX942-LABEL: buffers_with_wave_barrier: +; GFX942: ; %bb.0: ; %entry +; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX942-NEXT: global_load_dword v6, v[2:3], off offset:44 +; GFX942-NEXT: global_load_dword v7, v[0:1], off offset:4 +; GFX942-NEXT: s_add_i32 m0, s16, 0x54 +; GFX942-NEXT: v_mov_b32_e32 v8, 0x54 +; GFX942-NEXT: ; wave barrier +; GFX942-NEXT: buffer_load_dword v8, s[0:3], 0 offen lds +; GFX942-NEXT: ; wave barrier +; GFX942-NEXT: ; s_asyncmark +; GFX942-NEXT: global_load_dword v0, v[0:1], off offset:8 +; GFX942-NEXT: s_add_i32 m0, s16, 0x58 +; GFX942-NEXT: v_mov_b32_e32 v1, 0x58 +; GFX942-NEXT: ; wave barrier +; GFX942-NEXT: buffer_load_dword v1, s[0:3], 0 offen lds +; GFX942-NEXT: ; wave barrier +; GFX942-NEXT: global_load_dword v1, v[2:3], off offset:48 +; GFX942-NEXT: v_mov_b32_e32 v2, s16 +; GFX942-NEXT: ; s_asyncmark +; GFX942-NEXT: ; s_wait_asyncmark(1) +; GFX942-NEXT: s_waitcnt vmcnt(3) +; GFX942-NEXT: ds_read_b32 v3, v2 offset:84 +; GFX942-NEXT: ; s_wait_asyncmark(0) +; GFX942-NEXT: s_waitcnt vmcnt(1) +; GFX942-NEXT: ds_read_b32 v2, v2 offset:88 +; GFX942-NEXT: v_add_u32_e32 v6, v7, v6 +; 
GFX942-NEXT: s_waitcnt lgkmcnt(1) +; GFX942-NEXT: v_add3_u32 v0, v6, v3, v0 +; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX942-NEXT: v_add3_u32 v0, v0, v1, v2 +; GFX942-NEXT: global_store_dword v[4:5], v0, off +; GFX942-NEXT: s_waitcnt vmcnt(0) +; GFX942-NEXT: s_setpc_b64 s[30:31] +; +; GFX1010-LABEL: buffers_with_wave_barrier: +; GFX1010: ; %bb.0: ; %entry +; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX1010-NEXT: v_mov_b32_e32 v6, 0x54 +; GFX1010-NEXT: global_load_dword v7, v[2:3], off offset:44 +; GFX1010-NEXT: global_load_dword v8, v[0:1], off offset:4 +; GFX1010-NEXT: s_add_i32 m0, s20, 0x54 +; GFX1010-NEXT: ; wave barrier +; GFX1010-NEXT: buffer_load_dword v6, s[16:19], 0 offen lds +; GFX1010-NEXT: ; wave barrier +; GFX1010-NEXT: ; s_asyncmark +; GFX1010-NEXT: v_mov_b32_e32 v6, 0x58 +; GFX1010-NEXT: global_load_dword v0, v[0:1], off offset:8 +; GFX1010-NEXT: s_add_i32 m0, s20, 0x58 +; GFX1010-NEXT: ; wave barrier +; GFX1010-NEXT: buffer_load_dword v6, s[16:19], 0 offen lds +; GFX1010-NEXT: ; wave barrier +; GFX1010-NEXT: global_load_dword v1, v[2:3], off offset:48 +; GFX1010-NEXT: v_mov_b32_e32 v2, s20 +; GFX1010-NEXT: ; s_asyncmark +; GFX1010-NEXT: ; s_wait_asyncmark(1) +; GFX1010-NEXT: s_waitcnt vmcnt(3) +; GFX1010-NEXT: ds_read_b32 v3, v2 offset:84 +; GFX1010-NEXT: ; s_wait_asyncmark(0) +; GFX1010-NEXT: s_waitcnt vmcnt(1) +; GFX1010-NEXT: ds_read_b32 v2, v2 offset:88 +; GFX1010-NEXT: v_add_nc_u32_e32 v6, v8, v7 +; GFX1010-NEXT: s_waitcnt lgkmcnt(1) +; GFX1010-NEXT: v_add3_u32 v0, v6, v3, v0 +; GFX1010-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX1010-NEXT: v_add3_u32 v0, v0, v1, v2 +; GFX1010-NEXT: global_store_dword v[4:5], v0, off +; GFX1010-NEXT: s_setpc_b64 s[30:31] +entry: + ; First batch: global load, global load, async global-to-LDS. + %bar_gep11 = getelementptr i32, ptr addrspace(1) %bar, i32 11 + %bar_v11 = load i32, ptr addrspace(1) %bar_gep11 + %foo_gep1 = getelementptr i32, ptr addrspace(1) %foo, i32 1 + %foo_v1 = load i32, ptr addrspace(1) %foo_gep1 + %lds_gep21 = getelementptr i32, ptr addrspace(3) %lds, i32 21 + call void @llvm.amdgcn.wave.barrier() + call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %buf, ptr addrspace(3) %lds_gep21, i32 4, i32 84, i32 0, i32 0, i32 u0x20) + call void @llvm.amdgcn.wave.barrier() + call void @llvm.amdgcn.asyncmark() + + ; Second batch: global load, async global-to-LDS, global load. + %foo_gep2 = getelementptr i32, ptr addrspace(1) %foo, i32 2 + %foo_v2 = load i32, ptr addrspace(1) %foo_gep2 + %lds_gep22 = getelementptr i32, ptr addrspace(3) %lds, i32 22 + call void @llvm.amdgcn.wave.barrier() + call void @llvm.amdgcn.raw.ptr.buffer.load.lds(ptr addrspace(8) %buf, ptr addrspace(3) %lds_gep22, i32 4, i32 88, i32 0, i32 0, i32 u0x20) + call void @llvm.amdgcn.wave.barrier() + %bar_gep12 = getelementptr i32, ptr addrspace(1) %bar, i32 12 + %bar_v12 = load i32, ptr addrspace(1) %bar_gep12 + call void @llvm.amdgcn.asyncmark() + + ; Wait for first async mark and read from LDS. + ; This results in vmcnt(3) corresponding to the second batch. + call void @llvm.amdgcn.wait.asyncmark(i16 1) + %lds_val21 = load i32, ptr addrspace(3) %lds_gep21 + + ; Wait for the next lds dma. + ; This results in vmcnt(1) because the last global load is not async. 
+ call void @llvm.amdgcn.wait.asyncmark(i16 0)
+ %lds_val22 = load i32, ptr addrspace(3) %lds_gep22
+ %sum1 = add i32 %foo_v1, %bar_v11
+ %sum2 = add i32 %sum1, %lds_val21
+ %sum3 = add i32 %sum2, %foo_v2
+ %sum4 = add i32 %sum3, %bar_v12
+ %sum5 = add i32 %sum4, %lds_val22
+ store i32 %sum5, ptr addrspace(1) %out
+
+ ret void
+}
+
+; A perfect loop that is unlikely to exist in real life. It uses only async LDS
+; DMA operations, and results in vmcnt waits that exactly match the stream of
+; those outstanding operations.
+
+define amdgpu_kernel void @test_pipelined_loop(ptr addrspace(1) %foo, ptr addrspace(3) %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out, i32 %n) {
+; GFX900-LABEL: test_pipelined_loop:
+; GFX900: ; %bb.0: ; %prolog
+; GFX900-NEXT: s_load_dword s2, s[4:5], 0x2c
+; GFX900-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX900-NEXT: v_mov_b32_e32 v0, 0
+; GFX900-NEXT: s_load_dword s3, s[4:5], 0x44
+; GFX900-NEXT: v_mov_b32_e32 v1, 0
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: s_mov_b32 m0, s2
+; GFX900-NEXT: s_nop 0
+; GFX900-NEXT: global_load_dword v0, s[0:1] lds
+; GFX900-NEXT: s_add_i32 m0, s2, 4
+; GFX900-NEXT: s_add_u32 s6, s0, 4
+; GFX900-NEXT: s_addc_u32 s7, s1, 0
+; GFX900-NEXT: ; s_asyncmark
+; GFX900-NEXT: global_load_dword v0, s[6:7] lds
+; GFX900-NEXT: s_add_u32 s0, s0, 8
+; GFX900-NEXT: s_addc_u32 s1, s1, 0
+; GFX900-NEXT: s_mov_b32 s6, 2
+; GFX900-NEXT: s_mov_b32 s7, s2
+; GFX900-NEXT: ; s_asyncmark
+; GFX900-NEXT: .LBB2_1: ; %loop_body
+; GFX900-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX900-NEXT: s_add_i32 m0, s7, 8
+; GFX900-NEXT: v_mov_b32_e32 v2, s7
+; GFX900-NEXT: global_load_dword v0, s[0:1] lds
+; GFX900-NEXT: ; s_asyncmark
+; GFX900-NEXT: ; s_wait_asyncmark(2)
+; GFX900-NEXT: s_waitcnt vmcnt(2)
+; GFX900-NEXT: ds_read_b32 v2, v2
+; GFX900-NEXT: s_add_i32 s6, s6, 1
+; GFX900-NEXT: s_add_u32 s0, s0, 4
+; GFX900-NEXT: s_addc_u32 s1, s1, 0
+; GFX900-NEXT: s_add_i32 s7, s7, 4
+; GFX900-NEXT: s_cmp_lt_i32 s6, s3
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX900-NEXT: s_cbranch_scc1 .LBB2_1
+; GFX900-NEXT: ; %bb.2: ; %epilog
+; GFX900-NEXT: s_lshl2_add_u32 s0, s3, s2
+; GFX900-NEXT: s_add_i32 s0, s0, -8
+; GFX900-NEXT: v_mov_b32_e32 v0, s0
+; GFX900-NEXT: ; s_wait_asyncmark(1)
+; GFX900-NEXT: s_waitcnt vmcnt(1)
+; GFX900-NEXT: ds_read_b32 v0, v0
+; GFX900-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX900-NEXT: v_mov_b32_e32 v2, 0
+; GFX900-NEXT: ; s_wait_asyncmark(0)
+; GFX900-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX900-NEXT: v_add_u32_e32 v0, v1, v0
+; GFX900-NEXT: global_store_dword v2, v0, s[0:1]
+; GFX900-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_pipelined_loop:
+; GFX942: ; %bb.0: ; %prolog
+; GFX942-NEXT: s_load_dword s2, s[4:5], 0x2c
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_load_dword s3, s[4:5], 0x44
+; GFX942-NEXT: v_mov_b32_e32 v1, 0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 m0, s2
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: global_load_lds_dword v0, s[0:1]
+; GFX942-NEXT: s_add_i32 m0, s2, 4
+; GFX942-NEXT: s_add_u32 s6, s0, 4
+; GFX942-NEXT: s_addc_u32 s7, s1, 0
+; GFX942-NEXT: ; s_asyncmark
+; GFX942-NEXT: global_load_lds_dword v0, s[6:7]
+; GFX942-NEXT: s_add_u32 s0, s0, 8
+; GFX942-NEXT: s_addc_u32 s1, s1, 0
+; GFX942-NEXT: s_mov_b32 s6, 2
+; GFX942-NEXT: s_mov_b32 s7, s2
+; GFX942-NEXT: ; s_asyncmark
+; GFX942-NEXT: .LBB2_1: ; %loop_body
+; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT: s_add_i32 m0, s7, 8
+; GFX942-NEXT: v_mov_b32_e32 v2, s7
+; GFX942-NEXT: global_load_lds_dword v0, s[0:1]
+; GFX942-NEXT: ; s_asyncmark
+; GFX942-NEXT: ; s_wait_asyncmark(2)
+; GFX942-NEXT: s_waitcnt vmcnt(2)
+; GFX942-NEXT: ds_read_b32 v2, v2
+; GFX942-NEXT: s_add_i32 s6, s6, 1
+; GFX942-NEXT: s_add_u32 s0, s0, 4
+; GFX942-NEXT: s_addc_u32 s1, s1, 0
+; GFX942-NEXT: s_add_i32 s7, s7, 4
+; GFX942-NEXT: s_cmp_lt_i32 s6, s3
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX942-NEXT: s_cbranch_scc1 .LBB2_1
+; GFX942-NEXT: ; %bb.2: ; %epilog
+; GFX942-NEXT: s_lshl2_add_u32 s0, s3, s2
+; GFX942-NEXT: s_add_i32 s0, s0, -8
+; GFX942-NEXT: v_mov_b32_e32 v0, s0
+; GFX942-NEXT: ; s_wait_asyncmark(1)
+; GFX942-NEXT: s_waitcnt vmcnt(1)
+; GFX942-NEXT: ds_read_b32 v0, v0
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX942-NEXT: v_mov_b32_e32 v2, 0
+; GFX942-NEXT: ; s_wait_asyncmark(0)
+; GFX942-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_add_u32_e32 v0, v1, v0
+; GFX942-NEXT: global_store_dword v2, v0, s[0:1]
+; GFX942-NEXT: s_endpgm
+;
+; GFX1010-LABEL: test_pipelined_loop:
+; GFX1010: ; %bb.0: ; %prolog
+; GFX1010-NEXT: s_clause 0x1
+; GFX1010-NEXT: s_load_dword s2, s[4:5], 0x2c
+; GFX1010-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX1010-NEXT: v_mov_b32_e32 v0, 0
+; GFX1010-NEXT: s_load_dword s3, s[4:5], 0x44
+; GFX1010-NEXT: v_mov_b32_e32 v1, 0
+; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1010-NEXT: s_mov_b32 m0, s2
+; GFX1010-NEXT: global_load_dword v0, s[0:1] lds
+; GFX1010-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1010-NEXT: s_add_i32 m0, s2, 4
+; GFX1010-NEXT: s_add_u32 s6, s0, 4
+; GFX1010-NEXT: s_addc_u32 s7, s1, 0
+; GFX1010-NEXT: ; s_asyncmark
+; GFX1010-NEXT: global_load_dword v0, s[6:7] lds
+; GFX1010-NEXT: s_add_u32 s0, s0, 8
+; GFX1010-NEXT: s_addc_u32 s1, s1, 0
+; GFX1010-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1010-NEXT: s_mov_b32 s6, 2
+; GFX1010-NEXT: s_mov_b32 s7, s2
+; GFX1010-NEXT: ; s_asyncmark
+; GFX1010-NEXT: .LBB2_1: ; %loop_body
+; GFX1010-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1010-NEXT: s_add_i32 m0, s7, 8
+; GFX1010-NEXT: v_mov_b32_e32 v2, s7
+; GFX1010-NEXT: global_load_dword v0, s[0:1] lds
+; GFX1010-NEXT: ; s_asyncmark
+; GFX1010-NEXT: ; s_wait_asyncmark(2)
+; GFX1010-NEXT: s_waitcnt vmcnt(2)
+; GFX1010-NEXT: ds_read_b32 v2, v2
+; GFX1010-NEXT: s_add_i32 s6, s6, 1
+; GFX1010-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1010-NEXT: s_add_u32 s0, s0, 4
+; GFX1010-NEXT: s_addc_u32 s1, s1, 0
+; GFX1010-NEXT: s_add_i32 s7, s7, 4
+; GFX1010-NEXT: s_cmp_lt_i32 s6, s3
+; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1010-NEXT: v_add_nc_u32_e32 v1, v1, v2
+; GFX1010-NEXT: s_cbranch_scc1 .LBB2_1
+; GFX1010-NEXT: ; %bb.2: ; %epilog
+; GFX1010-NEXT: s_lshl2_add_u32 s0, s3, s2
+; GFX1010-NEXT: ; s_wait_asyncmark(1)
+; GFX1010-NEXT: s_waitcnt vmcnt(1)
+; GFX1010-NEXT: s_add_i32 s0, s0, -8
+; GFX1010-NEXT: v_mov_b32_e32 v2, 0
+; GFX1010-NEXT: v_mov_b32_e32 v0, s0
+; GFX1010-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX1010-NEXT: ds_read_b32 v0, v0
+; GFX1010-NEXT: ; s_wait_asyncmark(0)
+; GFX1010-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_add_nc_u32_e32 v0, v1, v0
+; GFX1010-NEXT: global_store_dword v2, v0, s[0:1]
+; GFX1010-NEXT: s_endpgm
+prolog:
+ ; Load first iteration
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %foo, ptr addrspace(3) %lds, i32 4, i32 0, i32 u0x20)
+ call void @llvm.amdgcn.asyncmark()
+
+ ; Load second iteration
+ %lds_gep1 = getelementptr i32, ptr addrspace(3) %lds, i32 1
+ %foo_gep1 = getelementptr i32, ptr addrspace(1) %foo, i32 1
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %foo_gep1, ptr addrspace(3) %lds_gep1, i32 4, i32 0, i32 u0x20)
+ call void @llvm.amdgcn.asyncmark()
+
+ br label %loop_body
+
+loop_body:
+ %i = phi i32 [ 2, %prolog ], [ %i.next, %loop_body ]
+ %sum = phi i32 [ 0, %prolog ], [ %sum_i, %loop_body ]
+
+ ; Load next iteration
+ %lds_gep_cur = getelementptr i32, ptr addrspace(3) %lds, i32 %i
+ %foo_gep_cur = getelementptr i32, ptr addrspace(1) %foo, i32 %i
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %foo_gep_cur, ptr addrspace(3) %lds_gep_cur, i32 4, i32 0, i32 u0x20)
+ call void @llvm.amdgcn.asyncmark()
+
+ ; Wait for iteration i-2 and process
+ call void @llvm.amdgcn.wait.asyncmark(i16 2)
+ %lds_idx = sub i32 %i, 2
+ %lds_gep_read = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_idx
+ %lds_val = load i32, ptr addrspace(3) %lds_gep_read
+
+ %sum_i = add i32 %sum, %lds_val
+
+ %i.next = add i32 %i, 1
+ %cmp = icmp slt i32 %i.next, %n
+ br i1 %cmp, label %loop_body, label %epilog
+
+epilog:
+ ; Process remaining iterations
+ call void @llvm.amdgcn.wait.asyncmark(i16 1)
+ %lds_n_2 = sub i32 %n, 2
+ %lds_gep_n_2 = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_n_2
+ %lds_val_n_2 = load i32, ptr addrspace(3) %lds_gep_n_2
+ %sum_e2 = add i32 %sum_i, %lds_val_n_2
+ %out_gep_e1 = getelementptr i32, ptr addrspace(1) %out, i32 %lds_n_2
+
+ call void @llvm.amdgcn.wait.asyncmark(i16 0)
+ %lds_n_1 = sub i32 %n, 1
+ %lds_gep_n_1 = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_n_1
+ %lds_val_n_1 = load i32, ptr addrspace(3) %lds_gep_n_1
+ %sum_e1 = add i32 %sum_e2, %lds_val_n_1
+ store i32 %sum_e2, ptr addrspace(1) %bar
+
+ ret void
+}
+
+; Software pipelined loop with async global-to-LDS and global loads
+
+define amdgpu_kernel void @test_pipelined_loop_with_global(ptr addrspace(1) %foo, ptr addrspace(3) %lds, ptr addrspace(1) %bar, ptr addrspace(1) %out, i32 %n) {
+; GFX900-LABEL: test_pipelined_loop_with_global:
+; GFX900: ; %bb.0: ; %prolog
+; GFX900-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; GFX900-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
+; GFX900-NEXT: s_load_dword s8, s[4:5], 0x2c
+; GFX900-NEXT: v_mov_b32_e32 v0, 0
+; GFX900-NEXT: s_mov_b32 s10, 2
+; GFX900-NEXT: s_load_dword s9, s[4:5], 0x44
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: s_load_dword s12, s[6:7], 0x0
+; GFX900-NEXT: s_load_dword s13, s[0:1], 0x0
+; GFX900-NEXT: s_mov_b32 m0, s8
+; GFX900-NEXT: s_add_u32 s4, s6, 4
+; GFX900-NEXT: global_load_dword v0, s[6:7] lds
+; GFX900-NEXT: ; s_asyncmark
+; GFX900-NEXT: global_load_dword v1, v0, s[6:7] offset:4
+; GFX900-NEXT: global_load_dword v2, v0, s[0:1] offset:4
+; GFX900-NEXT: s_addc_u32 s5, s7, 0
+; GFX900-NEXT: s_add_i32 m0, s8, 4
+; GFX900-NEXT: s_add_u32 s0, s0, 8
+; GFX900-NEXT: global_load_dword v0, s[4:5] lds
+; GFX900-NEXT: s_addc_u32 s1, s1, 0
+; GFX900-NEXT: s_add_u32 s6, s6, 8
+; GFX900-NEXT: s_mov_b32 s11, s8
+; GFX900-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX900-NEXT: s_addc_u32 s7, s7, 0
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: v_mov_b32_e32 v8, s13
+; GFX900-NEXT: v_mov_b32_e32 v7, s12
+; GFX900-NEXT: ; s_asyncmark
+; GFX900-NEXT: s_waitcnt vmcnt(2)
+; GFX900-NEXT: v_mov_b32_e32 v3, v1
+; GFX900-NEXT: s_waitcnt vmcnt(1)
+; GFX900-NEXT: v_mov_b32_e32 v4, v2
+; GFX900-NEXT: .LBB3_1: ; %loop_body
+; GFX900-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX900-NEXT: s_waitcnt vmcnt(2)
+; GFX900-NEXT: v_mov_b32_e32 v5, v4
+; GFX900-NEXT: v_mov_b32_e32 v6, v3
+; GFX900-NEXT: global_load_dword v3, v0, s[6:7]
+; GFX900-NEXT: global_load_dword v4, v0, s[0:1]
+; GFX900-NEXT: s_add_i32 m0, s11, 8
+; GFX900-NEXT: v_mov_b32_e32 v9, s11
+; GFX900-NEXT: global_load_dword v0, s[6:7] lds
+; GFX900-NEXT: ; s_asyncmark
+; GFX900-NEXT: ; s_wait_asyncmark(2)
+; GFX900-NEXT: ds_read_b32 v9, v9
+; GFX900-NEXT: s_add_i32 s10, s10, 1
+; GFX900-NEXT: s_add_u32 s0, s0, 4
+; GFX900-NEXT: s_addc_u32 s1, s1, 0
+; GFX900-NEXT: v_add_u32_e32 v7, v7, v8
+; GFX900-NEXT: s_add_u32 s6, s6, 4
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: v_add_u32_e32 v7, v7, v9
+; GFX900-NEXT: s_addc_u32 s7, s7, 0
+; GFX900-NEXT: global_store_dword v0, v7, s[4:5]
+; GFX900-NEXT: s_add_u32 s4, s4, 4
+; GFX900-NEXT: s_addc_u32 s5, s5, 0
+; GFX900-NEXT: s_add_i32 s11, s11, 4
+; GFX900-NEXT: s_cmp_lt_i32 s10, s9
+; GFX900-NEXT: v_mov_b32_e32 v7, v1
+; GFX900-NEXT: v_mov_b32_e32 v8, v2
+; GFX900-NEXT: s_cbranch_scc1 .LBB3_1
+; GFX900-NEXT: ; %bb.2: ; %epilog
+; GFX900-NEXT: s_add_i32 s0, s9, -2
+; GFX900-NEXT: s_lshl2_add_u32 s1, s0, s8
+; GFX900-NEXT: v_mov_b32_e32 v0, s1
+; GFX900-NEXT: ; s_wait_asyncmark(1)
+; GFX900-NEXT: s_waitcnt vmcnt(4)
+; GFX900-NEXT: ds_read_b32 v1, v0
+; GFX900-NEXT: s_ashr_i32 s1, s0, 31
+; GFX900-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
+; GFX900-NEXT: v_add_u32_e32 v2, v6, v5
+; GFX900-NEXT: s_add_u32 s0, s2, s0
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: v_add_u32_e32 v1, v2, v1
+; GFX900-NEXT: s_addc_u32 s1, s3, s1
+; GFX900-NEXT: v_mov_b32_e32 v2, 0
+; GFX900-NEXT: global_store_dword v2, v1, s[0:1]
+; GFX900-NEXT: ; s_wait_asyncmark(0)
+; GFX900-NEXT: s_waitcnt vmcnt(2)
+; GFX900-NEXT: ds_read_b32 v0, v0 offset:4
+; GFX900-NEXT: v_add_u32_e32 v1, v3, v4
+; GFX900-NEXT: s_waitcnt lgkmcnt(0)
+; GFX900-NEXT: v_add_u32_e32 v0, v1, v0
+; GFX900-NEXT: global_store_dword v2, v0, s[0:1] offset:4
+; GFX900-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_pipelined_loop_with_global:
+; GFX942: ; %bb.0: ; %prolog
+; GFX942-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
+; GFX942-NEXT: s_load_dword s8, s[4:5], 0x2c
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_mov_b32 s10, 2
+; GFX942-NEXT: s_load_dword s9, s[4:5], 0x44
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_load_dword s12, s[6:7], 0x0
+; GFX942-NEXT: s_load_dword s13, s[0:1], 0x0
+; GFX942-NEXT: s_mov_b32 m0, s8
+; GFX942-NEXT: s_add_u32 s4, s6, 4
+; GFX942-NEXT: global_load_lds_dword v0, s[6:7]
+; GFX942-NEXT: ; s_asyncmark
+; GFX942-NEXT: global_load_dword v1, v0, s[6:7] offset:4
+; GFX942-NEXT: global_load_dword v2, v0, s[0:1] offset:4
+; GFX942-NEXT: s_addc_u32 s5, s7, 0
+; GFX942-NEXT: s_add_i32 m0, s8, 4
+; GFX942-NEXT: s_add_u32 s0, s0, 8
+; GFX942-NEXT: global_load_lds_dword v0, s[4:5]
+; GFX942-NEXT: s_addc_u32 s1, s1, 0
+; GFX942-NEXT: s_add_u32 s6, s6, 8
+; GFX942-NEXT: s_mov_b32 s11, s8
+; GFX942-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX942-NEXT: s_addc_u32 s7, s7, 0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: v_mov_b32_e32 v8, s13
+; GFX942-NEXT: v_mov_b32_e32 v7, s12
+; GFX942-NEXT: ; s_asyncmark
+; GFX942-NEXT: s_waitcnt vmcnt(2)
+; GFX942-NEXT: v_mov_b32_e32 v3, v1
+; GFX942-NEXT: s_waitcnt vmcnt(1)
+; GFX942-NEXT: v_mov_b32_e32 v4, v2
+; GFX942-NEXT: .LBB3_1: ; %loop_body
+; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT: s_waitcnt vmcnt(2)
+; GFX942-NEXT: v_mov_b32_e32 v5, v4
+; GFX942-NEXT: v_mov_b32_e32 v6, v3
+; GFX942-NEXT: global_load_dword v3, v0, s[6:7]
+; GFX942-NEXT: global_load_dword v4, v0, s[0:1]
+; GFX942-NEXT: s_add_i32 m0, s11, 8
+; GFX942-NEXT: v_mov_b32_e32 v9, s11
+; GFX942-NEXT: global_load_lds_dword v0, s[6:7]
+; GFX942-NEXT: ; s_asyncmark
+; GFX942-NEXT: ; s_wait_asyncmark(2)
+; GFX942-NEXT: ds_read_b32 v9, v9
+; GFX942-NEXT: s_add_i32 s10, s10, 1
+; GFX942-NEXT: s_add_u32 s0, s0, 4
+; GFX942-NEXT: s_addc_u32 s1, s1, 0
+; GFX942-NEXT: v_add_u32_e32 v7, v7, v8
+; GFX942-NEXT: s_add_u32 s6, s6, 4
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: v_add_u32_e32 v7, v7, v9
+; GFX942-NEXT: s_addc_u32 s7, s7, 0
+; GFX942-NEXT: global_store_dword v0, v7, s[4:5]
+; GFX942-NEXT: s_add_u32 s4, s4, 4
+; GFX942-NEXT: s_addc_u32 s5, s5, 0
+; GFX942-NEXT: s_add_i32 s11, s11, 4
+; GFX942-NEXT: s_cmp_lt_i32 s10, s9
+; GFX942-NEXT: v_mov_b32_e32 v7, v1
+; GFX942-NEXT: v_mov_b32_e32 v8, v2
+; GFX942-NEXT: s_cbranch_scc1 .LBB3_1
+; GFX942-NEXT: ; %bb.2: ; %epilog
+; GFX942-NEXT: s_add_i32 s0, s9, -2
+; GFX942-NEXT: s_lshl2_add_u32 s1, s0, s8
+; GFX942-NEXT: v_mov_b32_e32 v0, s1
+; GFX942-NEXT: ; s_wait_asyncmark(1)
+; GFX942-NEXT: s_waitcnt vmcnt(4)
+; GFX942-NEXT: ds_read_b32 v1, v0
+; GFX942-NEXT: s_ashr_i32 s1, s0, 31
+; GFX942-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
+; GFX942-NEXT: v_add_u32_e32 v2, v6, v5
+; GFX942-NEXT: s_add_u32 s0, s2, s0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: v_add_u32_e32 v1, v2, v1
+; GFX942-NEXT: s_addc_u32 s1, s3, s1
+; GFX942-NEXT: v_mov_b32_e32 v2, 0
+; GFX942-NEXT: global_store_dword v2, v1, s[0:1]
+; GFX942-NEXT: ; s_wait_asyncmark(0)
+; GFX942-NEXT: s_waitcnt vmcnt(2)
+; GFX942-NEXT: ds_read_b32 v0, v0 offset:4
+; GFX942-NEXT: v_add_u32_e32 v1, v3, v4
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: v_add_u32_e32 v0, v1, v0
+; GFX942-NEXT: global_store_dword v2, v0, s[0:1] offset:4
+; GFX942-NEXT: s_endpgm
+;
+; GFX1010-LABEL: test_pipelined_loop_with_global:
+; GFX1010: ; %bb.0: ; %prolog
+; GFX1010-NEXT: s_clause 0x2
+; GFX1010-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x24
+; GFX1010-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x34
+; GFX1010-NEXT: s_load_dword s8, s[4:5], 0x2c
+; GFX1010-NEXT: v_mov_b32_e32 v0, 0
+; GFX1010-NEXT: s_load_dword s9, s[4:5], 0x44
+; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1010-NEXT: s_load_dword s10, s[6:7], 0x0
+; GFX1010-NEXT: s_load_dword s11, s[0:1], 0x0
+; GFX1010-NEXT: s_mov_b32 m0, s8
+; GFX1010-NEXT: s_add_u32 s4, s6, 4
+; GFX1010-NEXT: global_load_dword v0, s[6:7] lds
+; GFX1010-NEXT: ; s_asyncmark
+; GFX1010-NEXT: s_clause 0x1
+; GFX1010-NEXT: global_load_dword v1, v0, s[6:7] offset:4
+; GFX1010-NEXT: global_load_dword v2, v0, s[0:1] offset:4
+; GFX1010-NEXT: s_addc_u32 s5, s7, 0
+; GFX1010-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1010-NEXT: s_add_i32 m0, s8, 4
+; GFX1010-NEXT: s_add_u32 s0, s0, 8
+; GFX1010-NEXT: global_load_dword v0, s[4:5] lds
+; GFX1010-NEXT: s_addc_u32 s1, s1, 0
+; GFX1010-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1010-NEXT: s_add_u32 s4, s6, 8
+; GFX1010-NEXT: s_addc_u32 s5, s7, 0
+; GFX1010-NEXT: s_mov_b64 s[6:7], s[2:3]
+; GFX1010-NEXT: ; s_asyncmark
+; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1010-NEXT: v_mov_b32_e32 v7, s10
+; GFX1010-NEXT: v_mov_b32_e32 v8, s11
+; GFX1010-NEXT: s_mov_b32 s10, 2
+; GFX1010-NEXT: s_mov_b32 s11, s8
+; GFX1010-NEXT: s_waitcnt vmcnt(2)
+; GFX1010-NEXT: v_mov_b32_e32 v3, v1
+; GFX1010-NEXT: s_waitcnt vmcnt(1)
+; GFX1010-NEXT: v_mov_b32_e32 v4, v2
+; GFX1010-NEXT: .LBB3_1: ; %loop_body
+; GFX1010-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1010-NEXT: s_add_i32 m0, s11, 8
+; GFX1010-NEXT: s_waitcnt vmcnt(1)
+; GFX1010-NEXT: v_mov_b32_e32 v5, v4
+; GFX1010-NEXT: v_mov_b32_e32 v6, v3
+; GFX1010-NEXT: s_clause 0x1
+; GFX1010-NEXT: global_load_dword v3, v0, s[4:5]
+; GFX1010-NEXT: global_load_dword v4, v0, s[0:1]
+; GFX1010-NEXT: global_load_dword v0, s[4:5] lds
+; GFX1010-NEXT: v_mov_b32_e32 v9, s11
+; GFX1010-NEXT: ; s_asyncmark
+; GFX1010-NEXT: ; s_wait_asyncmark(2)
+; GFX1010-NEXT: v_add_nc_u32_e32 v10, v7, v8
+; GFX1010-NEXT: s_add_i32 s10, s10, 1
+; GFX1010-NEXT: ds_read_b32 v9, v9
+; GFX1010-NEXT: s_add_u32 s0, s0, 4
+; GFX1010-NEXT: s_addc_u32 s1, s1, 0
+; GFX1010-NEXT: s_add_u32 s4, s4, 4
+; GFX1010-NEXT: v_mov_b32_e32 v7, v1
+; GFX1010-NEXT: v_mov_b32_e32 v8, v2
+; GFX1010-NEXT: s_addc_u32 s5, s5, 0
+; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1010-NEXT: v_add_nc_u32_e32 v9, v10, v9
+; GFX1010-NEXT: global_store_dword v0, v9, s[6:7]
+; GFX1010-NEXT: s_waitcnt_depctr depctr_vm_vsrc(0)
+; GFX1010-NEXT: s_add_u32 s6, s6, 4
+; GFX1010-NEXT: s_addc_u32 s7, s7, 0
+; GFX1010-NEXT: s_add_i32 s11, s11, 4
+; GFX1010-NEXT: s_cmp_lt_i32 s10, s9
+; GFX1010-NEXT: s_cbranch_scc1 .LBB3_1
+; GFX1010-NEXT: ; %bb.2: ; %epilog
+; GFX1010-NEXT: s_add_i32 s0, s9, -2
+; GFX1010-NEXT: ; s_wait_asyncmark(1)
+; GFX1010-NEXT: s_waitcnt vmcnt(3)
+; GFX1010-NEXT: s_lshl2_add_u32 s1, s0, s8
+; GFX1010-NEXT: v_add_nc_u32_e32 v2, v6, v5
+; GFX1010-NEXT: v_mov_b32_e32 v0, s1
+; GFX1010-NEXT: s_ashr_i32 s1, s0, 31
+; GFX1010-NEXT: v_mov_b32_e32 v5, 0
+; GFX1010-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
+; GFX1010-NEXT: s_add_u32 s0, s2, s0
+; GFX1010-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX1010-NEXT: ds_read_b32 v1, v0
+; GFX1010-NEXT: s_addc_u32 s1, s3, s1
+; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1010-NEXT: v_add_nc_u32_e32 v1, v2, v1
+; GFX1010-NEXT: global_store_dword v5, v1, s[0:1]
+; GFX1010-NEXT: ; s_wait_asyncmark(0)
+; GFX1010-NEXT: s_waitcnt vmcnt(0)
+; GFX1010-NEXT: ds_read_b32 v0, v0 offset:4
+; GFX1010-NEXT: v_add_nc_u32_e32 v1, v3, v4
+; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1010-NEXT: v_add_nc_u32_e32 v0, v1, v0
+; GFX1010-NEXT: global_store_dword v5, v0, s[0:1] offset:4
+; GFX1010-NEXT: s_endpgm
+prolog:
+ ; Load first iteration
+ %v0 = load i32, ptr addrspace(1) %foo
+ %g0 = load i32, ptr addrspace(1) %bar
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %foo, ptr addrspace(3) %lds, i32 4, i32 0, i32 u0x20)
+ call void @llvm.amdgcn.asyncmark()
+
+ ; Load second iteration
+ %foo_gep1 = getelementptr i32, ptr addrspace(1) %foo, i32 1
+ %v1 = load i32, ptr addrspace(1) %foo_gep1
+ %bar_gep1 = getelementptr i32, ptr addrspace(1) %bar, i32 1
+ %g1 = load i32, ptr addrspace(1) %bar_gep1
+
+ %lds_gep1 = getelementptr i32, ptr addrspace(3) %lds, i32 1
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %foo_gep1, ptr addrspace(3) %lds_gep1, i32 4, i32 0, i32 u0x20)
+ call void @llvm.amdgcn.asyncmark()
+
+ br label %loop_body
+
+ ; The vmcnt at the end of the prolog and at the start of the loop header seems
+ ; to be stricter than necessary because of the ordinary global operations. We
+ ; could, in principle, further relax the wait by introducing async globals (non
+ ; LDS DMA) in a similar way.
+
+loop_body:
+ %i = phi i32 [ 2, %prolog ], [ %i.next, %loop_body ]
+ %prev_v = phi i32 [ %v0, %prolog ], [ %v1, %loop_body ]
+ %prev_g = phi i32 [ %g0, %prolog ], [ %g1, %loop_body ]
+ %v1_phi = phi i32 [ %v1, %prolog ], [ %cur_v, %loop_body ]
+ %g1_phi = phi i32 [ %g1, %prolog ], [ %cur_g, %loop_body ]
+
+ ; Load next iteration
+ %foo_gep_cur = getelementptr i32, ptr addrspace(1) %foo, i32 %i
+ %cur_v = load i32, ptr addrspace(1) %foo_gep_cur
+ %bar_gep_cur = getelementptr i32, ptr addrspace(1) %bar, i32 %i
+ %cur_g = load i32, ptr addrspace(1) %bar_gep_cur
+ %lds_gep_cur = getelementptr i32, ptr addrspace(3) %lds, i32 %i
+ call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %foo_gep_cur, ptr addrspace(3) %lds_gep_cur, i32 4, i32 0, i32 u0x20)
+ call void @llvm.amdgcn.asyncmark()
+
+ ; Wait for iteration i-2 and process
+ call void @llvm.amdgcn.wait.asyncmark(i16 2)
+ %lds_idx = sub i32 %i, 2
+ %lds_gep_read = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_idx
+ %lds_val = load i32, ptr addrspace(3) %lds_gep_read
+
+ %sum1 = add i32 %prev_v, %prev_g
+ %sum2 = add i32 %sum1, %lds_val
+ %out_gep = getelementptr i32, ptr addrspace(1) %out, i32 %lds_idx
+ store i32 %sum2, ptr addrspace(1) %out_gep
+
+ %i.next = add i32 %i, 1
+ %cmp = icmp slt i32 %i.next, %n
+ br i1 %cmp, label %loop_body, label %epilog
+
+epilog:
+ ; Process remaining iterations
+ call void @llvm.amdgcn.wait.asyncmark(i16 1)
+ %lds_n_2 = sub i32 %n, 2
+ %lds_gep_n_2 = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_n_2
+ %lds_val_n_2 = load i32, ptr addrspace(3) %lds_gep_n_2
+ %sum_e1 = add i32 %v1_phi, %g1_phi
+ %sum_e2 = add i32 %sum_e1, %lds_val_n_2
+ %out_gep_e1 = getelementptr i32, ptr addrspace(1) %out, i32 %lds_n_2
+ store i32 %sum_e2, ptr addrspace(1) %out_gep_e1
+
+ call void @llvm.amdgcn.wait.asyncmark(i16 0)
+ %lds_n_1 = sub i32 %n, 1
+ %lds_gep_n_1 = getelementptr i32, ptr addrspace(3) %lds, i32 %lds_n_1
+ %lds_val_n_1 = load i32, ptr addrspace(3) %lds_gep_n_1
+ %sum_e3 = add i32 %cur_v, %cur_g
+ %sum_e4 = add i32 %sum_e3, %lds_val_n_1
+ %out_gep_e2 = getelementptr i32, ptr addrspace(1) %out, i32 %lds_n_1
+ store i32 %sum_e4, ptr addrspace(1) %out_gep_e2
+
+ ret void
+}
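
For readers skimming the patch, the idiom these tests exercise reduces to the minimal double-buffered copy below. This sketch is illustrative and not part of the patch: the kernel name @double_buffer_sketch is invented, while the intrinsic signatures and the u0x20 async bit in the aux operand follow the tests above. Judging by the checked output, the i16 operand of wait.asyncmark bounds how many markers may still be outstanding when the wait completes.

  ; Illustrative sketch (not part of this patch); kernel name is hypothetical.
  declare void @llvm.amdgcn.global.load.lds(ptr addrspace(1), ptr addrspace(3), i32, i32, i32)
  declare void @llvm.amdgcn.asyncmark()
  declare void @llvm.amdgcn.wait.asyncmark(i16)

  define amdgpu_kernel void @double_buffer_sketch(ptr addrspace(1) %src, ptr addrspace(3) %lds, ptr addrspace(1) %out) {
    ; Issue two async transfers; the u0x20 bit in the aux operand asks the
    ; compiler not to track their completion automatically.
    call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %src, ptr addrspace(3) %lds, i32 4, i32 0, i32 u0x20)
    call void @llvm.amdgcn.asyncmark()
    %src1 = getelementptr i32, ptr addrspace(1) %src, i32 1
    %lds1 = getelementptr i32, ptr addrspace(3) %lds, i32 1
    call void @llvm.amdgcn.global.load.lds(ptr addrspace(1) %src1, ptr addrspace(3) %lds1, i32 4, i32 0, i32 u0x20)
    call void @llvm.amdgcn.asyncmark()
    ; Let one marker stay pending: only the first transfer must have landed.
    call void @llvm.amdgcn.wait.asyncmark(i16 1)
    %v0 = load i32, ptr addrspace(3) %lds
    ; Drain all markers before touching the second buffer.
    call void @llvm.amdgcn.wait.asyncmark(i16 0)
    %v1 = load i32, ptr addrspace(3) %lds1
    %sum = add i32 %v0, %v1
    store i32 %sum, ptr addrspace(1) %out
    ret void
  }

On the pre-GFX12 targets covered here, the two waits should lower to s_waitcnt vmcnt(1) and s_waitcnt vmcnt(0) respectively, matching the pattern in the checked output above.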
