https://github.com/ro-i updated https://github.com/llvm/llvm-project/pull/133907

From adbca593dd83f8f74cbfc0d1ba9932e3beb4adb0 Mon Sep 17 00:00:00 2001
From: Robert Imschweiler <[email protected]>
Date: Tue, 1 Apr 2025 08:03:16 -0500
Subject: [PATCH] [IR] Add CallBr intrinsics support

This commit adds support for using intrinsics with callbr.
Most uses will look like this example:
```llvm
  callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill]
kill:
  unreachable
cont:
  ...
```
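
A sketch of the variant that the Verifier change below also accepts, where the
indirect destination begins with a call to @llvm.amdgcn.unreachable rather than
a bare unreachable (block and value names are illustrative):
```llvm
  callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill]
kill:
  ; the first instruction of the indirect destination may also be a call to
  ; @llvm.amdgcn.unreachable, followed by a regular terminator
  call void @llvm.amdgcn.unreachable()
  unreachable
cont:
  ...
```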
---
 llvm/docs/LangRef.rst                         |  27 +++--
 .../llvm/CodeGen/GlobalISel/IRTranslator.h    |   2 +
 llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp  |  35 +++++-
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  68 ++++++++----
 .../SelectionDAG/SelectionDAGBuilder.h        |   4 +-
 llvm/lib/IR/Verifier.cpp                      |  33 +++++-
 llvm/test/Assembler/callbr.ll                 |  20 ++++
 llvm/test/CodeGen/AMDGPU/callbr-intrinsics.ll | 101 ++++++++++++++++++
 llvm/test/Verifier/callbr-intrinsic.ll        |  57 ++++++++++
 9 files changed, 313 insertions(+), 34 deletions(-)
 create mode 100644 llvm/test/Assembler/callbr.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/callbr-intrinsics.ll
 create mode 100644 llvm/test/Verifier/callbr-intrinsic.ll

diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 3c089b5a0ba79..c17e1000f9e8c 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -9788,8 +9788,12 @@ The '``callbr``' instruction causes control to transfer to a specified
 function, with the possibility of control flow transfer to either the
 '``fallthrough``' label or one of the '``indirect``' labels.
 
-This instruction should only be used to implement the "goto" feature of gcc
-style inline assembly. Any other usage is an error in the IR verifier.
+This instruction can currently only be used:
+
+#. to implement the "goto" feature of gcc style inline assembly, or
+#. to call selected intrinsics.
+
+Any other usage is an error in the IR verifier.
 
 Note that in order to support outputs along indirect edges, LLVM may need to
 split critical edges, which may require synthesizing a replacement block for
@@ -9838,7 +9842,7 @@ This instruction requires several arguments:
    indicates the function accepts a variable number of arguments, the
    extra arguments can be specified.
 #. '``fallthrough label``': the label reached when the inline assembly's
-   execution exits the bottom.
+   execution exits the bottom or when the intrinsic call returns.
 #. '``indirect labels``': the labels reached when a callee transfers control
    to a location other than the '``fallthrough label``'. Label constraints
    refer to these destinations.
@@ -9856,9 +9860,12 @@ flow goes after the call.
 The output values of a '``callbr``' instruction are available both in the
 the '``fallthrough``' block, and any '``indirect``' blocks(s).
 
-The only use of this today is to implement the "goto" feature of gcc inline
-assembly where additional labels can be provided as locations for the inline
-assembly to jump to.
+The only current uses of this are:
+
+#. to implement the "goto" feature of gcc inline assembly, where additional
+   labels can be provided as locations for the inline assembly to jump to.
+#. to support selected intrinsics that manipulate control flow and should
+   be chained to specific terminators, such as '``unreachable``'.
 
 Example:
 """"""""
@@ -9873,6 +9880,14 @@ Example:
       <result> = callbr i32 asm "", "=r,r,!i"(i32 %x)
                   to label %fallthrough [label %indirect]
 
+      ; intrinsic whose indirect destination must start with unreachable (the
+      ; order of the blocks after the callbr instruction doesn't matter)
+        callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill]
+      cont:
+        ...
+      kill:
+        unreachable
+
 .. _i_resume:
 
 '``resume``' Instruction
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
index 9d6038db4391f..5f5a6f5c72abf 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -317,6 +317,8 @@ class IRTranslator : public MachineFunctionPass {
   bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
 
   bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
+  bool translateCallBrIntrinsic(const CallBrInst &I,
+                                MachineIRBuilder &MIRBuilder);
 
   bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
 
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 78a633f7a049d..5810c470e4d84 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2824,7 +2824,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
                             IsTgtMemIntrinsic ? &Info : nullptr);
 }
 
-/// Translate a call to an intrinsic.
+/// Translate a call or callbr to an intrinsic.
 /// Depending on whether TLI->getTgtMemIntrinsic() is true, TgtMemIntrinsicInfo
 /// is a pointer to the correspondingly populated IntrinsicInfo object.
 /// Otherwise, this pointer is null.
@@ -3052,10 +3052,39 @@ bool IRTranslator::translateInvoke(const User &U,
   return true;
 }
 
+/// The intrinsics currently supported by callbr are implicit control flow
+/// intrinsics such as amdgcn.kill.
 bool IRTranslator::translateCallBr(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
-  // FIXME: Implement this.
-  return false;
+  if (containsBF16Type(U))
+    return false; // see translateCall
+
+  const CallBrInst &I = cast<CallBrInst>(U);
+  MachineBasicBlock *CallBrMBB = &MIRBuilder.getMBB();
+
+  // FIXME: inline asm is not yet supported for callbr in GlobalISel. As soon as
+  // we add support, we need to handle the indirect asm targets, see
+  // SelectionDAGBuilder::visitCallBr().
+  Intrinsic::ID IID = I.getIntrinsicID();
+  if (I.isInlineAsm())
+    return false;
+  if (!translateIntrinsic(I, IID, MIRBuilder))
+    return false;
+
+  // Retrieve successors.
+  SmallPtrSet<BasicBlock *, 8> Dests = {I.getDefaultDest()};
+  MachineBasicBlock *Return = &getMBB(*I.getDefaultDest());
+
+  // Update successor info.
+  addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
+  // TODO: In most cases where there is an intrinsic callbr, there is exactly
+  // one indirect target, which will be unreachable. As soon as this changes,
+  // we might need to enhance
+  // Target->setIsInlineAsmBrIndirectTarget or add something similar for
+  // intrinsic indirect branches.
+  CallBrMBB->normalizeSuccProbs();
+
+  return true;
 }
 
 bool IRTranslator::translateLandingPad(const User &U,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 7bc6c832cee51..d61468aed6b32 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3507,16 +3507,39 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
                           DAG.getBasicBlock(Return)));
 }
 
+/// The intrinsics currently supported by callbr are implicit control flow
+/// intrinsics such as amdgcn.kill.
+/// - they should be called (no "dontcall-" attributes)
+/// - they do not touch memory on the target (= !TLI.getTgtMemIntrinsic())
+/// - they do not need custom argument handling (no
+/// TLI.CollectTargetIntrinsicOperands())
+void SelectionDAGBuilder::visitCallBrIntrinsic(const CallBrInst &I) {
+  auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(I);
+
+  SmallVector<SDValue, 8> Ops =
+      getTargetIntrinsicOperands(I, HasChain, OnlyLoad);
+  SDVTList VTs = getTargetIntrinsicVTList(I, HasChain);
+
+  // Create the node.
+  SDValue Result =
+      getTargetNonMemIntrinsicNode(*I.getType(), HasChain, Ops, VTs);
+  Result = handleTargetIntrinsicRet(I, HasChain, OnlyLoad, Result);
+
+  setValue(&I, Result);
+}
+
 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
 
-  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
-  // have to do anything here to lower funclet bundles.
-  failForInvalidBundles(I, "callbrs",
-                        {LLVMContext::OB_deopt, LLVMContext::OB_funclet});
-
-  assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
-  visitInlineAsm(I);
+  if (I.isInlineAsm()) {
+    // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
+    // have to do anything here to lower funclet bundles.
+    failForInvalidBundles(I, "callbrs",
+                          {LLVMContext::OB_deopt, LLVMContext::OB_funclet});
+    visitInlineAsm(I);
+  } else {
+    visitCallBrIntrinsic(I);
+  }
   CopyToExportRegsIfNeeded(&I);
 
   // Retrieve successors.
@@ -3526,18 +3549,25 @@ void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
 
   // Update successor info.
   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
-  for (BasicBlock *Dest : I.getIndirectDests()) {
-    MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
-    Target->setIsInlineAsmBrIndirectTarget();
-    // If we introduce a type of asm goto statement that is permitted to use an
-    // indirect call instruction to jump to its labels, then we should add a
-    // call to Target->setMachineBlockAddressTaken() here, to mark the target
-    // block as requiring a BTI.
-
-    Target->setLabelMustBeEmitted();
-    // Don't add duplicate machine successors.
-    if (Dests.insert(Dest).second)
-      addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
+  // TODO: In most cases where there is an intrinsic callbr, there is exactly
+  // one indirect target, which will be unreachable. As soon as this changes,
+  // we might need to enhance
+  // Target->setIsInlineAsmBrIndirectTarget or add something similar for
+  // intrinsic indirect branches.
+  if (I.isInlineAsm()) {
+    for (BasicBlock *Dest : I.getIndirectDests()) {
+      MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
+      Target->setIsInlineAsmBrIndirectTarget();
+      // If we introduce a type of asm goto statement that is permitted to use
+      // an indirect call instruction to jump to its labels, then we should add
+      // a call to Target->setMachineBlockAddressTaken() here, to mark the
+      // target block as requiring a BTI.
+
+      Target->setLabelMustBeEmitted();
+      // Don't add duplicate machine successors.
+      if (Dests.insert(Dest).second)
+        addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
+    }
   }
   CallBrMBB->normalizeSuccProbs();
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index ed63bee58c957..1f4af56804ec9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -551,10 +551,12 @@ class SelectionDAGBuilder {
 private:
   // These all get lowered before this pass.
   void visitInvoke(const InvokeInst &I);
-  void visitCallBr(const CallBrInst &I);
   void visitCallBrLandingPad(const CallInst &I);
   void visitResume(const ResumeInst &I);
 
+  void visitCallBr(const CallBrInst &I);
+  void visitCallBrIntrinsic(const CallBrInst &I);
+
   void visitUnary(const User &I, unsigned Opcode);
   void visitFNeg(const User &I) { visitUnary(I, ISD::FNEG); }
 
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 7917712846990..2f13784f8c168 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -3379,11 +3379,34 @@ void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
 }
 
 void Verifier::visitCallBrInst(CallBrInst &CBI) {
-  Check(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI);
-  const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
-  Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
+  if (!CBI.isInlineAsm()) {
+    Check(CBI.getCalledFunction(),
+          "Callbr: indirect function / invalid signature");
+    Check(!CBI.hasOperandBundles(),
+          "Callbr for intrinsics currently doesn't support operand bundles");
+
+    switch (CBI.getIntrinsicID()) {
+    case Intrinsic::amdgcn_kill: {
+      Check(CBI.getNumIndirectDests() == 1,
+            "Callbr amdgcn_kill only supports one indirect dest");
+      bool Unreachable = isa<UnreachableInst>(CBI.getIndirectDest(0)->begin());
+      CallInst *Call = dyn_cast<CallInst>(CBI.getIndirectDest(0)->begin());
+      Check(Unreachable || (Call && Call->getIntrinsicID() ==
+                                        Intrinsic::amdgcn_unreachable),
+            "Callbr amdgcn_kill indirect dest needs to be unreachable");
+      break;
+    }
+    default:
+      CheckFailed(
+          "Callbr currently only supports asm-goto and selected intrinsics");
+    }
+    visitIntrinsicCall(CBI.getIntrinsicID(), CBI);
+  } else {
+    const InlineAsm *IA = cast<InlineAsm>(CBI.getCalledOperand());
+    Check(!IA->canThrow(), "Unwinding from Callbr is not allowed");
 
-  verifyInlineAsmCall(CBI);
+    verifyInlineAsmCall(CBI);
+  }
   visitTerminator(CBI);
 }
 
@@ -5479,7 +5502,7 @@ void Verifier::visitInstruction(Instruction &I) {
              (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)) ||
              IsAttachedCallOperand(F, CBI, i)),
             "Cannot take the address of an intrinsic!", &I);
-      Check(!F->isIntrinsic() || isa<CallInst>(I) ||
+      Check(!F->isIntrinsic() || isa<CallInst>(I) || isa<CallBrInst>(I) ||
                 F->getIntrinsicID() == Intrinsic::donothing ||
                 F->getIntrinsicID() == Intrinsic::seh_try_begin ||
                 F->getIntrinsicID() == Intrinsic::seh_try_end ||
diff --git a/llvm/test/Assembler/callbr.ll b/llvm/test/Assembler/callbr.ll
new file mode 100644
index 0000000000000..37fd777e1a395
--- /dev/null
+++ b/llvm/test/Assembler/callbr.ll
@@ -0,0 +1,20 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+declare void @llvm.amdgcn.kill(i1)
+
+define void @test_kill(i1 %c) {
+; CHECK-LABEL: define void @test_kill(
+; CHECK-SAME: i1 [[C:%.*]]) {
+; CHECK-NEXT:    callbr void @llvm.amdgcn.kill(i1 [[C]])
+; CHECK-NEXT:            to label %[[CONT:.*]] [label %kill]
+; CHECK:       [[KILL:.*:]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[CONT]]:
+; CHECK-NEXT:    ret void
+;
+  callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill]
+kill:
+  unreachable
+cont:
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/callbr-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/callbr-intrinsics.ll
new file mode 100644
index 0000000000000..ff1e23836fed2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/callbr-intrinsics.ll
@@ -0,0 +1,101 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx90a < %s | FileCheck %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx90a < %s | FileCheck --check-prefix=GISEL %s
+
+define void @test_kill(ptr %src, ptr %dst, i1 %c) {
+; CHECK-LABEL: test_kill:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v0, v[0:1]
+; CHECK-NEXT:    v_and_b32_e32 v1, 1, v4
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; CHECK-NEXT:    s_mov_b64 s[4:5], exec
+; CHECK-NEXT:    s_andn2_b64 s[6:7], exec, vcc
+; CHECK-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; CHECK-NEXT:    s_cbranch_scc0 .LBB0_2
+; CHECK-NEXT:  ; %bb.1:
+; CHECK-NEXT:    s_and_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_store_dword v[2:3], v0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    s_mov_b64 exec, 0
+; CHECK-NEXT:    s_endpgm
+;
+; GISEL-LABEL: test_kill:
+; GISEL:       ; %bb.0:
+; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    flat_load_dword v0, v[0:1]
+; GISEL-NEXT:    v_and_b32_e32 v1, 1, v4
+; GISEL-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
+; GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GISEL-NEXT:    s_andn2_b64 s[6:7], exec, vcc
+; GISEL-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; GISEL-NEXT:    s_cbranch_scc0 .LBB0_2
+; GISEL-NEXT:  ; %bb.1:
+; GISEL-NEXT:    s_and_b64 exec, exec, s[4:5]
+; GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    flat_store_dword v[2:3], v0
+; GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
+; GISEL-NEXT:  .LBB0_2:
+; GISEL-NEXT:    s_mov_b64 exec, 0
+; GISEL-NEXT:    s_endpgm
+  %a = load i32, ptr %src, align 4
+  callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill]
+kill:
+  unreachable
+cont:
+  store i32 %a, ptr %dst, align 4
+  ret void
+}
+
+define void @test_kill_block_order(ptr %src, ptr %dst, i1 %c) {
+; CHECK-LABEL: test_kill_block_order:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v0, v[0:1]
+; CHECK-NEXT:    v_and_b32_e32 v1, 1, v4
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; CHECK-NEXT:    s_mov_b64 s[4:5], exec
+; CHECK-NEXT:    s_andn2_b64 s[6:7], exec, vcc
+; CHECK-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; CHECK-NEXT:    s_cbranch_scc0 .LBB1_2
+; CHECK-NEXT:  ; %bb.1:
+; CHECK-NEXT:    s_and_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_store_dword v[2:3], v0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+; CHECK-NEXT:  .LBB1_2:
+; CHECK-NEXT:    s_mov_b64 exec, 0
+; CHECK-NEXT:    s_endpgm
+;
+; GISEL-LABEL: test_kill_block_order:
+; GISEL:       ; %bb.0:
+; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    flat_load_dword v0, v[0:1]
+; GISEL-NEXT:    v_and_b32_e32 v1, 1, v4
+; GISEL-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
+; GISEL-NEXT:    s_mov_b64 s[4:5], exec
+; GISEL-NEXT:    s_andn2_b64 s[6:7], exec, vcc
+; GISEL-NEXT:    s_andn2_b64 s[4:5], s[4:5], s[6:7]
+; GISEL-NEXT:    s_cbranch_scc0 .LBB1_2
+; GISEL-NEXT:  ; %bb.1:
+; GISEL-NEXT:    s_and_b64 exec, exec, s[4:5]
+; GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    flat_store_dword v[2:3], v0
+; GISEL-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
+; GISEL-NEXT:  .LBB1_2:
+; GISEL-NEXT:    s_mov_b64 exec, 0
+; GISEL-NEXT:    s_endpgm
+  %a = load i32, ptr %src, align 4
+  callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill]
+cont:
+  store i32 %a, ptr %dst, align 4
+  ret void
+kill:
+  unreachable
+}
diff --git a/llvm/test/Verifier/callbr-intrinsic.ll b/llvm/test/Verifier/callbr-intrinsic.ll
new file mode 100644
index 0000000000000..60f62f07811c5
--- /dev/null
+++ b/llvm/test/Verifier/callbr-intrinsic.ll
@@ -0,0 +1,57 @@
+; RUN: not opt -S %s -passes=verify 2>&1 | FileCheck %s
+
+declare void @llvm.amdgcn.kill(i1)
+
+; CHECK: Callbr amdgcn_kill only supports one indirect dest
+define void @test_callbr_intrinsic_indirect0(i1 %c) {
+  callbr void @llvm.amdgcn.kill(i1 %c) to label %cont []
+kill:
+  unreachable
+cont:
+  ret void
+}
+
+; CHECK-NEXT: Callbr amdgcn_kill only supports one indirect dest
+define void @test_callbr_intrinsic_indirect2(i1 %c) {
+  callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill1, label %kill2]
+kill1:
+  unreachable
+kill2:
+  unreachable
+cont:
+  ret void
+}
+
+; CHECK-NEXT: Callbr amdgcn_kill indirect dest needs to be unreachable
+define void @test_callbr_intrinsic_no_unreachable(i1 %c) {
+  callbr void @llvm.amdgcn.kill(i1 %c) to label %cont [label %kill]
+kill:
+  ret void
+cont:
+  ret void
+}
+
+; CHECK-NEXT: Callbr currently only supports asm-goto and selected intrinsics
+declare i32 @llvm.amdgcn.workitem.id.x()
+define void @test_callbr_intrinsic_unsupported() {
+  callbr i32 @llvm.amdgcn.workitem.id.x() to label %cont []
+cont:
+  ret void
+}
+
+; CHECK-NEXT: Callbr: indirect function / invalid signature
+define void @test_callbr_intrinsic_wrong_signature(ptr %ptr) {
+  %func = load ptr, ptr %ptr, align 8
+  callbr void %func() to label %cont []
+cont:
+  ret void
+}
+
+; CHECK-NEXT: Callbr for intrinsics currently doesn't support operand bundles
+define void @test_callbr_intrinsic_no_operand_bundles(i1 %c) {
+  callbr void @llvm.amdgcn.kill(i1 %c) [ "foo"(i1 %c) ] to label %cont [label %kill]
+kill:
+  unreachable
+cont:
+  ret void
+}
