================
@@ -221,12 +434,119 @@ void UpdateSSA(DominatorTree &DT, CallBrInst *CBR,
CallInst *Intrinsic,
}
}
-bool InlineAsmPrepare::runOnFunction(Function &F) {
+static bool splitCriticalEdges(CallBrInst *CBR, DominatorTree *DT) {
+ bool Changed = false;
+
+ CriticalEdgeSplittingOptions Options(DT);
+ Options.setMergeIdenticalEdges();
+
+ // The indirect destination might be duplicated between another parameter...
+ //
+ // %0 = callbr ... [label %x, label %x]
+ //
+ // ...hence MergeIdenticalEdges and AllowIdenticalEdges, but we don't need
+ // to split the default destination if it's duplicated between an indirect
+ // destination...
+ //
+ // %1 = callbr ... to label %x [label %x]
+ //
+ // ...hence starting at 1 and checking against successor 0 (aka the default
+ // destination).
+ for (unsigned I = 1, E = CBR->getNumSuccessors(); I != E; ++I)
+ if (CBR->getSuccessor(I) == CBR->getSuccessor(0) ||
+ isCriticalEdge(CBR, I, /*AllowIdenticalEdges*/ true))
+ if (SplitKnownCriticalEdge(CBR, I, Options))
+ Changed = true;
+
+ return Changed;
+}
+
+/// Create a separate SSA definition in each indirect target (via
+/// llvm.callbr.landingpad). This may require splitting critical edges so we
+/// have a location to place the intrinsic. Then remap users of the original
+/// callbr output SSA value to instead point to the appropriate
+/// llvm.callbr.landingpad value.
+static bool insertIntrinsicCalls(CallBrInst *CBR, DominatorTree &DT) {
bool Changed = false;
- SmallVector<CallBrInst *, 2> CBRs = FindCallBrs(F);
+ SmallPtrSet<const BasicBlock *, 4> Visited;
+ IRBuilder<> Builder(CBR->getContext());
- if (CBRs.empty())
- return Changed;
+ if (!CBR->getNumIndirectDests())
+ return false;
+
+ SSAUpdater SSAUpdate;
+ SSAUpdate.Initialize(CBR->getType(), CBR->getName());
+ SSAUpdate.AddAvailableValue(CBR->getParent(), CBR);
+ SSAUpdate.AddAvailableValue(CBR->getDefaultDest(), CBR);
+
+ for (BasicBlock *IndDest : CBR->getIndirectDests()) {
+ if (!Visited.insert(IndDest).second)
+ continue;
+
+ Builder.SetInsertPoint(&*IndDest->begin());
+ CallInst *Intrinsic = Builder.CreateIntrinsic(
+ CBR->getType(), Intrinsic::callbr_landingpad, {CBR});
+ SSAUpdate.AddAvailableValue(IndDest, Intrinsic);
+ updateSSA(DT, CBR, Intrinsic, SSAUpdate);
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+static bool processCallBrInst(Function &F, CallBrInst *CBR, DominatorTree *DT) {
+ bool Changed = false;
+
+ Changed |= splitCriticalEdges(CBR, DT);
+ Changed |= insertIntrinsicCalls(CBR, *DT);
+
+ return Changed;
+}
+
+static bool runImpl(Function &F, ArrayRef<CallBase *> IAs, DominatorTree *DT) {
+ bool Changed = false;
+
+ for (CallBase *CB : IAs)
+ if (auto *CBR = dyn_cast<CallBrInst>(CB))
+ Changed |= processCallBrInst(F, CBR, DT);
+ else
+ Changed |= processInlineAsm(F, CB);
+
+ return Changed;
+}
+
+/// Find all inline assembly calls that need preparation. This always collects
+/// CallBrInsts (which need SSA fixups), and at -O0 also collects regular
+/// inline asm calls (which need "rm" to "m" constraint conversion for the fast
+/// register allocator).
+static SmallVector<CallBase *, 4>
+findInlineAsmCandidates(Function &F, const TargetMachine *TM) {
+ bool isOptLevelNone = TM->getOptLevel() == CodeGenOptLevel::None;
+ SmallVector<CallBase *, 4> InlineAsms;
+
+ for (BasicBlock &BB : F) {
+ if (auto *CBR = dyn_cast<CallBrInst>(BB.getTerminator())) {
+ if (!CBR->getType()->isVoidTy() && !CBR->use_empty())
+ InlineAsms.push_back(CBR);
+ continue;
+ }
+
+ if (isOptLevelNone)
+ // Only inline assembly compiled at '-O0' (i.e. uses the fast register
+ // allocator) needs to be processed.
+ for (Instruction &I : BB)
+ if (CallBase *CB = dyn_cast<CallBase>(&I); CB && CB->isInlineAsm())
+ InlineAsms.push_back(CB);
----------------
bwendling wrote:
Yes. Done.
https://github.com/llvm/llvm-project/pull/181973
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits