Author: Nick Desaulniers
Date: 2022-03-01T14:21:33-08:00
New Revision: 41d4f89e38b718b3a291fb24ff0e2b654ee1ff79

URL: https://github.com/llvm/llvm-project/commit/41d4f89e38b718b3a291fb24ff0e2b654ee1ff79
DIFF: https://github.com/llvm/llvm-project/commit/41d4f89e38b718b3a291fb24ff0e2b654ee1ff79.diff

LOG: [X86ISelLowering] permit BlockAddressSDNode "i" constraints for PIC

When building 32b x86 code as PIC, the existing handling of "i"
constraints is conservative since generally we have to go through the
GOT to find references to functions.

But generally, BlockAddresses from C code refer to the Function in the
current TU. Permit BlockAddresses to be used with the "i" constraint
for those cases.

I regressed this in
commit 4edb9983cb8c ("[SelectionDAG] treat X constrained labels as i for asm")

Fixes: https://github.com/llvm/llvm-project/issues/53868

Reviewed By: efriedma, MaskRay

Differential Revision: https://reviews.llvm.org/D119905

(cherry picked from commit 027c16bef4b727095eea00bbef9266f1f4a78c27)

Added:


Modified:
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/inline-asm-pic.ll

Removed:


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a1c387574ebb..77c2e7d16990 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -54599,8 +54599,9 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
 
     // In any sort of PIC mode addresses need to be computed at runtime by
     // adding in a register or some sort of table lookup. These can't
-    // be used as immediates.
-    if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
+    // be used as immediates. BlockAddresses are fine though.
+    if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
+        !isa<BlockAddressSDNode>(Op))
       return;
 
     // If we are in non-pic codegen mode, we allow the address of a global (with

diff --git a/llvm/test/CodeGen/X86/inline-asm-pic.ll b/llvm/test/CodeGen/X86/inline-asm-pic.ll
index 7aeb1bfbdf41..503f8db91a29 100644
--- a/llvm/test/CodeGen/X86/inline-asm-pic.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-pic.ll
@@ -18,3 +18,41 @@ entry:
   tail call void asm "mov $1,%gs:$0", "=*m,ri,~{dirflag},~{fpsr},~{flags}"(i8** elementtype(i8*) inttoptr (i32 152 to i8**), i8* bitcast (i8** @main_q to i8*)) nounwind
   ret void
 }
+
+; The intent of this test is to ensure that we handle blockaddress' correctly
+; with "i" constraints for -m32 -fPIC.
+
+define void @x() {
+; CHECK-LABEL: x:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    ## InlineAsm Start
+; CHECK-NEXT:    ## Ltmp0
+; CHECK-EMPTY:
+; CHECK-NEXT:    ## InlineAsm End
+; CHECK-NEXT:  ## %bb.2: ## %return
+; CHECK-NEXT:    retl
+; CHECK-NEXT:  Ltmp0: ## Block address taken
+; CHECK-NEXT:  LBB1_1: ## %overflow
+; CHECK-NEXT:    retl
+  callbr void asm "# ${0:l}\0A", "i"(i8* blockaddress(@x, %overflow))
+          to label %return [label %overflow]
+
+overflow:
+  br label %return
+
+return:
+  ret void
+}
+
+; Test unusual case of blockaddress from @x in @y's asm.
+define void @y() {
+; CHECK-LABEL: y:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    ## InlineAsm Start
+; CHECK-NEXT:    ## Ltmp0
+; CHECK-EMPTY:
+; CHECK-NEXT:    ## InlineAsm End
+; CHECK-NEXT:    retl
+  call void asm "# ${0:l}\0A", "i"(i8* blockaddress(@x, %overflow))
+  ret void
+}
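For context, here is a minimal C-level sketch (not taken from the commit or the
linked issue; the function name, label name, and compile flags are illustrative)
of the kind of asm goto that front-ends lower to the callbr/blockaddress pattern
exercised by the new test. Since commit 4edb9983cb8c the label operand reaches
the X86 backend under the "i" constraint, and with this patch it is accepted
even when targeting 32-bit PIC:

  /* GNU C extension: asm goto with a label operand.  Built with something
     like `clang -m32 -fPIC -O2 -S example.c` (flags are an assumption,
     chosen to match the PIC configuration this patch is about). */
  int f(int x) {
    /* %l0 names the first (and only) goto label; clang models it as a
       blockaddress of the enclosing function and hands it to the backend
       as an "i"-constrained operand. */
    asm goto("# branch target: %l0" : /* no outputs */ : /* no inputs */
             : /* no clobbers */ : skip);
    x += 1;
  skip:
    return x;
  }

At the IR level this corresponds closely to function @x in the test above: a
callbr whose blockaddress argument names a block of the current function, which
is exactly the case the relaxed check in LowerAsmOperandForConstraint now lets
through instead of bailing out under isPICStyleGOT()/isPICStyleStubPIC().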
