Author: Matt Arsenault
Date: 2020-12-22T20:56:24-05:00
New Revision: 29ed846d671117b9a635767dac43cb19fb5ce11f
URL: https://github.com/llvm/llvm-project/commit/29ed846d671117b9a635767dac43cb19fb5ce11f DIFF: https://github.com/llvm/llvm-project/commit/29ed846d671117b9a635767dac43cb19fb5ce11f.diff LOG: AMDGPU: Fix assert when checking for implicit operand legality Added: Modified: llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir Removed: ################################################################################ diff --git a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp index 75a54c0a412e..d6c151d3d2cc 100644 --- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -213,8 +213,12 @@ static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI, if (UseMI == &MI) continue; if (MO.isDef() || UseMI->getParent() != MI.getParent() || - UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END || - !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src)) + UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END) + return false; + + unsigned OpIdx = UseMI->getOperandNo(&MO); + if (OpIdx >= UseMI->getDesc().getNumOperands() || + !TII->isOperandLegal(*UseMI, OpIdx, &Src)) return false; } // Change VGPR to SGPR destination. diff --git a/llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir b/llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir index e96f2839ab02..6c438d80e532 100644 --- a/llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir +++ b/llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir @@ -72,3 +72,19 @@ body: | %1:sreg_32_xm0 = COPY %0 S_ENDPGM 0, implicit %1 ... 
+ +# Make sure there's no assert when looking at the implicit use on S_ENDPGM +# GCN-LABEL: name: s_to_v_copy_implicit_use +# GCN: %0:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %1:sreg_64, 0, 0, 0 :: (load 4, addrspace 4) +# GCN-NEXT: %2:vgpr_32 = COPY %0 +# GCN-NEXT: S_ENDPGM 0, implicit %2 +--- +name: s_to_v_copy_implicit_use +tracksRegLiveness: true +body: | + bb.0: + %0:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM undef %2:sreg_64, 0, 0, 0 :: (load 4, addrspace 4) + %1:vgpr_32 = COPY %0 + S_ENDPGM 0, implicit %1 + +... _______________________________________________ llvm-branch-commits mailing list llvm-branch-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits