http://llvm.org/bugs/show_bug.cgi?id=16486

            Bug ID: 16486
           Summary: Regression: Emits worse sequence for conditional move
                    operation
           Product: libraries
           Version: trunk
          Hardware: PC
                OS: All
            Status: NEW
          Severity: normal
          Priority: P
         Component: Backend: X86
          Assignee: [email protected]
          Reporter: [email protected]
                CC: [email protected]
    Classification: Unclassified

Consider the following LLVM IR:
define i32 @_Z4blahiiii(i32 %a, i32 %b, i32 %c, i32 %d) #0 {
entry:
  %cmp = icmp sgt i32 %a, %b
  %add = add nsw i32 %a, %c
  %add1 = add nsw i32 %b, %d
  %x.0 = select i1 %cmp, i32 %add, i32 %add1
  ret i32 %x.0
}

This compiles to the following x86-64 sequence:
        addl    %edi, %edx
        addl    %esi, %ecx
        cmpl    %esi, %edi
        cmovgl  %edx, %ecx
        movl    %ecx, %eax
        ret

However, if we commute the operands of the two adds:
define i32 @_Z4blahiiii(i32 %a, i32 %b, i32 %c, i32 %d) #0 {
entry:  
  %cmp = icmp sgt i32 %a, %b
  %add = add nsw i32 %c, %a
  %add1 = add nsw i32 %d, %b
  %x.0 = select i1 %cmp, i32 %add, i32 %add1
  ret i32 %x.0
}

we instead get the better sequence below, which is one instruction
shorter: the second add is emitted as an leal directly into %eax, so the
final register-to-register movl of the first sequence is not needed:
        addl    %edi, %edx
        leal    (%rcx,%rsi), %eax
        cmpl    %esi, %edi
        cmovgl  %edx, %eax
        ret

-- 
You are receiving this mail because:
You are on the CC list for the bug.
_______________________________________________
LLVMbugs mailing list
[email protected]
http://lists.cs.uiuc.edu/mailman/listinfo/llvmbugs

Reply via email to