Author: Simon Pilgrim
Date: 2020-11-26T12:06:44Z
New Revision: 2da8fa4ebf1077b35c1bc618616ec902e737c11b
URL: https://github.com/llvm/llvm-project/commit/2da8fa4ebf1077b35c1bc618616ec902e737c11b
DIFF: https://github.com/llvm/llvm-project/commit/2da8fa4ebf1077b35c1bc618616ec902e737c11b.diff

LOG: [X86] Extend neg-abs test coverage

Add 32-bit tests and test i8/i16/i32/i64/i128 like we do for abs.ll

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/neg-abs.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/neg-abs.ll b/llvm/test/CodeGen/X86/neg-abs.ll
index 099aa8bdf792..10896bb564c8 100644
--- a/llvm/test/CodeGen/X86/neg-abs.ll
+++ b/llvm/test/CodeGen/X86/neg-abs.ll
@@ -1,18 +1,156 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs \
-; RUN:   -mtriple=x86_64-unknown-unknown < %s | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
 
-declare i64 @llvm.abs.i64(i64, i1 immarg)
+declare i8 @llvm.abs.i8(i8, i1)
+declare i16 @llvm.abs.i16(i16, i1)
+declare i24 @llvm.abs.i24(i24, i1)
+declare i32 @llvm.abs.i32(i32, i1)
+declare i64 @llvm.abs.i64(i64, i1)
+declare i128 @llvm.abs.i128(i128, i1)
 
-define i64@neg_abs(i64 %x) {
-; CHECK-LABEL: neg_abs:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movq %rdi, %rax
-; CHECK-NEXT:    negq %rax
-; CHECK-NEXT:    cmovlq %rdi, %rax
-; CHECK-NEXT:    negq %rax
-; CHECK-NEXT:    retq
+define i8 @neg_abs_i8(i8 %x) nounwind {
+; X86-LABEL: neg_abs_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    sarb $7, %al
+; X86-NEXT:    xorb %al, %cl
+; X86-NEXT:    subb %cl, %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: neg_abs_i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    sarb $7, %al
+; X64-NEXT:    xorb %al, %dil
+; X64-NEXT:    subb %dil, %al
+; X64-NEXT:    retq
+  %abs = tail call i8 @llvm.abs.i8(i8 %x, i1 true)
+  %neg = sub nsw i8 0, %abs
+  ret i8 %neg
+}
+
+define i16 @neg_abs_i16(i16 %x) nounwind {
+; X86-LABEL: neg_abs_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movswl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    sarl $15, %eax
+; X86-NEXT:    xorl %eax, %ecx
+; X86-NEXT:    subl %ecx, %eax
+; X86-NEXT:    # kill: def $ax killed $ax killed $eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: neg_abs_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    negw %ax
+; X64-NEXT:    cmovlw %di, %ax
+; X64-NEXT:    negl %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+  %abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true)
+  %neg = sub nsw i16 0, %abs
+  ret i16 %neg
+}
+
+define i32 @neg_abs_i32(i32 %x) nounwind {
+; X86-LABEL: neg_abs_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    sarl $31, %eax
+; X86-NEXT:    xorl %eax, %ecx
+; X86-NEXT:    subl %ecx, %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: neg_abs_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    negl %eax
+; X64-NEXT:    cmovll %edi, %eax
+; X64-NEXT:    negl %eax
+; X64-NEXT:    retq
+  %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
+  %neg = sub nsw i32 0, %abs
+  ret i32 %neg
+}
+
+define i64 @neg_abs_i64(i64 %x) nounwind {
+; X86-LABEL: neg_abs_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    sarl $31, %edx
+; X86-NEXT:    xorl %edx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    xorl %edx, %esi
+; X86-NEXT:    movl %edx, %eax
+; X86-NEXT:    subl %esi, %eax
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    popl %esi
+; X86-NEXT:    retl
+;
+; X64-LABEL: neg_abs_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    negq %rax
+; X64-NEXT:    cmovlq %rdi, %rax
+; X64-NEXT:    negq %rax
+; X64-NEXT:    retq
   %abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
   %neg = sub nsw i64 0, %abs
   ret i64 %neg
 }
+
+define i128 @neg_abs_i128(i128 %x) nounwind {
+; X86-LABEL: neg_abs_i128:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, %edx
+; X86-NEXT:    sarl $31, %edx
+; X86-NEXT:    xorl %edx, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    xorl %edx, %esi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    xorl %edx, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    xorl %edx, %ebx
+; X86-NEXT:    movl %edx, %ebp
+; X86-NEXT:    subl %ebx, %ebp
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:    sbbl %edi, %ebx
+; X86-NEXT:    movl %edx, %edi
+; X86-NEXT:    sbbl %esi, %edi
+; X86-NEXT:    sbbl %ecx, %edx
+; X86-NEXT:    movl %ebp, (%eax)
+; X86-NEXT:    movl %ebx, 4(%eax)
+; X86-NEXT:    movl %edi, 8(%eax)
+; X86-NEXT:    movl %edx, 12(%eax)
+; X86-NEXT:    popl %esi
+; X86-NEXT:    popl %edi
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl $4
+;
+; X64-LABEL: neg_abs_i128:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rdx
+; X64-NEXT:    sarq $63, %rdx
+; X64-NEXT:    xorq %rdx, %rsi
+; X64-NEXT:    xorq %rdx, %rdi
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    subq %rdi, %rax
+; X64-NEXT:    sbbq %rsi, %rdx
+; X64-NEXT:    retq
+  %abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
+  %neg = sub nsw i128 0, %abs
+  ret i128 %neg
+}
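For context, every function in the updated test exercises the same neg(abs(x)) IR
pattern; a minimal standalone sketch of the i32 case, mirroring the code in the diff
above (the i1 true argument asks for poison rather than a defined result when the
input is INT_MIN):

  declare i32 @llvm.abs.i32(i32, i1)

  define i32 @neg_abs_i32(i32 %x) nounwind {
    %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true) ; poison if %x == INT_MIN
    %neg = sub nsw i32 0, %abs                          ; negate the absolute value
    ret i32 %neg
  }

Assuming a built llc, running this snippet through
llc -verify-machineinstrs -mtriple=x86_64-unknown-unknown should produce the
negl/cmovll/negl sequence checked under the X64 prefix above.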