hkmatsumoto updated this revision to Diff 418869.
hkmatsumoto added a comment.
Create ctpop(X) > 1 instead of ctpop(X) >= 2.
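For reference, a rough before/after sketch of the fold as it now appears in the updated CHECK lines of @isnot_pow2nor0_ctpop below (the IR and value names are taken from that test, nothing new):

  ; input pattern (test body)
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %cmp = icmp ne i32 %t0, 1
  %notzero = icmp ne i32 %x, 0
  %r = and i1 %notzero, %cmp

  ; result with this revision: the strict form ctpop(X) > 1,
  ; rather than the equivalent ctpop(X) >= 2
  %t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
  %r = icmp ugt i32 %t0, 1

The or-of-icmps direction is handled the same way, folding to icmp ult of the ctpop against 2, as shown in the is_pow2or0_ctpop tests.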
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D122077/new/
https://reviews.llvm.org/D122077
Files:
llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
llvm/test/Transforms/InstCombine/ispow2.ll
Index: llvm/test/Transforms/InstCombine/ispow2.ll
===================================================================
--- llvm/test/Transforms/InstCombine/ispow2.ll
+++ llvm/test/Transforms/InstCombine/ispow2.ll
@@ -740,10 +740,8 @@
define i1 @is_pow2or0_ctpop(i32 %x) {
; CHECK-LABEL: @is_pow2or0_ctpop(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[T0]], 1
-; CHECK-NEXT: [[ISZERO:%.*]] = icmp eq i32 [[X]], 0
-; CHECK-NEXT: [[R:%.*]] = or i1 [[ISZERO]], [[CMP]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[T0]], 2
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
%cmp = icmp eq i32 %t0, 1
@@ -755,10 +753,8 @@
define i1 @is_pow2or0_ctpop_swap_cmp(i32 %x) {
; CHECK-LABEL: @is_pow2or0_ctpop_swap_cmp(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[T0]], 1
-; CHECK-NEXT: [[ISZERO:%.*]] = icmp eq i32 [[X]], 0
-; CHECK-NEXT: [[R:%.*]] = or i1 [[CMP]], [[ISZERO]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[T0]], 2
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
%cmp = icmp eq i32 %t0, 1
@@ -770,10 +766,8 @@
define i1 @is_pow2or0_ctpop_logical(i32 %x) {
; CHECK-LABEL: @is_pow2or0_ctpop_logical(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[T0]], 1
-; CHECK-NEXT: [[ISZERO:%.*]] = icmp eq i32 [[X]], 0
-; CHECK-NEXT: [[R:%.*]] = or i1 [[ISZERO]], [[CMP]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[T0]], 2
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
%cmp = icmp eq i32 %t0, 1
@@ -785,10 +779,8 @@
define <2 x i1> @is_pow2or0_ctpop_commute_vec(<2 x i8> %x) {
; CHECK-LABEL: @is_pow2or0_ctpop_commute_vec(
; CHECK-NEXT: [[T0:%.*]] = tail call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i8> [[T0]], <i8 1, i8 1>
-; CHECK-NEXT: [[ISZERO:%.*]] = icmp eq <2 x i8> [[X]], zeroinitializer
-; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[ISZERO]], [[CMP]]
-; CHECK-NEXT: ret <2 x i1> [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <2 x i8> [[T0]], <i8 2, i8 2>
+; CHECK-NEXT: ret <2 x i1> [[TMP1]]
;
%t0 = tail call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> %x)
%cmp = icmp eq <2 x i8> %t0, <i8 1, i8 1>
@@ -807,8 +799,8 @@
; CHECK-NEXT: call void @use_i1(i1 [[CMP]])
; CHECK-NEXT: [[ISZERO:%.*]] = icmp eq i32 [[X]], 0
; CHECK-NEXT: call void @use_i1(i1 [[ISZERO]])
-; CHECK-NEXT: [[R:%.*]] = or i1 [[ISZERO]], [[CMP]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[T0]], 2
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
call void @use(i32 %t0)
@@ -828,8 +820,8 @@
; CHECK-NEXT: call void @use_i1(i1 [[CMP]])
; CHECK-NEXT: [[ISZERO:%.*]] = icmp eq i32 [[X]], 0
; CHECK-NEXT: call void @use_i1(i1 [[ISZERO]])
-; CHECK-NEXT: [[R:%.*]] = or i1 [[ISZERO]], [[CMP]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[T0]], 2
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
call void @use(i32 %t0)
@@ -940,10 +932,8 @@
define i1 @isnot_pow2nor0_ctpop(i32 %x) {
; CHECK-LABEL: @isnot_pow2nor0_ctpop(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[T0]], 1
-; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne i32 [[X]], 0
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOTZERO]], [[CMP]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[T0]], 1
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
%cmp = icmp ne i32 %t0, 1
@@ -955,10 +945,8 @@
define i1 @isnot_pow2nor0_ctpop_swap_cmp(i32 %x) {
; CHECK-LABEL: @isnot_pow2nor0_ctpop_swap_cmp(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[T0]], 1
-; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne i32 [[X]], 0
-; CHECK-NEXT: [[R:%.*]] = and i1 [[CMP]], [[NOTZERO]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[T0]], 1
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
%cmp = icmp ne i32 %t0, 1
@@ -970,10 +958,8 @@
define i1 @isnot_pow2nor0_ctpop_logical(i32 %x) {
; CHECK-LABEL: @isnot_pow2nor0_ctpop_logical(
; CHECK-NEXT: [[T0:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[T0]], 1
-; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne i32 [[X]], 0
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOTZERO]], [[CMP]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[T0]], 1
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
%cmp = icmp ne i32 %t0, 1
@@ -985,10 +971,8 @@
define <2 x i1> @isnot_pow2nor0_ctpop_commute_vec(<2 x i8> %x) {
; CHECK-LABEL: @isnot_pow2nor0_ctpop_commute_vec(
; CHECK-NEXT: [[T0:%.*]] = tail call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[T0]], <i8 1, i8 1>
-; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne <2 x i8> [[X]], zeroinitializer
-; CHECK-NEXT: [[R:%.*]] = and <2 x i1> [[NOTZERO]], [[CMP]]
-; CHECK-NEXT: ret <2 x i1> [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt <2 x i8> [[T0]], <i8 1, i8 1>
+; CHECK-NEXT: ret <2 x i1> [[TMP1]]
;
%t0 = tail call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> %x)
%cmp = icmp ne <2 x i8> %t0, <i8 1, i8 1>
@@ -1007,8 +991,8 @@
; CHECK-NEXT: call void @use_i1(i1 [[CMP]])
; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: call void @use_i1(i1 [[NOTZERO]])
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOTZERO]], [[CMP]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[T0]], 1
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
call void @use(i32 %t0)
@@ -1028,8 +1012,8 @@
; CHECK-NEXT: call void @use_i1(i1 [[CMP]])
; CHECK-NEXT: [[NOTZERO:%.*]] = icmp ne i32 [[X]], 0
; CHECK-NEXT: call void @use_i1(i1 [[NOTZERO]])
-; CHECK-NEXT: [[R:%.*]] = and i1 [[NOTZERO]], [[CMP]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[T0]], 1
+; CHECK-NEXT: ret i1 [[TMP1]]
;
%t0 = tail call i32 @llvm.ctpop.i32(i32 %x)
call void @use(i32 %t0)
Index: llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -911,6 +911,26 @@
CxtI.getName() + ".simplified");
}
+/// Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and
+/// fold (icmp ne ctpop(X) 1) & (icmp ne X 0) into (icmp ugt ctpop(X) 1).
+static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
+ InstCombiner::BuilderTy &Builder) {
+ CmpInst::Predicate Pred0, Pred1;
+ Value *X;
+ if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
+ m_SpecificInt(1))) ||
+ !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())))
+ return nullptr;
+
+ Value *CtPop = Cmp0->getOperand(0);
+ if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE)
+ return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
+ if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ)
+ return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));
+
+ return nullptr;
+}
+
/// Reduce a pair of compares that check if a value has exactly 1 bit set.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
InstCombiner::BuilderTy &Builder) {
@@ -1237,6 +1257,11 @@
if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, And, Builder, Q))
return V;
+ if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, /*IsAnd=*/true, Builder))
+ return V;
+ if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, /*IsAnd=*/true, Builder))
+ return V;
+
// E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
return V;
@@ -2595,6 +2620,11 @@
if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, Or, Builder, Q))
return V;
+ if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, /*IsAnd=*/false, Builder))
+ return V;
+ if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, /*IsAnd=*/false, Builder))
+ return V;
+
// E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true))
return V;