Modified: trunk/Source/_javascript_Core/ChangeLog (279041 => 279042)
--- trunk/Source/_javascript_Core/ChangeLog 2021-06-18 17:17:45 UTC (rev 279041)
+++ trunk/Source/_javascript_Core/ChangeLog 2021-06-18 18:25:50 UTC (rev 279042)
@@ -1,3 +1,57 @@
+2021-06-18 Yijia Huang <[email protected]>
+
+ Add a new pattern to B3ReduceStrength based on Bug 226984
+ https://bugs.webkit.org/show_bug.cgi?id=227138
+
+ Reviewed by Filip Pizlo.
+
+ In the previous patch bug 226984, a new pattern could be introduced to
+ B3ReduceStrength.cpp for further optimization, which is that:
+
+ dest = (src >> shiftAmount) & mask
+
+ is equivalent to
+
+ src >> shiftAmount
+
+ under these constraints:
+
+ 1. shiftAmount >= 0
+ 2. mask is a contiguous run of ones starting from the
+ least significant bit, e.g. 0b00001111.
+ 3. shiftAmount + bitCount(mask) == maxBitWidth
+
+ For instance (32-bit):
+
+ (src >> 12) & 0x000fffff == src >> 12
+
+ This reduction is more beneficial than UBFX in this case.
+
+ // B3 IR
+ Int @0 = ArgumentReg(%0)
+ Int @1 = 12
+ Int @2 = ZShr(@0, @1)
+ Int @3 = 0x000fffff
+ Int @4 = BitAnd(@2, @3)
+ Void @5 = Return(@4, Terminal)
+
+ w/o the pattern:
+ // Old optimized AIR
+ Ubfx %0, $12, $20, %0, @4
+ Ret %0, @5
+
+ w/ the pattern:
+ // New optimized AIR
+ Urshift %0, $12, %0, @2
+ Ret32 %0, @5
+
+ * b3/B3ReduceStrength.cpp:
+ * b3/testb3.h:
+ * b3/testb3_2.cpp:
+ (testBitAndZeroShiftRightArgImmMask32):
+ (testBitAndZeroShiftRightArgImmMask64):
+ (addBitTests):
+
2021-06-18 Robin Morisset <[email protected]>
[DFG] Untyped branches should eliminate checks based on results from the AbstractInterpreter
Modified: trunk/Source/_javascript_Core/b3/B3ReduceStrength.cpp (279041 => 279042)
--- trunk/Source/_javascript_Core/b3/B3ReduceStrength.cpp 2021-06-18 17:17:45 UTC (rev 279041)
+++ trunk/Source/_javascript_Core/b3/B3ReduceStrength.cpp 2021-06-18 18:25:50 UTC (rev 279042)
@@ -42,6 +42,7 @@
#include "B3ValueKeyInlines.h"
#include "B3ValueInlines.h"
#include <wtf/HashMap.h>
+#include <wtf/StdLibExtras.h>
namespace JSC { namespace B3 {
@@ -1035,6 +1036,23 @@
break;
}
+ // Turn this: BitAnd(ZShr(value, shiftAmount), mask)
+ // - shiftAmount >= 0 and mask is contiguous ones from LSB, example 0b01111111
+ // - shiftAmount + bitCount(mask) == maxBitWidth
+ // Into this: ZShr(value, shiftAmount)
+ if (m_value->child(0)->opcode() == ZShr
+ && m_value->child(0)->child(1)->hasInt()
+ && m_value->child(1)->hasInt()) {
+ int64_t shiftAmount = m_value->child(0)->child(1)->asInt();
+ uint64_t mask = m_value->child(1)->asInt();
+ bool isValid = mask && !(mask & (mask + 1));
+ uint64_t maxBitWidth = m_value->child(0)->child(0)->type() == Int64 ? 64 : 32;
+ if (shiftAmount >= 0 && isValid && static_cast<uint64_t>(shiftAmount + WTF::bitCount(mask)) == maxBitWidth) {
+ replaceWithIdentity(m_value->child(0));
+ break;
+ }
+ }
+
// Turn this: BitAnd(value, all-ones)
// Into this: value.
if ((m_value->type() == Int64 && m_value->child(1)->isInt64(std::numeric_limits<uint64_t>::max()))
Modified: trunk/Source/_javascript_Core/b3/testb3_2.cpp (279041 => 279042)
--- trunk/Source/_javascript_Core/b3/testb3_2.cpp 2021-06-18 17:17:45 UTC (rev 279041)
+++ trunk/Source/_javascript_Core/b3/testb3_2.cpp 2021-06-18 18:25:50 UTC (rev 279042)
@@ -2688,6 +2688,78 @@
}
}
+void testBitAndZeroShiftRightArgImmMask32()
+{
+ // Turn this: (tmp >> imm) & mask
+ // Into this: tmp >> imm
+ uint32_t tmp = 0xffffffff;
+ Vector<uint32_t> imms = { 4, 28 };
+ Vector<uint32_t> masks = { 0x0fffffff, 0xf };
+
+ auto test = [&] (uint32_t imm, uint32_t mask) {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* tmpValue = root->appendNew<Value>(
+ proc, Trunc, Origin(),
+ root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+ Value* immValue = root->appendNew<Const32Value>(proc, Origin(), imm);
+ Value* leftValue = root->appendNew<Value>(proc, ZShr, Origin(), tmpValue, immValue);
+ Value* rightValue = root->appendNew<Const32Value>(proc, Origin(), mask);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, BitAnd, Origin(), leftValue, rightValue));
+
+ auto code = compileProc(proc);
+ if (isARM64()) {
+ checkUsesInstruction(*code, "lsr");
+ checkDoesNotUseInstruction(*code, "and");
+ checkDoesNotUseInstruction(*code, "ubfx");
+ }
+ uint32_t lhs = invoke<uint32_t>(*code, tmp);
+ uint32_t rhs = tmp >> imm;
+ CHECK(lhs == rhs);
+ };
+
+ for (size_t i = 0; i < imms.size(); ++i)
+ test(imms.at(i), masks.at(i));
+}
+
+void testBitAndZeroShiftRightArgImmMask64()
+{
+ // Turn this: (tmp >> imm) & mask
+ // Into this: tmp >> imm
+ uint64_t tmp = 0xffffffffffffffff;
+ Vector<uint64_t> imms = { 4, 60 };
+ Vector<uint64_t> masks = { 0x0fffffffffffffff, 0xf };
+
+ auto test = [&] (uint64_t imm, uint64_t mask) {
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+
+ Value* tmpValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* immValue = root->appendNew<Const32Value>(proc, Origin(), imm);
+ Value* leftValue = root->appendNew<Value>(proc, ZShr, Origin(), tmpValue, immValue);
+ Value* rightValue = root->appendNew<Const64Value>(proc, Origin(), mask);
+ root->appendNewControlValue(
+ proc, Return, Origin(),
+ root->appendNew<Value>(proc, BitAnd, Origin(), leftValue, rightValue));
+
+ auto code = compileProc(proc);
+ if (isARM64()) {
+ checkUsesInstruction(*code, "lsr");
+ checkDoesNotUseInstruction(*code, "and");
+ checkDoesNotUseInstruction(*code, "ubfx");
+ }
+ uint64_t lhs = invoke<uint64_t>(*code, tmp);
+ uint64_t rhs = tmp >> imm;
+ CHECK(lhs == rhs);
+ };
+
+ for (size_t i = 0; i < imms.size(); ++i)
+ test(imms.at(i), masks.at(i));
+}
+
static void testBitAndArgs(int64_t a, int64_t b)
{
Procedure proc;
@@ -3506,7 +3578,8 @@
RUN(testUbfx32PatternMatch());
RUN(testUbfx64());
RUN(testUbfx64PatternMatch());
-
+ RUN(testBitAndZeroShiftRightArgImmMask32());
+ RUN(testBitAndZeroShiftRightArgImmMask64());
RUN(testBitAndArgs(43, 43));
RUN(testBitAndArgs(43, 0));
RUN(testBitAndArgs(10, 3));