Title: [280111] trunk/Source/JavaScriptCore
Revision
280111
Author
[email protected]
Date
2021-07-20 16:20:32 -0700 (Tue, 20 Jul 2021)

Log Message

Add ARM64 EON opcode and select it in AIR
https://bugs.webkit.org/show_bug.cgi?id=228057

Reviewed by Saam Barati.

EON Rd Rn Rm <shift> #amount

Bitwise Exclusive OR NOT (shifted register) performs a bitwise Exclusive OR NOT
of a register value Rn and an optionally-shifted register value Rm, and writes the
result to the destination register. The instruction selector can utilize this to
lower certain patterns in B3 IR before further Air optimization.

The equivalent pattern of EON Rd Rn Rm is d = n ^ (m ^ -1)

Given B3 IR:
Int @0 = ArgumentReg(%x0)
Int @1 = ArgumentReg(%x1)
Int @2 = -1
Int @3 = BitXor(@1, @2)
Int @4 = BitXor(@0, @3)
Void @5 = Return(@4, Terminal)

// Old optimized AIR
Not   %x1, %x1,      @3
Xor   %x0, %x1, %x0, @4
Ret   %x0,           @5

// New optimized AIR
XorNot %x0, %x1, %x0, @4
Ret    %x0,           @5

The equivalent pattern of EON-with-shift is d = n ^ ((m ShiftType amount) ^ -1)

Given B3 IR:
Int @0 = ArgumentReg(%x0)
Int @1 = ArgumentReg(%x1)
Int @2 = amount
Int @3 = -1
Int @4 = Shl(@1, @2)
Int @5 = BitXor(@4, @3)
Int @6 = BitXor(@0, @5)
Void @7 = Return(@6, Terminal)

// Old optimized AIR
Lshift   %x1, amount, %x1, @4
Not      %x1,    %x1,      @5
Xor      %x0,    %x1, %x0, @6
Ret      %x0,              @7

// New optimized AIR
XorNotLeftShift %x0, %x1, $amount, %x0, @6
Ret             %x0,                @7

* assembler/MacroAssemblerARM64.h:
(JSC::MacroAssemblerARM64::xorNot32):
(JSC::MacroAssemblerARM64::xorNot64):
(JSC::MacroAssemblerARM64::xorNotLeftShift32):
(JSC::MacroAssemblerARM64::xorNotRightShift32):
(JSC::MacroAssemblerARM64::xorNotUnsignedRightShift32):
(JSC::MacroAssemblerARM64::xorNotLeftShift64):
(JSC::MacroAssemblerARM64::xorNotRightShift64):
(JSC::MacroAssemblerARM64::xorNotUnsignedRightShift64):
* assembler/testmasm.cpp:
(JSC::testXorNot32):
(JSC::testXorNot64):
(JSC::testXorNotWithLeftShift32):
(JSC::testXorNotWithRightShift32):
(JSC::testXorNotWithUnsignedRightShift32):
(JSC::testXorNotWithLeftShift64):
(JSC::testXorNotWithRightShift64):
(JSC::testXorNotWithUnsignedRightShift64):
* b3/B3LowerToAir.cpp:
* b3/air/AirOpcode.opcodes:
* b3/testb3.h:
* b3/testb3_2.cpp:
(testXorNot32):
(testXorNot64):
(testXorNotWithLeftShift32):
(testXorNotWithRightShift32):
(testXorNotWithUnsignedRightShift32):
(testXorNotWithLeftShift64):
(testXorNotWithRightShift64):
(testXorNotWithUnsignedRightShift64):
(addBitTests):

Modified Paths

Diff

Modified: trunk/Source/_javascript_Core/ChangeLog (280110 => 280111)


--- trunk/Source/_javascript_Core/ChangeLog	2021-07-20 23:08:27 UTC (rev 280110)
+++ trunk/Source/_javascript_Core/ChangeLog	2021-07-20 23:20:32 UTC (rev 280111)
@@ -1,3 +1,90 @@
+2021-07-20  Yijia Huang  <[email protected]>
+
+        Add ARM64 EON opcode and select it in AIR
+        https://bugs.webkit.org/show_bug.cgi?id=228057
+
+        Reviewed by Saam Barati.
+
+        EON Rd Rn Rm <shift> #amount
+
+        Bitwise Exclusive OR NOT (shifted register) performs a bitwise Exclusive OR NOT 
+        of a register value Rn and an optionally-shifted register value Rm, and writes the 
+        result to the destination register. The instruction selector can utilize this to 
+        lower certain patterns in B3 IR before further Air optimization.
+
+        The equivalent pattern of EON Rd Rn Rm is d = n ^ (m ^ -1)
+
+        Given B3 IR:
+        Int @0 = ArgumentReg(%x0)
+        Int @1 = ArgumentReg(%x1)
+        Int @2 = -1
+        Int @3 = BitXor(@1, @2)
+        Int @4 = BitXor(@0, @3)
+        Void @5 = Return(@4, Terminal)
+
+        // Old optimized AIR
+        Not   %x1, %x1,      @3
+        Xor   %x0, %x1, %x0, @4
+        Ret   %x0,           @5
+
+        // New optimized AIR
+        XorNot %x0, %x1, %x0, @4
+        Ret    %x0,           @5
+
+        The equivalent pattern of EON-with-shift is d = n ^ ((m ShiftType amount) ^ -1)
+
+        Given B3 IR:
+        Int @0 = ArgumentReg(%x0)
+        Int @1 = ArgumentReg(%x1)
+        Int @2 = amount
+        Int @3 = -1
+        Int @4 = Shl(@1, @2)
+        Int @5 = BitXor(@4, @3)
+        Int @6 = BitXor(@0, @5)
+        Void @7 = Return(@6, Terminal)
+
+        // Old optimized AIR
+        Lshift   %x1, amount, %x1, @4
+        Not      %x1,    %x1,      @5
+        Xor      %x0,    %x1, %x0, @6
+        Ret      %x0,              @7
+
+        // New optimized AIR
+        XorNotLeftShift %x0, %x1, $amount, %x0, @6
+        Ret             %x0,                @7
+
+        * assembler/MacroAssemblerARM64.h:
+        (JSC::MacroAssemblerARM64::xorNot32):
+        (JSC::MacroAssemblerARM64::xorNot64):
+        (JSC::MacroAssemblerARM64::xorNotLeftShift32):
+        (JSC::MacroAssemblerARM64::xorNotRightShift32):
+        (JSC::MacroAssemblerARM64::xorNotUnsignedRightShift32):
+        (JSC::MacroAssemblerARM64::xorNotLeftShift64):
+        (JSC::MacroAssemblerARM64::xorNotRightShift64):
+        (JSC::MacroAssemblerARM64::xorNotUnsignedRightShift64):
+        * assembler/testmasm.cpp:
+        (JSC::testXorNot32):
+        (JSC::testXorNot64):
+        (JSC::testXorNotWithLeftShift32):
+        (JSC::testXorNotWithRightShift32):
+        (JSC::testXorNotWithUnsignedRightShift32):
+        (JSC::testXorNotWithLeftShift64):
+        (JSC::testXorNotWithRightShift64):
+        (JSC::testXorNotWithUnsignedRightShift64):
+        * b3/B3LowerToAir.cpp:
+        * b3/air/AirOpcode.opcodes:
+        * b3/testb3.h:
+        * b3/testb3_2.cpp:
+        (testXorNot32):
+        (testXorNot64):
+        (testXorNotWithLeftShift32):
+        (testXorNotWithRightShift32):
+        (testXorNotWithUnsignedRightShift32):
+        (testXorNotWithLeftShift64):
+        (testXorNotWithRightShift64):
+        (testXorNotWithUnsignedRightShift64):
+        (addBitTests):
+
 2021-07-20  Yusuke Suzuki  <[email protected]>
 
         [JSC] invalidParameterInstanceofSourceAppender should care direct call of Symbol.hasInstance

Modified: trunk/Source/_javascript_Core/assembler/MacroAssemblerARM64.h (280110 => 280111)


--- trunk/Source/_javascript_Core/assembler/MacroAssemblerARM64.h	2021-07-20 23:08:27 UTC (rev 280110)
+++ trunk/Source/_javascript_Core/assembler/MacroAssemblerARM64.h	2021-07-20 23:20:32 UTC (rev 280111)
@@ -509,6 +509,46 @@
         m_assembler.orn<64>(dest, src, mask);
     }
 
+    void xorNot32(RegisterID src, RegisterID mask, RegisterID dest)
+    {
+        m_assembler.eon<32>(dest, src, mask);
+    }
+
+    void xorNot64(RegisterID src, RegisterID mask, RegisterID dest)
+    {
+        m_assembler.eon<64>(dest, src, mask);
+    }
+
+    void xorNotLeftShift32(RegisterID n, RegisterID m, TrustedImm32 amount, RegisterID d)
+    {
+        m_assembler.eon<32>(d, n, m, Assembler::LSL, amount.m_value);
+    }
+
+    void xorNotRightShift32(RegisterID n, RegisterID m, TrustedImm32 amount, RegisterID d)
+    {
+        m_assembler.eon<32>(d, n, m, Assembler::ASR, amount.m_value);
+    }
+
+    void xorNotUnsignedRightShift32(RegisterID n, RegisterID m, TrustedImm32 amount, RegisterID d)
+    {
+        m_assembler.eon<32>(d, n, m, Assembler::LSR, amount.m_value);
+    }
+
+    void xorNotLeftShift64(RegisterID n, RegisterID m, TrustedImm32 amount, RegisterID d)
+    {
+        m_assembler.eon<64>(d, n, m, Assembler::LSL, amount.m_value);
+    }
+
+    void xorNotRightShift64(RegisterID n, RegisterID m, TrustedImm32 amount, RegisterID d)
+    {
+        m_assembler.eon<64>(d, n, m, Assembler::ASR, amount.m_value);
+    }
+
+    void xorNotUnsignedRightShift64(RegisterID n, RegisterID m, TrustedImm32 amount, RegisterID d)
+    {
+        m_assembler.eon<64>(d, n, m, Assembler::LSR, amount.m_value);
+    }
+
     void extractInsertBitfieldAtLowEnd32(RegisterID src, TrustedImm32 lsb, TrustedImm32 width, RegisterID dest)
     {
         m_assembler.bfxil<32>(dest, src, lsb.m_value, width.m_value);

Modified: trunk/Source/_javascript_Core/assembler/testmasm.cpp (280110 => 280111)


--- trunk/Source/_javascript_Core/assembler/testmasm.cpp	2021-07-20 23:08:27 UTC (rev 280110)
+++ trunk/Source/_javascript_Core/assembler/testmasm.cpp	2021-07-20 23:20:32 UTC (rev 280111)
@@ -2020,6 +2020,198 @@
         }
     }
 }
+
+void testXorNot32()
+{
+    auto test = compile([] (CCallHelpers& jit) {
+        emitFunctionPrologue(jit);
+
+        jit.xorNot32(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::returnValueGPR);
+
+        emitFunctionEpilogue(jit);
+        jit.ret();
+    });
+
+    for (auto mask : int32Operands()) {
+        int32_t src = 0x12345678;
+        CHECK_EQ(invoke<int32_t>(test, src, mask), (src ^ ~mask));
+        CHECK_EQ(invoke<int32_t>(test, 0U, mask), ~mask);
+    }
+}
+
+void testXorNot64()
+{
+    auto test = compile([] (CCallHelpers& jit) {
+        emitFunctionPrologue(jit);
+
+        jit.xorNot64(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::returnValueGPR);
+
+        emitFunctionEpilogue(jit);
+        jit.ret();
+    });
+
+    for (auto mask : int64Operands()) {
+        int64_t src = 0x123456789abcdef0;
+        CHECK_EQ(invoke<int64_t>(test, src, mask), (src ^ ~mask));
+        CHECK_EQ(invoke<int64_t>(test, 0ULL, mask), ~mask);
+    }
+}
+
+void testXorNotWithLeftShift32()
+{
+    Vector<int32_t> amounts = { 0, 17, 31 };
+    for (auto n : int32Operands()) {
+        for (auto m : int32Operands()) {
+            for (auto amount : amounts) {
+                auto test = compile([=] (CCallHelpers& jit) {
+                    emitFunctionPrologue(jit);
+
+                    jit.xorNotLeftShift32(GPRInfo::argumentGPR0, 
+                        GPRInfo::argumentGPR1, 
+                        CCallHelpers::TrustedImm32(amount), 
+                        GPRInfo::returnValueGPR);
+
+                    emitFunctionEpilogue(jit);
+                    jit.ret();
+                });
+
+                int32_t lhs = invoke<int32_t>(test, n, m);
+                int32_t rhs = n ^ ~(m << amount);
+                CHECK_EQ(lhs, rhs);
+            }
+        }
+    }
+}
+
+void testXorNotWithRightShift32()
+{
+    Vector<int32_t> amounts = { 0, 17, 31 };
+    for (auto n : int32Operands()) {
+        for (auto m : int32Operands()) {
+            for (auto amount : amounts) {
+                auto test = compile([=] (CCallHelpers& jit) {
+                    emitFunctionPrologue(jit);
+
+                    jit.xorNotRightShift32(GPRInfo::argumentGPR0, 
+                        GPRInfo::argumentGPR1, 
+                        CCallHelpers::TrustedImm32(amount), 
+                        GPRInfo::returnValueGPR);
+
+                    emitFunctionEpilogue(jit);
+                    jit.ret();
+                });
+
+                int32_t lhs = invoke<int32_t>(test, n, m);
+                int32_t rhs = n ^ ~(m >> amount);
+                CHECK_EQ(lhs, rhs);
+            }
+        }
+    }
+}
+
+void testXorNotWithUnsignedRightShift32()
+{
+    Vector<uint32_t> amounts = { 0, 17, 31 };
+    for (auto n : int32Operands()) {
+        for (auto m : int32Operands()) {
+            for (auto amount : amounts) {
+                auto test = compile([=] (CCallHelpers& jit) {
+                    emitFunctionPrologue(jit);
+
+                    jit.xorNotUnsignedRightShift32(GPRInfo::argumentGPR0, 
+                        GPRInfo::argumentGPR1, 
+                        CCallHelpers::TrustedImm32(amount), 
+                        GPRInfo::returnValueGPR);
+
+                    emitFunctionEpilogue(jit);
+                    jit.ret();
+                });
+
+                uint32_t lhs = invoke<uint32_t>(test, n, m);
+                uint32_t rhs = static_cast<uint32_t>(n) ^ ~(static_cast<uint32_t>(m) >> amount);
+                CHECK_EQ(lhs, rhs);
+            }
+        }
+    }
+}
+
+void testXorNotWithLeftShift64()
+{
+    Vector<int32_t> amounts = { 0, 34, 63 };
+    for (auto n : int64Operands()) {
+        for (auto m : int64Operands()) {
+            for (auto amount : amounts) {
+                auto test = compile([=] (CCallHelpers& jit) {
+                    emitFunctionPrologue(jit);
+
+                    jit.xorNotLeftShift64(GPRInfo::argumentGPR0, 
+                        GPRInfo::argumentGPR1, 
+                        CCallHelpers::TrustedImm32(amount), 
+                        GPRInfo::returnValueGPR);
+
+                    emitFunctionEpilogue(jit);
+                    jit.ret();
+                });
+
+                int64_t lhs = invoke<int64_t>(test, n, m);
+                int64_t rhs = n ^ ~(m << amount);
+                CHECK_EQ(lhs, rhs);
+            }
+        }
+    }
+}
+
+void testXorNotWithRightShift64()
+{
+    Vector<int32_t> amounts = { 0, 34, 63 };
+    for (auto n : int64Operands()) {
+        for (auto m : int64Operands()) {
+            for (auto amount : amounts) {
+                auto test = compile([=] (CCallHelpers& jit) {
+                    emitFunctionPrologue(jit);
+
+                    jit.xorNotRightShift64(GPRInfo::argumentGPR0, 
+                        GPRInfo::argumentGPR1, 
+                        CCallHelpers::TrustedImm32(amount), 
+                        GPRInfo::returnValueGPR);
+
+                    emitFunctionEpilogue(jit);
+                    jit.ret();
+                });
+
+                int64_t lhs = invoke<int64_t>(test, n, m);
+                int64_t rhs = n ^ ~(m >> amount);
+                CHECK_EQ(lhs, rhs);
+            }
+        }
+    }
+}
+
+void testXorNotWithUnsignedRightShift64()
+{
+    Vector<uint32_t> amounts = { 0, 34, 63 };
+    for (auto n : int64Operands()) {
+        for (auto m : int64Operands()) {
+            for (auto amount : amounts) {
+                auto test = compile([=] (CCallHelpers& jit) {
+                    emitFunctionPrologue(jit);
+
+                    jit.xorNotUnsignedRightShift64(GPRInfo::argumentGPR0, 
+                        GPRInfo::argumentGPR1, 
+                        CCallHelpers::TrustedImm32(amount), 
+                        GPRInfo::returnValueGPR);
+
+                    emitFunctionEpilogue(jit);
+                    jit.ret();
+                });
+
+                uint64_t lhs = invoke<uint64_t>(test, n, m);
+                uint64_t rhs = static_cast<uint64_t>(n) ^ ~(static_cast<uint64_t>(m) >> amount);
+                CHECK_EQ(lhs, rhs);
+            }
+        }
+    }
+}
 #endif
 
 #if CPU(X86) || CPU(X86_64) || CPU(ARM64)
@@ -4274,6 +4466,15 @@
     RUN(testSubWithLeftShift64());
     RUN(testSubWithRightShift64());
     RUN(testSubWithUnsignedRightShift64());
+
+    RUN(testXorNot32());
+    RUN(testXorNot64());
+    RUN(testXorNotWithLeftShift32());
+    RUN(testXorNotWithRightShift32());
+    RUN(testXorNotWithUnsignedRightShift32());
+    RUN(testXorNotWithLeftShift64());
+    RUN(testXorNotWithRightShift64());
+    RUN(testXorNotWithUnsignedRightShift64());
 #endif
 
 #if CPU(ARM64E)

Modified: trunk/Source/_javascript_Core/b3/B3LowerToAir.cpp (280110 => 280111)


--- trunk/Source/_javascript_Core/b3/B3LowerToAir.cpp	2021-07-20 23:08:27 UTC (rev 280110)
+++ trunk/Source/_javascript_Core/b3/B3LowerToAir.cpp	2021-07-20 23:20:32 UTC (rev 280111)
@@ -826,6 +826,23 @@
         append(opcode, result);
     }
 
+    Air::Opcode opcodeBasedOnShiftKind(B3::Opcode b3Opcode, 
+        Air::Opcode shl32, Air::Opcode shl64, 
+        Air::Opcode sshr32, Air::Opcode sshr64, 
+        Air::Opcode zshr32, Air::Opcode zshr64)
+    {
+        switch (b3Opcode) {
+        case Shl:
+            return tryOpcodeForType(shl32, shl64, m_value->type());
+        case SShr:
+            return tryOpcodeForType(sshr32, sshr64, m_value->type());
+        case ZShr:
+            return tryOpcodeForType(zshr32, zshr64, m_value->type());
+        default:
+            return Air::Oops;
+        }
+    }
+
     // Call this method when doing two-operand lowering of a commutative operation. You have a choice of
     // which incoming Value is moved into the result. This will select which one is likely to be most
     // profitable to use as the result. Doing the right thing can have big performance consequences in tight
@@ -2649,20 +2666,10 @@
 
             // add-with-shift Pattern: n + (m ShiftType amount)
             auto tryAppendAddWithShift = [&] (Value* left, Value* right) -> bool {
-                auto tryOpcode = [&] (B3::Opcode opcode) -> Air::Opcode {
-                    switch (opcode) {
-                    case Shl:
-                        return tryOpcodeForType(AddLeftShift32, AddLeftShift64, m_value->type());
-                    case SShr:
-                        return tryOpcodeForType(AddRightShift32, AddRightShift64, m_value->type());
-                    case ZShr:
-                        return tryOpcodeForType(AddUnsignedRightShift32, AddUnsignedRightShift64, m_value->type());
-                    default:
-                        return Air::Oops;
-                    }
-                };
-
-                Air::Opcode opcode = tryOpcode(right->opcode());
+                Air::Opcode opcode = opcodeBasedOnShiftKind(right->opcode(), 
+                    AddLeftShift32, AddLeftShift64, 
+                    AddRightShift32, AddRightShift64, 
+                    AddUnsignedRightShift32, AddUnsignedRightShift64);
                 if (!isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Imm, Arg::Tmp)) 
                     return false;
                 if (!canBeInternal(right) || !imm(right->child(1)) || right->child(1)->asInt() < 0)
@@ -2734,20 +2741,10 @@
 
             // sub-with-shift Pattern: n - (m ShiftType amount)
             auto tryAppendSubAndShift = [&] () -> bool {
-                auto tryOpcode = [&] (B3::Opcode opcode) -> Air::Opcode {
-                    switch (opcode) {
-                    case Shl:
-                        return tryOpcodeForType(SubLeftShift32, SubLeftShift64, m_value->type());
-                    case SShr:
-                        return tryOpcodeForType(SubRightShift32, SubRightShift64, m_value->type());
-                    case ZShr:
-                        return tryOpcodeForType(SubUnsignedRightShift32, SubUnsignedRightShift64, m_value->type());
-                    default:
-                        return Air::Oops;
-                    }
-                };
-
-                Air::Opcode opcode = tryOpcode(right->opcode());
+                Air::Opcode opcode = opcodeBasedOnShiftKind(right->opcode(), 
+                    SubLeftShift32, SubLeftShift64, 
+                    SubRightShift32, SubRightShift64, 
+                    SubUnsignedRightShift32, SubUnsignedRightShift64);
                 if (!isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Imm, Arg::Tmp)) 
                     return false;
                 if (!canBeInternal(right) || !imm(right->child(1)) || right->child(1)->asInt() < 0)
@@ -3123,11 +3120,14 @@
         }
 
         case BitXor: {
+            Value* left = m_value->child(0);
+            Value* right = m_value->child(1);
+
             // FIXME: If canBeInternal(child), we should generate this using the comparison path.
             // https://bugs.webkit.org/show_bug.cgi?id=152367
             
-            if (m_value->child(1)->isInt(-1)) {
-                appendUnOp<Not32, Not64>(m_value->child(0));
+            if (right->isInt(-1)) {
+                appendUnOp<Not32, Not64>(left);
                 return;
             }
             
@@ -3134,16 +3134,58 @@
             // This pattern is super useful on both x86 and ARM64, since the inversion of the CAS result
             // can be done with zero cost on x86 (just flip the set from E to NE) and it's a progression
             // on ARM64 (since STX returns 0 on success, so ordinarily we have to flip it).
-            if (m_value->child(1)->isInt(1)
-                && m_value->child(0)->opcode() == AtomicWeakCAS
-                && canBeInternal(m_value->child(0))) {
-                commitInternal(m_value->child(0));
-                appendCAS(m_value->child(0), true);
+            if (right->isInt(1) && left->opcode() == AtomicWeakCAS && canBeInternal(left)) {
+                commitInternal(left);
+                appendCAS(left, true);
                 return;
             }
-            
-            appendBinOp<Xor32, Xor64, XorDouble, XorFloat, Commutative>(
-                m_value->child(0), m_value->child(1));
+
+            // EON Pattern: d = n ^ (m ^ -1)
+            auto tryAppendEON = [&] (Value* left, Value* right) -> bool {
+                if (right->opcode() != BitXor)
+                    return false;
+                Value* nValue = left;
+                Value* minusOne = right->child(1);
+                if (m_locked.contains(nValue) || !minusOne->hasInt() || !minusOne->isInt(-1))
+                    return false;
+
+                // eon-with-shift Pattern: d = n ^ ((m ShiftType amount) ^ -1)
+                auto tryAppendEONWithShift = [&] (Value* shiftValue) -> bool {
+                    Air::Opcode opcode = opcodeBasedOnShiftKind(shiftValue->opcode(), 
+                        XorNotLeftShift32, XorNotLeftShift64, 
+                        XorNotRightShift32, XorNotRightShift64, 
+                        XorNotUnsignedRightShift32, XorNotUnsignedRightShift64);
+                    if (!isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Imm, Arg::Tmp)) 
+                        return false;
+                    Value* mValue = shiftValue->child(0);
+                    Value* amountValue = shiftValue->child(1);
+                    if (!canBeInternal(shiftValue) || m_locked.contains(mValue) || !imm(amountValue) || amountValue->asInt() < 0)
+                        return false;
+                    uint64_t amount = amountValue->asInt();
+                    uint64_t datasize = m_value->type() == Int32 ? 32 : 64;
+                    if (amount >= datasize)
+                        return false;
+
+                    append(opcode, tmp(nValue), tmp(mValue), imm(amountValue), tmp(m_value));
+                    commitInternal(shiftValue);
+                    return true;
+                };
+
+                if (tryAppendEONWithShift(right->child(0)))
+                    return true;
+
+                Value* mValue = right->child(0);
+                Air::Opcode opcode = opcodeForType(XorNot32, XorNot64, m_value->type());
+                if (!isValidForm(opcode, Arg::Tmp, Arg::Tmp, Arg::Tmp) || m_locked.contains(mValue))
+                    return false;
+                append(opcode, tmp(nValue), tmp(mValue), tmp(m_value));
+                return true;
+            };
+
+            if (tryAppendEON(left, right) || tryAppendEON(right, left))
+                return;
+
+            appendBinOp<Xor32, Xor64, XorDouble, XorFloat, Commutative>(left, right);
             return;
         }
             

Modified: trunk/Source/_javascript_Core/b3/air/AirOpcode.opcodes (280110 => 280111)


--- trunk/Source/_javascript_Core/b3/air/AirOpcode.opcodes	2021-07-20 23:08:27 UTC (rev 280110)
+++ trunk/Source/_javascript_Core/b3/air/AirOpcode.opcodes	2021-07-20 23:20:32 UTC (rev 280111)
@@ -859,6 +859,30 @@
 arm64: OrNot64 U:G:64, U:G:64, D:G:64
     Tmp, Tmp, Tmp
 
+arm64: XorNot32 U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Tmp
+
+arm64: XorNot64 U:G:64, U:G:64, D:G:64
+    Tmp, Tmp, Tmp
+
+arm64: XorNotLeftShift32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Imm, Tmp
+
+arm64: XorNotRightShift32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Imm, Tmp
+
+arm64: XorNotUnsignedRightShift32 U:G:32, U:G:32, U:G:32, ZD:G:32
+    Tmp, Tmp, Imm, Tmp
+
+arm64: XorNotLeftShift64 U:G:64, U:G:64, U:G:32, D:G:64
+    Tmp, Tmp, Imm, Tmp
+
+arm64: XorNotRightShift64 U:G:64, U:G:64, U:G:32, D:G:64
+    Tmp, Tmp, Imm, Tmp
+
+arm64: XorNotUnsignedRightShift64 U:G:64, U:G:64, U:G:32, D:G:64
+    Tmp, Tmp, Imm, Tmp
+
 arm64: ExtractInsertBitfieldAtLowEnd32 U:G:32, U:G:32, U:G:32, UZD:G:32
     Tmp, Imm, Imm, Tmp
 

Modified: trunk/Source/_javascript_Core/b3/testb3.h (280110 => 280111)


--- trunk/Source/_javascript_Core/b3/testb3.h	2021-07-20 23:08:27 UTC (rev 280110)
+++ trunk/Source/_javascript_Core/b3/testb3.h	2021-07-20 23:20:32 UTC (rev 280111)
@@ -439,6 +439,14 @@
 void testBIC64();
 void testOrNot32();
 void testOrNot64();
+void testXorNot32();
+void testXorNot64();
+void testXorNotWithLeftShift32();
+void testXorNotWithRightShift32();
+void testXorNotWithUnsignedRightShift32();
+void testXorNotWithLeftShift64();
+void testXorNotWithRightShift64();
+void testXorNotWithUnsignedRightShift64();
 void testBitfieldZeroExtend32();
 void testBitfieldZeroExtend64();
 void testExtractRegister32();

Modified: trunk/Source/_javascript_Core/b3/testb3_2.cpp (280110 => 280111)


--- trunk/Source/_javascript_Core/b3/testb3_2.cpp	2021-07-20 23:08:27 UTC (rev 280110)
+++ trunk/Source/_javascript_Core/b3/testb3_2.cpp	2021-07-20 23:20:32 UTC (rev 280111)
@@ -4427,6 +4427,342 @@
     }
 }
 
+void testXorNot32()
+{
+    if (JSC::Options::defaultB3OptLevel() < 2)
+        return;
+
+    // Test Pattern: d = n ^ (m ^ -1)
+    auto test = [&] (int32_t n, int32_t m) -> int32_t {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* nValue = root->appendNew<Value>(
+            proc, Trunc, Origin(), 
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* mValue = root->appendNew<Value>(
+            proc, Trunc, Origin(), 
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+        Value* minusOneValue = root->appendNew<Const32Value>(proc, Origin(), -1);
+
+        Value* xorValue = root->appendNew<Value>(proc, BitXor, Origin(), mValue, minusOneValue);
+        root->appendNewControlValue(
+            proc, Return, Origin(), 
+            root->appendNew<Value>(proc, BitXor, Origin(), nValue, xorValue));
+
+        auto code = compileProc(proc);
+        if (isARM64())
+            checkUsesInstruction(*code, "eon");
+        return invoke<int32_t>(*code, n, m);
+    };
+
+    for (auto n : int32Operands()) {
+        for (auto m : int32Operands())
+            CHECK(test(n.value, m.value) == (n.value ^ (m.value ^ -1)));
+    }
+}
+
+void testXorNot64()
+{
+    if (JSC::Options::defaultB3OptLevel() < 2)
+        return;
+
+    // Test Pattern: d = n ^ (m ^ -1)
+    auto test = [&] (int64_t n, int64_t m) -> int64_t {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* nValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* mValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* minusOneValue = root->appendNew<Const64Value>(proc, Origin(), -1);
+
+        Value* xorValue = root->appendNew<Value>(proc, BitXor, Origin(), mValue, minusOneValue);
+        root->appendNewControlValue(
+            proc, Return, Origin(), 
+            root->appendNew<Value>(proc, BitXor, Origin(), nValue, xorValue));
+
+        auto code = compileProc(proc);
+        if (isARM64())
+            checkUsesInstruction(*code, "eon");
+        return invoke<int64_t>(*code, n, m);
+    };
+
+    for (auto n : int64Operands()) {
+        for (auto m : int64Operands())
+            CHECK(test(n.value, m.value) == (n.value ^ (m.value ^ -1LL)));
+    }
+}
+
+void testXorNotWithLeftShift32()
+{
+    if (JSC::Options::defaultB3OptLevel() < 2)
+        return;
+    Vector<int32_t> amounts = { 1, 17, 31 };
+
+    // Test Pattern: d = n ^ ((m << amount) ^ -1)
+    auto test = [&] (int32_t n, int32_t m, int32_t amount) -> int32_t {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* nValue = root->appendNew<Value>(
+            proc, Trunc, Origin(), 
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* mValue = root->appendNew<Value>(
+            proc, Trunc, Origin(), 
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+        Value* amountValue = root->appendNew<Const32Value>(proc, Origin(), amount);
+        Value* minusOneValue = root->appendNew<Const32Value>(proc, Origin(), -1);
+
+        Value* shiftValue = root->appendNew<Value>(proc, Shl, Origin(), mValue, amountValue);
+        Value* xorValue = root->appendNew<Value>(proc, BitXor, Origin(), shiftValue, minusOneValue);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(), 
+            root->appendNew<Value>(proc, BitXor, Origin(), nValue, xorValue));
+
+        auto code = compileProc(proc);
+        if (isARM64()) {
+            std::string regex(".*eon.*,.*,.*,.*lsl #");
+            regex += std::to_string(amount) + ".*";
+            checkUsesInstruction(*code, regex.c_str(), true);
+        }
+        return invoke<int32_t>(*code, n, m);
+    };
+
+    for (auto nOperand : int32Operands()) {
+        for (auto mOperand : int32Operands()) {
+            for (auto amount : amounts) {
+                int32_t n = nOperand.value;
+                int32_t m = mOperand.value;
+                CHECK_EQ(test(n, m, amount), n ^ ((m << amount) ^ -1));
+            }
+        }
+    }
+}
+
+void testXorNotWithRightShift32()
+{
+    if (JSC::Options::defaultB3OptLevel() < 2)
+        return;
+    Vector<int32_t> amounts = { 1, 17, 31 };
+
+    // Test Pattern: d = n ^ ((m >> amount) ^ -1)
+    auto test = [&] (int32_t n, int32_t m, int32_t amount) -> int32_t {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* nValue = root->appendNew<Value>(
+            proc, Trunc, Origin(), 
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* mValue = root->appendNew<Value>(
+            proc, Trunc, Origin(), 
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+        Value* amountValue = root->appendNew<Const32Value>(proc, Origin(), amount);
+        Value* minusOneValue = root->appendNew<Const32Value>(proc, Origin(), -1);
+
+        Value* shiftValue = root->appendNew<Value>(proc, SShr, Origin(), mValue, amountValue);
+        Value* xorValue = root->appendNew<Value>(proc, BitXor, Origin(), shiftValue, minusOneValue);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(), 
+            root->appendNew<Value>(proc, BitXor, Origin(), nValue, xorValue));
+
+        auto code = compileProc(proc);
+        if (isARM64()) {
+            std::string regex(".*eon.*,.*,.*,.*asr #");
+            regex += std::to_string(amount) + ".*";
+            checkUsesInstruction(*code, regex.c_str(), true);
+        }
+        return invoke<int32_t>(*code, n, m);
+    };
+
+    for (auto nOperand : int32Operands()) {
+        for (auto mOperand : int32Operands()) {
+            for (auto amount : amounts) {
+                int32_t n = nOperand.value;
+                int32_t m = mOperand.value;
+                CHECK_EQ(test(n, m, amount), n ^ ((m >> amount) ^ -1));
+            }
+        }
+    }
+}
+
+void testXorNotWithUnsignedRightShift32()
+{
+    if (JSC::Options::defaultB3OptLevel() < 2)
+        return;
+    Vector<uint32_t> amounts = { 1, 17, 31 };
+
+    // Test Pattern: d = n ^ ((m >> amount) ^ -1)
+    auto test = [&] (int32_t n, int32_t m, int32_t amount) -> uint32_t {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* nValue = root->appendNew<Value>(
+            proc, Trunc, Origin(), 
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0));
+        Value* mValue = root->appendNew<Value>(
+            proc, Trunc, Origin(), 
+            root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+        Value* amountValue = root->appendNew<Const32Value>(proc, Origin(), amount);
+        Value* minusOneValue = root->appendNew<Const32Value>(proc, Origin(), -1);
+
+        Value* shiftValue = root->appendNew<Value>(proc, ZShr, Origin(), mValue, amountValue);
+        Value* xorValue = root->appendNew<Value>(proc, BitXor, Origin(), shiftValue, minusOneValue);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(), 
+            root->appendNew<Value>(proc, BitXor, Origin(), nValue, xorValue));
+
+        auto code = compileProc(proc);
+        if (isARM64()) {
+            std::string regex(".*eon.*,.*,.*,.*lsr #");
+            regex += std::to_string(amount) + ".*";
+            checkUsesInstruction(*code, regex.c_str(), true);
+        }
+        return invoke<uint32_t>(*code, n, m);
+    };
+
+    for (auto nOperand : int32Operands()) {
+        for (auto mOperand : int32Operands()) {
+            for (auto amount : amounts) {
+                uint32_t n = nOperand.value;
+                uint32_t m = mOperand.value;
+                CHECK_EQ(test(n, m, amount), n ^ ((m >> amount) ^ -1));
+            }
+        }
+    }
+}
+
+// Verifies that the B3 pattern d = n ^ ((m << amount) ^ -1) on 64-bit
+// operands lowers to a single ARM64 EON with an LSL-shifted operand.
+void testXorNotWithLeftShift64()
+{
+    // The EON fusion only happens in the optimizing instruction selector.
+    if (JSC::Options::defaultB3OptLevel() < 2)
+        return;
+    Vector<int32_t> amounts = { 1, 34, 63 };
+
+    // Test Pattern: d = n ^ ((m << amount) ^ -1)
+    auto test = [&] (int64_t n, int64_t m, int32_t amount) -> int64_t {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* nValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* mValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* amountValue = root->appendNew<Const32Value>(proc, Origin(), amount);
+        Value* minusOneValue = root->appendNew<Const64Value>(proc, Origin(), -1);
+
+        Value* shiftValue = root->appendNew<Value>(proc, Shl, Origin(), mValue, amountValue);
+        Value* xorValue = root->appendNew<Value>(proc, BitXor, Origin(), shiftValue, minusOneValue);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(), 
+            root->appendNew<Value>(proc, BitXor, Origin(), nValue, xorValue));
+
+        auto code = compileProc(proc);
+        if (isARM64()) {
+            // Expect a fused "eon ..., ..., ..., lsl #amount" in the disassembly.
+            std::string regex(".*eon.*,.*,.*,.*lsl #");
+            regex += std::to_string(amount) + ".*";
+            checkUsesInstruction(*code, regex.c_str(), true);
+        }
+        return invoke<int64_t>(*code, n, m);
+    };
+
+    for (auto nOperand : int64Operands()) {
+        for (auto mOperand : int64Operands()) {
+            for (auto amount : amounts) {
+                int64_t n = nOperand.value;
+                int64_t m = mOperand.value;
+                // Left-shifting a negative signed value is UB in C++17, so
+                // compute the expected bits in uint64_t (matches B3 Shl, which
+                // is a plain wrap-around shift of the bit pattern).
+                int64_t shifted = static_cast<int64_t>(static_cast<uint64_t>(m) << amount);
+                CHECK_EQ(test(n, m, amount), n ^ (shifted ^ -1));
+            }
+        }
+    }
+}
+
+// Checks that B3's d = n ^ ((m >> amount) ^ -1) on 64-bit operands is
+// selected as a single ARM64 EON with an ASR-shifted register operand.
+void testXorNotWithRightShift64()
+{
+    // Only the optimizing selector performs the EON fusion.
+    if (JSC::Options::defaultB3OptLevel() < 2)
+        return;
+
+    // Build, compile, and run the pattern d = n ^ ((m >> amount) ^ -1),
+    // asserting the expected shifted-EON shows up in the generated code.
+    auto compileAndRun = [&] (int64_t n, int64_t m, int32_t amount) -> int64_t {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* lhs = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* rhs = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* shiftAmount = root->appendNew<Const32Value>(proc, Origin(), amount);
+        Value* allOnes = root->appendNew<Const64Value>(proc, Origin(), -1);
+
+        Value* shifted = root->appendNew<Value>(proc, SShr, Origin(), rhs, shiftAmount);
+        Value* inverted = root->appendNew<Value>(proc, BitXor, Origin(), shifted, allOnes);
+        Value* result = root->appendNew<Value>(proc, BitXor, Origin(), lhs, inverted);
+        root->appendNewControlValue(proc, Return, Origin(), result);
+
+        auto code = compileProc(proc);
+        if (isARM64()) {
+            std::string regex(".*eon.*,.*,.*,.*asr #");
+            regex += std::to_string(amount) + ".*";
+            checkUsesInstruction(*code, regex.c_str(), true);
+        }
+        return invoke<int64_t>(*code, n, m);
+    };
+
+    Vector<int32_t> amounts = { 1, 34, 63 };
+    for (auto nOperand : int64Operands()) {
+        for (auto mOperand : int64Operands()) {
+            for (int32_t amount : amounts) {
+                int64_t n = nOperand.value;
+                int64_t m = mOperand.value;
+                CHECK_EQ(compileAndRun(n, m, amount), n ^ ((m >> amount) ^ -1));
+            }
+        }
+    }
+}
+
+// Verifies that the B3 pattern d = n ^ ((m >>> amount) ^ -1) on 64-bit
+// operands lowers to a single ARM64 EON with an LSR-shifted operand.
+void testXorNotWithUnsignedRightShift64()
+{
+    // The EON fusion only happens in the optimizing instruction selector.
+    if (JSC::Options::defaultB3OptLevel() < 2)
+        return;
+    // Use 64-bit shift amounts like the other 64-bit variants; { 1, 17, 31 }
+    // was copy-pasted from the 32-bit test and never exercised shifts >= 32.
+    Vector<int32_t> amounts = { 1, 34, 63 };
+
+    // Test Pattern: d = n ^ ((m >> amount) ^ -1)
+    auto test = [&] (int64_t n, int64_t m, int32_t amount) -> uint64_t {
+        Procedure proc;
+        BasicBlock* root = proc.addBlock();
+
+        Value* nValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+        Value* mValue = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+        Value* amountValue = root->appendNew<Const32Value>(proc, Origin(), amount);
+        Value* minusOneValue = root->appendNew<Const64Value>(proc, Origin(), -1);
+
+        Value* shiftValue = root->appendNew<Value>(proc, ZShr, Origin(), mValue, amountValue);
+        Value* xorValue = root->appendNew<Value>(proc, BitXor, Origin(), shiftValue, minusOneValue);
+
+        root->appendNewControlValue(
+            proc, Return, Origin(), 
+            root->appendNew<Value>(proc, BitXor, Origin(), nValue, xorValue));
+
+        auto code = compileProc(proc);
+        if (isARM64()) {
+            // Expect a fused "eon ..., ..., ..., lsr #amount" in the disassembly.
+            std::string regex(".*eon.*,.*,.*,.*lsr #");
+            regex += std::to_string(amount) + ".*";
+            checkUsesInstruction(*code, regex.c_str(), true);
+        }
+        return invoke<uint64_t>(*code, n, m);
+    };
+
+    for (auto nOperand : int64Operands()) {
+        for (auto mOperand : int64Operands()) {
+            for (auto amount : amounts) {
+                uint64_t n = nOperand.value;
+                uint64_t m = mOperand.value;
+                // Unsigned m makes ">>" a logical shift, matching B3's ZShr.
+                CHECK_EQ(test(n, m, amount), n ^ ((m >> amount) ^ -1));
+            }
+        }
+    }
+}
+
 void testBitfieldZeroExtend32()
 {
     if (JSC::Options::defaultB3OptLevel() < 2)
@@ -6025,6 +6361,14 @@
     RUN(testBIC64());
     RUN(testOrNot32());
     RUN(testOrNot64());
+    RUN(testXorNot32());
+    RUN(testXorNot64());
+    RUN(testXorNotWithLeftShift32());
+    RUN(testXorNotWithRightShift32());
+    RUN(testXorNotWithUnsignedRightShift32());
+    RUN(testXorNotWithLeftShift64());
+    RUN(testXorNotWithRightShift64());
+    RUN(testXorNotWithUnsignedRightShift64());
     RUN(testBitfieldZeroExtend32());
     RUN(testBitfieldZeroExtend64());
     RUN(testExtractRegister32());
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to