Title: [192131] trunk/Source/JavaScriptCore
Revision: 192131
Author: msab...@apple.com
Date: 2015-11-07 10:11:39 -0800 (Sat, 07 Nov 2015)

Log Message

Add conditional moves to the MacroAssembler
https://bugs.webkit.org/show_bug.cgi?id=150761

Reviewed by Filip Pizlo.

Added moveConditionally, moveConditionallyTest & moveConditionallyDouble to X86 macro assemblers.
Bench tested correct opcodes and operations on X86-64 and X86 for a select number of comparisons.

* assembler/MacroAssemblerX86Common.h:
(JSC::MacroAssemblerX86Common::moveConditionally):
(JSC::MacroAssemblerX86Common::moveConditionallyTest):
(JSC::MacroAssemblerX86Common::moveConditionallyDouble):
* assembler/X86Assembler.h:
(JSC::X86Assembler::cmovcc):
(JSC::X86Assembler::cmovl_rr):
(JSC::X86Assembler::cmovl_mr):
(JSC::X86Assembler::cmovel_rr):
(JSC::X86Assembler::cmovnel_rr):
(JSC::X86Assembler::cmovpl_rr):
(JSC::X86Assembler::cmovnpl_rr):
(JSC::X86Assembler::cmovq_rr):
(JSC::X86Assembler::cmovq_mr):
(JSC::X86Assembler::cmoveq_rr):
(JSC::X86Assembler::cmovneq_rr):
(JSC::X86Assembler::cmovpq_rr):
(JSC::X86Assembler::cmovnpq_rr):
(JSC::X86Assembler::X86InstructionFormatter::twoByteOp64):
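As a usage illustration only (not part of this patch), the new MacroAssembler entry points can be driven from JIT code roughly as follows. The helper name, register choices, and the branchless-max example are assumptions made for this sketch, not code from the change:

    // Hypothetical caller-side sketch: compute max(a, b) into result without a
    // branch, using the moveConditionally() added in this revision.
    #include "assembler/MacroAssemblerX86Common.h" // include path assumed; adjust to the build setup

    using JSC::MacroAssemblerX86Common;

    static void emitBranchlessMax(MacroAssemblerX86Common& jit,
        MacroAssemblerX86Common::RegisterID a,
        MacroAssemblerX86Common::RegisterID b,
        MacroAssemblerX86Common::RegisterID result)
    {
        // Start with a, then conditionally overwrite with b when b > a (signed compare).
        jit.move(a, result);
        // Emits a compare of b against a followed by a conditional move of b into result.
        jit.moveConditionally(MacroAssemblerX86Common::GreaterThan, b, a, b, result);
    }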

Modified Paths

    trunk/Source/JavaScriptCore/ChangeLog
    trunk/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h
    trunk/Source/JavaScriptCore/assembler/X86Assembler.h

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (192130 => 192131)


--- trunk/Source/JavaScriptCore/ChangeLog	2015-11-07 17:17:47 UTC (rev 192130)
+++ trunk/Source/JavaScriptCore/ChangeLog	2015-11-07 18:11:39 UTC (rev 192131)
@@ -1,3 +1,33 @@
+2015-11-07  Michael Saboff  <msab...@apple.com>
+
+        Add conditional moves to the MacroAssembler
+        https://bugs.webkit.org/show_bug.cgi?id=150761
+
+        Reviewed by Filip Pizlo.
+
+        Added moveConditionally, moveConditionallyTest & moveConditionallyDouble to X86 macro assemblers.
+        Bench tested correct opcodes and operations on X86-64 and X86 for a select number of comparisons.
+
+        * assembler/MacroAssemblerX86Common.h:
+        (JSC::MacroAssemblerX86Common::moveConditionally):
+        (JSC::MacroAssemblerX86Common::moveConditionallyTest):
+        (JSC::MacroAssemblerX86Common::moveConditionallyDouble):
+        * assembler/X86Assembler.h:
+        (JSC::X86Assembler::cmovcc):
+        (JSC::X86Assembler::cmovl_rr):
+        (JSC::X86Assembler::cmovl_mr):
+        (JSC::X86Assembler::cmovel_rr):
+        (JSC::X86Assembler::cmovnel_rr):
+        (JSC::X86Assembler::cmovpl_rr):
+        (JSC::X86Assembler::cmovnpl_rr):
+        (JSC::X86Assembler::cmovq_rr):
+        (JSC::X86Assembler::cmovq_mr):
+        (JSC::X86Assembler::cmoveq_rr):
+        (JSC::X86Assembler::cmovneq_rr):
+        (JSC::X86Assembler::cmovpq_rr):
+        (JSC::X86Assembler::cmovnpq_rr):
+        (JSC::X86Assembler::X86InstructionFormatter::twoByteOp64):
+
 2015-11-06  Saam barati  <sbar...@apple.com>
 
         Control Flow Profiler should keep execution counts of basic blocks

Modified: trunk/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h (192130 => 192131)


--- trunk/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h	2015-11-07 17:17:47 UTC (rev 192130)
+++ trunk/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h	2015-11-07 18:11:39 UTC (rev 192131)
@@ -1046,6 +1046,54 @@
         m_assembler.movq_i64r(imm.m_value, dest);
     }
 
+    void moveConditionally(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmpq_rr(right, left);
+        m_assembler.cmovq_rr(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyTest(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.testq_rr(testReg, mask);
+        m_assembler.cmovq_rr(x86Condition(cond), src, dest);
+    }
+    
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+
+        if (cond == DoubleEqual) {
+            if (left == right) {
+                m_assembler.cmovnpq_rr(src, dest);
+                return;
+            }
+
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.cmoveq_rr(src, dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right) {
+                m_assembler.cmovpq_rr(src, dest);
+                return;
+            }
+
+            m_assembler.cmovpq_rr(src, dest);
+            m_assembler.cmovneq_rr(src, dest);
+            return;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        m_assembler.cmovq_rr(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
+    }
+    
     void swap(RegisterID reg1, RegisterID reg2)
     {
         if (reg1 != reg2)
@@ -1078,6 +1126,54 @@
         m_assembler.movl_i32r(imm.asIntptr(), dest);
     }
 
+    void moveConditionally(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+    {
+        m_assembler.cmpl_rr(right, left);
+        m_assembler.cmovl_rr(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyTest(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+    {
+        m_assembler.testl_rr(testReg, mask);
+        m_assembler.cmovl_rr(x86Condition(cond), src, dest);
+    }
+
+    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+
+        if (cond & DoubleConditionBitInvert)
+            m_assembler.ucomisd_rr(left, right);
+        else
+            m_assembler.ucomisd_rr(right, left);
+
+        if (cond == DoubleEqual) {
+            if (left == right) {
+                m_assembler.cmovnpl_rr(src, dest);
+                return;
+            }
+
+            Jump isUnordered(m_assembler.jp());
+            m_assembler.cmovel_rr(src, dest);
+            isUnordered.link(this);
+            return;
+        }
+
+        if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right) {
+                m_assembler.cmovpl_rr(src, dest);
+                return;
+            }
+
+            m_assembler.cmovpl_rr(src, dest);
+            m_assembler.cmovnel_rr(src, dest);
+            return;
+        }
+
+        ASSERT(!(cond & DoubleConditionBitSpecial));
+        m_assembler.cmovl_rr(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest);
+    }
+
     void swap(RegisterID reg1, RegisterID reg2)
     {
         if (reg1 != reg2)
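A note on the ucomisd handling in the two moveConditionallyDouble() additions above: ucomisd signals an unordered result (either operand NaN) by setting ZF, PF and CF, so an unguarded cmove after the compare would fire for NaN inputs. That is why DoubleEqual uses cmovnp when left == right and otherwise skips the cmove when PF is set, while DoubleNotEqualOrUnordered moves on either parity or not-equal. The following standalone C++ sketch (not WebKit code; the function names are invented) mirrors the selection semantics being implemented:

    #include <cmath>
    #include <cstdio>

    // dest is replaced by src only when the compare is ordered and equal
    // (the DoubleEqual case).
    static double selectIfDoubleEqual(double left, double right, double src, double dest)
    {
        bool unordered = std::isnan(left) || std::isnan(right);
        return (!unordered && left == right) ? src : dest;
    }

    // dest is replaced by src when the operands differ or either is NaN
    // (the DoubleNotEqualOrUnordered case).
    static double selectIfNotEqualOrUnordered(double left, double right, double src, double dest)
    {
        bool unordered = std::isnan(left) || std::isnan(right);
        return (unordered || left != right) ? src : dest;
    }

    int main()
    {
        std::printf("%g\n", selectIfDoubleEqual(1.0, 1.0, 42.0, 0.0));          // 42: ordered equal
        std::printf("%g\n", selectIfDoubleEqual(NAN, NAN, 42.0, 0.0));          // 0: unordered, no move
        std::printf("%g\n", selectIfNotEqualOrUnordered(NAN, NAN, 42.0, 0.0));  // 42: unordered moves
        return 0;
    }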

Modified: trunk/Source/JavaScriptCore/assembler/X86Assembler.h (192130 => 192131)


--- trunk/Source/JavaScriptCore/assembler/X86Assembler.h	2015-11-07 17:17:47 UTC (rev 192130)
+++ trunk/Source/JavaScriptCore/assembler/X86Assembler.h	2015-11-07 18:11:39 UTC (rev 192131)
@@ -248,6 +248,7 @@
         OP2_CVTSI2SD_VsdEd  = 0x2A,
         OP2_CVTTSD2SI_GdWsd = 0x2C,
         OP2_UCOMISD_VsdWsd  = 0x2E,
+        OP2_CMOVCC          = 0x40,
         OP2_ADDSD_VsdWsd    = 0x58,
         OP2_MULSD_VsdWsd    = 0x59,
         OP2_CVTSD2SS_VsdWsd = 0x5A,
@@ -279,6 +280,12 @@
         OP3_MFENCE          = 0xF0,
     } ThreeByteOpcodeID;
 
+    
+    TwoByteOpcodeID cmovcc(Condition cond)
+    {
+        return (TwoByteOpcodeID)(OP2_CMOVCC + cond);
+    }
+
     TwoByteOpcodeID jccRel32(Condition cond)
     {
         return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
@@ -1589,6 +1596,83 @@
         m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
     }
 
+    void cmovl_rr(Condition cond, RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, src);
+    }
+
+    void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, base, offset);
+    }
+
+    void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, base, index, scale, offset);
+    }
+
+    void cmovel_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionE), dst, src);
+    }
+    
+    void cmovnel_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionNE), dst, src);
+    }
+    
+    void cmovpl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionP), dst, src);
+    }
+    
+    void cmovnpl_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(ConditionNP), dst, src);
+    }
+
+#if CPU(X86_64)
+    void cmovq_rr(Condition cond, RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, src);
+    }
+
+    void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, base, offset);
+    }
+
+    void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(cond), dst, base, index, scale, offset);
+    }
+
+    void cmoveq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionE), dst, src);
+    }
+
+    void cmovneq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionNE), dst, src);
+    }
+
+    void cmovpq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionP), dst, src);
+    }
+
+    void cmovnpq_rr(RegisterID src, RegisterID dst)
+    {
+        m_formatter.twoByteOp64(cmovcc(ConditionNP), dst, src);
+    }
+#else
+    void cmovl_mr(Condition cond, const void* addr, RegisterID dst)
+    {
+        m_formatter.twoByteOp(cmovcc(cond), dst, addr);
+    }
+#endif
+
     void leal_mr(int offset, RegisterID base, RegisterID dst)
     {
         m_formatter.oneByteOp(OP_LEA, dst, base, offset);
@@ -2577,6 +2661,24 @@
             m_buffer.putByteUnchecked(opcode);
             registerModRM(reg, rm);
         }
+
+        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexW(reg, 0, base);
+            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            m_buffer.putByteUnchecked(opcode);
+            memoryModRM(reg, base, offset);
+        }
+
+        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+        {
+            m_buffer.ensureSpace(maxInstructionSize);
+            emitRexW(reg, index, base);
+            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+            m_buffer.putByteUnchecked(opcode);
+            memoryModRM(reg, base, index, scale, offset);
+        }
 #endif
 
         // Byte-operands:
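For context on the encodings added above: CMOVcc occupies the two-byte opcode range 0F 40 through 0F 4F, so cmovcc() forms the opcode by adding the condition code to OP2_CMOVCC, the same pattern jccRel32() uses with OP2_JCC_rel32. The new twoByteOp64() overloads prepend a REX.W prefix so the identical opcode byte operates on 64-bit registers. A minimal standalone sketch of that byte arithmetic follows (not WebKit code; the condition values simply mirror the x86 condition-code numbering):

    #include <cstdint>
    #include <cstdio>

    enum Condition : uint8_t { ConditionE = 0x4, ConditionNE = 0x5, ConditionP = 0xA, ConditionNP = 0xB };

    static const uint8_t OP_2BYTE_ESCAPE = 0x0F;
    static const uint8_t OP2_CMOVCC = 0x40;

    // srcRm and dstReg are 4-bit register numbers (rax = 0 ... r15 = 15); the
    // destination goes in the ModRM reg field, the source in the rm field.
    static void emitCmovqRR(Condition cond, unsigned srcRm, unsigned dstReg)
    {
        uint8_t rexW   = 0x48 | ((dstReg >> 3) << 2) | (srcRm >> 3); // REX.W plus R/B extension bits
        uint8_t opcode = OP2_CMOVCC + cond;                          // e.g. 0x45 for cmovne
        uint8_t modRM  = 0xC0 | ((dstReg & 7) << 3) | (srcRm & 7);   // mod = 11: register-register form
        std::printf("%02X %02X %02X %02X\n", rexW, OP_2BYTE_ESCAPE, opcode, modRM);
    }

    int main()
    {
        emitCmovqRR(ConditionNE, 0 /* rax */, 1 /* rcx */); // prints "48 0F 45 C8", i.e. cmovne rcx, rax
        return 0;
    }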