Title: [255994] trunk/Source/JavaScriptCore
Revision
255994
Author
ryanhaddad@apple.com
Date
2020-02-06 17:32:50 -0800 (Thu, 06 Feb 2020)

Log Message

Unreviewed, rolling out r255987.
https://bugs.webkit.org/show_bug.cgi?id=207369

JSTests failures (Requested by yusukesuzuki on #webkit).

Reverted changeset:

"[JSC] CodeBlock::shrinkToFit should shrink
m_constantRegisters and m_constantsSourceCodeRepresentation in
64bit architectures"
https://bugs.webkit.org/show_bug.cgi?id=207356
https://trac.webkit.org/changeset/255987

Patch by Commit Queue <commit-queue@webkit.org> on 2020-02-06

Modified Paths

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (255993 => 255994)


--- trunk/Source/JavaScriptCore/ChangeLog	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/ChangeLog	2020-02-07 01:32:50 UTC (rev 255994)
@@ -1,3 +1,18 @@
+2020-02-06  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, rolling out r255987.
+        https://bugs.webkit.org/show_bug.cgi?id=207369
+
+        JSTests failures (Requested by yusukesuzuki on #webkit).
+
+        Reverted changeset:
+
+        "[JSC] CodeBlock::shrinkToFit should shrink
+        m_constantRegisters and m_constantsSourceCodeRepresentation in
+        64bit architectures"
+        https://bugs.webkit.org/show_bug.cgi?id=207356
+        https://trac.webkit.org/changeset/255987
+
 2020-02-06  Yusuke Suzuki  <ysuzuki@apple.com>
 
         [JSC] CodeBlock::shrinkToFit should shrink m_constantRegisters and m_constantsSourceCodeRepresentation in 64bit architectures

Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp (255993 => 255994)


--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp	2020-02-07 01:32:50 UTC (rev 255994)
@@ -1953,20 +1953,14 @@
     return false;
 }
 
-void CodeBlock::shrinkToFit(const ConcurrentJSLocker&, ShrinkMode shrinkMode)
+void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
 {
     ConcurrentJSLocker locker(m_lock);
 
-#if USE(JSVALUE32_64)
-    // Only 32bit Baseline JIT is touching m_constantRegisters address directly.
-    if (shrinkMode == ShrinkMode::EarlyShrink)
+    if (shrinkMode == EarlyShrink) {
         m_constantRegisters.shrinkToFit();
-#else
-    m_constantRegisters.shrinkToFit();
-#endif
-    m_constantsSourceCodeRepresentation.shrinkToFit();
-
-    if (shrinkMode == ShrinkMode::EarlyShrink) {
+        m_constantsSourceCodeRepresentation.shrinkToFit();
+        
         if (m_rareData) {
             m_rareData->m_switchJumpTables.shrinkToFit();
             m_rareData->m_stringSwitchJumpTables.shrinkToFit();

Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.h (255993 => 255994)


--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.h	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.h	2020-02-07 01:32:50 UTC (rev 255994)
@@ -635,7 +635,7 @@
 
     DirectEvalCodeCache& directEvalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_directEvalCodeCache; }
 
-    enum class ShrinkMode {
+    enum ShrinkMode {
         // Shrink prior to generating machine code that may point directly into vectors.
         EarlyShrink,
 
@@ -642,9 +642,9 @@
         // Shrink after generating machine code, and after possibly creating new vectors
         // and appending to others. At this time it is not safe to shrink certain vectors
         // because we would have generated machine code that references them directly.
-        LateShrink,
+        LateShrink
     };
-    void shrinkToFit(const ConcurrentJSLocker&, ShrinkMode);
+    void shrinkToFit(ShrinkMode);
 
     // Functions for controlling when JITting kicks in, in a mixed mode
     // execution world.

Modified: trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp (255993 => 255994)


--- trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp	2020-02-07 01:32:50 UTC (rev 255994)
@@ -392,6 +392,8 @@
     link(*linkBuffer);
     m_speculative->linkOSREntries(*linkBuffer);
 
+    codeBlock()->shrinkToFit(CodeBlock::LateShrink);
+
     disassemble(*linkBuffer);
 
     m_graph.m_plan.setFinalizer(makeUnique<JITFinalizer>(
@@ -491,6 +493,8 @@
     link(*linkBuffer);
     m_speculative->linkOSREntries(*linkBuffer);
     
+    codeBlock()->shrinkToFit(CodeBlock::LateShrink);
+
     if (requiresArityFixup)
         linkBuffer->link(callArityFixup, FunctionPtr<JITThunkPtrTag>(vm().getCTIStub(arityFixupGenerator).code()));
 

Modified: trunk/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp (255993 => 255994)


--- trunk/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp	2020-02-07 01:32:50 UTC (rev 255994)
@@ -83,6 +83,13 @@
 {
     CodeBlock* codeBlock = m_plan.codeBlock();
 
+    // Some JIT finalizers may have added more constants. Shrink-to-fit those things now.
+    {
+        ConcurrentJSLocker locker(codeBlock->m_lock);
+        codeBlock->constants().shrinkToFit();
+        codeBlock->constantsSourceCodeRepresentation().shrinkToFit();
+    }
+
 #if ENABLE(FTL_JIT)
     m_jitCode->optimizeAfterWarmUp(codeBlock);
 #endif // ENABLE(FTL_JIT)

Modified: trunk/Source/JavaScriptCore/dfg/DFGPlan.cpp (255993 => 255994)


--- trunk/Source/JavaScriptCore/dfg/DFGPlan.cpp	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/dfg/DFGPlan.cpp	2020-02-07 01:32:50 UTC (rev 255994)
@@ -279,10 +279,7 @@
     // in the CodeBlock. This is a good time to perform an early shrink, which is more
     // powerful than a late one. It's safe to do so because we haven't generated any code
     // that references any of the tables directly, yet.
-    {
-        ConcurrentJSLocker locker(m_codeBlock->m_lock);
-        m_codeBlock->shrinkToFit(locker, CodeBlock::ShrinkMode::EarlyShrink);
-    }
+    m_codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
 
     if (validationEnabled())
         validate(dfg);
@@ -620,7 +617,6 @@
         {
             ConcurrentJSLocker locker(m_codeBlock->m_lock);
             m_codeBlock->jitCode()->shrinkToFit(locker);
-            m_codeBlock->shrinkToFit(locker, CodeBlock::ShrinkMode::LateShrink);
         }
 
         if (validationEnabled()) {

Modified: trunk/Source/JavaScriptCore/jit/JIT.cpp (255993 => 255994)


--- trunk/Source/JavaScriptCore/jit/JIT.cpp	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/jit/JIT.cpp	2020-02-07 01:32:50 UTC (rev 255994)
@@ -943,10 +943,7 @@
         static_cast<double>(result.size()) /
         static_cast<double>(m_codeBlock->instructionsSize()));
 
-    {
-        ConcurrentJSLocker locker(m_codeBlock->m_lock);
-        m_codeBlock->shrinkToFit(locker, CodeBlock::ShrinkMode::LateShrink);
-    }
+    m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
     m_codeBlock->setJITCode(
         adoptRef(*new DirectJITCode(result, withArityCheck, JITType::BaselineJIT)));
 

Modified: trunk/Source/JavaScriptCore/jit/JIT.h (255993 => 255994)


--- trunk/Source/JavaScriptCore/jit/JIT.h	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/jit/JIT.h	2020-02-07 01:32:50 UTC (rev 255994)
@@ -337,6 +337,9 @@
         void compileOpEqJumpSlow(Vector<SlowCaseEntry>::iterator&, CompileOpEqType, int jumpTarget);
         bool isOperandConstantDouble(VirtualRegister);
         
+        void emitLoadDouble(VirtualRegister, FPRegisterID value);
+        void emitLoadInt32ToDouble(VirtualRegister, FPRegisterID value);
+
         enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterBase, ShouldFilterValue, ShouldFilterBaseAndValue };
         // value register in write barrier is used before any scratch registers
         // so may safely be the same as either of the scratch registers.
@@ -414,7 +417,6 @@
 #if USE(JSVALUE32_64)
         bool getOperandConstantInt(VirtualRegister op1, VirtualRegister op2, VirtualRegister& op, int32_t& constant);
 
-        void emitLoadDouble(VirtualRegister, FPRegisterID value);
         void emitLoadTag(VirtualRegister, RegisterID tag);
         void emitLoadPayload(VirtualRegister, RegisterID payload);
 

Modified: trunk/Source/JavaScriptCore/jit/JITInlines.h (255993 => 255994)


--- trunk/Source/JavaScriptCore/jit/JITInlines.h	2020-02-07 00:59:59 UTC (rev 255993)
+++ trunk/Source/JavaScriptCore/jit/JITInlines.h	2020-02-07 01:32:50 UTC (rev 255994)
@@ -377,15 +377,6 @@
 
 #if USE(JSVALUE32_64)
 
-inline void JIT::emitLoadDouble(VirtualRegister reg, FPRegisterID value)
-{
-    if (reg.isConstant()) {
-        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(reg);
-        loadDouble(TrustedImmPtr(&inConstantPool), value);
-    } else
-        loadDouble(addressFor(reg), value);
-}
-
 inline void JIT::emitLoadTag(VirtualRegister reg, RegisterID tag)
 {
     if (reg.isConstant()) {
@@ -449,6 +440,25 @@
     emitLoad(reg1, tag1, payload1);
 }
 
+inline void JIT::emitLoadDouble(VirtualRegister reg, FPRegisterID value)
+{
+    if (reg.isConstant()) {
+        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(reg);
+        loadDouble(TrustedImmPtr(&inConstantPool), value);
+    } else
+        loadDouble(addressFor(reg), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(VirtualRegister reg, FPRegisterID value)
+{
+    if (reg.isConstant()) {
+        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(reg);
+        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
+        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+    } else
+        convertInt32ToDouble(payloadFor(reg), value);
+}
+
 inline void JIT::emitStore(VirtualRegister reg, RegisterID tag, RegisterID payload, RegisterID base)
 {
     store32(payload, payloadFor(reg, base));
@@ -604,6 +614,24 @@
         emitJumpSlowCaseIfNotJSCell(reg);
 }
 
+inline void JIT::emitLoadDouble(VirtualRegister reg, FPRegisterID value)
+{
+    if (reg.isConstant()) {
+        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(reg);
+        loadDouble(TrustedImmPtr(&inConstantPool), value);
+    } else
+        loadDouble(addressFor(reg), value);
+}
+
+inline void JIT::emitLoadInt32ToDouble(VirtualRegister reg, FPRegisterID value)
+{
+    if (reg.isConstant()) {
+        ASSERT(isOperandConstantInt(reg));
+        convertInt32ToDouble(Imm32(getConstantOperand(reg).asInt32()), value);
+    } else
+        convertInt32ToDouble(addressFor(reg), value);
+}
+
 ALWAYS_INLINE JIT::PatchableJump JIT::emitPatchableJumpIfNotInt(RegisterID reg)
 {
     return patchableBranch64(Below, reg, numberTagRegister);
_______________________________________________
webkit-changes mailing list
webkit-changes@lists.webkit.org
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to