- Revision
- 254926
- Author
- [email protected]
- Date
- 2020-01-22 09:06:32 -0800 (Wed, 22 Jan 2020)
Log Message
Throw away baseline code if there is an optimized replacement
https://bugs.webkit.org/show_bug.cgi?id=202503
<rdar://problem/58552041>
Reviewed by Yusuke Suzuki.
This patch's goal is to help us save JIT executable memory by throwing
away baseline code when it has an optimized replacement. To make it
easy to reason about, we do this when finalizing a GC, when the CodeBlock
is not on the stack, and when no OSR exits are linked to jump to the baseline
code. Also, as a measure to combat a performance regression, we only throw
away code on the second GC cycle in which it is eligible for this.
When we downgrade Baseline to LLInt, we also throw away all JIT data
and unlink all incoming calls.
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::CodeBlock):
(JSC::CodeBlock::finishCreation):
(JSC::CodeBlock::finalizeUnconditionally):
(JSC::CodeBlock::resetJITData):
(JSC::CodeBlock::optimizedReplacement):
(JSC::CodeBlock::hasOptimizedReplacement):
(JSC::CodeBlock::tallyFrequentExitSites):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::setJITCode):
* dfg/DFGDriver.cpp:
(JSC::DFG::compileImpl):
* dfg/DFGOSRExitCompilerCommon.cpp:
(JSC::DFG::callerReturnPC):
(JSC::DFG::adjustAndJumpToTarget):
* heap/CodeBlockSet.cpp:
(JSC::CodeBlockSet::isCurrentlyExecuting):
* heap/CodeBlockSet.h:
* heap/Heap.cpp:
(JSC::Heap::finalizeUnconditionalFinalizers):
(JSC::Heap::runEndPhase):
Modified Paths
Diff
Modified: trunk/Source/JavaScriptCore/ChangeLog (254925 => 254926)
--- trunk/Source/JavaScriptCore/ChangeLog 2020-01-22 16:59:30 UTC (rev 254925)
+++ trunk/Source/JavaScriptCore/ChangeLog 2020-01-22 17:06:32 UTC (rev 254926)
@@ -1,3 +1,42 @@
+2020-01-22 Saam Barati <[email protected]>
+
+ Throw away baseline code if there is an optimized replacement
+ https://bugs.webkit.org/show_bug.cgi?id=202503
+ <rdar://problem/58552041>
+
+ Reviewed by Yusuke Suzuki.
+
+ This patch's goal is to help us save JIT executable memory by throwing
+ away baseline code when it has an optimized replacement. To make it
+ easy to reason about, we do this when finalizing a GC, when the CodeBlock
+ is not on the stack, and when no OSR exits are linked to jump to the baseline
+ code. Also, as a measure to combat a performance regression, we only throw
+ away code on the second GC cycle in which it is eligible for this.
+ When we downgrade Baseline to LLInt, we also throw away all JIT data
+ and unlink all incoming calls.
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::CodeBlock):
+ (JSC::CodeBlock::finishCreation):
+ (JSC::CodeBlock::finalizeUnconditionally):
+ (JSC::CodeBlock::resetJITData):
+ (JSC::CodeBlock::optimizedReplacement):
+ (JSC::CodeBlock::hasOptimizedReplacement):
+ (JSC::CodeBlock::tallyFrequentExitSites):
+ * bytecode/CodeBlock.h:
+ (JSC::CodeBlock::setJITCode):
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compileImpl):
+ * dfg/DFGOSRExitCompilerCommon.cpp:
+ (JSC::DFG::callerReturnPC):
+ (JSC::DFG::adjustAndJumpToTarget):
+ * heap/CodeBlockSet.cpp:
+ (JSC::CodeBlockSet::isCurrentlyExecuting):
+ * heap/CodeBlockSet.h:
+ * heap/Heap.cpp:
+ (JSC::Heap::finalizeUnconditionalFinalizers):
+ (JSC::Heap::runEndPhase):
+
2020-01-21 Ross Kirsling <[email protected]>
[JSC] Date parse logic should be less redundant
Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp (254925 => 254926)
--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp 2020-01-22 16:59:30 UTC (rev 254925)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp 2020-01-22 17:06:32 UTC (rev 254926)
@@ -295,6 +295,8 @@
, m_didFailJITCompilation(false)
, m_didFailFTLCompilation(false)
, m_hasBeenCompiledWithFTL(false)
+ , m_hasLinkedOSRExit(false)
+ , m_isEligibleForLLIntDowngrade(false)
, m_numCalleeLocals(other.m_numCalleeLocals)
, m_numVars(other.m_numVars)
, m_numberOfArgumentsToSkip(other.m_numberOfArgumentsToSkip)
@@ -354,6 +356,8 @@
, m_didFailJITCompilation(false)
, m_didFailFTLCompilation(false)
, m_hasBeenCompiledWithFTL(false)
+ , m_hasLinkedOSRExit(false)
+ , m_isEligibleForLLIntDowngrade(false)
, m_numCalleeLocals(unlinkedCodeBlock->numCalleeLocals())
, m_numVars(unlinkedCodeBlock->numVars())
, m_hasDebuggerStatement(false)
@@ -442,14 +446,8 @@
const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
#if ENABLE(JIT)
- auto instruction = instructions().at(unlinkedHandler.target);
- MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
- if (instruction->isWide32())
- codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
- else if (instruction->isWide16())
- codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
- else
- codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
+ auto& instruction = *instructions().at(unlinkedHandler.target).ptr();
+ MacroAssemblerCodePtr<BytecodePtrTag> codePtr = LLInt::getCodePtr<BytecodePtrTag>(instruction);
handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
#else
handler.initialize(unlinkedHandler);
@@ -1381,6 +1379,41 @@
UNUSED_PARAM(vm);
updateAllPredictions();
+
+#if ENABLE(JIT)
+ bool isEligibleForLLIntDowngrade = m_isEligibleForLLIntDowngrade;
+ m_isEligibleForLLIntDowngrade = false;
+ // If BaselineJIT code is not executing, and an optimized replacement exists, we attempt
+ // to discard baseline JIT code and reinstall LLInt code to save JIT memory.
+ if (Options::useLLInt() && !m_hasLinkedOSRExit && jitType() == JITType::BaselineJIT && !m_vm->heap.codeBlockSet().isCurrentlyExecuting(this)) {
+ if (CodeBlock* optimizedCodeBlock = optimizedReplacement()) {
+ if (!optimizedCodeBlock->m_osrExitCounter) {
+ if (isEligibleForLLIntDowngrade) {
+ m_jitCode = nullptr;
+ LLInt::setEntrypoint(this);
+ RELEASE_ASSERT(jitType() == JITType::InterpreterThunk);
+
+ for (size_t i = 0; i < m_unlinkedCode->numberOfExceptionHandlers(); i++) {
+ const UnlinkedHandlerInfo& unlinkedHandler = m_unlinkedCode->exceptionHandler(i);
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+ auto& instruction = *instructions().at(unlinkedHandler.target).ptr();
+ MacroAssemblerCodePtr<BytecodePtrTag> codePtr = LLInt::getCodePtr<BytecodePtrTag>(instruction);
+ handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
+ }
+
+ unlinkIncomingCalls();
+
+ // It's safe to clear these out here because in finalizeUnconditionally all compiler threads
+ // are safepointed, meaning they're running either before or after bytecode parser, and bytecode
+ // parser is the only data structure pointing into the various *infos.
+ resetJITData();
+ } else
+ m_isEligibleForLLIntDowngrade = true;
+ }
+ }
+ }
+
+#endif
if (JITCode::couldBeInterpreted(jitType()))
finalizeLLIntInlineCaches();
@@ -1591,13 +1624,24 @@
// We can clear these because no other thread will have references to any stub infos, call
// link infos, or by val infos if we don't have JIT code. Attempts to query these data
// structures using the concurrent API (getICStatusMap and friends) will return nothing if we
- // don't have JIT code.
- jitData->m_stubInfos.clear();
- jitData->m_callLinkInfos.clear();
- jitData->m_byValInfos.clear();
+ // don't have JIT code. So it's safe to call this if we fail a baseline JIT compile.
+ //
+ // We also call this from finalizeUnconditionally when we degrade from baseline JIT to LLInt
+ // code. This is safe to do since all compiler threads are safepointed in finalizeUnconditionally,
+ // which means we've made it past bytecode parsing. Only the bytecode parser will hold onto
+ // references to these various *infos via its use of ICStatusMap. Also, OSR exit might point to
+ // these *infos, but when we have an OSR exit linked to this CodeBlock, we won't downgrade
+ // to LLInt.
+
+ for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
+ stubInfo->aboutToDie();
+ stubInfo->deref();
+ }
+
// We can clear this because the DFG's queries to these data structures are guarded by whether
// there is JIT code.
- jitData->m_rareCaseProfiles.clear();
+
+ m_jitData = nullptr;
}
}
#endif
@@ -1729,12 +1773,26 @@
}
#if ENABLE(JIT)
-bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
+CodeBlock* CodeBlock::optimizedReplacement(JITType typeToReplace)
{
CodeBlock* replacement = this->replacement();
- return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
+ if (!replacement)
+ return nullptr;
+ if (JITCode::isHigherTier(replacement->jitType(), typeToReplace))
+ return replacement;
+ return nullptr;
}
+CodeBlock* CodeBlock::optimizedReplacement()
+{
+ return optimizedReplacement(jitType());
+}
+
+bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
+{
+ return !!optimizedReplacement(typeToReplace);
+}
+
bool CodeBlock::hasOptimizedReplacement()
{
return hasOptimizedReplacement(jitType());
@@ -2776,7 +2834,7 @@
void CodeBlock::tallyFrequentExitSites()
{
ASSERT(JITCode::isOptimizingJIT(jitType()));
- ASSERT(alternative()->jitType() == JITType::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(alternative()->jitType()));
CodeBlock* profiledBlock = alternative();
Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.h (254925 => 254926)
--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2020-01-22 16:59:30 UTC (rev 254925)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2020-01-22 17:06:32 UTC (rev 254926)
@@ -258,6 +258,11 @@
Optional<BytecodeIndex> bytecodeIndexFromCallSiteIndex(CallSiteIndex);
+ // Because we might throw out baseline JIT code and all its baseline JIT data (m_jitData),
+ // you need to be careful about the lifetime of when you use the return value of this function.
+ // The return value may have raw pointers into this data structure that gets thrown away.
+ // Specifically, you need to ensure that no GC can be finalized (typically that means no
+ // allocations) between calling this and the last use of it.
void getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result);
void getICStatusMap(ICStatusMap& result);
@@ -412,7 +417,6 @@
void setJITCode(Ref<JITCode>&& code)
{
- ASSERT(heap()->isDeferred());
if (!code->isShared())
heap()->reportExtraMemoryAllocated(code->size());
@@ -444,6 +448,8 @@
DFG::CapabilityLevel capabilityLevel();
DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }
+ CodeBlock* optimizedReplacement(JITType typeToReplace);
+ CodeBlock* optimizedReplacement(); // the typeToReplace is my JITType
bool hasOptimizedReplacement(JITType typeToReplace);
bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif
@@ -863,6 +869,9 @@
bool m_didFailFTLCompilation : 1;
bool m_hasBeenCompiledWithFTL : 1;
+ bool m_hasLinkedOSRExit : 1;
+ bool m_isEligibleForLLIntDowngrade : 1;
+
// Internal methods for use by validation code. It would be private if it wasn't
// for the fact that we use it from anonymous namespaces.
void beginValidationDidFail();
Modified: trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp (254925 => 254926)
--- trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp 2020-01-22 16:59:30 UTC (rev 254925)
+++ trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp 2020-01-22 17:06:32 UTC (rev 254926)
@@ -81,7 +81,7 @@
ASSERT(codeBlock);
ASSERT(codeBlock->alternative());
- ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(codeBlock->alternative()->jitType()));
ASSERT(!profiledDFGCodeBlock || profiledDFGCodeBlock->jitType() == JITType::DFGJIT);
if (logCompilationChanges(mode))
Modified: trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (254925 => 254926)
--- trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp 2020-01-22 16:59:30 UTC (rev 254925)
+++ trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp 2020-01-22 17:06:32 UTC (rev 254926)
@@ -193,6 +193,8 @@
#undef LLINT_RETURN_LOCATION
} else {
+ baselineCodeBlockForCaller->m_hasLinkedOSRExit = true;
+
switch (trueCallerCallKind) {
case InlineCallFrame::Call:
case InlineCallFrame::Construct:
@@ -413,6 +415,8 @@
#endif
jumpTarget = destination.retagged<OSRExitPtrTag>().executableAddress();
} else {
+ codeBlockForExit->m_hasLinkedOSRExit = true;
+
BytecodeIndex exitIndex = exit.m_codeOrigin.bytecodeIndex();
MacroAssemblerCodePtr<JSEntryPtrTag> destination;
if (exitIndex.checkpoint())
Modified: trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp (254925 => 254926)
--- trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp 2020-01-22 16:59:30 UTC (rev 254925)
+++ trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp 2020-01-22 17:06:32 UTC (rev 254926)
@@ -55,6 +55,11 @@
m_currentlyExecuting.clear();
}
+bool CodeBlockSet::isCurrentlyExecuting(CodeBlock* codeBlock)
+{
+ return m_currentlyExecuting.contains(codeBlock);
+}
+
void CodeBlockSet::dump(PrintStream& out) const
{
CommaPrinter comma;
Modified: trunk/Source/JavaScriptCore/heap/CodeBlockSet.h (254925 => 254926)
--- trunk/Source/JavaScriptCore/heap/CodeBlockSet.h 2020-01-22 16:59:30 UTC (rev 254925)
+++ trunk/Source/JavaScriptCore/heap/CodeBlockSet.h 2020-01-22 17:06:32 UTC (rev 254926)
@@ -56,6 +56,10 @@
bool contains(const AbstractLocker&, void* candidateCodeBlock);
Lock& getLock() { return m_lock; }
+ // This is expected to run only when we're not adding to the set for now. If
+ // this needs to run concurrently in the future, we'll need to lock around this.
+ bool isCurrentlyExecuting(CodeBlock*);
+
// Visits each CodeBlock in the heap until the visitor function returns true
// to indicate that it is done iterating, or until every CodeBlock has been
// visited.
Modified: trunk/Source/JavaScriptCore/heap/Heap.cpp (254925 => 254926)
--- trunk/Source/JavaScriptCore/heap/Heap.cpp 2020-01-22 16:59:30 UTC (rev 254925)
+++ trunk/Source/JavaScriptCore/heap/Heap.cpp 2020-01-22 17:06:32 UTC (rev 254926)
@@ -605,11 +605,11 @@
vm().builtinExecutables()->finalizeUnconditionally();
finalizeMarkedUnconditionalFinalizers<FunctionExecutable>(vm().functionExecutableSpace.space);
finalizeMarkedUnconditionalFinalizers<SymbolTable>(vm().symbolTableSpace);
+ finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm().executableToCodeBlockEdgesWithFinalizers); // We run this before CodeBlock's unconditional finalizer since CodeBlock looks at the owner executable's installed CodeBlock in its finalizeUnconditionally.
vm().forEachCodeBlockSpace(
[&] (auto& space) {
this->finalizeMarkedUnconditionalFinalizers<CodeBlock>(space.set);
});
- finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm().executableToCodeBlockEdgesWithFinalizers);
finalizeMarkedUnconditionalFinalizers<StructureRareData>(vm().structureRareDataSpace);
finalizeMarkedUnconditionalFinalizers<UnlinkedFunctionExecutable>(vm().unlinkedFunctionExecutableSpace.set);
if (vm().m_weakSetSpace)
@@ -1509,7 +1509,7 @@
pruneStaleEntriesFromWeakGCMaps();
sweepArrayBuffers();
snapshotUnswept();
- finalizeUnconditionalFinalizers();
+ finalizeUnconditionalFinalizers(); // We rely on these unconditional finalizers running before clearCurrentlyExecuting since CodeBlock's finalizer relies on querying currently executing.
removeDeadCompilerWorklistEntries();
notifyIncrementalSweeper();