Diff
Modified: trunk/JSTests/ChangeLog (254479 => 254480)
--- trunk/JSTests/ChangeLog 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/JSTests/ChangeLog 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,3 +1,18 @@
+2020-01-13 Saam Barati <[email protected]>
+
+ Throw away baseline code if there is an optimized replacement
+ https://bugs.webkit.org/show_bug.cgi?id=202503
+
+ Reviewed by Yusuke Suzuki.
+
+ * stress/dfg-compare-eq-via-nonSpeculativeNonPeepholeCompareNullOrUndefined.js:
+ * stress/getter-setter-inlining-should-emit-movhint.js:
+ * stress/make-dictionary-repatch.js:
+ * stress/merging-ic-variants-should-bail-if-structures-overlap.js:
+ * stress/proxy-getter-stack-overflow.js:
+ * stress/regress-192717.js:
+ * stress/retry-cache-later.js:
+
2020-01-13 Mark Lam <[email protected]>
Replace uses of Box<Identifier> with a new CacheableIdentifier class.
Modified: trunk/JSTests/stress/dfg-compare-eq-via-nonSpeculativeNonPeepholeCompareNullOrUndefined.js (254479 => 254480)
--- trunk/JSTests/stress/dfg-compare-eq-via-nonSpeculativeNonPeepholeCompareNullOrUndefined.js 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/JSTests/stress/dfg-compare-eq-via-nonSpeculativeNonPeepholeCompareNullOrUndefined.js 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,4 +1,4 @@
-//@ runDefault("--collectContinuously=true", "--collectContinuouslyPeriodMS=0.15", "--useLLInt=false", "--useFTLJIT=false", "--jitPolicyScale=0")
+//@ runDefault("--collectContinuously=true", "--collectContinuouslyPeriodMS=0.15", "--forceBaseline=true", "--useFTLJIT=false", "--jitPolicyScale=0")
// This test exercises DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined().
Modified: trunk/JSTests/stress/getter-setter-inlining-should-emit-movhint.js (254479 => 254480)
--- trunk/JSTests/stress/getter-setter-inlining-should-emit-movhint.js 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/JSTests/stress/getter-setter-inlining-should-emit-movhint.js 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,4 +1,4 @@
-//@ runDefault("--useRandomizingFuzzerAgent=1", "--usePolymorphicCallInliningForNonStubStatus=1", "--seedOfRandomizingFuzzerAgent=2896922505", "--useLLInt=0", "--useConcurrentJIT=0")
+//@ runDefault("--useRandomizingFuzzerAgent=1", "--usePolymorphicCallInliningForNonStubStatus=1", "--seedOfRandomizingFuzzerAgent=2896922505", "--forceBaseline=1", "--useConcurrentJIT=0")
function foo(o) {
o.f = 0;
return o.f;
Modified: trunk/JSTests/stress/make-dictionary-repatch.js (254479 => 254480)
--- trunk/JSTests/stress/make-dictionary-repatch.js 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/JSTests/stress/make-dictionary-repatch.js 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,4 +1,4 @@
-//@ if $jitTests then runNoCJIT("--useDFGJIT=false", "--useLLInt=false") else skip end
+//@ if $jitTests then runNoCJIT("--useDFGJIT=false", "--forceBaseline=true") else skip end
function foo(o) {
return o.f;
Modified: trunk/JSTests/stress/merging-ic-variants-should-bail-if-structures-overlap.js (254479 => 254480)
--- trunk/JSTests/stress/merging-ic-variants-should-bail-if-structures-overlap.js 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/JSTests/stress/merging-ic-variants-should-bail-if-structures-overlap.js 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,4 +1,4 @@
-//@ runDefault("--validateGraphAtEachPhase=1", "--useLLInt=0")
+//@ runDefault("--validateGraphAtEachPhase=1", "--forceBaseline=1")
let items = [];
for (let i = 0; i < 8; ++i) {
Modified: trunk/JSTests/stress/proxy-getter-stack-overflow.js (254479 => 254480)
--- trunk/JSTests/stress/proxy-getter-stack-overflow.js 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/JSTests/stress/proxy-getter-stack-overflow.js 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,4 +1,4 @@
-//@ if $jitTests then runDefault("--useLLInt=0") else skip end
+//@ if $jitTests then runDefault("--forceBaseline=1") else skip end
const o = {};
const handler = {
Modified: trunk/JSTests/stress/regress-192717.js (254479 => 254480)
--- trunk/JSTests/stress/regress-192717.js 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/JSTests/stress/regress-192717.js 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,5 +1,5 @@
//@ skip if $memoryLimited or $buildType == "debug"
-//@ runDefault("--useLLInt=false", "--forceCodeBlockToJettisonDueToOldAge=true", "--maxPerThreadStackUsage=200000", "--exceptionStackTraceLimit=1", "--defaultErrorStackTraceLimit=1")
+//@ runDefault("--forceBaseline=true", "--forceCodeBlockToJettisonDueToOldAge=true", "--maxPerThreadStackUsage=200000", "--exceptionStackTraceLimit=1", "--defaultErrorStackTraceLimit=1")
let foo = 'let a';
for (let i = 0; i < 400000; i++)
Modified: trunk/JSTests/stress/retry-cache-later.js (254479 => 254480)
--- trunk/JSTests/stress/retry-cache-later.js 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/JSTests/stress/retry-cache-later.js 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,4 +1,4 @@
-//@ runNoCJIT("--useLLInt=false", "--useDFGJIT=false")
+//@ runNoCJIT("--forceBaseline=true", "--useDFGJIT=false")
function foo(o) {
return o.i7;
Modified: trunk/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/API/tests/ExecutionTimeLimitTest.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -119,11 +119,11 @@
int testExecutionTimeLimit()
{
static const TierOptions tierOptionsList[] = {
- { "LLINT", 0_ms, "--useConcurrentJIT=false --useLLInt=true --useJIT=false" },
- { "Baseline", 0_ms, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=false" },
- { "DFG", 200_ms, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=true --useFTLJIT=false" },
+ { "LLINT", 0_ms, "--useConcurrentJIT=false --useJIT=false" },
+ { "Baseline", 0_ms, "--useConcurrentJIT=false --useJIT=true --useDFGJIT=false" },
+ { "DFG", 200_ms, "--useConcurrentJIT=false --useJIT=true --useDFGJIT=true --useFTLJIT=false" },
#if ENABLE(FTL_JIT)
- { "FTL", 500_ms, "--useConcurrentJIT=false --useLLInt=true --useJIT=true --useDFGJIT=true --useFTLJIT=true" },
+ { "FTL", 500_ms, "--useConcurrentJIT=false --useJIT=true --useDFGJIT=true --useFTLJIT=true" },
#endif
};
Modified: trunk/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/API/tests/PingPongStackOverflowTest.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -122,7 +122,7 @@
auto origSoftReservedZoneSize = Options::softReservedZoneSize();
auto origReservedZoneSize = Options::reservedZoneSize();
- auto origUseLLInt = Options::useLLInt();
+ auto origForceBaseline = Options::forceBaseline();
auto origMaxPerThreadStackUsage = Options::maxPerThreadStackUsage();
Options::softReservedZoneSize() = 128 * KB;
@@ -131,7 +131,7 @@
// Normally, we want to disable the LLINT to force the use of JITted code which is necessary for
// reproducing the regression in https://bugs.webkit.org/show_bug.cgi?id=148749. However, we only
// want to do this if the LLINT isn't the only available execution engine.
- Options::useLLInt() = false;
+ Options::forceBaseline() = true;
#endif
const char* scriptString =
@@ -178,7 +178,7 @@
Options::softReservedZoneSize() = origSoftReservedZoneSize;
Options::reservedZoneSize() = origReservedZoneSize;
- Options::useLLInt() = origUseLLInt;
+ Options::forceBaseline() = origForceBaseline;
Options::maxPerThreadStackUsage() = origMaxPerThreadStackUsage;
return failed;
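
The hunk above saves Options::forceBaseline() by hand and restores it before returning, mirroring how the test already handles the zone-size options. A small RAII guard, sketched below with a hypothetical OptionScope template (not part of this patch or of JSC), is one way such save/restore code could be made early-return safe; the reference-returning accessor is a toy stand-in for JSC's Options::forceBaseline() style.

    #include <cassert>

    // Toy stand-in for a JSC-style option accessor that returns a mutable reference.
    static bool& forceBaselineOption()
    {
        static bool value = false;
        return value;
    }

    // Hypothetical RAII guard: remembers the option's value on entry and restores it
    // on exit, so an early return cannot leak a modified global option.
    template<typename T>
    class OptionScope {
    public:
        OptionScope(T& option, T newValue)
            : m_option(option)
            , m_originalValue(option)
        {
            m_option = newValue;
        }

        ~OptionScope() { m_option = m_originalValue; }

    private:
        T& m_option;
        T m_originalValue;
    };

    int main()
    {
        assert(!forceBaselineOption());
        {
            OptionScope<bool> scope(forceBaselineOption(), true);
            assert(forceBaselineOption());
        }
        // The original value is restored when the scope ends.
        assert(!forceBaselineOption());
        return 0;
    }
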
Modified: trunk/Source/JavaScriptCore/ChangeLog (254479 => 254480)
--- trunk/Source/JavaScriptCore/ChangeLog 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/ChangeLog 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,3 +1,64 @@
+2020-01-13 Saam Barati <[email protected]>
+
+ Throw away baseline code if there is an optimized replacement
+ https://bugs.webkit.org/show_bug.cgi?id=202503
+
+ Reviewed by Yusuke Suzuki.
+
+ This patch's goal is to help us save JIT executable memory by throwing
+ away baseline code when it has an optimized replacement. To make it
+ easy to reason about, we do this when finalizing a GC, and when the
+ CodeBlock is not on the stack. When we do this, we throw away all JIT
+ data and unlink all incoming calls.
+
+ This patch also paves the way for the LOL tier by making it so we always
+ exit to the LLInt. This means the code in CodeBlock finalization never
+ has to consider whether an OSR exit is wired to baseline JIT code,
+ since all exits now go to the LLInt. Because of this, this patch removes
+ the forceOSRExitToLLInt option. It also renames the useLLInt option to
+ forceBaseline and inverts its meaning.
+ Options::forceBaseline=true implies that code will start off executing in
+ the baseline JIT instead of the LLInt. However, it won't prevent us from
+ emitting an OSR exit which jumps to LLInt code.
+
+ * API/tests/ExecutionTimeLimitTest.cpp:
+ (testExecutionTimeLimit):
+ * API/tests/PingPongStackOverflowTest.cpp:
+ (testPingPongStackOverflow):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::finishCreation):
+ (JSC::CodeBlock::finalizeUnconditionally):
+ (JSC::CodeBlock::resetJITData):
+ (JSC::CodeBlock::optimizedReplacement):
+ (JSC::CodeBlock::hasOptimizedReplacement):
+ (JSC::CodeBlock::tallyFrequentExitSites):
+ (JSC::CodeBlock::findStubInfo): Deleted.
+ (JSC::CodeBlock::getCallLinkInfoForBytecodeIndex): Deleted.
+ * bytecode/CodeBlock.h:
+ (JSC::CodeBlock::setJITCode):
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compileImpl):
+ * dfg/DFGOSRExitCompilerCommon.cpp:
+ (JSC::DFG::callerReturnPC):
+ (JSC::DFG::reifyInlinedCallFrames):
+ (JSC::DFG::adjustAndJumpToTarget):
+ * dfg/DFGOSRExitCompilerCommon.h:
+ * heap/CodeBlockSet.cpp:
+ (JSC::CodeBlockSet::isCurrentlyExecuting):
+ * heap/CodeBlockSet.h:
+ * heap/Heap.cpp:
+ (JSC::Heap::finalizeUnconditionalFinalizers):
+ (JSC::Heap::runEndPhase):
+ * llint/LLIntSlowPaths.cpp:
+ (JSC::LLInt::dispatchToNextInstruction):
+ * runtime/Options.cpp:
+ (JSC::recomputeDependentOptions):
+ (JSC::Options::initialize):
+ (JSC::Options::ensureOptionsAreCoherent):
+ * runtime/OptionsList.h:
+ * runtime/ScriptExecutable.cpp:
+ (JSC::ScriptExecutable::prepareForExecutionImpl):
+
2020-01-13 Mark Lam <[email protected]>
Replace uses of Box<Identifier> with a new CacheableIdentifier class.
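
As a reading aid for Saam Barati's ChangeLog entry above, here is a minimal, self-contained sketch of the discard decision it describes. The types and names (ToyCodeBlock, shouldDiscardBaselineCode) are invented for illustration and are not JSC's real CodeBlock API; the real check lives in CodeBlock::finalizeUnconditionally() in the diff below.

    #include <cstdio>

    // Toy JIT tiers, ordered from least to most optimized.
    enum class JITType { InterpreterThunk, BaselineJIT, DFGJIT, FTLJIT };

    struct ToyCodeBlock {
        JITType jitType = JITType::BaselineJIT;
        ToyCodeBlock* optimizedReplacement = nullptr; // e.g. a DFG or FTL CodeBlock
        unsigned osrExitCounter = 0;                  // exits taken out of the replacement
        bool currentlyExecuting = false;              // on the stack during this GC
    };

    // Mirrors the shape of the check the patch adds: only drop baseline code when the
    // option allows it, the block is baseline, it is not on the stack, and its
    // optimized replacement has not been taking OSR exits.
    static bool shouldDiscardBaselineCode(const ToyCodeBlock& block, bool forceBaseline)
    {
        if (forceBaseline)
            return false;
        if (block.jitType != JITType::BaselineJIT)
            return false;
        if (block.currentlyExecuting)
            return false;
        const ToyCodeBlock* replacement = block.optimizedReplacement;
        return replacement && !replacement->osrExitCounter;
    }

    int main()
    {
        ToyCodeBlock dfg;
        dfg.jitType = JITType::DFGJIT;

        ToyCodeBlock baseline;
        baseline.optimizedReplacement = &dfg;

        std::printf("discard? %d\n", shouldDiscardBaselineCode(baseline, false)); // 1

        dfg.osrExitCounter = 3; // the replacement keeps bailing out; keep baseline code
        std::printf("discard? %d\n", shouldDiscardBaselineCode(baseline, false)); // 0
        return 0;
    }
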
Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -442,14 +442,8 @@
const UnlinkedHandlerInfo& unlinkedHandler = unlinkedCodeBlock->exceptionHandler(i);
HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
#if ENABLE(JIT)
- auto instruction = instructions().at(unlinkedHandler.target);
- MacroAssemblerCodePtr<BytecodePtrTag> codePtr;
- if (instruction->isWide32())
- codePtr = LLInt::getWide32CodePtr<BytecodePtrTag>(op_catch);
- else if (instruction->isWide16())
- codePtr = LLInt::getWide16CodePtr<BytecodePtrTag>(op_catch);
- else
- codePtr = LLInt::getCodePtr<BytecodePtrTag>(op_catch);
+ auto& instruction = *instructions().at(unlinkedHandler.target).ptr();
+ MacroAssemblerCodePtr<BytecodePtrTag> codePtr = LLInt::getCodePtr<BytecodePtrTag>(instruction);
handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
#else
handler.initialize(unlinkedHandler);
@@ -1388,6 +1382,36 @@
UNUSED_PARAM(vm);
updateAllPredictions();
+
+#if ENABLE(JIT)
+ // If BaselineJIT code is not executing, and an optimized replacement exists, we attempt
+ // to discard baseline JIT code and reinstall LLInt code to save JIT memory.
+ if (!Options::forceBaseline() && jitType() == JITType::BaselineJIT && !m_vm->heap.codeBlockSet().isCurrentlyExecuting(this)) {
+ if (CodeBlock* optimizedCodeBlock = optimizedReplacement()) {
+ if (!optimizedCodeBlock->m_osrExitCounter) {
+ m_jitCode = nullptr;
+ LLInt::setEntrypoint(this);
+ RELEASE_ASSERT(jitType() == JITType::InterpreterThunk);
+
+ for (size_t i = 0; i < m_unlinkedCode->numberOfExceptionHandlers(); i++) {
+ const UnlinkedHandlerInfo& unlinkedHandler = m_unlinkedCode->exceptionHandler(i);
+ HandlerInfo& handler = m_rareData->m_exceptionHandlers[i];
+ auto& instruction = *instructions().at(unlinkedHandler.target).ptr();
+ MacroAssemblerCodePtr<BytecodePtrTag> codePtr = LLInt::getCodePtr<BytecodePtrTag>(instruction);
+ handler.initialize(unlinkedHandler, CodeLocationLabel<ExceptionHandlerPtrTag>(codePtr.retagged<ExceptionHandlerPtrTag>()));
+ }
+
+ unlinkIncomingCalls();
+
+ // It's safe to clear these out here because in finalizeUnconditionally all compiler threads
+ // are safepointed, meaning they're running either before or after bytecode parser, and bytecode
+ // parser is the only data structure pointing into the various *infos.
+ resetJITData();
+ }
+ }
+ }
+
+#endif
if (JITCode::couldBeInterpreted(jitType()))
finalizeLLIntInlineCaches();
@@ -1515,18 +1539,6 @@
return ensureJITData(locker).m_negICs.add(arithProfile);
}
-StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
-{
- ConcurrentJSLocker locker(m_lock);
- if (auto* jitData = m_jitData.get()) {
- for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
- if (stubInfo->codeOrigin == codeOrigin)
- return stubInfo;
- }
- }
- return nullptr;
-}
-
ByValInfo* CodeBlock::addByValInfo()
{
ConcurrentJSLocker locker(m_lock);
@@ -1539,18 +1551,6 @@
return ensureJITData(locker).m_callLinkInfos.add();
}
-CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(BytecodeIndex index)
-{
- ConcurrentJSLocker locker(m_lock);
- if (auto* jitData = m_jitData.get()) {
- for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
- if (callLinkInfo->codeOrigin() == CodeOrigin(index))
- return callLinkInfo;
- }
- }
- return nullptr;
-}
-
RareCaseProfile* CodeBlock::addRareCaseProfile(BytecodeIndex bytecodeIndex)
{
ConcurrentJSLocker locker(m_lock);
@@ -1598,13 +1598,22 @@
// We can clear these because no other thread will have references to any stub infos, call
// link infos, or by val infos if we don't have JIT code. Attempts to query these data
// structures using the concurrent API (getICStatusMap and friends) will return nothing if we
- // don't have JIT code.
- jitData->m_stubInfos.clear();
- jitData->m_callLinkInfos.clear();
- jitData->m_byValInfos.clear();
+ // don't have JIT code. So it's safe to call this if we fail a baseline JIT compile.
+ //
+ // We also call this from finalizeUnconditionally when we degrade from baseline JIT to LLInt
+ // code. This is safe to do since all compiler threads are safepointed in finalizeUnconditionally,
+ // which means we've made it past bytecode parsing. Only the bytecode parser will hold onto
+ // references to these various *infos via its use of ICStatusMap.
+
+ for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
+ stubInfo->aboutToDie();
+ stubInfo->deref();
+ }
+
// We can clear this because the DFG's queries to these data structures are guarded by whether
// there is JIT code.
- jitData->m_rareCaseProfiles.clear();
+
+ m_jitData = nullptr;
}
}
#endif
@@ -1736,12 +1745,26 @@
}
#if ENABLE(JIT)
-bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
+CodeBlock* CodeBlock::optimizedReplacement(JITType typeToReplace)
{
CodeBlock* replacement = this->replacement();
- return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
+ if (!replacement)
+ return nullptr;
+ if (JITCode::isHigherTier(replacement->jitType(), typeToReplace))
+ return replacement;
+ return nullptr;
}
+CodeBlock* CodeBlock::optimizedReplacement()
+{
+ return optimizedReplacement(jitType());
+}
+
+bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
+{
+ return !!optimizedReplacement(typeToReplace);
+}
+
bool CodeBlock::hasOptimizedReplacement()
{
return hasOptimizedReplacement(jitType());
@@ -2801,7 +2824,7 @@
void CodeBlock::tallyFrequentExitSites()
{
ASSERT(JITCode::isOptimizingJIT(jitType()));
- ASSERT(alternative()->jitType() == JITType::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(alternative()->jitType()));
CodeBlock* profiledBlock = alternative();
Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.h (254479 => 254480)
--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2020-01-14 01:43:03 UTC (rev 254480)
@@ -258,6 +258,11 @@
Optional<BytecodeIndex> bytecodeIndexFromCallSiteIndex(CallSiteIndex);
+ // Because we might throw out baseline JIT code and all its baseline JIT data (m_jitData),
+ // you need to be careful about the lifetime of this function's return value.
+ // The returned map may hold raw pointers into data structures that get thrown away.
+ // Specifically, you need to ensure that no GC can be finalized (typically that means no
+ // allocations) between calling this and the last use of the result.
void getICStatusMap(const ConcurrentJSLocker&, ICStatusMap& result);
void getICStatusMap(ICStatusMap& result);
@@ -277,6 +282,10 @@
SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
+ // FIXME: Now that we unconditionally OSR exit to the LLInt, we might be able to prune
+ // this map so it only contains entries for the opcodes that still use it.
+ // Today, that's only loop OSR entry.
+ // https://bugs.webkit.org/show_bug.cgi?id=206207
JITCodeMap m_jitCodeMap;
};
@@ -307,18 +316,9 @@
StructureStubInfo* addStubInfo(AccessType);
- // O(n) operation. Use getStubInfoMap() unless you really only intend to get one
- // stub info.
- StructureStubInfo* findStubInfo(CodeOrigin);
-
ByValInfo* addByValInfo();
CallLinkInfo* addCallLinkInfo();
-
- // This is a slow function call used primarily for compiling OSR exits in the case
- // that there had been inlining. Chances are if you want to use this, you're really
- // looking for a CallLinkInfoMap to amortize the cost of calling this.
- CallLinkInfo* getCallLinkInfoForBytecodeIndex(BytecodeIndex);
void setJITCodeMap(JITCodeMap&& jitCodeMap)
{
@@ -412,7 +412,6 @@
void setJITCode(Ref<JITCode>&& code)
{
- ASSERT(heap()->isDeferred());
if (!code->isShared())
heap()->reportExtraMemoryAllocated(code->size());
@@ -444,6 +443,8 @@
DFG::CapabilityLevel capabilityLevel();
DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }
+ CodeBlock* optimizedReplacement(JITType typeToReplace);
+ CodeBlock* optimizedReplacement(); // the typeToReplace is my JITType
bool hasOptimizedReplacement(JITType typeToReplace);
bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif
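
The comment added to getICStatusMap() above warns that the returned map can contain raw pointers into m_jitData, which finalization may now delete. The toy program below (invented types: ToyStubInfo, ToyJITData, ToyStatusMap, ToyCodeBlock) is only meant to illustrate why the rule "no GC finalization, typically no allocation, between the call and the last use" matters.

    #include <cassert>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    struct ToyStubInfo { int accessCount = 0; };

    // Stand-in for CodeBlock::m_jitData: owns the stub infos.
    struct ToyJITData { std::vector<std::unique_ptr<ToyStubInfo>> stubInfos; };

    // Stand-in for ICStatusMap: holds raw, non-owning pointers into ToyJITData.
    using ToyStatusMap = std::unordered_map<int, ToyStubInfo*>;

    struct ToyCodeBlock {
        std::unique_ptr<ToyJITData> jitData;

        ToyStatusMap getICStatusMap() const
        {
            ToyStatusMap map;
            if (jitData) {
                for (size_t i = 0; i < jitData->stubInfos.size(); ++i)
                    map[int(i)] = jitData->stubInfos[i].get();
            }
            return map;
        }

        // Stand-in for the finalization path that now throws the JIT data away.
        void finalize() { jitData = nullptr; }
    };

    int main()
    {
        ToyCodeBlock block;
        block.jitData = std::make_unique<ToyJITData>();
        block.jitData->stubInfos.push_back(std::make_unique<ToyStubInfo>());

        // Safe pattern: take the map and consume it immediately, with nothing in
        // between that could trigger finalization (in JSC terms: no allocation / GC).
        ToyStatusMap map = block.getICStatusMap();
        assert(map.at(0)->accessCount == 0);

        // If finalize() ran before the use above, map.at(0) would be a dangling
        // pointer, which is exactly the hazard the new comment warns about.
        block.finalize();
        return 0;
    }
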
Modified: trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -81,7 +81,7 @@
ASSERT(codeBlock);
ASSERT(codeBlock->alternative());
- ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(codeBlock->alternative()->jitType()));
ASSERT(!profiledDFGCodeBlock || profiledDFGCodeBlock->jitType() == JITType::DFGJIT);
if (logCompilationChanges(mode))
Modified: trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -142,85 +142,53 @@
doneAdjusting.link(&jit);
}
-MacroAssemblerCodePtr<JSEntryPtrTag> callerReturnPC(CodeBlock* baselineCodeBlockForCaller, BytecodeIndex callBytecodeIndex, InlineCallFrame::Kind trueCallerCallKind, bool& callerIsLLInt)
+static MacroAssemblerCodePtr<JSEntryPtrTag> callerReturnPC(CodeBlock* baselineCodeBlockForCaller, BytecodeIndex callBytecodeIndex, InlineCallFrame::Kind trueCallerCallKind)
{
- callerIsLLInt = Options::forceOSRExitToLLInt() || baselineCodeBlockForCaller->jitType() == JITType::InterpreterThunk;
-
if (callBytecodeIndex.checkpoint())
return LLInt::getCodePtr<JSEntryPtrTag>(checkpoint_osr_exit_from_inlined_call_trampoline);
MacroAssemblerCodePtr<JSEntryPtrTag> jumpTarget;
- if (callerIsLLInt) {
- const Instruction& callInstruction = *baselineCodeBlockForCaller->instructions().at(callBytecodeIndex).ptr();
+ const Instruction& callInstruction = *baselineCodeBlockForCaller->instructions().at(callBytecodeIndex).ptr();
#define LLINT_RETURN_LOCATION(name) (callInstruction.isWide16() ? LLInt::getWide16CodePtr<JSEntryPtrTag>(name##_return_location) : (callInstruction.isWide32() ? LLInt::getWide32CodePtr<JSEntryPtrTag>(name##_return_location) : LLInt::getCodePtr<JSEntryPtrTag>(name##_return_location)))
- switch (trueCallerCallKind) {
- case InlineCallFrame::Call:
- jumpTarget = LLINT_RETURN_LOCATION(op_call);
- break;
- case InlineCallFrame::Construct:
- jumpTarget = LLINT_RETURN_LOCATION(op_construct);
- break;
- case InlineCallFrame::CallVarargs:
- jumpTarget = LLINT_RETURN_LOCATION(op_call_varargs_slow);
- break;
- case InlineCallFrame::ConstructVarargs:
- jumpTarget = LLINT_RETURN_LOCATION(op_construct_varargs_slow);
- break;
- case InlineCallFrame::GetterCall: {
- if (callInstruction.opcodeID() == op_get_by_id)
- jumpTarget = LLINT_RETURN_LOCATION(op_get_by_id);
- else if (callInstruction.opcodeID() == op_get_by_val)
- jumpTarget = LLINT_RETURN_LOCATION(op_get_by_val);
- else
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- case InlineCallFrame::SetterCall: {
- if (callInstruction.opcodeID() == op_put_by_id)
- jumpTarget = LLINT_RETURN_LOCATION(op_put_by_id);
- else if (callInstruction.opcodeID() == op_put_by_val)
- jumpTarget = LLINT_RETURN_LOCATION(op_put_by_val);
- else
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- default:
+ switch (trueCallerCallKind) {
+ case InlineCallFrame::Call:
+ jumpTarget = LLINT_RETURN_LOCATION(op_call);
+ break;
+ case InlineCallFrame::Construct:
+ jumpTarget = LLINT_RETURN_LOCATION(op_construct);
+ break;
+ case InlineCallFrame::CallVarargs:
+ jumpTarget = LLINT_RETURN_LOCATION(op_call_varargs_slow);
+ break;
+ case InlineCallFrame::ConstructVarargs:
+ jumpTarget = LLINT_RETURN_LOCATION(op_construct_varargs_slow);
+ break;
+ case InlineCallFrame::GetterCall: {
+ if (callInstruction.opcodeID() == op_get_by_id)
+ jumpTarget = LLINT_RETURN_LOCATION(op_get_by_id);
+ else if (callInstruction.opcodeID() == op_get_by_val)
+ jumpTarget = LLINT_RETURN_LOCATION(op_get_by_val);
+ else
RELEASE_ASSERT_NOT_REACHED();
- }
+ break;
+ }
+ case InlineCallFrame::SetterCall: {
+ if (callInstruction.opcodeID() == op_put_by_id)
+ jumpTarget = LLINT_RETURN_LOCATION(op_put_by_id);
+ else if (callInstruction.opcodeID() == op_put_by_val)
+ jumpTarget = LLINT_RETURN_LOCATION(op_put_by_val);
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
#undef LLINT_RETURN_LOCATION
- } else {
- switch (trueCallerCallKind) {
- case InlineCallFrame::Call:
- case InlineCallFrame::Construct:
- case InlineCallFrame::CallVarargs:
- case InlineCallFrame::ConstructVarargs: {
- CallLinkInfo* callLinkInfo =
- baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
- RELEASE_ASSERT(callLinkInfo);
-
- jumpTarget = callLinkInfo->callReturnLocation().retagged<JSEntryPtrTag>();
- break;
- }
-
- case InlineCallFrame::GetterCall:
- case InlineCallFrame::SetterCall: {
- StructureStubInfo* stubInfo =
- baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
- RELEASE_ASSERT(stubInfo);
-
- jumpTarget = stubInfo->doneLocation.retagged<JSEntryPtrTag>();
- break;
- }
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
-
return jumpTarget;
}
@@ -254,8 +222,6 @@
CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
GPRReg callerFrameGPR = GPRInfo::callFrameRegister;
- bool callerIsLLInt = false;
-
if (!trueCaller) {
ASSERT(inlineCallFrame->isTail());
jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
@@ -271,7 +237,7 @@
} else {
CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
auto callBytecodeIndex = trueCaller->bytecodeIndex();
- MacroAssemblerCodePtr<JSEntryPtrTag> jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind, callerIsLLInt);
+ MacroAssemblerCodePtr<JSEntryPtrTag> jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind);
if (trueCaller->inlineCallFrame()) {
jit.addPtr(
@@ -302,7 +268,8 @@
trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame,
GPRInfo::regT2);
- if (callerIsLLInt) {
+ if (trueCaller) {
+ // Set up LLInt registers for our caller in our callee saves.
CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->metadataTable()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::metadataTableGPR));
#if USE(JSVALUE64)
@@ -388,45 +355,27 @@
ASSERT(codeBlockForExit == codeBlockForExit->baselineVersion());
ASSERT(JITCode::isBaselineCode(codeBlockForExit->jitType()));
- void* jumpTarget;
- bool exitToLLInt = Options::forceOSRExitToLLInt() || codeBlockForExit->jitType() == JITType::InterpreterThunk;
- if (exitToLLInt) {
- auto bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
- const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeIndex).ptr();
- MacroAssemblerCodePtr<JSEntryPtrTag> destination;
- if (bytecodeIndex.checkpoint())
- destination = LLInt::getCodePtr<JSEntryPtrTag>(checkpoint_osr_exit_trampoline);
- else
- destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);
+ auto bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
+ const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeIndex).ptr();
+ MacroAssemblerCodePtr<JSEntryPtrTag> destination;
+ if (bytecodeIndex.checkpoint())
+ destination = LLInt::getCodePtr<JSEntryPtrTag>(checkpoint_osr_exit_trampoline);
+ else
+ destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);
- if (exit.isExceptionHandler()) {
- jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), GPRInfo::regT2);
- jit.storePtr(GPRInfo::regT2, &vm.targetInterpreterPCForThrow);
- }
+ if (exit.isExceptionHandler()) {
+ jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), GPRInfo::regT2);
+ jit.storePtr(GPRInfo::regT2, &vm.targetInterpreterPCForThrow);
+ }
- jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->metadataTable()), LLInt::Registers::metadataTableGPR);
+ jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->metadataTable()), LLInt::Registers::metadataTableGPR);
#if USE(JSVALUE64)
- jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->instructionsRawPointer()), LLInt::Registers::pbGPR);
- jit.move(CCallHelpers::TrustedImm32(bytecodeIndex.offset()), LLInt::Registers::pcGPR);
+ jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->instructionsRawPointer()), LLInt::Registers::pbGPR);
+ jit.move(CCallHelpers::TrustedImm32(bytecodeIndex.offset()), LLInt::Registers::pcGPR);
#else
- jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), LLInt::Registers::pcGPR);
+ jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), LLInt::Registers::pcGPR);
#endif
- jumpTarget = destination.retagged<OSRExitPtrTag>().executableAddress();
- } else {
- BytecodeIndex exitIndex = exit.m_codeOrigin.bytecodeIndex();
- MacroAssemblerCodePtr<JSEntryPtrTag> destination;
- if (exitIndex.checkpoint())
- destination = LLInt::getCodePtr<JSEntryPtrTag>(checkpoint_osr_exit_trampoline);
- else {
- ASSERT(codeBlockForExit->bytecodeIndexForExit(exitIndex) == exitIndex);
- destination = codeBlockForExit->jitCodeMap().find(exitIndex);
- }
- ASSERT(destination);
-
- jumpTarget = destination.retagged<OSRExitPtrTag>().executableAddress();
- }
-
jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
if (exit.isExceptionHandler()) {
// Since we're jumping to op_catch, we need to set callFrameForCatch.
@@ -433,7 +382,7 @@
jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
}
- jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
+ jit.move(AssemblyHelpers::TrustedImmPtr(destination.retagged<OSRExitPtrTag>().executableAddress()), GPRInfo::regT2);
jit.farJump(GPRInfo::regT2, OSRExitPtrTag);
}
Modified: trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h (254479 => 254480)
--- trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h 2020-01-14 01:43:03 UTC (rev 254480)
@@ -39,7 +39,6 @@
void handleExitCounts(VM&, CCallHelpers&, const OSRExitBase&);
void reifyInlinedCallFrames(CCallHelpers&, const OSRExitBase&);
void adjustAndJumpToTarget(VM&, CCallHelpers&, const OSRExitBase&);
-MacroAssemblerCodePtr<JSEntryPtrTag> callerReturnPC(CodeBlock* baselineCodeBlockForCaller, BytecodeIndex callBytecodeIndex, InlineCallFrame::Kind callerKind, bool& callerIsLLInt);
CCallHelpers::Address calleeSaveSlot(InlineCallFrame*, CodeBlock* baselineCodeBlock, GPRReg calleeSave);
template <typename JITCodeType>
Modified: trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -55,6 +55,11 @@
m_currentlyExecuting.clear();
}
+bool CodeBlockSet::isCurrentlyExecuting(CodeBlock* codeBlock)
+{
+ return m_currentlyExecuting.contains(codeBlock);
+}
+
void CodeBlockSet::dump(PrintStream& out) const
{
CommaPrinter comma;
Modified: trunk/Source/JavaScriptCore/heap/CodeBlockSet.h (254479 => 254480)
--- trunk/Source/JavaScriptCore/heap/CodeBlockSet.h 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/heap/CodeBlockSet.h 2020-01-14 01:43:03 UTC (rev 254480)
@@ -56,6 +56,10 @@
bool contains(const AbstractLocker&, void* candidateCodeBlock);
Lock& getLock() { return m_lock; }
+ // For now, this is expected to run only when we're not adding to the set. If
+ // this ever needs to run concurrently, we'll need to lock around it.
+ bool isCurrentlyExecuting(CodeBlock*);
+
// Visits each CodeBlock in the heap until the visitor function returns true
// to indicate that it is done iterating, or until every CodeBlock has been
// visited.
Modified: trunk/Source/JavaScriptCore/heap/Heap.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/heap/Heap.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/heap/Heap.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -613,11 +613,11 @@
vm().builtinExecutables()->finalizeUnconditionally();
finalizeMarkedUnconditionalFinalizers<FunctionExecutable>(vm().functionExecutableSpace.space);
finalizeMarkedUnconditionalFinalizers<SymbolTable>(vm().symbolTableSpace);
+ finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm().executableToCodeBlockEdgesWithFinalizers); // We run this before CodeBlock's unconditional finalizer since CodeBlock looks at the owner executable's installed CodeBlock in its finalizeUnconditionally.
vm().forEachCodeBlockSpace(
[&] (auto& space) {
this->finalizeMarkedUnconditionalFinalizers<CodeBlock>(space.set);
});
- finalizeMarkedUnconditionalFinalizers<ExecutableToCodeBlockEdge>(vm().executableToCodeBlockEdgesWithFinalizers);
finalizeMarkedUnconditionalFinalizers<StructureRareData>(vm().structureRareDataSpace);
finalizeMarkedUnconditionalFinalizers<UnlinkedFunctionExecutable>(vm().unlinkedFunctionExecutableSpace.set);
if (vm().m_weakSetSpace)
@@ -1522,7 +1522,7 @@
pruneStaleEntriesFromWeakGCMaps();
sweepArrayBuffers();
snapshotUnswept();
- finalizeUnconditionalFinalizers();
+ finalizeUnconditionalFinalizers(); // We rely on these unconditional finalizers running before clearCurrentlyExecuting since CodeBlock's finalizer queries the currently-executing set.
removeDeadCompilerWorklistEntries();
notifyIncrementalSweeper();
Modified: trunk/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1993,19 +1993,9 @@
inline SlowPathReturnType dispatchToNextInstruction(CodeBlock* codeBlock, InstructionStream::Ref pc)
{
RELEASE_ASSERT(!codeBlock->vm().exceptionForInspection());
- if (Options::forceOSRExitToLLInt() || codeBlock->jitType() == JITType::InterpreterThunk) {
- const Instruction* nextPC = pc.next().ptr();
- auto nextBytecode = LLInt::getCodePtr<JSEntryPtrTag>(*pc.next().ptr());
- return encodeResult(nextPC, nextBytecode.executableAddress());
- }
-
-#if ENABLE(JIT)
- ASSERT(codeBlock->jitType() == JITType::BaselineJIT);
- BytecodeIndex nextBytecodeIndex = pc.next().index();
- auto nextBytecode = codeBlock->jitCodeMap().find(nextBytecodeIndex);
- return encodeResult(nullptr, nextBytecode.executableAddress());
-#endif
- RELEASE_ASSERT_NOT_REACHED();
+ const Instruction* nextPC = pc.next().ptr();
+ auto nextBytecode = LLInt::getCodePtr<JSEntryPtrTag>(*pc.next().ptr());
+ return encodeResult(nextPC, nextBytecode.executableAddress());
}
extern "C" SlowPathReturnType slow_path_checkpoint_osr_exit_from_inlined_call(CallFrame* callFrame, EncodedJSValue result)
Modified: trunk/Source/JavaScriptCore/runtime/Options.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/runtime/Options.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/runtime/Options.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -387,7 +387,7 @@
Options::validateDFGExceptionHandling() = true;
#endif
#if !ENABLE(JIT)
- Options::useLLInt() = true;
+ Options::forceBaseline() = false;
Options::useJIT() = false;
Options::useBaselineJIT() = false;
Options::useDFGJIT() = false;
@@ -419,7 +419,7 @@
}
if (!jitEnabledByDefault() && !Options::useJIT())
- Options::useLLInt() = true;
+ Options::forceBaseline() = false;
if (!Options::useWebAssembly())
Options::useFastTLSForWasmContext() = false;
@@ -546,8 +546,6 @@
RELEASE_ASSERT(Options::addressOfOptionDefault(useKernTCSMID) == &Options::useKernTCSMDefault());
RELEASE_ASSERT(Options::addressOfOption(gcMaxHeapSizeID) == &Options::gcMaxHeapSize());
RELEASE_ASSERT(Options::addressOfOptionDefault(gcMaxHeapSizeID) == &Options::gcMaxHeapSizeDefault());
- RELEASE_ASSERT(Options::addressOfOption(forceOSRExitToLLIntID) == &Options::forceOSRExitToLLInt());
- RELEASE_ASSERT(Options::addressOfOptionDefault(forceOSRExitToLLIntID) == &Options::forceOSRExitToLLIntDefault());
#ifndef NDEBUG
Config::enableRestrictedOptions();
@@ -945,9 +943,9 @@
void Options::ensureOptionsAreCoherent()
{
bool coherent = true;
- if (!(useLLInt() || useJIT())) {
+ if (forceBaseline() && !useJIT()) {
coherent = false;
- dataLog("INCOHERENT OPTIONS: at least one of useLLInt or useJIT must be true\n");
+ dataLog("INCOHERENT OPTIONS: forceBaseline can't be true if useJIT is false\n");
}
if (!coherent)
CRASH();
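
For quick reference, the coherence rule enforced above reduces to a single implication: forceBaseline=true requires useJIT=true. A tiny sketch of that rule, with an invented helper name (optionsAreCoherent) that is not part of the patch:

    #include <cassert>

    // forceBaseline asks execution to start in the baseline JIT, which only makes
    // sense when JIT code generation is available at all.
    static bool optionsAreCoherent(bool forceBaseline, bool useJIT)
    {
        return !forceBaseline || useJIT;
    }

    int main()
    {
        assert(optionsAreCoherent(false, false)); // pure LLInt configuration
        assert(optionsAreCoherent(false, true));  // normal tiering
        assert(optionsAreCoherent(true, true));   // skip the LLInt, start in baseline
        assert(!optionsAreCoherent(true, false)); // incoherent: the patch CRASH()es here
        return 0;
    }
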
Modified: trunk/Source/JavaScriptCore/runtime/OptionsList.h (254479 => 254480)
--- trunk/Source/JavaScriptCore/runtime/OptionsList.h 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/runtime/OptionsList.h 2020-01-14 01:43:03 UTC (rev 254480)
@@ -81,7 +81,7 @@
v(Unsigned, dumpOptions, 0, Normal, "dumps JSC options (0 = None, 1 = Overridden only, 2 = All, 3 = Verbose)") \
v(OptionString, configFile, nullptr, Normal, "file to configure JSC options and logging location") \
\
- v(Bool, useLLInt, true, Normal, "allows the LLINT to be used if true") \
+ v(Bool, forceBaseline, false, Normal, "If true, we'll start running code in the baseline JIT and skip starting in the LLInt") \
v(Bool, useJIT, jitEnabledByDefault(), Normal, "allows the executable pages to be allocated for JIT and thunks if true") \
v(Bool, useBaselineJIT, true, Normal, "allows the baseline JIT to be used if true") \
v(Bool, useDFGJIT, true, Normal, "allows the DFG JIT to be used if true") \
@@ -494,7 +494,6 @@
v(OptionString, dumpJITMemoryPath, nullptr, Restricted, nullptr) \
v(Double, dumpJITMemoryFlushInterval, 10, Restricted, "Maximum time in between flushes of the JIT memory dump in seconds.") \
v(Bool, useUnlinkedCodeBlockJettisoning, false, Normal, "If true, UnlinkedCodeBlock can be jettisoned.") \
- v(Bool, forceOSRExitToLLInt, false, Normal, "If true, we always exit to the LLInt. If false, we exit to whatever is most convenient.") \
v(Unsigned, getByValICMaxNumberOfIdentifiers, 4, Normal, "Number of identifiers we see in the LLInt that could cause us to bail on generating an IC for get_by_val.") \
enum OptionEquivalence {
Modified: trunk/Source/JavaScriptCore/runtime/ScriptExecutable.cpp (254479 => 254480)
--- trunk/Source/JavaScriptCore/runtime/ScriptExecutable.cpp 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Source/JavaScriptCore/runtime/ScriptExecutable.cpp 2020-01-14 01:43:03 UTC (rev 254480)
@@ -426,10 +426,10 @@
if (Options::validateBytecode())
codeBlock->validate();
- if (Options::useLLInt())
+ if (Options::forceBaseline())
+ setupJIT(vm, codeBlock);
+ else
setupLLInt(codeBlock);
- else
- setupJIT(vm, codeBlock);
installCode(vm, codeBlock, codeBlock->codeType(), codeBlock->specializationKind());
return nullptr;
Modified: trunk/Tools/ChangeLog (254479 => 254480)
--- trunk/Tools/ChangeLog 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Tools/ChangeLog 2020-01-14 01:43:03 UTC (rev 254480)
@@ -1,3 +1,12 @@
+2020-01-13 Saam Barati <[email protected]>
+
+ Throw away baseline code if there is an optimized replacement
+ https://bugs.webkit.org/show_bug.cgi?id=202503
+
+ Reviewed by Yusuke Suzuki.
+
+ * Scripts/run-jsc-stress-tests:
+
2020-01-13 Yoshiaki Jitsukawa <[email protected]>
Fix path-specific filters on Windows
Modified: trunk/Tools/Scripts/run-jsc-stress-tests (254479 => 254480)
--- trunk/Tools/Scripts/run-jsc-stress-tests 2020-01-14 01:37:33 UTC (rev 254479)
+++ trunk/Tools/Scripts/run-jsc-stress-tests 2020-01-14 01:43:03 UTC (rev 254480)
@@ -498,7 +498,6 @@
B3O1_OPTIONS = ["--defaultB3OptLevel=1"]
B3O0_OPTIONS = ["--maxDFGNodesInBasicBlockForPreciseAnalysis=100", "--defaultB3OptLevel=0"]
FTL_OPTIONS = ["--useFTLJIT=true"]
-FORCE_LLINT_EXIT_OPTIONS = ["--forceOSRExitToLLInt=true"]
require_relative "webkitruby/jsc-stress-test-writer-#{$testWriter}"
@@ -655,7 +654,7 @@
def runNoLLInt(*optionalTestSpecificOptions)
if $jitTests
- run("no-llint", "--useLLInt=false", *optionalTestSpecificOptions)
+ run("no-llint", "--forceBaseline=true", *optionalTestSpecificOptions)
end
end
@@ -708,7 +707,7 @@
end
def runFTLNoCJITB3O0(*optionalTestSpecificOptions)
- run("ftl-no-cjit-b3o0", "--useArrayAllocationProfiling=false", "--forcePolyProto=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + B3O0_OPTIONS + FORCE_LLINT_EXIT_OPTIONS + optionalTestSpecificOptions))
+ run("ftl-no-cjit-b3o0", "--useArrayAllocationProfiling=false", "--forcePolyProto=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + B3O0_OPTIONS + optionalTestSpecificOptions))
end
def runFTLNoCJITValidate(*optionalTestSpecificOptions)
@@ -728,7 +727,7 @@
end
def runDFGEager(*optionalTestSpecificOptions)
- run("dfg-eager", *(EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + FORCE_LLINT_EXIT_OPTIONS + optionalTestSpecificOptions))
+ run("dfg-eager", *(EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
end
def runDFGEagerNoCJITValidate(*optionalTestSpecificOptions)
@@ -745,7 +744,7 @@
end
def runFTLEagerNoCJITValidate(*optionalTestSpecificOptions)
- run("ftl-eager-no-cjit", "--validateGraph=true", "--airForceIRCAllocator=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + FORCE_LLINT_EXIT_OPTIONS + optionalTestSpecificOptions))
+ run("ftl-eager-no-cjit", "--validateGraph=true", "--airForceIRCAllocator=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
end
def runFTLEagerNoCJITB3O1(*optionalTestSpecificOptions)
@@ -1060,7 +1059,7 @@
return
end
- run("no-llint-modules", "-m", "--useLLInt=false") if noLLInt
+ run("no-llint-modules", "-m", "--forceBaseline=true") if noLLInt
run("no-cjit-validate-phases-modules", "-m", "--validateBytecode=true", "--validateGraphAtEachPhase=true", *NO_CJIT_OPTIONS)
run("dfg-eager-modules", "-m", *EAGER_OPTIONS)
run("dfg-eager-no-cjit-validate-modules", "-m", "--validateGraph=true", *(NO_CJIT_OPTIONS + EAGER_OPTIONS))
@@ -1261,7 +1260,7 @@
end
def runLayoutTestNoLLInt
- runLayoutTest("no-llint", "--useLLInt=false")
+ runLayoutTest("no-llint", "--forceBaseline=true")
end
def runLayoutTestNoCJIT
@@ -1427,7 +1426,7 @@
end
def runMozillaTestBaselineJIT(mode, *extraFiles)
- runMozillaTest("baseline", mode, extraFiles, "--useLLInt=false", "--useDFGJIT=false")
+ runMozillaTest("baseline", mode, extraFiles, "--forceBaseline=true", "--useDFGJIT=false")
end
def runMozillaTestDFGEagerNoCJITValidatePhases(mode, *extraFiles)