Diff
Modified: trunk/Source/JavaScriptCore/ChangeLog (222870 => 222871)
--- trunk/Source/JavaScriptCore/ChangeLog 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/ChangeLog 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,3 +1,95 @@
+2017-10-04 Mark Lam <[email protected]>
+
+ Add support for using Probe DFG OSR Exit behind a runtime flag.
+ https://bugs.webkit.org/show_bug.cgi?id=177844
+ <rdar://problem/34801425>
+
+ Reviewed by Saam Barati.
+
+ This is based on the code originally posted in https://bugs.webkit.org/show_bug.cgi?id=175144
+ (in r221774 and r221832), with some optimizations and bug fixes added. The
+ probe-based DFG OSR exit is only enabled if Options::useProbeOSRExit() is true.
+ We're landing it behind a runtime option to make it easier to tune the
+ performance of the probe-based OSR exit.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * assembler/MacroAssembler.cpp:
+ (JSC::stdFunctionCallback):
+ * assembler/MacroAssemblerPrinter.cpp:
+ (JSC::Printer::printCallback):
+ * assembler/ProbeContext.cpp:
+ (JSC::Probe::executeProbe):
+ (JSC::Probe::flushDirtyStackPages):
+ * assembler/ProbeContext.h:
+ (JSC::Probe::Context::Context):
+ (JSC::Probe::Context::arg):
+ * assembler/ProbeFrame.h: Added.
+ (JSC::Probe::Frame::Frame):
+ (JSC::Probe::Frame::argument):
+ (JSC::Probe::Frame::operand):
+ (JSC::Probe::Frame::setArgument):
+ (JSC::Probe::Frame::setOperand):
+ (JSC::Probe::Frame::get):
+ (JSC::Probe::Frame::set):
+ * assembler/ProbeStack.cpp:
+ (JSC::Probe::Page::lowWatermarkFromVisitingDirtyChunks):
+ (JSC::Probe::Stack::Stack):
+ (JSC::Probe::Stack::lowWatermarkFromVisitingDirtyPages):
+ * assembler/ProbeStack.h:
+ (JSC::Probe::Stack::Stack):
+ (JSC::Probe::Stack::lowWatermark):
+ (JSC::Probe::Stack::set):
+ (JSC::Probe::Stack::savedStackPointer const):
+ (JSC::Probe::Stack::setSavedStackPointer):
+ (JSC::Probe::Stack::newStackPointer const): Deleted.
+ (JSC::Probe::Stack::setNewStackPointer): Deleted.
+ * bytecode/ArrayProfile.h:
+ (JSC::ArrayProfile::observeArrayMode):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize):
+ * bytecode/CodeBlock.h:
+ (JSC::CodeBlock::addressOfOSRExitCounter): Deleted.
+ * bytecode/ExecutionCounter.h:
+ (JSC::ExecutionCounter::hasCrossedThreshold const):
+ (JSC::ExecutionCounter::setNewThresholdForOSRExit):
+ * bytecode/MethodOfGettingAValueProfile.cpp:
+ (JSC::MethodOfGettingAValueProfile::reportValue):
+ * bytecode/MethodOfGettingAValueProfile.h:
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compileImpl):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::linkOSRExits):
+ (JSC::DFG::JITCompiler::link):
+ * dfg/DFGOSRExit.cpp:
+ (JSC::DFG::jsValueFor):
+ (JSC::DFG::restoreCalleeSavesFor):
+ (JSC::DFG::saveCalleeSavesFor):
+ (JSC::DFG::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer):
+ (JSC::DFG::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer):
+ (JSC::DFG::saveOrCopyCalleeSavesFor):
+ (JSC::DFG::createDirectArgumentsDuringExit):
+ (JSC::DFG::createClonedArgumentsDuringExit):
+ (JSC::DFG::emitRestoreArguments):
+ (JSC::DFG::OSRExit::executeOSRExit):
+ (JSC::DFG::reifyInlinedCallFrames):
+ (JSC::DFG::adjustAndJumpToTarget):
+ (JSC::DFG::printOSRExit):
+ * dfg/DFGOSRExit.h:
+ (JSC::DFG::OSRExitState::OSRExitState):
+ * dfg/DFGThunks.cpp:
+ (JSC::DFG::osrExitThunkGenerator):
+ * dfg/DFGThunks.h:
+ * dfg/DFGVariableEventStream.cpp:
+ (JSC::DFG::tryToSetConstantRecovery):
+ (JSC::DFG::VariableEventStream::reconstruct const):
+ (JSC::DFG::VariableEventStream::tryToSetConstantRecovery const): Deleted.
+ * dfg/DFGVariableEventStream.h:
+ * profiler/ProfilerOSRExit.h:
+ (JSC::Profiler::OSRExit::incCount):
+ * runtime/JSCJSValue.h:
+ * runtime/JSCJSValueInlines.h:
+ * runtime/Options.h:
+
2017-10-04 Ryan Haddad <[email protected]>
Unreviewed, rolling out r222840.
Modified: trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj (222870 => 222871)
--- trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj 2017-10-04 20:00:01 UTC (rev 222871)
@@ -4596,6 +4596,7 @@
FEA0C4001CDD7D0E00481991 /* FunctionWhitelist.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = FunctionWhitelist.cpp; sourceTree = "<group>"; };
FEA0C4011CDD7D0E00481991 /* FunctionWhitelist.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FunctionWhitelist.h; sourceTree = "<group>"; };
FEB137561BB11EEE00CD5100 /* MacroAssemblerARM64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MacroAssemblerARM64.cpp; sourceTree = "<group>"; };
+ FEB41CCB1F73284200C5481E /* ProbeFrame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ProbeFrame.h; sourceTree = "<group>"; };
FEB51F6A1A97B688001F921C /* Regress141809.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Regress141809.h; path = API/tests/Regress141809.h; sourceTree = "<group>"; };
FEB51F6B1A97B688001F921C /* Regress141809.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = Regress141809.mm; path = API/tests/Regress141809.mm; sourceTree = "<group>"; };
FEB58C12187B8B160098EF0B /* ErrorHandlingScope.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorHandlingScope.cpp; sourceTree = "<group>"; };
@@ -7295,6 +7296,7 @@
FE63DD531EA9B60E00103A69 /* Printer.h */,
FE10AAF31F46826D009DEDC5 /* ProbeContext.cpp */,
FE10AAED1F44D946009DEDC5 /* ProbeContext.h */,
+ FEB41CCB1F73284200C5481E /* ProbeFrame.h */,
FE10AAE91F44D510009DEDC5 /* ProbeStack.cpp */,
FE10AAEA1F44D512009DEDC5 /* ProbeStack.h */,
FE533CA01F217C310016A1FE /* testmasm.cpp */,
Modified: trunk/Source/JavaScriptCore/assembler/MacroAssembler.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/assembler/MacroAssembler.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/assembler/MacroAssembler.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -38,7 +38,7 @@
#if ENABLE(MASM_PROBE)
static void stdFunctionCallback(Probe::Context& context)
{
- auto func = static_cast<const std::function<void(Probe::Context&)>*>(context.arg);
+ auto func = context.arg<const std::function<void(Probe::Context&)>*>();
(*func)(context);
}
Modified: trunk/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -175,7 +175,7 @@
void printCallback(Probe::Context& probeContext)
{
auto& out = WTF::dataFile();
- PrintRecordList& list = *reinterpret_cast<PrintRecordList*>(probeContext.arg);
+ PrintRecordList& list = *probeContext.arg<PrintRecordList*>();
for (size_t i = 0; i < list.size(); i++) {
auto& record = list[i];
Context context(probeContext, record.data);
Modified: trunk/Source/JavaScriptCore/assembler/ProbeContext.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/assembler/ProbeContext.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/assembler/ProbeContext.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -52,8 +52,9 @@
#endif
if (context.hasWritesToFlush()) {
- context.stack().setNewStackPointer(state->cpu.sp());
- state->cpu.sp() = std::min(context.stack().lowWatermark(), state->cpu.sp());
+ context.stack().setSavedStackPointer(state->cpu.sp());
+ void* lowWatermark = context.stack().lowWatermark(state->cpu.sp());
+ state->cpu.sp() = std::min(lowWatermark, state->cpu.sp());
state->initializeStackFunction = flushDirtyStackPages;
state->initializeStackArg = context.releaseStack();
@@ -64,7 +65,7 @@
{
std::unique_ptr<Stack> stack(reinterpret_cast<Probe::Stack*>(state->initializeStackArg));
stack->flushWrites();
- state->cpu.sp() = stack->newStackPointer();
+ state->cpu.sp() = stack->savedStackPointer();
}
// Not for general use. This should only be for writing tests.
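
For reference, the shadow-stack flow implemented by executeProbe() and flushDirtyStackPages() can be sketched as follows (a hypothetical usage via MacroAssembler::probe()'s std::function overload; "jit" is an assumed MacroAssembler):

    jit.probe([] (Probe::Context& context) {
        uint8_t* fp = context.fp<uint8_t*>();
        // This write is buffered in a shadow Probe::Stack page; the real
        // stack is not touched yet.
        context.stack().set<uintptr_t>(fp - 8, 0x1234);
    });
    // On return, executeProbe() records sp via setSavedStackPointer(), lowers
    // sp below the lowest dirty chunk, and schedules flushDirtyStackPages(),
    // which copies the dirty chunks back to the real stack and then restores
    // sp from savedStackPointer().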
Modified: trunk/Source/JavaScriptCore/assembler/ProbeContext.h (222870 => 222871)
--- trunk/Source/JavaScriptCore/assembler/ProbeContext.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/assembler/ProbeContext.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -191,11 +191,13 @@
using FPRegisterID = MacroAssembler::FPRegisterID;
Context(State* state)
- : m_state(state)
- , arg(state->arg)
- , cpu(state->cpu)
+ : cpu(state->cpu)
+ , m_state(state)
{ }
+ template<typename T>
+ T arg() { return reinterpret_cast<T>(m_state->arg); }
+
uintptr_t& gpr(RegisterID id) { return cpu.gpr(id); }
uintptr_t& spr(SPRegisterID id) { return cpu.spr(id); }
double& fpr(FPRegisterID id) { return cpu.fpr(id); }
@@ -224,13 +226,10 @@
bool hasWritesToFlush() { return m_stack.hasWritesToFlush(); }
Stack* releaseStack() { return new Stack(WTFMove(m_stack)); }
-private:
- State* m_state;
-public:
- void* arg;
CPUState& cpu;
private:
+ State* m_state;
Stack m_stack;
friend JS_EXPORT_PRIVATE void* probeStateForContext(Context&); // Not for general use. This should only be for writing tests.
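
With the public "void* arg" member removed, probe callbacks now cast the opaque probe argument at the use site through the new templated accessor, as stdFunctionCallback() and printCallback() above do. A minimal sketch (callback name hypothetical):

    static void myProbeCallback(Probe::Context& context)
    {
        // arg<T>() reinterpret_casts the void* stored in Probe::State.
        VM* vm = context.arg<VM*>();
        dataLogLn("probe hit: sp = ", RawPointer(context.sp()), ", vm = ", RawPointer(vm));
    }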
Added: trunk/Source/JavaScriptCore/assembler/ProbeFrame.h (0 => 222871)
--- trunk/Source/JavaScriptCore/assembler/ProbeFrame.h (rev 0)
+++ trunk/Source/JavaScriptCore/assembler/ProbeFrame.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(MASM_PROBE)
+
+#include "CallFrame.h"
+#include "ProbeStack.h"
+
+namespace JSC {
+namespace Probe {
+
+class Frame {
+public:
+ Frame(void* frameBase, Stack& stack)
+ : m_frameBase { reinterpret_cast<uint8_t*>(frameBase) }
+ , m_stack { stack }
+ { }
+
+ template<typename T = JSValue>
+ T argument(int argument)
+ {
+ return get<T>(CallFrame::argumentOffset(argument) * sizeof(Register));
+ }
+ template<typename T = JSValue>
+ T operand(int operand)
+ {
+ return get<T>(static_cast<VirtualRegister>(operand).offset() * sizeof(Register));
+ }
+ template<typename T = JSValue>
+ T operand(int operand, ptrdiff_t offset)
+ {
+ return get<T>(static_cast<VirtualRegister>(operand).offset() * sizeof(Register) + offset);
+ }
+
+ template<typename T>
+ void setArgument(int argument, T value)
+ {
+ return set<T>(CallFrame::argumentOffset(argument) * sizeof(Register), value);
+ }
+ template<typename T>
+ void setOperand(int operand, T value)
+ {
+ set<T>(static_cast<VirtualRegister>(operand).offset() * sizeof(Register), value);
+ }
+ template<typename T>
+ void setOperand(int operand, ptrdiff_t offset, T value)
+ {
+ set<T>(static_cast<VirtualRegister>(operand).offset() * sizeof(Register) + offset, value);
+ }
+
+ template<typename T = JSValue>
+ T get(ptrdiff_t offset)
+ {
+ return m_stack.get<T>(m_frameBase + offset);
+ }
+ template<typename T>
+ void set(ptrdiff_t offset, T value)
+ {
+ m_stack.set<T>(m_frameBase + offset, value);
+ }
+
+private:
+ uint8_t* m_frameBase;
+ Stack& m_stack;
+};
+
+} // namespace Probe
+} // namespace JSC
+
+#endif // ENABLE(MASM_PROBE)
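
Probe::Frame is a thin wrapper that converts frame-relative offsets (arguments and virtual-register operands) into Probe::Stack accesses, so the C++ exit ramp can read and write the frame it is reconstructing. A usage sketch (someOperand is a hypothetical virtual-register offset):

    Frame frame(context.fp(), context.stack());
    JSValue arg0 = frame.argument(0); // T defaults to JSValue
    int32_t argumentCount = frame.operand<int32_t>(CallFrameSlot::argumentCount, PayloadOffset);
    frame.setOperand(someOperand, jsUndefined()); // write buffered via Probe::Stack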
Modified: trunk/Source/JavaScriptCore/assembler/ProbeStack.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/assembler/ProbeStack.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/assembler/ProbeStack.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -34,6 +34,8 @@
namespace JSC {
namespace Probe {
+static void* const maxLowWatermark = reinterpret_cast<void*>(std::numeric_limits<uintptr_t>::max());
+
#if ASAN_ENABLED
// FIXME: we should consider using the copy function for both ASan and non-ASan builds.
// https://bugs.webkit.org/show_bug.cgi?id=176961
@@ -49,7 +51,7 @@
*dstPointer++ = *srcPointer++;
}
#else
-#define copyStackPage(dst, src, size) std::memcpy(dst, src, size);
+#define copyStackPage(dst, src, size) std::memcpy(dst, src, size)
#endif
Page::Page(void* baseAddress)
@@ -84,12 +86,24 @@
m_dirtyBits = 0;
}
+void* Page::lowWatermarkFromVisitingDirtyChunks()
+{
+ uint64_t dirtyBits = m_dirtyBits;
+ size_t offset = 0;
+ while (dirtyBits) {
+ if (dirtyBits & 1)
+ return reinterpret_cast<uint8_t*>(m_baseLogicalAddress) + offset;
+ dirtyBits = dirtyBits >> 1;
+ offset += s_chunkSize;
+ }
+ return maxLowWatermark;
+}
+
Stack::Stack(Stack&& other)
- : m_newStackPointer(other.m_newStackPointer)
- , m_lowWatermark(other.m_lowWatermark)
- , m_stackBounds(WTFMove(other.m_stackBounds))
+ : m_stackBounds(WTFMove(other.m_stackBounds))
, m_pages(WTFMove(other.m_pages))
{
+ m_savedStackPointer = other.m_savedStackPointer;
#if !ASSERT_DISABLED
other.m_isValid = false;
#endif
@@ -128,6 +142,18 @@
return m_lastAccessedPage;
}
+void* Stack::lowWatermarkFromVisitingDirtyPages()
+{
+ void* low = maxLowWatermark;
+ for (auto it = m_pages.begin(); it != m_pages.end(); ++it) {
+ Page& page = *it->value;
+ if (!page.hasWritesToFlush() || low < page.baseAddress())
+ continue;
+ low = std::min(low, page.lowWatermarkFromVisitingDirtyChunks());
+ }
+ return low;
+}
+
} // namespace Probe
} // namespace JSC
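
A worked example of the dirty-bit scan in lowWatermarkFromVisitingDirtyChunks(), with illustrative bits:

    // Suppose m_dirtyBits == 0b1100, i.e. chunks 2 and 3 were written to.
    //   bit 0 clear -> shift, offset becomes s_chunkSize
    //   bit 1 clear -> shift, offset becomes 2 * s_chunkSize
    //   bit 2 set   -> return m_baseLogicalAddress + 2 * s_chunkSize
    // A page with no dirty bits returns maxLowWatermark, so the std::min() in
    // lowWatermarkFromVisitingDirtyPages() effectively ignores it.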
Modified: trunk/Source/JavaScriptCore/assembler/ProbeStack.h (222870 => 222871)
--- trunk/Source/JavaScriptCore/assembler/ProbeStack.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/assembler/ProbeStack.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -95,6 +95,8 @@
flushWrites();
}
+ void* lowWatermarkFromVisitingDirtyChunks();
+
private:
uint64_t dirtyBitFor(void* logicalAddress)
{
@@ -146,17 +148,15 @@
WTF_MAKE_FAST_ALLOCATED;
public:
Stack()
- : m_lowWatermark(reinterpret_cast<void*>(-1))
- , m_stackBounds(Thread::current().stack())
+ : m_stackBounds(Thread::current().stack())
{ }
Stack(Stack&& other);
- void* lowWatermark()
+ void* lowWatermarkFromVisitingDirtyPages();
+ void* lowWatermark(void* stackPointer)
{
- // We use the chunkAddress for the low watermark because we'll be doing write backs
- // to the stack in increments of chunks. Hence, we'll treat the lowest address of
- // the chunk as the low watermark of any given set address.
- return Page::chunkAddressFor(m_lowWatermark);
+ ASSERT(Page::chunkAddressFor(stackPointer) == lowWatermarkFromVisitingDirtyPages());
+ return Page::chunkAddressFor(stackPointer);
}
template<typename T>
@@ -176,9 +176,6 @@
{
Page* page = pageFor(address);
page->set<T>(address, value);
-
- if (address < m_lowWatermark)
- m_lowWatermark = address;
}
template<typename T>
@@ -189,8 +186,8 @@
JS_EXPORT_PRIVATE Page* ensurePageFor(void* address);
- void* newStackPointer() const { return m_newStackPointer; };
- void setNewStackPointer(void* sp) { m_newStackPointer = sp; };
+ void* savedStackPointer() const { return m_savedStackPointer; }
+ void setSavedStackPointer(void* sp) { m_savedStackPointer = sp; }
bool hasWritesToFlush();
void flushWrites();
@@ -207,8 +204,7 @@
return ensurePageFor(address);
}
- void* m_newStackPointer { nullptr };
- void* m_lowWatermark;
+ void* m_savedStackPointer { nullptr };
// A cache of the last accessed page details for quick access.
void* m_lastAccessedPageBaseAddress { nullptr };
Modified: trunk/Source/JavaScriptCore/bytecode/ArrayProfile.h (222870 => 222871)
--- trunk/Source/JavaScriptCore/bytecode/ArrayProfile.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/bytecode/ArrayProfile.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -218,6 +218,7 @@
void computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock*);
void computeUpdatedPrediction(const ConcurrentJSLocker&, CodeBlock*, Structure* lastSeenStructure);
+ void observeArrayMode(ArrayModes mode) { m_observedArrayModes |= mode; }
ArrayModes observedArrayModes(const ConcurrentJSLocker&) const { return m_observedArrayModes; }
bool mayInterceptIndexedAccesses(const ConcurrentJSLocker&) const { return m_mayInterceptIndexedAccesses; }
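
observeArrayMode() simply ORs the observed mode bits into m_observedArrayModes, giving the C++ exit ramp a typed equivalent of the bitwise OR that JIT-compiled code performs on this field. Its use appears in the ArrayProfileUpdate step of executeOSRExit() below:

    Structure* structure = profiledValue.asCell()->structure(vm);
    arrayProfile->observeStructure(structure);
    arrayProfile->observeArrayMode(asArrayModes(structure->indexingType()));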
Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -2320,6 +2320,53 @@
return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
}
+auto CodeBlock::updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState& exitState) -> OptimizeAction
+{
+ DFG::OSRExitBase& exit = exitState.exit;
+ if (!exitKindMayJettison(exit.m_kind)) {
+ // FIXME: We may want to notice that we're frequently exiting
+ // at an op_catch that we didn't compile an entrypoint for, and
+ // then trigger a reoptimization of this CodeBlock:
+ // https://bugs.webkit.org/show_bug.cgi?id=175842
+ return OptimizeAction::None;
+ }
+
+ exit.m_count++;
+ m_osrExitCounter++;
+
+ CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
+ ASSERT(baselineCodeBlock == baselineAlternative());
+ if (UNLIKELY(baselineCodeBlock->jitExecuteCounter().hasCrossedThreshold()))
+ return OptimizeAction::ReoptimizeNow;
+
+ // We want to figure out if there's a possibility that we're in a loop. For the outermost
+ // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
+ // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
+ // problem is the inlined functions, which might also have loops, but whose baseline versions
+ // don't know where to look for the exit count. Figure out if those loops are severe enough
+ // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger.
+ // Otherwise, we should use the normal reoptimization trigger.
+
+ bool didTryToEnterInLoop = false;
+ for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
+ if (inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->didTryToEnterInLoop()) {
+ didTryToEnterInLoop = true;
+ break;
+ }
+ }
+
+ uint32_t exitCountThreshold = didTryToEnterInLoop
+ ? exitCountThresholdForReoptimizationFromLoop()
+ : exitCountThresholdForReoptimization();
+
+ if (m_osrExitCounter > exitCountThreshold)
+ return OptimizeAction::ReoptimizeNow;
+
+ // Too few fails. Adjust the execution counter such that the target is to only optimize after a while.
+ baselineCodeBlock->m_jitExecuteCounter.setNewThresholdForOSRExit(exitState.activeThreshold, exitState.memoryUsageAdjustedThreshold);
+ return OptimizeAction::None;
+}
+
void CodeBlock::optimizeNextInvocation()
{
if (Options::verboseOSR())
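
This new method folds the exit counting and reoptimization-threshold checks, which the JIT-emitted exit ramp used to perform inline, into a single C++ call. The probe-based exit ramp in DFGOSRExit.cpp below consumes its result like this:

    if (codeBlock->updateOSRExitCounterAndCheckIfNeedToReoptimize(exitState) == CodeBlock::OptimizeAction::ReoptimizeNow)
        triggerReoptimizationNow(baselineCodeBlock, &exit);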
Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.h (222870 => 222871)
--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -77,6 +77,10 @@
namespace JSC {
+namespace DFG {
+struct OSRExitState;
+} // namespace DFG
+
class BytecodeLivenessAnalysis;
class CodeBlockSet;
class ExecState;
@@ -762,7 +766,8 @@
void countOSRExit() { m_osrExitCounter++; }
- uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
+ enum class OptimizeAction { None, ReoptimizeNow };
+ OptimizeAction updateOSRExitCounterAndCheckIfNeedToReoptimize(DFG::OSRExitState&);
static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
Modified: trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.h (222870 => 222871)
--- trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/bytecode/ExecutionCounter.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -50,7 +50,7 @@
u.f = value;
return u.i;
}
-
+
template<CountingVariant countingVariant>
class ExecutionCounter {
public:
@@ -57,11 +57,19 @@
ExecutionCounter();
void forceSlowPathConcurrently(); // If you use this, checkIfThresholdCrossedAndSet() may still return false.
bool checkIfThresholdCrossedAndSet(CodeBlock*);
+ bool hasCrossedThreshold() const { return m_counter >= 0; }
void setNewThreshold(int32_t threshold, CodeBlock*);
void deferIndefinitely();
double count() const { return static_cast<double>(m_totalCount) + m_counter; }
void dump(PrintStream&) const;
+ void setNewThresholdForOSRExit(uint32_t activeThreshold, double memoryUsageAdjustedThreshold)
+ {
+ m_activeThreshold = activeThreshold;
+ m_counter = static_cast<int32_t>(-memoryUsageAdjustedThreshold);
+ m_totalCount = memoryUsageAdjustedThreshold;
+ }
+
static int32_t maximumExecutionCountsBetweenCheckpoints()
{
switch (countingVariant) {
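
setNewThresholdForOSRExit() re-arms the counter so that hasCrossedThreshold() (i.e. m_counter >= 0) only becomes true after roughly memoryUsageAdjustedThreshold further executions. A worked example with illustrative numbers:

    counter.setNewThresholdForOSRExit(1000, 1000.0);
    // Now m_activeThreshold == 1000, m_counter == -1000, m_totalCount == 1000.0.
    // Each execution of the baseline code increments m_counter, so after about
    // 1000 executions m_counter reaches 0 and hasCrossedThreshold() returns true,
    // which the exit ramp treats as the signal to reoptimize now.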
Modified: trunk/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -74,6 +74,34 @@
RELEASE_ASSERT_NOT_REACHED();
}
+void MethodOfGettingAValueProfile::reportValue(JSValue value)
+{
+ switch (m_kind) {
+ case None:
+ return;
+
+ case Ready:
+ *u.profile->specFailBucket(0) = JSValue::encode(value);
+ return;
+
+ case LazyOperand: {
+ LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));
+
+ ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
+ LazyOperandValueProfile* profile =
+ u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(locker, key);
+ *profile->specFailBucket(0) = JSValue::encode(value);
+ return;
+ }
+
+ case ArithProfileReady: {
+ u.arithProfile->observeResult(value);
+ return;
+ } }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
} // namespace JSC
#endif // ENABLE(DFG_JIT)
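
reportValue() is the direct C++ counterpart of emitReportValue(): instead of emitting JIT code that stores into the profile, the probe-based exit ramp calls it with the recovered value, as in the ValueProfileUpdate step of executeOSRExit() below:

    JSValue profiledValue = jsValueFor(cpu, exit.m_jsValueSource);
    if (MethodOfGettingAValueProfile profile = exit.m_valueProfile)
        profile.reportValue(profiledValue);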
Modified: trunk/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h (222870 => 222871)
--- trunk/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -70,9 +70,10 @@
CodeBlock*, const LazyOperandValueProfileKey&);
explicit operator bool() const { return m_kind != None; }
-
+
void emitReportValue(CCallHelpers&, JSValueRegs) const;
-
+ void reportValue(JSValue);
+
private:
enum Kind {
None,
Modified: trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/dfg/DFGDriver.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2014, 2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -89,6 +89,7 @@
// Make sure that any stubs that the DFG is going to use are initialized. We want to
// make sure that all JIT code generation does finalization on the main thread.
+ vm.getCTIStub(osrExitThunkGenerator);
vm.getCTIStub(osrExitGenerationThunkGenerator);
vm.getCTIStub(throwExceptionFromCallSlowPathGenerator);
vm.getCTIStub(linkCallThunkGenerator);
Modified: trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -85,8 +85,9 @@
}
}
+ MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitThunkGenerator);
+ CodeLocationLabel osrExitThunkLabel = CodeLocationLabel(osrExitThunk.code());
for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
- OSRExit& exit = m_jitCode->osrExit[i];
OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
JumpList& failureJumps = info.m_failureJumps;
if (!failureJumps.empty())
@@ -96,7 +97,15 @@
jitAssertHasValidCallFrame();
store32(TrustedImm32(i), &vm()->osrExitIndex);
- exit.setPatchableCodeOffset(patchableJump());
+ if (Options::useProbeOSRExit()) {
+ Jump target = jump();
+ addLinkTask([target, osrExitThunkLabel] (LinkBuffer& linkBuffer) {
+ linkBuffer.link(target, osrExitThunkLabel);
+ });
+ } else {
+ OSRExit& exit = m_jitCode->osrExit[i];
+ exit.setPatchableCodeOffset(patchableJump());
+ }
}
}
@@ -306,10 +315,12 @@
MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
- OSRExit& exit = m_jitCode->osrExit[i];
OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
- linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
- exit.correctJump(linkBuffer);
+ if (!Options::useProbeOSRExit()) {
+ OSRExit& exit = m_jitCode->osrExit[i];
+ linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
+ exit.correctJump(linkBuffer);
+ }
if (info.m_replacementSource.isSet()) {
m_jitCode->common.jumpReplacements.append(JumpReplacement(
linkBuffer.locationOf(info.m_replacementSource),
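
The osrExitThunkGenerator referenced here and in DFGDriver.cpp lives in DFGThunks.cpp (not shown in this excerpt); it is essentially a one-probe trampoline into the C++ exit handler, roughly:

    MacroAssemblerCodeRef osrExitThunkGenerator(VM* vm)
    {
        MacroAssembler jit;
        jit.probe(OSRExit::executeOSRExit, vm);
        LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
        return FINALIZE_CODE(patchBuffer, ("DFG OSR exit thunk"));
    }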
Modified: trunk/Source/JavaScriptCore/dfg/DFGOSRExit.cpp (222870 => 222871)
--- trunk/Source/JavaScriptCore/dfg/DFGOSRExit.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/JavaScriptCore/dfg/DFGOSRExit.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -29,6 +29,7 @@
#if ENABLE(DFG_JIT)
#include "AssemblyHelpers.h"
+#include "ClonedArguments.h"
#include "DFGGraph.h"
#include "DFGMayExit.h"
#include "DFGOSRExitCompilerCommon.h"
@@ -35,12 +36,870 @@
#include "DFGOSRExitPreparation.h"
#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
+#include "DirectArguments.h"
#include "FrameTracers.h"
+#include "InlineCallFrame.h"
#include "JSCInlines.h"
+#include "JSCJSValue.h"
#include "OperandsInlines.h"
+#include "ProbeContext.h"
+#include "ProbeFrame.h"
namespace JSC { namespace DFG {
+// Probe-based OSR Exit.
+
+using CPUState = Probe::CPUState;
+using Context = Probe::Context;
+using Frame = Probe::Frame;
+
+static void reifyInlinedCallFrames(Probe::Context&, CodeBlock* baselineCodeBlock, const OSRExitBase&);
+static void adjustAndJumpToTarget(Probe::Context&, VM&, CodeBlock*, CodeBlock* baselineCodeBlock, OSRExit&);
+static void printOSRExit(Context&, uint32_t osrExitIndex, const OSRExit&);
+
+static JSValue jsValueFor(CPUState& cpu, JSValueSource source)
+{
+ if (source.isAddress()) {
+ JSValue result;
+ std::memcpy(&result, cpu.gpr<uint8_t*>(source.base()) + source.offset(), sizeof(JSValue));
+ return result;
+ }
+#if USE(JSVALUE64)
+ return JSValue::decode(cpu.gpr<EncodedJSValue>(source.gpr()));
+#else
+ if (source.hasKnownTag())
+ return JSValue(source.tag(), cpu.gpr<int32_t>(source.payloadGPR()));
+ return JSValue(cpu.gpr<int32_t>(source.tagGPR()), cpu.gpr<int32_t>(source.payloadGPR()));
+#endif
+}
+
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+
+static_assert(is64Bit(), "we only support callee save registers on 64-bit");
+
+// Based on AssemblyHelpers::emitRestoreCalleeSavesFor().
+static void restoreCalleeSavesFor(Context& context, CodeBlock* codeBlock)
+{
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+ uintptr_t* physicalStackFrame = context.fp<uintptr_t*>();
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontRestoreRegisters.get(entry.reg()))
+ continue;
+ // The callee saved values come from the original stack, not the recovered stack.
+ // Hence, we read the values directly from the physical stack memory instead of
+ // going through context.stack().
+ ASSERT(!(entry.offset() % sizeof(uintptr_t)));
+ context.gpr(entry.reg().gpr()) = physicalStackFrame[entry.offset() / sizeof(uintptr_t)];
+ }
+}
+
+// Based on AssemblyHelpers::emitSaveCalleeSavesFor().
+static void saveCalleeSavesFor(Context& context, CodeBlock* codeBlock)
+{
+ auto& stack = context.stack();
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontSaveRegisters.get(entry.reg()))
+ continue;
+ stack.set(context.fp(), entry.offset(), context.gpr<uintptr_t>(entry.reg().gpr()));
+ }
+}
+
+// Based on AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer().
+static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context& context)
+{
+ VM& vm = *context.arg<VM*>();
+
+ RegisterAtOffsetList* allCalleeSaves = VM::getAllCalleeSaveRegisterOffsets();
+ RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
+ unsigned registerCount = allCalleeSaves->size();
+
+ VMEntryRecord* entryRecord = vmEntryRecord(vm.topEntryFrame);
+ uintptr_t* calleeSaveBuffer = reinterpret_cast<uintptr_t*>(entryRecord->calleeSaveRegistersBuffer);
+
+ // Restore all callee saves.
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = allCalleeSaves->at(i);
+ if (dontRestoreRegisters.get(entry.reg()))
+ continue;
+ size_t uintptrOffset = entry.offset() / sizeof(uintptr_t);
+ if (entry.reg().isGPR())
+ context.gpr(entry.reg().gpr()) = calleeSaveBuffer[uintptrOffset];
+ else
+ context.fpr(entry.reg().fpr()) = bitwise_cast<double>(calleeSaveBuffer[uintptrOffset]);
+ }
+}
+
+// Based on AssemblyHelpers::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer().
+static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context& context)
+{
+ VM& vm = *context.arg<VM*>();
+ auto& stack = context.stack();
+
+ VMEntryRecord* entryRecord = vmEntryRecord(vm.topEntryFrame);
+ void* calleeSaveBuffer = entryRecord->calleeSaveRegistersBuffer;
+
+ RegisterAtOffsetList* allCalleeSaves = VM::getAllCalleeSaveRegisterOffsets();
+ RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
+ unsigned registerCount = allCalleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = allCalleeSaves->at(i);
+ if (dontCopyRegisters.get(entry.reg()))
+ continue;
+ if (entry.reg().isGPR())
+ stack.set(calleeSaveBuffer, entry.offset(), context.gpr<uintptr_t>(entry.reg().gpr()));
+ else
+ stack.set(calleeSaveBuffer, entry.offset(), context.fpr<uintptr_t>(entry.reg().fpr()));
+ }
+}
+
+// Based on AssemblyHelpers::emitSaveOrCopyCalleeSavesFor().
+static void saveOrCopyCalleeSavesFor(Context& context, CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, bool wasCalledViaTailCall)
+{
+ Frame frame(context.fp(), context.stack());
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+ RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontSaveRegisters.get(entry.reg()))
+ continue;
+
+ uintptr_t savedRegisterValue;
+
+ if (wasCalledViaTailCall && baselineCalleeSaves.get(entry.reg()))
+ savedRegisterValue = frame.get<uintptr_t>(entry.offset());
+ else
+ savedRegisterValue = context.gpr(entry.reg().gpr());
+
+ frame.set(offsetVirtualRegister.offsetInBytes() + entry.offset(), savedRegisterValue);
+ }
+}
+#else // not NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+
+static void restoreCalleeSavesFor(Context&, CodeBlock*) { }
+static void saveCalleeSavesFor(Context&, CodeBlock*) { }
+static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context&) { }
+static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context&) { }
+static void saveOrCopyCalleeSavesFor(Context&, CodeBlock*, VirtualRegister, bool) { }
+
+#endif // NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+
+static JSCell* createDirectArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
+{
+ VM& vm = *context.arg<VM*>();
+
+ ASSERT(vm.heap.isDeferred());
+
+ if (inlineCallFrame)
+ codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+
+ unsigned length = argumentCount - 1;
+ unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
+ DirectArguments* result = DirectArguments::create(
+ vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);
+
+ result->callee().set(vm, result, callee);
+
+ void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
+ Frame frame(frameBase, context.stack());
+ for (unsigned i = length; i--;)
+ result->setIndexQuickly(vm, i, frame.argument(i));
+
+ return result;
+}
+
+static JSCell* createClonedArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
+{
+ VM& vm = *context.arg<VM*>();
+ ExecState* exec = context.fp<ExecState*>();
+
+ ASSERT(vm.heap.isDeferred());
+
+ if (inlineCallFrame)
+ codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+
+ unsigned length = argumentCount - 1;
+ ClonedArguments* result = ClonedArguments::createEmpty(
+ vm, codeBlock->globalObject()->clonedArgumentsStructure(), callee, length);
+
+ void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
+ Frame frame(frameBase, context.stack());
+ for (unsigned i = length; i--;)
+ result->putDirectIndex(exec, i, frame.argument(i));
+ return result;
+}
+
+static void emitRestoreArguments(Context& context, CodeBlock* codeBlock, DFG::JITCode* dfgJITCode, const Operands<ValueRecovery>& operands)
+{
+ Frame frame(context.fp(), context.stack());
+
+ HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+ int operand = operands.operandForIndex(index);
+
+ if (recovery.technique() != DirectArgumentsThatWereNotCreated
+ && recovery.technique() != ClonedArgumentsThatWereNotCreated)
+ continue;
+
+ MinifiedID id = recovery.nodeID();
+ auto iter = alreadyAllocatedArguments.find(id);
+ if (iter != alreadyAllocatedArguments.end()) {
+ frame.setOperand(operand, frame.operand(iter->value));
+ continue;
+ }
+
+ InlineCallFrame* inlineCallFrame =
+ dfgJITCode->minifiedDFG.at(id)->inlineCallFrame();
+
+ int stackOffset;
+ if (inlineCallFrame)
+ stackOffset = inlineCallFrame->stackOffset;
+ else
+ stackOffset = 0;
+
+ JSFunction* callee;
+ if (!inlineCallFrame || inlineCallFrame->isClosureCall)
+ callee = jsCast<JSFunction*>(frame.operand(stackOffset + CallFrameSlot::callee).asCell());
+ else
+ callee = jsCast<JSFunction*>(inlineCallFrame->calleeRecovery.constant().asCell());
+
+ int32_t argumentCount;
+ if (!inlineCallFrame || inlineCallFrame->isVarargs())
+ argumentCount = frame.operand<int32_t>(stackOffset + CallFrameSlot::argumentCount, PayloadOffset);
+ else
+ argumentCount = inlineCallFrame->argumentCountIncludingThis;
+
+ JSCell* argumentsObject;
+ switch (recovery.technique()) {
+ case DirectArgumentsThatWereNotCreated:
+ argumentsObject = createDirectArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
+ break;
+ case ClonedArgumentsThatWereNotCreated:
+ argumentsObject = createClonedArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ frame.setOperand(operand, JSValue(argumentsObject));
+
+ alreadyAllocatedArguments.add(id, operand);
+ }
+}
+
+// The following is a list of extra initializations that need to be done, ordered
+// from most likely needed (lower enum value) to least likely needed (higher enum
+// value). Initializing to a given level also performs all the initializations of
+// the levels below it (see the use of extraInitializationLevel below).
+enum class ExtraInitializationLevel {
+ None,
+ SpeculationRecovery,
+ ValueProfileUpdate,
+ ArrayProfileUpdate,
+ Other
+};
+
+void OSRExit::executeOSRExit(Context& context)
+{
+ VM& vm = *context.arg<VM*>();
+ auto scope = DECLARE_THROW_SCOPE(vm);
+
+ ExecState* exec = context.fp<ExecState*>();
+ ASSERT(&exec->vm() == &vm);
+ auto& cpu = context.cpu;
+
+ if (vm.callFrameForCatch) {
+ exec = vm.callFrameForCatch;
+ context.fp() = exec;
+ }
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ ASSERT(codeBlock);
+ ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+
+ // It's preferable that we don't GC while in here. Anyway, doing so wouldn't
+ // really be profitable.
+ DeferGCForAWhile deferGC(vm.heap);
+
+ uint32_t exitIndex = vm.osrExitIndex;
+ DFG::JITCode* dfgJITCode = codeBlock->jitCode()->dfg();
+ OSRExit& exit = dfgJITCode->osrExit[exitIndex];
+
+ ASSERT(!vm.callFrameForCatch || exit.m_kind == GenericUnwind);
+ EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());
+
+ if (UNLIKELY(!exit.exitState)) {
+ ExtraInitializationLevel extraInitializationLevel = ExtraInitializationLevel::None;
+
+ // We only need to execute this block once for each OSRExit record. The computed
+ // results will be cached in the OSRExitState record for use by the rest of the
+ // exit ramp code.
+
+ // Ensure we have baseline codeBlocks to OSR exit to.
+ prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
+
+ CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative();
+ ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);
+
+ SpeculationRecovery* recovery = nullptr;
+ if (exit.m_recoveryIndex != UINT_MAX) {
+ recovery = &dfgJITCode->speculationRecovery[exit.m_recoveryIndex];
+ extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::SpeculationRecovery);
+ }
+
+ if (UNLIKELY(exit.m_kind == GenericUnwind))
+ extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
+
+ ArrayProfile* arrayProfile = nullptr;
+ if (!!exit.m_jsValueSource) {
+ if (exit.m_valueProfile)
+ extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::ValueProfileUpdate);
+ if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
+ CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
+ CodeBlock* profiledCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
+ arrayProfile = profiledCodeBlock->getArrayProfile(codeOrigin.bytecodeIndex);
+ if (arrayProfile)
+ extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::ArrayProfileUpdate);
+ }
+ }
+
+ int32_t activeThreshold = baselineCodeBlock->adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp());
+ double adjustedThreshold = applyMemoryUsageHeuristicsAndConvertToInt(activeThreshold, baselineCodeBlock);
+ ASSERT(adjustedThreshold > 0);
+ adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold);
+
+ CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
+ Vector<BytecodeAndMachineOffset> decodedCodeMap;
+ codeBlockForExit->jitCodeMap()->decode(decodedCodeMap);
+
+ BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
+
+ ASSERT(mapping);
+ ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
+
+ void* jumpTarget = codeBlockForExit->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
+
+ // Compute the value recoveries.
+ Operands<ValueRecovery> operands;
+ Vector<UndefinedOperandSpan> undefinedOperandSpans;
+ unsigned numVariables = dfgJITCode->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, dfgJITCode->minifiedDFG, exit.m_streamIndex, operands, &undefinedOperandSpans);
+ ptrdiff_t stackPointerOffset = -static_cast<ptrdiff_t>(numVariables) * sizeof(Register);
+
+ exit.exitState = adoptRef(new OSRExitState(exit, codeBlock, baselineCodeBlock, operands, WTFMove(undefinedOperandSpans), recovery, stackPointerOffset, activeThreshold, adjustedThreshold, jumpTarget, arrayProfile));
+
+ if (UNLIKELY(vm.m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
+ Profiler::Database& database = *vm.m_perBytecodeProfiler;
+ Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();
+
+ Profiler::OSRExit* profilerExit = compilation->addOSRExit(
+ exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
+ exit.m_kind, exit.m_kind == UncountableInvalidation);
+ exit.exitState->profilerExit = profilerExit;
+ extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
+ }
+
+ if (UNLIKELY(Options::printEachOSRExit()))
+ extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
+
+ exit.exitState->extraInitializationLevel = extraInitializationLevel;
+
+ if (UNLIKELY(Options::verboseOSR() || Options::verboseDFGOSRExit())) {
+ dataLogF("DFG OSR exit #%u (%s, %s) from %s, with operands = %s\n",
+ exitIndex, toCString(exit.m_codeOrigin).data(),
+ exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
+ toCString(ignoringContext<DumpContext>(operands)).data());
+ }
+ }
+
+ OSRExitState& exitState = *exit.exitState.get();
+ CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
+ ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);
+
+ Operands<ValueRecovery>& operands = exitState.operands;
+ Vector<UndefinedOperandSpan>& undefinedOperandSpans = exitState.undefinedOperandSpans;
+
+ context.sp() = context.fp<uint8_t*>() + exitState.stackPointerOffset;
+
+ // The only reason for using this do-while loop is so that we can break out midway when appropriate.
+ do {
+ auto extraInitializationLevel = static_cast<ExtraInitializationLevel>(exitState.extraInitializationLevel);
+
+ if (extraInitializationLevel == ExtraInitializationLevel::None) {
+ context.sp() = context.fp<uint8_t*>() + exitState.stackPointerOffset;
+ break;
+ }
+
+ // Begin extra initialization level: SpeculationRecovery
+
+ // We need to do speculation recovery first because array profiling and value profiling
+ // may rely on a value that it recovers. However, most exits don't come with a
+ // speculation recovery, so we decorate this path as UNLIKELY.
+ SpeculationRecovery* recovery = exitState.recovery;
+ if (UNLIKELY(recovery)) {
+ switch (recovery->type()) {
+ case SpeculativeAdd:
+ cpu.gpr(recovery->dest()) = cpu.gpr<uint32_t>(recovery->dest()) - cpu.gpr<uint32_t>(recovery->src());
+#if USE(JSVALUE64)
+ ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
+ cpu.gpr(recovery->dest()) |= TagTypeNumber;
+#endif
+ break;
+
+ case SpeculativeAddImmediate:
+ cpu.gpr(recovery->dest()) = (cpu.gpr<uint32_t>(recovery->dest()) - recovery->immediate());
+#if USE(JSVALUE64)
+ ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
+ cpu.gpr(recovery->dest()) |= TagTypeNumber;
+#endif
+ break;
+
+ case BooleanSpeculationCheck:
+#if USE(JSVALUE64)
+ cpu.gpr(recovery->dest()) = cpu.gpr(recovery->dest()) ^ ValueFalse;
+#endif
+ break;
+
+ default:
+ break;
+ }
+ }
+ if (extraInitializationLevel <= ExtraInitializationLevel::SpeculationRecovery)
+ break;
+
+ // Begin extra initialization level: ValueProfileUpdate
+ JSValue profiledValue;
+ if (!!exit.m_jsValueSource) {
+ profiledValue = jsValueFor(cpu, exit.m_jsValueSource);
+ if (MethodOfGettingAValueProfile profile = exit.m_valueProfile)
+ profile.reportValue(profiledValue);
+ }
+ if (extraInitializationLevel <= ExtraInitializationLevel::ValueProfileUpdate)
+ break;
+
+ // Begin extra initialization level: ArrayProfileUpdate
+ ArrayProfile* arrayProfile = exitState.arrayProfile;
+ if (arrayProfile) {
+ ASSERT(!!exit.m_jsValueSource);
+ ASSERT(exit.m_kind == BadCache || exit.m_kind == BadIndexingType);
+ Structure* structure = profiledValue.asCell()->structure(vm);
+ arrayProfile->observeStructure(structure);
+ arrayProfile->observeArrayMode(asArrayModes(structure->indexingType()));
+ }
+ if (extraInitializationLevel <= ExtraInitializationLevel::ArrayProfileUpdate)
+ break;
+
+ // Begin extra initialization level: Other
+ if (UNLIKELY(exit.m_kind == GenericUnwind)) {
+ // We are acting as a de facto op_catch because we arrive here from genericUnwind().
+ // So, we must restore our call frame and stack pointer.
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(context);
+ ASSERT(context.fp() == vm.callFrameForCatch);
+ }
+
+ if (exitState.profilerExit)
+ exitState.profilerExit->incCount();
+
+ if (UNLIKELY(Options::printEachOSRExit()))
+ printOSRExit(context, vm.osrExitIndex, exit);
+
+ } while (false); // End extra initialization.
+
+ Frame frame(cpu.fp(), context.stack());
+ ASSERT(!(context.fp<uintptr_t>() & 0x7));
+
+#if USE(JSVALUE64)
+ ASSERT(cpu.gpr(GPRInfo::tagTypeNumberRegister) == TagTypeNumber);
+ ASSERT(cpu.gpr(GPRInfo::tagMaskRegister) == TagMask);
+#endif
+
+ // Do all data format conversions and store the results into the stack.
+ // Note: we need to recover values before restoring callee save registers below
+ // because the recovery may rely on values in some of callee save registers.
+
+ int calleeSaveSpaceAsVirtualRegisters = static_cast<int>(baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters());
+ size_t numberOfOperands = operands.size();
+ size_t numUndefinedOperandSpans = undefinedOperandSpans.size();
+
+ size_t nextUndefinedSpanIndex = 0;
+ size_t nextUndefinedOperandIndex = numberOfOperands;
+ if (numUndefinedOperandSpans)
+ nextUndefinedOperandIndex = undefinedOperandSpans[nextUndefinedSpanIndex].firstIndex;
+
+ JSValue undefined = jsUndefined();
+ for (size_t spanIndex = 0; spanIndex < numUndefinedOperandSpans; ++spanIndex) {
+ auto& span = undefinedOperandSpans[spanIndex];
+ int firstOffset = span.minOffset;
+ int lastOffset = firstOffset + span.numberOfRegisters;
+
+ for (int offset = firstOffset; offset < lastOffset; ++offset)
+ frame.setOperand(offset, undefined);
+ }
+
+ for (size_t index = 0; index < numberOfOperands; ++index) {
+ const ValueRecovery& recovery = operands[index];
+ VirtualRegister reg = operands.virtualRegisterForIndex(index);
+
+ if (UNLIKELY(index == nextUndefinedOperandIndex)) {
+ index += undefinedOperandSpans[nextUndefinedSpanIndex++].numberOfRegisters - 1;
+ if (nextUndefinedSpanIndex < numUndefinedOperandSpans)
+ nextUndefinedOperandIndex = undefinedOperandSpans[nextUndefinedSpanIndex].firstIndex;
+ else
+ nextUndefinedOperandIndex = numberOfOperands;
+ continue;
+ }
+
+ if (reg.isLocal() && reg.toLocal() < calleeSaveSpaceAsVirtualRegisters)
+ continue;
+
+ int operand = reg.offset();
+
+ switch (recovery.technique()) {
+ case DisplacedInJSStack:
+ frame.setOperand(operand, exec->r(recovery.virtualRegister()).jsValue());
+ break;
+
+ case InFPR:
+ frame.setOperand(operand, cpu.fpr<JSValue>(recovery.fpr()));
+ break;
+
+#if USE(JSVALUE64)
+ case InGPR:
+ frame.setOperand(operand, cpu.gpr<JSValue>(recovery.gpr()));
+ break;
+#else
+ case InPair:
+ frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.tagGPR()), cpu.gpr<int32_t>(recovery.payloadGPR())));
+ break;
+#endif
+
+ case UnboxedCellInGPR:
+ frame.setOperand(operand, JSValue(cpu.gpr<JSCell*>(recovery.gpr())));
+ break;
+
+ case CellDisplacedInJSStack:
+ frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedCell()));
+ break;
+
+#if USE(JSVALUE32_64)
+ case UnboxedBooleanInGPR:
+ frame.setOperand(operand, jsBoolean(cpu.gpr<bool>(recovery.gpr())));
+ break;
+#endif
+
+ case BooleanDisplacedInJSStack:
+#if USE(JSVALUE64)
+ frame.setOperand(operand, exec->r(recovery.virtualRegister()).jsValue());
+#else
+ frame.setOperand(operand, jsBoolean(exec->r(recovery.virtualRegister()).jsValue().payload()));
+#endif
+ break;
+
+ case UnboxedInt32InGPR:
+ frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.gpr())));
+ break;
+
+ case Int32DisplacedInJSStack:
+ frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedInt32()));
+ break;
+
+#if USE(JSVALUE64)
+ case UnboxedInt52InGPR:
+ frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr()) >> JSValue::int52ShiftAmount));
+ break;
+
+ case Int52DisplacedInJSStack:
+ frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedInt52()));
+ break;
+
+ case UnboxedStrictInt52InGPR:
+ frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr())));
+ break;
+
+ case StrictInt52DisplacedInJSStack:
+ frame.setOperand(operand, JSValue(exec->r(recovery.virtualRegister()).unboxedStrictInt52()));
+ break;
+#endif
+
+ case UnboxedDoubleInFPR:
+ frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(cpu.fpr(recovery.fpr()))));
+ break;
+
+ case DoubleDisplacedInJSStack:
+ frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(exec->r(recovery.virtualRegister()).unboxedDouble())));
+ break;
+
+ case Constant:
+ frame.setOperand(operand, recovery.constant());
+ break;
+
+ case DirectArgumentsThatWereNotCreated:
+ case ClonedArgumentsThatWereNotCreated:
+ // Don't do this, yet.
+ break;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ // Restore the DFG callee saves and then save the ones the baseline JIT uses.
+ restoreCalleeSavesFor(context, codeBlock);
+ saveCalleeSavesFor(context, baselineCodeBlock);
+
+#if USE(JSVALUE64)
+ cpu.gpr(GPRInfo::tagTypeNumberRegister) = TagTypeNumber;
+ cpu.gpr(GPRInfo::tagMaskRegister) = TagTypeNumber | TagBitTypeOther;
+#endif
+
+ if (exit.isExceptionHandler())
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(context);
+
+ // Now that things on the stack are recovered, do the arguments recovery. We assume that argument
+ // recoveries don't recursively refer to each other. But we don't try to assume that they only
+ // refer to certain ranges of locals. That is why we need to do this here, once the stack is sensible.
+ // Note that we also roughly assume that the arguments might still be materialized outside of their
+ // inline call frame scope - but for now the DFG wouldn't do that.
+
+ DFG::emitRestoreArguments(context, codeBlock, dfgJITCode, operands);
+
+ // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
+ //
+ // (a) Code that performs an OSR exit gets a chance to reenter optimized
+ // code eventually, since optimized code is faster. But we don't
+ // want to do such reentry too aggressively (see (c) below).
+ //
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
+ //
+ // (c) Code that performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
+ //
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
+ //
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
+
+ if (UNLIKELY(codeBlock->updateOSRExitCounterAndCheckIfNeedToReoptimize(exitState) == CodeBlock::OptimizeAction::ReoptimizeNow))
+ triggerReoptimizationNow(baselineCodeBlock, &exit);
+
+ reifyInlinedCallFrames(context, baselineCodeBlock, exit);
+ adjustAndJumpToTarget(context, vm, codeBlock, baselineCodeBlock, exit);
+}
+
+static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselineCodeBlock, const OSRExitBase& exit)
+{
+ auto& cpu = context.cpu;
+ Frame frame(cpu.fp(), context.stack());
+
+ // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
+ // in presence of inlined tail calls.
+ // https://bugs.webkit.org/show_bug.cgi?id=147511
+ ASSERT(outermostBaselineCodeBlock->jitType() == JITCode::BaselineJIT);
+ frame.setOperand<CodeBlock*>(CallFrameSlot::codeBlock, outermostBaselineCodeBlock);
+
+ const CodeOrigin* codeOrigin;
+ for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingTailCalls()) {
+ InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;
+ CodeBlock* baselineCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(*codeOrigin, outermostBaselineCodeBlock);
+ InlineCallFrame::Kind trueCallerCallKind;
+ CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
+ void* callerFrame = cpu.fp();
+
+ if (!trueCaller) {
+ ASSERT(inlineCallFrame->isTail());
+ void* returnPC = frame.get<void*>(CallFrame::returnPCOffset());
+ frame.set<void*>(inlineCallFrame->returnPCOffset(), returnPC);
+ callerFrame = frame.get<void*>(CallFrame::callerFrameOffset());
+ } else {
+ CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
+ unsigned callBytecodeIndex = trueCaller->bytecodeIndex;
+ void* jumpTarget = nullptr;
+
+ switch (trueCallerCallKind) {
+ case InlineCallFrame::Call:
+ case InlineCallFrame::Construct:
+ case InlineCallFrame::CallVarargs:
+ case InlineCallFrame::ConstructVarargs:
+ case InlineCallFrame::TailCall:
+ case InlineCallFrame::TailCallVarargs: {
+ CallLinkInfo* callLinkInfo =
+ baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
+ RELEASE_ASSERT(callLinkInfo);
+
+ jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
+ break;
+ }
+
+ case InlineCallFrame::GetterCall:
+ case InlineCallFrame::SetterCall: {
+ StructureStubInfo* stubInfo =
+ baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
+ RELEASE_ASSERT(stubInfo);
+
+ jumpTarget = stubInfo->doneLocation().executableAddress();
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ if (trueCaller->inlineCallFrame)
+ callerFrame = cpu.fp<uint8_t*>() + trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue);
+
+ frame.set<void*>(inlineCallFrame->returnPCOffset(), jumpTarget);
+ }
+
+ frame.setOperand<void*>(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock, baselineCodeBlock);
+
+ // Restore the inline call frame's callee save registers.
+ // If this inlined frame is a tail call that will return back to the original caller, we need to
+ // copy the prior contents of the tag registers already saved for the outer frame to this frame.
+ saveOrCopyCalleeSavesFor(context, baselineCodeBlock, VirtualRegister(inlineCallFrame->stackOffset), !trueCaller);
+
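+ // Varargs frames have their argument count materialized when the call is
+ // made, so it only needs to be rematerialized here for non-varargs frames.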
+ if (!inlineCallFrame->isVarargs())
+ frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, PayloadOffset, inlineCallFrame->argumentCountIncludingThis);
+ ASSERT(callerFrame);
+ frame.set<void*>(inlineCallFrame->callerFrameOffset(), callerFrame);
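+ // The call site index is stored in the tag half of the argument count
+ // slot; it identifies the bytecode origin of this reified frame.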
+#if USE(JSVALUE64)
+ uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
+ frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
+ if (!inlineCallFrame->isClosureCall)
+ frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, JSValue(inlineCallFrame->calleeConstant()));
+#else // USE(JSVALUE64), i.e. what follows is the 32-bit (JSVALUE32_64) part
+ Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
+ uint32_t locationBits = CallSiteIndex(instruction).bits();
+ frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
+ frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::callee, TagOffset, static_cast<uint32_t>(JSValue::CellTag));
+ if (!inlineCallFrame->isClosureCall)
+ frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, PayloadOffset, inlineCallFrame->calleeConstant());
+#endif // USE(JSVALUE64)
+ }
+
+ // We don't need to set the top-level code origin if we only did inline tail calls.
+ if (codeOrigin) {
+#if USE(JSVALUE64)
+ uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
+#else
+ Instruction* instruction = outermostBaselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
+ uint32_t locationBits = CallSiteIndex(instruction).bits();
+#endif
+ frame.setOperand<uint32_t>(CallFrameSlot::argumentCount, TagOffset, locationBits);
+ }
+}
+
+static void adjustAndJumpToTarget(Context& context, VM& vm, CodeBlock* codeBlock, CodeBlock* baselineCodeBlock, OSRExit& exit)
+{
+ OSRExitState* exitState = exit.exitState.get();
+
+ WTF::storeLoadFence(); // The optimizing compiler expects that the OSR exit mechanism will execute this fence.
+ vm.heap.writeBarrier(baselineCodeBlock);
+
+ // We barrier all inlined frames -- and not just the current inline stack --
+ // because we don't know which inlined function owns the value profile that
+ // we'll update when we exit. In the case of "f() { a(); b(); }", if both
+ // a and b are inlined, we might exit inside b due to a bad value loaded
+ // from a.
+ // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
+ // the value profile.
+ InlineCallFrameSet* inlineCallFrames = codeBlock->jitCode()->dfgCommon()->inlineCallFrames.get();
+ if (inlineCallFrames) {
+ for (InlineCallFrame* inlineCallFrame : *inlineCallFrames)
+ vm.heap.writeBarrier(inlineCallFrame->baselineCodeBlock.get());
+ }
+
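+ // If we exited from inlined code, the frame pointer still points at the
+ // outermost machine frame; adjust it to point at the materialized frame of
+ // the inline call frame we are exiting into.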
+ if (exit.m_codeOrigin.inlineCallFrame)
+ context.fp() = context.fp<uint8_t*>() + exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue);
+
+ void* jumpTarget = exitState->jumpTarget;
+ ASSERT(jumpTarget);
+
+ if (exit.isExceptionHandler()) {
+ // Since we're jumping to op_catch, we need to set callFrameForCatch.
+ vm.callFrameForCatch = context.fp<ExecState*>();
+ }
+
+ vm.topCallFrame = context.fp<ExecState*>();
+ context.pc() = jumpTarget;
+}
+
+static void printOSRExit(Context& context, uint32_t osrExitIndex, const OSRExit& exit)
+{
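+ // Dumps the exit site and the register state captured by the probe
+ // (presumably gated on Options::printEachOSRExit()).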
+ ExecState* exec = context.fp<ExecState*>();
+ CodeBlock* codeBlock = exec->codeBlock();
+ CodeBlock* alternative = codeBlock->alternative();
+ ExitKind kind = exit.m_kind;
+ unsigned bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
+
+ dataLog("Speculation failure in ", *codeBlock);
+ dataLog(" @ exit #", osrExitIndex, " (bc#", bytecodeOffset, ", ", exitKindToString(kind), ") with ");
+ if (alternative) {
+ dataLog(
+ "executeCounter = ", alternative->jitExecuteCounter(),
+ ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
+ ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
+ } else
+ dataLog("no alternative code block (i.e. we've been jettisoned)");
+ dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
+ dataLog(" GPRs at time of exit:");
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+ GPRReg gpr = GPRInfo::toRegister(i);
+ dataLog(" ", context.gprName(gpr), ":", RawPointer(context.gpr<void*>(gpr)));
+ }
+ dataLog("\n");
+ dataLog(" FPRs at time of exit:");
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ FPRReg fpr = FPRInfo::toRegister(i);
+ dataLog(" ", context.fprName(fpr), ":");
+ uint64_t bits = context.fpr<uint64_t>(fpr);
+ double value = context.fpr(fpr);
+ dataLogF("%llx:%lf", static_cast<long long>(bits), value);
+ }
+ dataLog("\n");
+}
+
+// JIT-based OSR Exit.
+
OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex)
: OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic, jit->m_origin.wasHoisted)
, m_jsValueSource(jsValueSource)
Modified: trunk/Source/_javascript_Core/dfg/DFGOSRExit.h (222870 => 222871)
--- trunk/Source/_javascript_Core/dfg/DFGOSRExit.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/dfg/DFGOSRExit.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -28,16 +28,27 @@
#if ENABLE(DFG_JIT)
#include "DFGOSRExitBase.h"
+#include "DFGVariableEventStream.h"
#include "GPRInfo.h"
#include "MacroAssembler.h"
#include "MethodOfGettingAValueProfile.h"
#include "Operands.h"
#include "ValueRecovery.h"
+#include <wtf/RefPtr.h>
namespace JSC {
+class ArrayProfile;
class CCallHelpers;
+namespace Probe {
+class Context;
+} // namespace Probe
+
+namespace Profiler {
+class OSRExit;
+} // namespace Profiler
+
namespace DFG {
class SpeculativeJIT;
@@ -91,6 +102,39 @@
SpeculationRecoveryType m_type;
};
+enum class ExtraInitializationLevel;
+
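+// OSRExitState caches the state needed to execute a probe-based OSR exit. It is
+// computed lazily on the first exit through a given OSRExit and reused on
+// subsequent exits, so the expensive setup work is not repeated.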
+struct OSRExitState : RefCounted<OSRExitState> {
+ OSRExitState(OSRExitBase& exit, CodeBlock* codeBlock, CodeBlock* baselineCodeBlock, Operands<ValueRecovery>& operands, Vector<UndefinedOperandSpan>&& undefinedOperandSpans, SpeculationRecovery* recovery, ptrdiff_t stackPointerOffset, int32_t activeThreshold, double memoryUsageAdjustedThreshold, void* jumpTarget, ArrayProfile* arrayProfile)
+ : exit(exit)
+ , codeBlock(codeBlock)
+ , baselineCodeBlock(baselineCodeBlock)
+ , operands(operands)
+ , undefinedOperandSpans(undefinedOperandSpans)
+ , recovery(recovery)
+ , stackPointerOffset(stackPointerOffset)
+ , activeThreshold(activeThreshold)
+ , memoryUsageAdjustedThreshold(memoryUsageAdjustedThreshold)
+ , jumpTarget(jumpTarget)
+ , arrayProfile(arrayProfile)
+ { }
+
+ OSRExitBase& exit;
+ CodeBlock* codeBlock;
+ CodeBlock* baselineCodeBlock;
+ Operands<ValueRecovery> operands;
+ Vector<UndefinedOperandSpan> undefinedOperandSpans;
+ SpeculationRecovery* recovery;
+ ptrdiff_t stackPointerOffset;
+ uint32_t activeThreshold;
+ double memoryUsageAdjustedThreshold;
+ void* jumpTarget;
+ ArrayProfile* arrayProfile;
+
+ ExtraInitializationLevel extraInitializationLevel;
+ Profiler::OSRExit* profilerExit { nullptr };
+};
+
// === OSRExit ===
//
// This structure describes how to exit the speculative path by
@@ -99,10 +143,13 @@
OSRExit(ExitKind, JSValueSource, MethodOfGettingAValueProfile, SpeculativeJIT*, unsigned streamIndex, unsigned recoveryIndex = UINT_MAX);
static void JIT_OPERATION compileOSRExit(ExecState*) WTF_INTERNAL;
+ static void executeOSRExit(Probe::Context&);
unsigned m_patchableCodeOffset { 0 };
MacroAssemblerCodeRef m_code;
+
+ RefPtr<OSRExitState> exitState;
JSValueSource m_jsValueSource;
MethodOfGettingAValueProfile m_valueProfile;
Modified: trunk/Source/_javascript_Core/dfg/DFGThunks.cpp (222870 => 222871)
--- trunk/Source/_javascript_Core/dfg/DFGThunks.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/dfg/DFGThunks.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -40,6 +40,14 @@
namespace JSC { namespace DFG {
+MacroAssemblerCodeRef osrExitThunkGenerator(VM* vm)
+{
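+ // Every probe-based OSR exit funnels through this thunk: the probe captures
+ // the full CPU state into a Probe::Context and hands it to
+ // OSRExit::executeOSRExit().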
+ MacroAssembler jit;
+ jit.probe(OSRExit::executeOSRExit, vm);
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("DFG OSR exit thunk"));
+}
+
MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM* vm)
{
MacroAssembler jit;
Modified: trunk/Source/_javascript_Core/dfg/DFGThunks.h (222870 => 222871)
--- trunk/Source/_javascript_Core/dfg/DFGThunks.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/dfg/DFGThunks.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
namespace DFG {
+MacroAssemblerCodeRef osrExitThunkGenerator(VM*);
MacroAssemblerCodeRef osrExitGenerationThunkGenerator(VM*);
MacroAssemblerCodeRef osrEntryThunkGenerator(VM*);
Modified: trunk/Source/_javascript_Core/dfg/DFGVariableEventStream.cpp (222870 => 222871)
--- trunk/Source/_javascript_Core/dfg/DFGVariableEventStream.cpp 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/dfg/DFGVariableEventStream.cpp 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -91,7 +91,7 @@
} // namespace
-bool VariableEventStream::tryToSetConstantRecovery(ValueRecovery& recovery, MinifiedNode* node) const
+static bool tryToSetConstantRecovery(ValueRecovery& recovery, MinifiedNode* node)
{
if (!node)
return false;
@@ -114,14 +114,39 @@
return false;
}
-void VariableEventStream::reconstruct(
+template<VariableEventStream::ReconstructionStyle style>
+unsigned VariableEventStream::reconstruct(
CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
- unsigned index, Operands<ValueRecovery>& valueRecoveries) const
+ unsigned index, Operands<ValueRecovery>& valueRecoveries, Vector<UndefinedOperandSpan>* undefinedOperandSpans) const
{
ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();
-
+
unsigned numVariables;
+ static const unsigned invalidIndex = std::numeric_limits<unsigned>::max();
+ unsigned firstUndefined = invalidIndex;
+ bool firstUndefinedIsArgument = false;
+
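+ // Coalesce adjacent operands that recover to jsUndefined() into spans, which
+ // the caller can then fill in bulk instead of one ValueRecovery at a time.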
+ auto flushUndefinedOperandSpan = [&] (unsigned i) {
+ if (firstUndefined == invalidIndex)
+ return;
+ int firstOffset = valueRecoveries.virtualRegisterForIndex(firstUndefined).offset();
+ int lastOffset = valueRecoveries.virtualRegisterForIndex(i - 1).offset();
+ int minOffset = std::min(firstOffset, lastOffset);
+ undefinedOperandSpans->append({ firstUndefined, minOffset, i - firstUndefined });
+ firstUndefined = invalidIndex;
+ };
+ auto recordUndefinedOperand = [&] (unsigned i) {
+ // We want to separate the span of arguments from the span of locals even if they have adjacent operand indexes.
+ if (firstUndefined != invalidIndex && firstUndefinedIsArgument != valueRecoveries.isArgument(i))
+ flushUndefinedOperandSpan(i);
+
+ if (firstUndefined == invalidIndex) {
+ firstUndefined = i;
+ firstUndefinedIsArgument = valueRecoveries.isArgument(i);
+ }
+ };
+
if (codeOrigin.inlineCallFrame)
numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeLocals + VirtualRegister(codeOrigin.inlineCallFrame->stackOffset).toLocal() + 1;
else
@@ -136,7 +161,7 @@
valueRecoveries[i] = ValueRecovery::displacedInJSStack(
VirtualRegister(valueRecoveries.operandForIndex(i)), DataFormatJS);
}
- return;
+ return numVariables;
}
// Step 1: Find the last checkpoint, and figure out the number of virtual registers as we go.
@@ -191,6 +216,12 @@
ValueSource& source = operandSources[i];
if (source.isTriviallyRecoverable()) {
valueRecoveries[i] = source.valueRecovery();
+ if (style == ReconstructionStyle::Separated) {
+ if (valueRecoveries[i].isConstant() && valueRecoveries[i].constant() == jsUndefined())
+ recordUndefinedOperand(i);
+ else
+ flushUndefinedOperandSpan(i);
+ }
continue;
}
@@ -199,14 +230,25 @@
MinifiedGenerationInfo info = generationInfos.get(source.id());
if (!info.alive) {
valueRecoveries[i] = ValueRecovery::constant(jsUndefined());
+ if (style == ReconstructionStyle::Separated)
+ recordUndefinedOperand(i);
continue;
}
- if (tryToSetConstantRecovery(valueRecoveries[i], node))
+ if (tryToSetConstantRecovery(valueRecoveries[i], node)) {
+ if (style == ReconstructionStyle::Separated) {
+ if (node->hasConstant() && node->constant() == jsUndefined())
+ recordUndefinedOperand(i);
+ else
+ flushUndefinedOperandSpan(i);
+ }
continue;
+ }
ASSERT(info.format != DataFormatNone);
-
+ if (style == ReconstructionStyle::Separated)
+ flushUndefinedOperandSpan(i);
+
if (info.filled) {
if (info.format == DataFormatDouble) {
valueRecoveries[i] = ValueRecovery::inFPR(info.u.fpr, DataFormatDouble);
@@ -225,8 +267,26 @@
valueRecoveries[i] =
ValueRecovery::displacedInJSStack(static_cast<VirtualRegister>(info.u.virtualReg), info.format);
}
+ if (style == ReconstructionStyle::Separated)
+ flushUndefinedOperandSpan(operandSources.size());
+
+ return numVariables;
}
+unsigned VariableEventStream::reconstruct(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
+ unsigned index, Operands<ValueRecovery>& valueRecoveries) const
+{
+ return reconstruct<ReconstructionStyle::Combined>(codeBlock, codeOrigin, graph, index, valueRecoveries, nullptr);
+}
+
+unsigned VariableEventStream::reconstruct(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
+ unsigned index, Operands<ValueRecovery>& valueRecoveries, Vector<UndefinedOperandSpan>* undefinedOperandSpans) const
+{
+ return reconstruct<ReconstructionStyle::Separated>(codeBlock, codeOrigin, graph, index, valueRecoveries, undefinedOperandSpans);
+}
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
Modified: trunk/Source/_javascript_Core/dfg/DFGVariableEventStream.h (222870 => 222871)
--- trunk/Source/_javascript_Core/dfg/DFGVariableEventStream.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/dfg/DFGVariableEventStream.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -35,6 +35,12 @@
namespace JSC { namespace DFG {
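+// A contiguous run of operands whose recovered value is jsUndefined(). Recording
+// these as spans lets the OSR exit handler fill them with a simple loop.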
+struct UndefinedOperandSpan {
+ unsigned firstIndex;
+ int minOffset;
+ unsigned numberOfRegisters;
+};
+
class VariableEventStream : public Vector<VariableEvent> {
public:
void appendAndLog(const VariableEvent& event)
@@ -42,13 +48,19 @@
append(event);
}
- void reconstruct(
+ unsigned reconstruct(CodeBlock*, CodeOrigin, MinifiedGraph&, unsigned index, Operands<ValueRecovery>&) const;
+ unsigned reconstruct(CodeBlock*, CodeOrigin, MinifiedGraph&, unsigned index, Operands<ValueRecovery>&, Vector<UndefinedOperandSpan>*) const;
+
+private:
+ enum class ReconstructionStyle {
+ Combined,
+ Separated
+ };
+ template<ReconstructionStyle style>
+ unsigned reconstruct(
CodeBlock*, CodeOrigin, MinifiedGraph&,
- unsigned index, Operands<ValueRecovery>&) const;
+ unsigned index, Operands<ValueRecovery>&, Vector<UndefinedOperandSpan>*) const;
-private:
- bool tryToSetConstantRecovery(ValueRecovery&, MinifiedNode*) const;
-
void logEvent(const VariableEvent&);
};
Modified: trunk/Source/_javascript_Core/profiler/ProfilerOSRExit.h (222870 => 222871)
--- trunk/Source/_javascript_Core/profiler/ProfilerOSRExit.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/profiler/ProfilerOSRExit.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -43,7 +43,8 @@
uint64_t* counterAddress() { return &m_counter; }
uint64_t count() const { return m_counter; }
-
+ void incCount() { m_counter++; }
+
JSValue toJS(ExecState*) const;
private:
Modified: trunk/Source/_javascript_Core/runtime/JSCJSValue.h (222870 => 222871)
--- trunk/Source/_javascript_Core/runtime/JSCJSValue.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/runtime/JSCJSValue.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2001 Harri Porten ([email protected])
* Copyright (C) 2001 Peter Kelly ([email protected])
- * Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009, 2012, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -344,12 +344,9 @@
uint32_t tag() const;
int32_t payload() const;
-#if !ENABLE(JIT)
- // This should only be used by the LLInt C Loop interpreter who needs
- // synthesize JSValue from its "register"s holding tag and payload
- // values.
+ // This should only be used by the LLInt C Loop interpreter and OSRExit code, which
+ // need to synthesize a JSValue from their "register"s holding tag and payload values.
explicit JSValue(int32_t tag, int32_t payload);
-#endif
#elif USE(JSVALUE64)
/*
Modified: trunk/Source/_javascript_Core/runtime/JSCJSValueInlines.h (222870 => 222871)
--- trunk/Source/_javascript_Core/runtime/JSCJSValueInlines.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/runtime/JSCJSValueInlines.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -341,7 +341,7 @@
u.asBits.payload = i;
}
-#if !ENABLE(JIT)
+#if USE(JSVALUE32_64)
inline JSValue::JSValue(int32_t tag, int32_t payload)
{
u.asBits.tag = tag;
Modified: trunk/Source/_javascript_Core/runtime/Options.h (222870 => 222871)
--- trunk/Source/_javascript_Core/runtime/Options.h 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Source/_javascript_Core/runtime/Options.h 2017-10-04 20:00:01 UTC (rev 222871)
@@ -172,6 +172,7 @@
v(bool, verboseCompilation, false, Normal, nullptr) \
v(bool, verboseFTLCompilation, false, Normal, nullptr) \
v(bool, logCompilationChanges, false, Normal, nullptr) \
+ v(bool, useProbeOSRExit, false, Normal, nullptr) \
v(bool, printEachOSRExit, false, Normal, nullptr) \
v(bool, validateGraph, false, Normal, nullptr) \
v(bool, validateGraphAtEachPhase, false, Normal, nullptr) \
Modified: trunk/Tools/ChangeLog (222870 => 222871)
--- trunk/Tools/ChangeLog 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Tools/ChangeLog 2017-10-04 20:00:01 UTC (rev 222871)
@@ -1,3 +1,16 @@
+2017-10-04 Mark Lam <[email protected]>
+
+ Add support for using Probe DFG OSR Exit behind a runtime flag.
+ https://bugs.webkit.org/show_bug.cgi?id=177844
+ <rdar://problem/34801425>
+
+ Reviewed by Saam Barati.
+
+ Enable --useProbeOSRExit=true for dfg-eager and ftl-no-cjit-validate-sampling-profiler
+ test configurations.
+
+ * Scripts/run-jsc-stress-tests:
+
2017-10-04 Jonathan Bedard <[email protected]>
webkitpy.tool.steps.steps_unittest.StepsTest.test_runtests_api is flakey
Modified: trunk/Tools/Scripts/run-jsc-stress-tests (222870 => 222871)
--- trunk/Tools/Scripts/run-jsc-stress-tests 2017-10-04 19:47:32 UTC (rev 222870)
+++ trunk/Tools/Scripts/run-jsc-stress-tests 2017-10-04 20:00:01 UTC (rev 222871)
@@ -457,6 +457,7 @@
NO_CJIT_OPTIONS = ["--useConcurrentJIT=false", "--thresholdForJITAfterWarmUp=100", "--scribbleFreeCells=true"]
B3O1_OPTIONS = ["--defaultB3OptLevel=1"]
FTL_OPTIONS = ["--useFTLJIT=true"]
+PROBE_OSR_EXIT_OPTION = ["--useProbeOSRExit=true"]
require_relative "webkitruby/jsc-stress-test-writer-#{$testWriter}"
@@ -623,7 +624,7 @@
end
def runFTLNoCJITValidate(*optionalTestSpecificOptions)
- run("ftl-no-cjit-validate-sampling-profiler", "--validateGraph=true", "--useSamplingProfiler=true", "--airForceIRCAllocator=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + optionalTestSpecificOptions))
+ run("ftl-no-cjit-validate-sampling-profiler", "--validateGraph=true", "--useSamplingProfiler=true", "--airForceIRCAllocator=true", *(FTL_OPTIONS + NO_CJIT_OPTIONS + PROBE_OSR_EXIT_OPTION + optionalTestSpecificOptions))
end
def runFTLNoCJITNoPutStackValidate(*optionalTestSpecificOptions)
@@ -639,7 +640,7 @@
end
def runDFGEager(*optionalTestSpecificOptions)
- run("dfg-eager", *(EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + optionalTestSpecificOptions))
+ run("dfg-eager", *(EAGER_OPTIONS + COLLECT_CONTINUOUSLY_OPTIONS + PROBE_OSR_EXIT_OPTION + optionalTestSpecificOptions))
end
def runDFGEagerNoCJITValidate(*optionalTestSpecificOptions)