Diff
Modified: branches/jsc-tailcall/Source/_javascript_Core/ChangeLog (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/ChangeLog 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/ChangeLog 2015-08-04 00:39:19 UTC (rev 187791)
@@ -1,3 +1,119 @@
+2015-07-31 Basile Clement <basile_clem...@apple.com>
+
+ jsc-tailcall: Implement the tail call opcodes in the DFG
+ https://bugs.webkit.org/show_bug.cgi?id=146850
+
+ Reviewed by Michael Saboff.
+
+ This patch adds support for tail calls in the DFG. This requires a fairly large number of new nodes:
+
+ - TailCall and TailCallVarargs are straightforward. They are terminal
+ nodes and have the semantics of an actual tail call.
+
+ - TailCallInlinedCaller and TailCallVarargsInlinedCaller are here to perform a
+ tail call inside an inlined function. They are non-terminal nodes,
+ and perform the call as a regular call after popping an
+ appropriate number of inlined tail call frames.
+
+ - TailCallForwardVarargs and TailCallForwardVarargsInlinedCaller are the
+ extension of TailCallVarargs and TailCallVarargsInlinedCaller to enable
+ the varargs forwarding optimization so that we don't lose
+ performance with a tail call instead of a regular call.
+
+ This also required two broad kinds of changes:
+
+ - Changes in the JIT itself (DFGSpeculativeJIT) are pretty
+ straightforward since they are just an extension of the baseline JIT
+ changes introduced previously.
+
+ - Changes in the runtime are mostly related to handling inline call
+ frames. The idea here is that we have a special TailCall type for
+ call frames that indicates to the various pieces of code walking the
+ inline call frame that they should (recursively) skip the caller in
+ their analysis.
+
+ * bytecode/CallMode.h:
+ (JSC::specializationKindFor):
+ * bytecode/CodeOrigin.cpp:
+ (JSC::CodeOrigin::inlineDepthForCallFrame):
+ (JSC::CodeOrigin::isApproximatelyEqualTo):
+ (JSC::CodeOrigin::approximateHash):
+ (JSC::CodeOrigin::inlineStack):
+ (JSC::InlineCallFrame::dumpInContext):
+ (WTF::printInternal):
+ * bytecode/CodeOrigin.h:
+ (JSC::InlineCallFrame::callModeFor):
+ (JSC::InlineCallFrame::kindFor):
+ (JSC::InlineCallFrame::varargsKindFor):
+ (JSC::InlineCallFrame::specializationKindFor):
+ (JSC::InlineCallFrame::isVarargs):
+ (JSC::InlineCallFrame::isTail):
+ (JSC::InlineCallFrame::computeCallerSkippingDeadFrames):
+ (JSC::InlineCallFrame::getCallerSkippingDeadFrames):
+ (JSC::InlineCallFrame::getCallerInlineFrameSkippingDeadFrames):
+ * dfg/DFGAbstractInterpreterInlines.h:
+ (JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects):
+ * dfg/DFGArgumentsEliminationPhase.cpp:
+ * dfg/DFGBasicBlock.h:
+ (JSC::DFG::BasicBlock::findTerminal):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::allInlineFramesAreTailCalls):
+ (JSC::DFG::ByteCodeParser::addCallWithoutSettingResult):
+ (JSC::DFG::ByteCodeParser::addCall):
+ (JSC::DFG::ByteCodeParser::getPredictionWithoutOSRExit):
+ (JSC::DFG::ByteCodeParser::getPrediction):
+ (JSC::DFG::ByteCodeParser::handleCall):
+ (JSC::DFG::ByteCodeParser::handleVarargsCall):
+ (JSC::DFG::ByteCodeParser::inliningCost):
+ (JSC::DFG::ByteCodeParser::inlineCall):
+ (JSC::DFG::ByteCodeParser::attemptToInlineCall):
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ (JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
+ (JSC::DFG::ByteCodeParser::parseCodeBlock):
+ * dfg/DFGCapabilities.cpp:
+ (JSC::DFG::capabilityLevel):
+ * dfg/DFGClobberize.h:
+ (JSC::DFG::clobberize):
+ * dfg/DFGDoesGC.cpp:
+ (JSC::DFG::doesGC):
+ * dfg/DFGFixupPhase.cpp:
+ (JSC::DFG::FixupPhase::fixupNode):
+ * dfg/DFGGraph.cpp:
+ (JSC::DFG::Graph::isLiveInBytecode):
+ * dfg/DFGGraph.h:
+ (JSC::DFG::Graph::forAllLocalsLiveInBytecode):
+ * dfg/DFGInPlaceAbstractState.cpp:
+ (JSC::DFG::InPlaceAbstractState::mergeToSuccessors):
+ * dfg/DFGNode.h:
+ (JSC::DFG::Node::hasCallVarargsData):
+ (JSC::DFG::Node::isTerminal):
+ (JSC::DFG::Node::hasHeapPrediction):
+ * dfg/DFGNodeType.h:
+ * dfg/DFGOSRExitCompilerCommon.cpp:
+ (JSC::DFG::handleExitCounts):
+ (JSC::DFG::reifyInlinedCallFrames):
+ * dfg/DFGOSRExitPreparation.cpp:
+ (JSC::DFG::prepareCodeOriginForOSRExit):
+ * dfg/DFGOperations.cpp:
+ * dfg/DFGPreciseLocalClobberize.h:
+ (JSC::DFG::PreciseLocalClobberizeAdaptor::readTop):
+ * dfg/DFGPredictionPropagationPhase.cpp:
+ (JSC::DFG::PredictionPropagationPhase::propagate):
+ * dfg/DFGSafeToExecute.h:
+ (JSC::DFG::safeToExecute):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::emitCall):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::emitCall):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGVarargsForwardingPhase.cpp:
+ * interpreter/CallFrame.cpp:
+ (JSC::CallFrame::bytecodeOffsetFromCodeOriginIndex):
+ * interpreter/StackVisitor.cpp:
+ (JSC::StackVisitor::gotoNextFrame):
+ * jit/CCallHelpers.h:
+ * tests/stress/dfg-tail-calls.js: Added.
+
2015-08-03 Basile Clement <basile_clem...@apple.com>
jsc-tailcall: Kraken/stanford-crypto-ccm crashes
Modified: branches/jsc-tailcall/Source/_javascript_Core/bytecode/CallMode.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/bytecode/CallMode.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/bytecode/CallMode.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -26,12 +26,26 @@
#ifndef CallMode_h
#define CallMode_h
+#include "CodeSpecializationKind.h"
+
namespace JSC {
enum class CallMode { Regular, Tail, Construct };
enum FrameAction { KeepTheFrame = 0, ReuseTheFrame };
+inline CodeSpecializationKind specializationKindFor(CallMode callMode)
+{
+ switch (callMode) {
+ case CallMode::Tail:
+ case CallMode::Regular:
+ return CodeForCall;
+
+ case CallMode::Construct:
+ return CodeForConstruct;
+ }
+}
+
} // namespace JSC
namespace WTF {
Modified: branches/jsc-tailcall/Source/_javascript_Core/bytecode/CodeOrigin.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/bytecode/CodeOrigin.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/bytecode/CodeOrigin.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -36,7 +36,7 @@
unsigned CodeOrigin::inlineDepthForCallFrame(InlineCallFrame* inlineCallFrame)
{
unsigned result = 1;
- for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
+ for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
result++;
return result;
}
@@ -77,8 +77,8 @@
if (a.inlineCallFrame->executable.get() != b.inlineCallFrame->executable.get())
return false;
- a = a.inlineCallFrame->caller;
- b = b.inlineCallFrame->caller;
+ a = a.inlineCallFrame->directCaller;
+ b = b.inlineCallFrame->directCaller;
}
}
@@ -99,7 +99,7 @@
result += WTF::PtrHash<JSCell*>::hash(codeOrigin.inlineCallFrame->executable.get());
- codeOrigin = codeOrigin.inlineCallFrame->caller;
+ codeOrigin = codeOrigin.inlineCallFrame->directCaller;
}
}
@@ -108,8 +108,8 @@
Vector<CodeOrigin> result(inlineDepth());
result.last() = *this;
unsigned index = result.size() - 2;
- for (InlineCallFrame* current = inlineCallFrame; current; current = current->caller.inlineCallFrame)
- result[index--] = current->caller;
+ for (InlineCallFrame* current = inlineCallFrame; current; current = current->directCaller.inlineCallFrame)
+ result[index--] = current->directCaller;
RELEASE_ASSERT(!result[0].inlineCallFrame);
return result;
}
@@ -190,7 +190,7 @@
out.print(briefFunctionInformation(), ":<", RawPointer(executable.get()));
if (executable->isStrictMode())
out.print(" (StrictMode)");
- out.print(", bc#", caller.bytecodeIndex, ", ", kind);
+ out.print(", bc#", directCaller.bytecodeIndex, ", ", static_cast<Kind>(kind));
if (isClosureCall)
out.print(", closure call");
else
@@ -218,12 +218,18 @@
case JSC::InlineCallFrame::Construct:
out.print("Construct");
return;
+ case JSC::InlineCallFrame::TailCall:
+ out.print("TailCall");
+ return;
case JSC::InlineCallFrame::CallVarargs:
out.print("CallVarargs");
return;
case JSC::InlineCallFrame::ConstructVarargs:
out.print("ConstructVarargs");
return;
+ case JSC::InlineCallFrame::TailCallVarargs:
+ out.print("TailCallVarargs");
+ return;
case JSC::InlineCallFrame::GetterCall:
out.print("GetterCall");
return;
Modified: branches/jsc-tailcall/Source/_javascript_Core/bytecode/CodeOrigin.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/bytecode/CodeOrigin.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/bytecode/CodeOrigin.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -26,6 +26,7 @@
#ifndef CodeOrigin_h
#define CodeOrigin_h
+#include "CallMode.h"
#include "CodeBlockHash.h"
#include "CodeSpecializationKind.h"
#include "ValueRecovery.h"
@@ -120,37 +121,58 @@
enum Kind {
Call,
Construct,
+ TailCall,
CallVarargs,
ConstructVarargs,
+ TailCallVarargs,
// For these, the stackOffset incorporates the argument count plus the true return PC
// slot.
GetterCall,
SetterCall
};
-
- static Kind kindFor(CodeSpecializationKind kind)
+
+ static CallMode callModeFor(Kind kind)
{
switch (kind) {
- case CodeForCall:
+ case Call:
+ case CallVarargs:
+ case GetterCall:
+ case SetterCall:
+ return CallMode::Regular;
+ case TailCall:
+ case TailCallVarargs:
+ return CallMode::Tail;
+ case Construct:
+ case ConstructVarargs:
+ return CallMode::Construct;
+ }
+ }
+
+ static Kind kindFor(CallMode callMode)
+ {
+ switch (callMode) {
+ case CallMode::Regular:
return Call;
- case CodeForConstruct:
+ case CallMode::Construct:
return Construct;
+ case CallMode::Tail:
+ return TailCall;
}
RELEASE_ASSERT_NOT_REACHED();
- return Call;
}
- static Kind varargsKindFor(CodeSpecializationKind kind)
+ static Kind varargsKindFor(CallMode callMode)
{
- switch (kind) {
- case CodeForCall:
+ switch (callMode) {
+ case CallMode::Regular:
return CallVarargs;
- case CodeForConstruct:
+ case CallMode::Construct:
return ConstructVarargs;
+ case CallMode::Tail:
+ return TailCallVarargs;
}
RELEASE_ASSERT_NOT_REACHED();
- return Call;
}
static CodeSpecializationKind specializationKindFor(Kind kind)
@@ -158,6 +180,8 @@
switch (kind) {
case Call:
case CallVarargs:
+ case TailCall:
+ case TailCallVarargs:
case GetterCall:
case SetterCall:
return CodeForCall;
@@ -166,7 +190,6 @@
return CodeForConstruct;
}
RELEASE_ASSERT_NOT_REACHED();
- return CodeForCall;
}
static bool isVarargs(Kind kind)
@@ -174,6 +197,7 @@
switch (kind) {
case CallVarargs:
case ConstructVarargs:
+ case TailCallVarargs:
return true;
default:
return false;
@@ -183,11 +207,49 @@
{
return isVarargs(static_cast<Kind>(kind));
}
+
+ static bool isTail(Kind kind)
+ {
+ switch (kind) {
+ case TailCall:
+ case TailCallVarargs:
+ return true;
+ default:
+ return false;
+ }
+ }
+ bool isTail() const
+ {
+ return isTail(static_cast<Kind>(kind));
+ }
+
+ static CodeOrigin* computeCallerSkippingDeadFrames(InlineCallFrame* inlineCallFrame)
+ {
+ CodeOrigin* codeOrigin;
+ bool tailCallee;
+ do {
+ tailCallee = inlineCallFrame->isTail();
+ codeOrigin = &inlineCallFrame->directCaller;
+ inlineCallFrame = codeOrigin->inlineCallFrame;
+ } while (inlineCallFrame && tailCallee);
+ if (tailCallee)
+ return nullptr;
+ return codeOrigin;
+ }
+ CodeOrigin* getCallerSkippingDeadFrames()
+ {
+ return computeCallerSkippingDeadFrames(this);
+ }
+ InlineCallFrame* getCallerInlineFrameSkippingDeadFrames()
+ {
+ CodeOrigin* caller = getCallerSkippingDeadFrames();
+ return caller ? caller->inlineCallFrame : nullptr;
+ }
Vector<ValueRecovery> arguments; // Includes 'this'.
WriteBarrier<ScriptExecutable> executable;
ValueRecovery calleeRecovery;
- CodeOrigin caller;
+ CodeOrigin directCaller;
signed stackOffset : 28;
unsigned kind : 3; // real type is Kind
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGAbstractInterpreterInlines.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGAbstractInterpreterInlines.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGAbstractInterpreterInlines.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -1442,6 +1442,13 @@
case Return:
m_state.setIsValid(false);
break;
+
+ case TailCall:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
+ clobberWorld(node->origin.semantic, clobberLimit);
+ m_state.setIsValid(false);
+ break;
case Throw:
case ThrowReferenceError:
@@ -2253,13 +2260,16 @@
break;
case Call:
+ case TailCallInlinedCaller:
case Construct:
case NativeCall:
case NativeConstruct:
case CallVarargs:
case CallForwardVarargs:
+ case TailCallVarargsInlinedCaller:
case ConstructVarargs:
case ConstructForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
clobberWorld(node->origin.semantic, clobberLimit);
forNode(node).makeHeapTop();
break;
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGArgumentsEliminationPhase.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGArgumentsEliminationPhase.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGArgumentsEliminationPhase.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -167,6 +167,8 @@
case CallVarargs:
case ConstructVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
escape(node->child1());
escape(node->child3());
break;
@@ -553,7 +555,9 @@
}
case CallVarargs:
- case ConstructVarargs: {
+ case ConstructVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller: {
Node* candidate = node->child2().node();
if (!m_candidates.contains(candidate))
break;
@@ -578,16 +582,44 @@
m_graph.m_varArgChildren.append(node->child3());
for (Node* argument : arguments)
m_graph.m_varArgChildren.append(Edge(argument));
- node->setOpAndDefaultFlags(
- node->op() == CallVarargs ? Call : Construct);
+ switch (node->op()) {
+ case CallVarargs:
+ node->setOpAndDefaultFlags(Call);
+ break;
+ case ConstructVarargs:
+ node->setOpAndDefaultFlags(Construct);
+ break;
+ case TailCallVarargs:
+ node->setOpAndDefaultFlags(TailCall);
+ break;
+ case TailCallVarargsInlinedCaller:
+ node->setOpAndDefaultFlags(TailCallInlinedCaller);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
node->children = AdjacencyList(
AdjacencyList::Variable,
firstChild, m_graph.m_varArgChildren.size() - firstChild);
break;
}
- node->setOpAndDefaultFlags(
- node->op() == CallVarargs ? CallForwardVarargs : ConstructForwardVarargs);
+ switch (node->op()) {
+ case CallVarargs:
+ node->setOpAndDefaultFlags(CallForwardVarargs);
+ break;
+ case ConstructVarargs:
+ node->setOpAndDefaultFlags(ConstructForwardVarargs);
+ break;
+ case TailCallVarargs:
+ node->setOpAndDefaultFlags(TailCallForwardVarargs);
+ break;
+ case TailCallVarargsInlinedCaller:
+ node->setOpAndDefaultFlags(TailCallForwardVarargsInlinedCaller);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
break;
}
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGBasicBlock.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGBasicBlock.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGBasicBlock.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -92,6 +92,8 @@
case Branch:
case Switch:
case Return:
+ case TailCall:
+ case TailCallVarargs:
case Unreachable:
return NodeAndIndex(node, i);
// The bitter end can contain Phantoms and the like. There will probably only be one or two nodes after the terminal. They are all no-ops and will not have any checked children.
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -180,14 +180,14 @@
Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
SpeculatedType prediction);
void handleCall(
- int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
+ int result, NodeType op, CallMode, unsigned instructionSize,
Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
- void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
- void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
- void handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind);
+ void handleCall(int result, NodeType op, CallMode, unsigned instructionSize, int callee, int argCount, int registerOffset);
+ void handleCall(Instruction* pc, NodeType op, CallMode);
+ void handleVarargsCall(Instruction* pc, NodeType op, CallMode);
void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgumnt);
void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
- unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
+ unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CallMode); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
// Handle inlining. Return true if it succeeded, false if we need to plant a call.
bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
@@ -600,6 +600,11 @@
return m_inlineStackTop->m_inlineCallFrame;
}
+ bool allInlineFramesAreTailCalls()
+ {
+ return !inlineCallFrame() || !inlineCallFrame()->getCallerSkippingDeadFrames();
+ }
+
CodeOrigin currentCodeOrigin()
{
return CodeOrigin(m_currentIndex, inlineCallFrame());
@@ -680,7 +685,7 @@
Node* addCallWithoutSettingResult(
NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
- SpeculatedType prediction)
+ OpInfo prediction)
{
addVarArgChild(callee);
size_t frameSize = JSStack::CallFrameHeaderSize + argCount;
@@ -693,15 +698,22 @@
for (int i = 0; i < argCount; ++i)
addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
- return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
+ return addToGraph(Node::VarArg, op, opInfo, prediction);
}
Node* addCall(
int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
SpeculatedType prediction)
{
+ if (op == TailCall) {
+ if (allInlineFramesAreTailCalls())
+ return addCallWithoutSettingResult(op, OpInfo(), callee, argCount, registerOffset, OpInfo());
+ op = TailCallInlinedCaller;
+ }
+
+
Node* call = addCallWithoutSettingResult(
- op, opInfo, callee, argCount, registerOffset, prediction);
+ op, opInfo, callee, argCount, registerOffset, OpInfo(prediction));
VirtualRegister resultReg(result);
if (resultReg.isValid())
set(resultReg, call);
@@ -717,14 +729,62 @@
SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
{
- ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
- return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
+ SpeculatedType prediction;
+ CodeBlock* profiledBlock = nullptr;
+
+ {
+ ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
+ prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
+
+ if (prediction == SpecNone) {
+ // If we have no information about the values this
+ // node generates, we check if by any chance it is
+ // a tail call opcode. In that case, we walk up the
+ // inline frames to find a call higher in the call
+ // chain and use its prediction. If we only have
+ // inlined tail call frames, we use SpecFullTop
+ // to avoid a spurious OSR exit.
+ Instruction* instruction = m_inlineStackTop->m_profiledBlock->instructions().begin() + bytecodeIndex;
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instruction->u.opcode);
+
+ switch (opcodeID) {
+ case op_tail_call:
+ case op_tail_call_varargs: {
+ if (!inlineCallFrame()) {
+ prediction = SpecFullTop;
+ break;
+ }
+ CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingDeadFrames();
+ if (!codeOrigin) {
+ prediction = SpecFullTop;
+ break;
+ }
+ InlineStackEntry* stack = m_inlineStackTop;
+ while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame)
+ stack = stack->m_caller;
+ bytecodeIndex = codeOrigin->bytecodeIndex;
+ profiledBlock = stack->m_profiledBlock;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (profiledBlock) {
+ ConcurrentJITLocker locker(profiledBlock->m_lock);
+ prediction = profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
+ }
+
+ return prediction;
}
SpeculatedType getPrediction(unsigned bytecodeIndex)
{
SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
-
+
if (prediction == SpecNone) {
// We have no information about what values this node generates. Give up
// on executing this code, since we're likely to do more damage than good.
@@ -1014,16 +1074,17 @@
m_currentIndex += OPCODE_LENGTH(name); \
return shouldContinueParsing
-void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
+void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode)
{
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call));
handleCall(
- pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
+ pc[1].u.operand, op, callMode, OPCODE_LENGTH(op_call),
pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
}
void ByteCodeParser::handleCall(
- int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
+ int result, NodeType op, CallMode callMode, unsigned instructionSize,
int callee, int argumentCountIncludingThis, int registerOffset)
{
Node* callTarget = get(VirtualRegister(callee));
@@ -1033,17 +1094,17 @@
m_inlineStackTop->m_callLinkInfos, m_callContextMap);
handleCall(
- result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
+ result, op, callMode, instructionSize, callTarget,
argumentCountIncludingThis, registerOffset, callLinkStatus);
}
void ByteCodeParser::handleCall(
- int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
+ int result, NodeType op, CallMode callMode, unsigned instructionSize,
Node* callTarget, int argumentCountIncludingThis, int registerOffset,
CallLinkStatus callLinkStatus)
{
handleCall(
- result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
+ result, op, InlineCallFrame::kindFor(callMode), instructionSize, callTarget, argumentCountIncludingThis,
registerOffset, callLinkStatus, getPrediction());
}
@@ -1063,7 +1124,7 @@
if (!callLinkStatus.canOptimize()) {
// Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
// that we cannot optimize them.
-
+
addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
return;
}
@@ -1096,13 +1157,14 @@
}
}
#endif
-
+
addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
}
-void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
+void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode)
{
ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));
+ ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs));
int result = pc[1].u.operand;
int callee = pc[2].u.operand;
@@ -1125,7 +1187,7 @@
dataLog(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");
if (callLinkStatus.canOptimize()
- && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(kind), prediction)) {
+ && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(callMode), prediction)) {
if (m_graph.compilation())
m_graph.compilation()->noticeInlinedCall();
return;
@@ -1135,7 +1197,15 @@
data->firstVarArgOffset = firstVarArgOffset;
Node* thisChild = get(VirtualRegister(thisReg));
-
+
+ if (op == TailCallVarargs) {
+ if (allInlineFramesAreTailCalls()) {
+ addToGraph(op, OpInfo(data), OpInfo(), callTarget, get(VirtualRegister(arguments)), thisChild);
+ return;
+ }
+ op = TailCallVarargsInlinedCaller;
+ }
+
Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild);
VirtualRegister resultReg(result);
if (resultReg.isValid())
@@ -1170,8 +1240,9 @@
addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
}
-unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
+unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CallMode callMode)
{
+ CodeSpecializationKind kind = specializationKindFor(callMode);
if (verbose)
dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");
@@ -1213,7 +1284,7 @@
CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
codeBlock, kind, callee.isClosureCall());
if (verbose) {
- dataLog(" Kind: ", kind, "\n");
+ dataLog(" Call mode: ", callMode, "\n");
dataLog(" Is closure call: ", callee.isClosureCall(), "\n");
dataLog(" Capability level: ", capabilityLevel, "\n");
dataLog(" Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
@@ -1284,7 +1355,7 @@
{
CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
- ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);
+ ASSERT(inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind)) != UINT_MAX);
CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
insertChecks(codeBlock);
@@ -1379,7 +1450,7 @@
m_currentBlock = m_graph.lastBlock();
return;
}
-
+
if (Options::verboseDFGByteCodeParsing())
dataLog(" Creating new block after inlining.\n");
@@ -1486,7 +1557,7 @@
}
}
- unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
+ unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, InlineCallFrame::callModeFor(kind));
if (myInliningCost > inliningBalance)
return false;
@@ -3354,6 +3425,12 @@
}
case op_ret:
+ if (m_currentBlock->terminal()) {
+ // We could be the dummy return after a non-inlined, non-emulated tail call
+ Node* terminal = m_currentBlock->terminal();
+ ASSERT_UNUSED(terminal, terminal->op() == Return || terminal->op() == TailCall || terminal->op() == TailCallVarargs);
+ LAST_OPCODE(op_ret);
+ }
if (inlineCallFrame()) {
flushForReturn();
if (m_inlineStackTop->m_returnValue.isValid())
@@ -3401,22 +3478,37 @@
LAST_OPCODE(op_throw_static_error);
case op_call:
- handleCall(currentInstruction, Call, CodeForCall);
+ handleCall(currentInstruction, Call, CallMode::Regular);
// Verify that handleCall(), which could have inlined the callee, didn't trash m_currentInstruction
ASSERT(m_currentInstruction == currentInstruction);
NEXT_OPCODE(op_call);
-
+
+ case op_tail_call:
+ flushForReturn();
+ handleCall(currentInstruction, TailCall, CallMode::Tail);
+ // Verify that handleCall(), which could have inlined the callee, didn't trash m_currentInstruction
+ ASSERT(m_currentInstruction == currentInstruction);
+ // We let the following op_ret handle cases related to
+ // inlining to keep things simple.
+ NEXT_OPCODE(op_tail_call);
+
case op_construct:
- handleCall(currentInstruction, Construct, CodeForConstruct);
+ handleCall(currentInstruction, Construct, CallMode::Construct);
NEXT_OPCODE(op_construct);
case op_call_varargs: {
- handleVarargsCall(currentInstruction, CallVarargs, CodeForCall);
+ handleVarargsCall(currentInstruction, CallVarargs, CallMode::Regular);
NEXT_OPCODE(op_call_varargs);
}
+
+ case op_tail_call_varargs: {
+ flushForReturn();
+ handleVarargsCall(currentInstruction, TailCallVarargs, CallMode::Tail);
+ NEXT_OPCODE(op_tail_call_varargs);
+ }
case op_construct_varargs: {
- handleVarargsCall(currentInstruction, ConstructVarargs, CodeForConstruct);
+ handleVarargsCall(currentInstruction, ConstructVarargs, CallMode::Construct);
NEXT_OPCODE(op_construct_varargs);
}
@@ -4038,7 +4130,7 @@
m_inlineCallFrame->isClosureCall = false;
} else
m_inlineCallFrame->isClosureCall = true;
- m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin();
+ m_inlineCallFrame->directCaller = byteCodeParser->currentCodeOrigin();
m_inlineCallFrame->arguments.resizeToFit(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
m_inlineCallFrame->kind = kind;
@@ -4113,7 +4205,7 @@
if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump();
if (inlineCallFrame()) {
- DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->caller);
+ DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->directCaller);
deferredSourceDump.append(dump);
} else
deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
@@ -4124,7 +4216,7 @@
if (inlineCallFrame()) {
dataLog(
" for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
- " ", inlineCallFrame()->caller);
+ " ", inlineCallFrame()->directCaller);
}
dataLog(
": needsActivation = ", codeBlock->needsActivation(),
@@ -4208,7 +4300,7 @@
return;
}
- m_currentBlock = 0;
+ m_currentBlock = nullptr;
} while (m_currentIndex < limit);
}
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGCapabilities.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGCapabilities.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGCapabilities.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -179,8 +179,10 @@
case op_throw:
case op_throw_static_error:
case op_call:
+ case op_tail_call:
case op_construct:
case op_call_varargs:
+ case op_tail_call_varargs:
case op_construct_varargs:
case op_create_direct_arguments:
case op_create_scoped_arguments:
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGClobberize.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGClobberize.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGClobberize.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -376,11 +376,14 @@
case ArrayPush:
case ArrayPop:
case Call:
+ case TailCallInlinedCaller:
case Construct:
case NativeCall:
case NativeConstruct:
case CallVarargs:
case CallForwardVarargs:
+ case TailCallVarargsInlinedCaller:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructVarargs:
case ConstructForwardVarargs:
case ToPrimitive:
@@ -389,6 +392,13 @@
read(World);
write(Heap);
return;
+
+ case TailCall:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
+ read(World);
+ write(SideState);
+ return;
case GetGetter:
read(GetterSetter_getter);
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGDoesGC.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGDoesGC.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGDoesGC.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -117,12 +117,16 @@
case CompareEqConstant:
case CompareStrictEq:
case Call:
+ case TailCallInlinedCaller:
case Construct:
case CallVarargs:
+ case TailCallVarargsInlinedCaller:
case ConstructVarargs:
case LoadVarargs:
case CallForwardVarargs:
case ConstructForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case NativeCall:
case NativeConstruct:
case Breakpoint:
@@ -149,6 +153,8 @@
case Branch:
case Switch:
case Return:
+ case TailCall:
+ case TailCallVarargs:
case Throw:
case CountExecution:
case ForceOSRExit:
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGFixupPhase.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGFixupPhase.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGFixupPhase.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -1278,11 +1278,15 @@
case NotifyWrite:
case VarInjectionWatchpoint:
case Call:
+ case TailCallInlinedCaller:
case Construct:
case CallVarargs:
+ case TailCallVarargsInlinedCaller:
case ConstructVarargs:
case CallForwardVarargs:
case ConstructForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case LoadVarargs:
case ProfileControlFlow:
case NativeCall:
@@ -1302,6 +1306,8 @@
case CreateClonedArguments:
case Jump:
case Return:
+ case TailCall:
+ case TailCallVarargs:
case Throw:
case ThrowReferenceError:
case CountExecution:
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGGraph.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGGraph.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGGraph.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -884,30 +884,31 @@
bool Graph::isLiveInBytecode(VirtualRegister operand, CodeOrigin codeOrigin)
{
+ CodeOrigin* codeOriginPtr = &codeOrigin;
for (;;) {
VirtualRegister reg = VirtualRegister(
- operand.offset() - codeOrigin.stackOffset());
+ operand.offset() - codeOriginPtr->stackOffset());
- if (operand.offset() < codeOrigin.stackOffset() + JSStack::CallFrameHeaderSize) {
+ if (operand.offset() < codeOriginPtr->stackOffset() + JSStack::CallFrameHeaderSize) {
if (reg.isArgument()) {
RELEASE_ASSERT(reg.offset() < JSStack::CallFrameHeaderSize);
- if (codeOrigin.inlineCallFrame->isClosureCall
+ if (codeOriginPtr->inlineCallFrame->isClosureCall
&& reg.offset() == JSStack::Callee)
return true;
- if (codeOrigin.inlineCallFrame->isVarargs()
+ if (codeOriginPtr->inlineCallFrame->isVarargs()
&& reg.offset() == JSStack::ArgumentCount)
return true;
return false;
}
- return livenessFor(codeOrigin.inlineCallFrame).operandIsLive(
- reg.offset(), codeOrigin.bytecodeIndex);
+ return livenessFor(codeOriginPtr->inlineCallFrame).operandIsLive(
+ reg.offset(), codeOriginPtr->bytecodeIndex);
}
- InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
+ InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame;
if (!inlineCallFrame)
break;
@@ -917,7 +918,11 @@
&& static_cast<size_t>(reg.toArgument()) < inlineCallFrame->arguments.size())
return true;
- codeOrigin = inlineCallFrame->caller;
+ codeOriginPtr = inlineCallFrame->getCallerSkippingDeadFrames();
+
+ // The first inline call frame could be an inline tail call
+ if (!codeOriginPtr)
+ break;
}
return true;
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGGraph.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGGraph.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGGraph.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -696,9 +696,11 @@
// call, both callee and caller will see the variables live.
VirtualRegister exclusionStart;
VirtualRegister exclusionEnd;
+
+ CodeOrigin* codeOriginPtr = &codeOrigin;
for (;;) {
- InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
+ InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame;
VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);
if (inlineCallFrame) {
@@ -710,7 +712,7 @@
CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
- const FastBitVector& liveness = fullLiveness.getLiveness(codeOrigin.bytecodeIndex);
+ const FastBitVector& liveness = fullLiveness.getLiveness(codeOriginPtr->bytecodeIndex);
for (unsigned relativeLocal = codeBlock->m_numCalleeRegisters; relativeLocal--;) {
VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);
@@ -737,7 +739,11 @@
for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
functor(reg);
- codeOrigin = inlineCallFrame->caller;
+ codeOriginPtr = inlineCallFrame->getCallerSkippingDeadFrames();
+
+ // The first inline call frame could be an inline tail call
+ if (!codeOriginPtr)
+ break;
}
}
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGInPlaceAbstractState.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGInPlaceAbstractState.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGInPlaceAbstractState.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -393,6 +393,8 @@
}
case Return:
+ case TailCall:
+ case TailCallVarargs:
case Unreachable:
ASSERT(basicBlock->cfaBranchDirection == InvalidBranchDirection);
return false;
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGNode.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGNode.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGNode.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -995,6 +995,10 @@
switch (op()) {
case CallVarargs:
case CallForwardVarargs:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
+ case TailCallVarargsInlinedCaller:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructVarargs:
case ConstructForwardVarargs:
return true;
@@ -1092,6 +1096,8 @@
case Branch:
case Switch:
case Return:
+ case TailCall:
+ case TailCallVarargs:
case Unreachable:
return true;
default:
@@ -1242,10 +1248,13 @@
case GetByIdFlush:
case GetByVal:
case Call:
+ case TailCallInlinedCaller:
case Construct:
case CallVarargs:
+ case TailCallVarargsInlinedCaller:
case ConstructVarargs:
case CallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case NativeCall:
case NativeConstruct:
case GetByOffset:
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGNodeType.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGNodeType.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGNodeType.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -239,6 +239,9 @@
macro(CallForwardVarargs, NodeResultJS | NodeMustGenerate) \
macro(ConstructVarargs, NodeResultJS | NodeMustGenerate) \
macro(ConstructForwardVarargs, NodeResultJS | NodeMustGenerate) \
+ macro(TailCallInlinedCaller, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \
+ macro(TailCallVarargsInlinedCaller, NodeResultJS | NodeMustGenerate) \
+ macro(TailCallForwardVarargsInlinedCaller, NodeResultJS | NodeMustGenerate) \
macro(NativeCall, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \
macro(NativeConstruct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs) \
\
@@ -304,6 +307,9 @@
macro(Branch, NodeMustGenerate) \
macro(Switch, NodeMustGenerate) \
macro(Return, NodeMustGenerate) \
+ macro(TailCall, NodeMustGenerate | NodeHasVarArgs) \
+ macro(TailCallVarargs, NodeMustGenerate) \
+ macro(TailCallForwardVarargs, NodeMustGenerate) \
macro(Unreachable, NodeMustGenerate) \
\
/* Count execution. */\
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOSRExitCompilerCommon.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOSRExitCompilerCommon.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOSRExitCompilerCommon.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -64,7 +64,7 @@
AssemblyHelpers::JumpList loopThreshold;
- for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
+ for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
loopThreshold.append(
jit.branchTest8(
AssemblyHelpers::NonZero,
@@ -136,63 +136,80 @@
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
+ // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
+ // in presence of inlined tail calls.
+ // https://bugs.webkit.org/show_bug.cgi?id=147511
ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));
- CodeOrigin codeOrigin;
- for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
- InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
- CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
- CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
- void* jumpTarget = nullptr;
+ const CodeOrigin* codeOrigin;
+ for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingDeadFrames()) {
+ InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;
+ CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(*codeOrigin);
+ CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingDeadFrames();
void* trueReturnPC = nullptr;
-
- unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
-
- switch (inlineCallFrame->kind) {
- case InlineCallFrame::Call:
- case InlineCallFrame::Construct:
- case InlineCallFrame::CallVarargs:
- case InlineCallFrame::ConstructVarargs: {
- CallLinkInfo* callLinkInfo =
- baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
- RELEASE_ASSERT(callLinkInfo);
-
- jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
- break;
- }
-
- case InlineCallFrame::GetterCall:
- case InlineCallFrame::SetterCall: {
- StructureStubInfo* stubInfo =
- baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
- RELEASE_ASSERT(stubInfo);
-
+ GPRReg callerFrameGPR = GPRInfo::callFrameRegister;
+
+ if (!trueCaller) {
+ ASSERT(inlineCallFrame->isTail());
+ jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
+ jit.storePtr(GPRInfo::regT3, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
+ jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), GPRInfo::regT3);
+ callerFrameGPR = GPRInfo::regT3;
+ } else {
+ CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
+ unsigned callBytecodeIndex = trueCaller->bytecodeIndex;
+ void* jumpTarget = nullptr;
+
switch (inlineCallFrame->kind) {
+ case InlineCallFrame::Call:
+ case InlineCallFrame::Construct:
+ case InlineCallFrame::CallVarargs:
+ case InlineCallFrame::ConstructVarargs:
+ case InlineCallFrame::TailCall:
+ case InlineCallFrame::TailCallVarargs: {
+ CallLinkInfo* callLinkInfo =
+ baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
+ RELEASE_ASSERT(callLinkInfo);
+
+ jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
+ break;
+ }
+
case InlineCallFrame::GetterCall:
- jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
+ case InlineCallFrame::SetterCall: {
+ StructureStubInfo* stubInfo =
+ baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
+ RELEASE_ASSERT(stubInfo);
+
+ switch (inlineCallFrame->kind) {
+ case InlineCallFrame::GetterCall:
+ jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
+ break;
+ case InlineCallFrame::SetterCall:
+ jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
+ stubInfo->patch.deltaCallToDone).executableAddress();
break;
- case InlineCallFrame::SetterCall:
- jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
+ } }
+
+ if (trueCaller->inlineCallFrame) {
+ jit.addPtr(
+ AssemblyHelpers::TrustedImm32(trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue)),
+ GPRInfo::callFrameRegister,
+ GPRInfo::regT3);
+ callerFrameGPR = GPRInfo::regT3;
}
-
- trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
- stubInfo->patch.deltaCallToDone).executableAddress();
- break;
- } }
- GPRReg callerFrameGPR;
- if (inlineCallFrame->caller.inlineCallFrame) {
- jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
- callerFrameGPR = GPRInfo::regT3;
- } else
- callerFrameGPR = GPRInfo::callFrameRegister;
-
- jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
+ jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
+ }
+
if (trueReturnPC)
jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));
@@ -202,13 +219,13 @@
jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
#if USE(JSVALUE64)
jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
+ uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin->bytecodeIndex);
jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
if (!inlineCallFrame->isClosureCall)
jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#else // USE(JSVALUE64) // so this is the 32-bit part
jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
- Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
+ Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
@@ -217,13 +234,16 @@
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
}
+ // Don't need to set the toplevel code origin if we only did inline tail calls
+ if (codeOrigin) {
#if USE(JSVALUE64)
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
+ uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin->bytecodeIndex);
#else
- Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
+ Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin->bytecodeIndex;
+ uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
- jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
+ jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
+ }
}
#if ENABLE(GGC)
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOSRExitPreparation.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOSRExitPreparation.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOSRExitPreparation.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -41,7 +41,7 @@
VM& vm = exec->vm();
DeferGC deferGC(vm.heap);
- for (; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ for (; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->directCaller) {
FunctionExecutable* executable =
static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get());
CodeBlock* codeBlock = executable->baselineCodeBlockFor(
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOperations.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOperations.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGOperations.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -1276,7 +1276,7 @@
ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType()));
bool didTryToEnterIntoInlinedLoops = false;
- for (InlineCallFrame* inlineCallFrame = exit->m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
+ for (InlineCallFrame* inlineCallFrame = exit->m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
if (inlineCallFrame->executable->didTryToEnterInLoop()) {
didTryToEnterIntoInlinedLoops = true;
break;
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGPreciseLocalClobberize.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGPreciseLocalClobberize.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGPreciseLocalClobberize.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -111,7 +111,9 @@
case GetMyArgumentByVal:
case ForwardVarargs:
case CallForwardVarargs:
- case ConstructForwardVarargs: {
+ case ConstructForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller: {
InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
if (!inlineCallFrame) {
// Read the outermost arguments and argument count.
@@ -138,7 +140,7 @@
m_read(VirtualRegister(i));
// Read all of the inline arguments and call frame headers that we didn't already capture.
- for (InlineCallFrame* inlineCallFrame = m_node->origin.semantic.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
+ for (InlineCallFrame* inlineCallFrame = m_node->origin.semantic.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->getCallerInlineFrameSkippingDeadFrames()) {
for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;)
m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
if (inlineCallFrame->isClosureCall)
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGPredictionPropagationPhase.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGPredictionPropagationPhase.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGPredictionPropagationPhase.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -194,11 +194,14 @@
case MultiGetByOffset:
case GetDirectPname:
case Call:
+ case TailCallInlinedCaller:
case Construct:
case CallVarargs:
+ case TailCallVarargsInlinedCaller:
case ConstructVarargs:
case CallForwardVarargs:
case ConstructForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case NativeCall:
case NativeConstruct:
case GetGlobalVar:
@@ -630,6 +633,9 @@
case PutClosureVar:
case PutToArguments:
case Return:
+ case TailCall:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
case Throw:
case PutById:
case PutByIdFlush:
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSafeToExecute.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSafeToExecute.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSafeToExecute.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -191,8 +191,11 @@
case CompareEqConstant:
case CompareStrictEq:
case Call:
+ case TailCallInlinedCaller:
case Construct:
case CallVarargs:
+ case TailCallVarargsInlinedCaller:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructVarargs:
case LoadVarargs:
case CallForwardVarargs:
@@ -235,6 +238,9 @@
case Branch:
case Switch:
case Return:
+ case TailCall:
+ case TailCallVarargs:
+ case TailCallForwardVarargs:
case Throw:
case ThrowReferenceError:
case CountExecution:
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -644,10 +644,20 @@
CallLinkInfo::CallType callType;
bool isVarargs = false;
bool isForwardVarargs = false;
+ bool isTail = false;
+ bool isEmulatedTail = false;
switch (node->op()) {
case Call:
callType = CallLinkInfo::Call;
break;
+ case TailCall:
+ callType = CallLinkInfo::TailCall;
+ isTail = true;
+ break;
+ case TailCallInlinedCaller:
+ callType = CallLinkInfo::Call;
+ isEmulatedTail = true;
+ break;
case Construct:
callType = CallLinkInfo::Construct;
break;
@@ -655,6 +665,16 @@
callType = CallLinkInfo::CallVarargs;
isVarargs = true;
break;
+ case TailCallVarargs:
+ callType = CallLinkInfo::TailCallVarargs;
+ isVarargs = true;
+ isTail = true;
+ break;
+ case TailCallVarargsInlinedCaller:
+ callType = CallLinkInfo::CallVarargs;
+ isVarargs = true;
+ isEmulatedTail = true;
+ break;
case ConstructVarargs:
callType = CallLinkInfo::ConstructVarargs;
isVarargs = true;
@@ -663,6 +683,16 @@
callType = CallLinkInfo::CallVarargs;
isForwardVarargs = true;
break;
+ case TailCallForwardVarargs:
+ callType = CallLinkInfo::TailCallVarargs;
+ isTail = true;
+ isForwardVarargs = true;
+ break;
+ case TailCallForwardVarargsInlinedCaller:
+ callType = CallLinkInfo::CallVarargs;
+ isEmulatedTail = true;
+ isForwardVarargs = true;
+ break;
case ConstructForwardVarargs:
callType = CallLinkInfo::ConstructVarargs;
isForwardVarargs = true;
@@ -785,7 +815,14 @@
m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(JSStack::Callee));
m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(JSStack::Callee));
- flushRegisters();
+ // FIXME: We should do an efficient move of the arguments into
+ // their target stack position instead of building then memmoving
+ // the callee frame.
+ // https://bugs.webkit.org/show_bug.cgi?id=147508
+ if (isTail)
+ ASSERT(isFlushed());
+ else
+ flushRegisters();
GPRFlushedCallResult resultPayload(this);
GPRFlushedCallResult2 resultTag(this);
@@ -795,15 +832,30 @@
JITCompiler::DataLabelPtr targetToCheck;
JITCompiler::JumpList slowPath;
- m_jit.emitStoreCodeOrigin(node->origin.semantic);
+ CodeOrigin staticOrigin = node->origin.semantic;
+ ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame->getCallerSkippingDeadFrames());
+ ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame && staticOrigin.inlineCallFrame->getCallerSkippingDeadFrames()));
+ CodeOrigin dynamicOrigin =
+ isEmulatedTail ? *staticOrigin.inlineCallFrame->getCallerSkippingDeadFrames() : staticOrigin;
+
+ m_jit.emitStoreCodeOrigin(dynamicOrigin);
CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo();
slowPath.append(m_jit.branchIfNotCell(callee.jsValueRegs()));
slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
- JITCompiler::Call fastCall = m_jit.nearCall();
+ if (isTail) {
+ m_jit.emitRestoreCalleeSaves();
+ // FIXME: We should do an efficient move of the arguments into
+ // their target stack position instead of building then memmoving
+ // the callee frame.
+ // https://bugs.webkit.org/show_bug.cgi?id=147508
+ m_jit.prepareForTailCallSlow();
+ }
+ JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall();
+
JITCompiler::Jump done = m_jit.jump();
slowPath.link(&m_jit);
@@ -825,17 +877,22 @@
done.link(&m_jit);
- m_jit.setupResults(resultPayloadGPR, resultTagGPR);
+ if (isTail)
+ m_jit.breakpoint();
+ else {
+ m_jit.setupResults(resultPayloadGPR, resultTagGPR);
- jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly);
+ // After the calls are done, we need to reestablish our stack
+ // pointer. We rely on this for varargs calls, calls with arity
+ // mismatch (the callframe is slided) and tail calls.
+ m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
+ }
+
info->setUpCall(callType, node->origin.semantic, calleePayloadGPR);
m_jit.addJSCall(fastCall, slowCall, targetToCheck, info);
-
- // After the calls are done, we need to reestablish our stack
- // pointer. We rely on this for varargs calls, calls with arity
- // mismatch (the callframe is slided) and tail calls.
- m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
+
}
template<bool strict>
@@ -4219,10 +4276,16 @@
break;
case Call:
+ case TailCall:
+ case TailCallInlinedCaller:
case Construct:
case CallVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
+ case ConstructVarargs:
case CallForwardVarargs:
- case ConstructVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
case ConstructForwardVarargs:
emitCall(node);
break;
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSpeculativeJIT64.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSpeculativeJIT64.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGSpeculativeJIT64.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -630,10 +630,20 @@
CallLinkInfo::CallType callType;
bool isVarargs = false;
bool isForwardVarargs = false;
+ bool isTail = false;
+ bool isEmulatedTail = false;
switch (node->op()) {
case Call:
callType = CallLinkInfo::Call;
break;
+ case TailCall:
+ callType = CallLinkInfo::TailCall;
+ isTail = true;
+ break;
+ case TailCallInlinedCaller:
+ callType = CallLinkInfo::Call;
+ isEmulatedTail = true;
+ break;
case Construct:
callType = CallLinkInfo::Construct;
break;
@@ -641,6 +651,16 @@
callType = CallLinkInfo::CallVarargs;
isVarargs = true;
break;
+ case TailCallVarargs:
+ callType = CallLinkInfo::TailCallVarargs;
+ isVarargs = true;
+ isTail = true;
+ break;
+ case TailCallVarargsInlinedCaller:
+ callType = CallLinkInfo::CallVarargs;
+ isVarargs = true;
+ isEmulatedTail = true;
+ break;
case ConstructVarargs:
callType = CallLinkInfo::ConstructVarargs;
isVarargs = true;
@@ -653,6 +673,16 @@
callType = CallLinkInfo::ConstructVarargs;
isForwardVarargs = true;
break;
+ case TailCallForwardVarargs:
+ callType = CallLinkInfo::TailCallVarargs;
+ isTail = true;
+ isForwardVarargs = true;
+ break;
+ case TailCallForwardVarargsInlinedCaller:
+ callType = CallLinkInfo::CallVarargs;
+ isEmulatedTail = true;
+ isForwardVarargs = true;
+ break;
default:
DFG_CRASH(m_jit.graph(), node, "bad node type");
break;
@@ -761,23 +791,43 @@
GPRReg calleeGPR = callee.gpr();
callee.use();
m_jit.store64(calleeGPR, JITCompiler::calleeFrameSlot(JSStack::Callee));
-
- flushRegisters();
+ // FIXME: We should do an efficient move of the arguments into
+ // their target stack position instead of building then memmoving
+ // the callee frame.
+ // https://bugs.webkit.org/show_bug.cgi?id=147508
+ if (isTail)
+ ASSERT(isFlushed());
+ else
+ flushRegisters();
+
GPRFlushedCallResult result(this);
GPRReg resultGPR = result.gpr();
- JITCompiler::DataLabelPtr targetToCheck;
- JITCompiler::Jump slowPath;
+ CodeOrigin staticOrigin = node->origin.semantic;
+ ASSERT(!isTail || !staticOrigin.inlineCallFrame || !staticOrigin.inlineCallFrame->getCallerSkippingDeadFrames());
+ ASSERT(!isEmulatedTail || (staticOrigin.inlineCallFrame && staticOrigin.inlineCallFrame->getCallerSkippingDeadFrames()));
+ CodeOrigin dynamicOrigin =
+ isEmulatedTail ? *staticOrigin.inlineCallFrame->getCallerSkippingDeadFrames() : staticOrigin;
- m_jit.emitStoreCodeOrigin(node->origin.semantic);
+ m_jit.emitStoreCodeOrigin(dynamicOrigin);
CallLinkInfo* callLinkInfo = m_jit.codeBlock()->addCallLinkInfo();
-
- slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0));
- JITCompiler::Call fastCall = m_jit.nearCall();
+ JITCompiler::DataLabelPtr targetToCheck;
+ JITCompiler::Jump slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(0));
+ if (isTail) {
+ m_jit.emitRestoreCalleeSaves();
+ // FIXME: We should do an efficient move of the arguments into
+ // their target stack position instead of building then memmoving
+ // the callee frame.
+ // https://bugs.webkit.org/show_bug.cgi?id=147508
+ m_jit.prepareForTailCallSlow();
+ }
+
+ JITCompiler::Call fastCall = isTail ? m_jit.nearTailCall() : m_jit.nearCall();
+
JITCompiler::Jump done = m_jit.jump();
slowPath.link(&m_jit);
@@ -785,20 +835,25 @@
m_jit.move(calleeGPR, GPRInfo::regT0); // Callee needs to be in regT0
m_jit.move(MacroAssembler::TrustedImmPtr(callLinkInfo), GPRInfo::regT2); // Link info needs to be in regT2
JITCompiler::Call slowCall = m_jit.nearCall();
-
+
done.link(&m_jit);
-
- m_jit.move(GPRInfo::returnValueGPR, resultGPR);
-
- jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);
-
+
+ if (isTail)
+ m_jit.breakpoint();
+ else {
+ m_jit.move(GPRInfo::returnValueGPR, resultGPR);
+
+ jsValueResult(resultGPR, m_currentNode, DataFormatJS, UseChildrenCalledExplicitly);
+
+ // After the calls are done, we need to reestablish our stack
+ // pointer. We rely on this for varargs calls, calls with arity
+ // mismatch (the callframe is slided) and tail calls.
+ m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
+ }
+
callLinkInfo->setUpCall(callType, m_currentNode->origin.semantic, calleeGPR);
m_jit.addJSCall(fastCall, slowCall, targetToCheck, callLinkInfo);
- // After the calls are done, we need to reestablish our stack
- // pointer. We rely on this for varargs calls, calls with arity
- // mismatch (the callframe is slided) and tail calls.
- m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
}
// Clang should allow unreachable [[clang::fallthrough]] in template functions if any template expansion uses it
@@ -4231,14 +4286,20 @@
break;
case Call:
+ case TailCall:
+ case TailCallInlinedCaller:
case Construct:
case CallVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
case CallForwardVarargs:
case ConstructVarargs:
case ConstructForwardVarargs:
+ case TailCallForwardVarargs:
+ case TailCallForwardVarargsInlinedCaller:
emitCall(node);
break;
-
+
case LoadVarargs: {
LoadVarargsData* data = node->loadVarargsData();
Modified: branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGVarargsForwardingPhase.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGVarargsForwardingPhase.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/dfg/DFGVarargsForwardingPhase.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -135,6 +135,8 @@
case CallVarargs:
case ConstructVarargs:
+ case TailCallVarargs:
+ case TailCallVarargsInlinedCaller:
if (node->child1() == candidate || node->child3() == candidate) {
if (verbose)
dataLog(" Escape at ", node, "\n");
@@ -282,7 +284,19 @@
break;
node->setOpAndDefaultFlags(ConstructForwardVarargs);
break;
-
+
+ case TailCallVarargs:
+ if (node->child2() != candidate)
+ break;
+ node->setOpAndDefaultFlags(TailCallForwardVarargs);
+ break;
+
+ case TailCallVarargsInlinedCaller:
+ if (node->child2() != candidate)
+ break;
+ node->setOpAndDefaultFlags(TailCallForwardVarargsInlinedCaller);
+ break;
+
case SetLocal:
// This is super odd. We don't have to do anything here, since in DFG IR, the phantom
// arguments nodes do produce a JSValue. Also, we know that if this SetLocal referenecs a
Modified: branches/jsc-tailcall/Source/_javascript_Core/interpreter/CallFrame.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/interpreter/CallFrame.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/interpreter/CallFrame.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -85,7 +85,7 @@
if (inlineCallFrame->baselineCodeBlock() == codeBlock)
return codeOrigin.bytecodeIndex;
- codeOrigin = inlineCallFrame->caller;
+ codeOrigin = *inlineCallFrame->getCallerSkippingDeadFrames();
inlineCallFrame = codeOrigin.inlineCallFrame;
}
return codeOrigin.bytecodeIndex;
Modified: branches/jsc-tailcall/Source/_javascript_Core/interpreter/StackVisitor.cpp (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/interpreter/StackVisitor.cpp 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/interpreter/StackVisitor.cpp 2015-08-04 00:39:19 UTC (rev 187791)
@@ -59,8 +59,16 @@
#if ENABLE(DFG_JIT)
if (m_frame.isInlinedFrame()) {
InlineCallFrame* inlineCallFrame = m_frame.inlineCallFrame();
- CodeOrigin* callerCodeOrigin = &inlineCallFrame->caller;
- readInlinedFrame(m_frame.callFrame(), callerCodeOrigin);
+ CodeOrigin* callerCodeOrigin = inlineCallFrame->getCallerSkippingDeadFrames();
+ if (!callerCodeOrigin) {
+ while (inlineCallFrame) {
+ readInlinedFrame(m_frame.callFrame(), &inlineCallFrame->directCaller);
+ inlineCallFrame = m_frame.inlineCallFrame();
+ }
+ m_frame.m_VMEntryFrame = m_frame.m_CallerVMEntryFrame;
+ readFrame(m_frame.callerFrame());
+ } else
+ readInlinedFrame(m_frame.callFrame(), callerCodeOrigin);
return;
}
#endif // ENABLE(DFG_JIT)
Modified: branches/jsc-tailcall/Source/_javascript_Core/jit/CCallHelpers.h (187790 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/jit/CCallHelpers.h 2015-08-04 00:30:45 UTC (rev 187790)
+++ branches/jsc-tailcall/Source/_javascript_Core/jit/CCallHelpers.h 2015-08-04 00:39:19 UTC (rev 187791)
@@ -2067,6 +2067,8 @@
#if USE(JSVALUE32_64)
COMPILE_ASSERT(sizeof(void*) * 2 == sizeof(Register), Register_is_two_pointers_sized);
lshift32(TrustedImm32(1), temp2);
+#else
+ COMPILE_ASSERT(sizeof(void*) == sizeof(Register), Register_is_one_pointer_sized);
#endif
// Do the sliding
Added: branches/jsc-tailcall/Source/_javascript_Core/tests/stress/dfg-tail-calls.js (0 => 187791)
--- branches/jsc-tailcall/Source/_javascript_Core/tests/stress/dfg-tail-calls.js (rev 0)
+++ branches/jsc-tailcall/Source/_javascript_Core/tests/stress/dfg-tail-calls.js 2015-08-04 00:39:19 UTC (rev 187791)
@@ -0,0 +1,56 @@
+(function nonInlinedTailCall() {
+ function callee() { if (callee.caller != nonInlinedTailCall) throw new Error(); }
+ noInline(callee);
+
+ function caller() { "use strict"; return callee(); }
+
+ for (var i = 0; i < 10000; ++i)
+ caller();
+
+ function loop(n) { "use strict"; if (n > 0) return loop(n - 1); }
+ noInline(loop);
+
+ loop(1000000);
+})();
+
+(function inlinedTailCall() {
+ function callee() { if (callee.caller != inlinedTailCall) throw new Error(); }
+ function caller() { "use strict"; return callee(); }
+
+ for (var i = 0; i < 10000; ++i)
+ caller();
+
+ function loop(n) { "use strict"; if (n > 0) return loop(n - 1); }
+
+ loop(1000000);
+})();
+
+(function nonInlinedEmulatedTailCall() {
+ function emulator() { caller(); }
+ function callee() { if (callee.caller != emulator) throw new Error(); }
+ noInline(callee);
+ function caller() { "use strict"; return callee(); }
+
+ for (var i = 0; i < 10000; ++i)
+ emulator();
+
+ function pad(n) { "use strict"; return loop(n); }
+ function loop(n) { "use strict"; if (n > 0) return pad(n - 1); }
+ noInline(loop);
+
+ loop(1000000);
+})();
+
+(function inlinedEmulatedTailCall() {
+ function emulator() { caller(); }
+ function callee() { if (callee.caller != emulator) throw new Error(); }
+ function caller() { "use strict"; return callee(); }
+
+ for (var i = 0; i < 10000; ++i)
+ emulator();
+
+ function pad(n) { "use strict"; return loop(n); }
+ function loop(n) { "use strict"; if (n > 0) return pad(n - 1); }
+
+ loop(1000000);
+})();