Modified: trunk/Source/JavaScriptCore/ChangeLog (227430 => 227431)
--- trunk/Source/JavaScriptCore/ChangeLog 2018-01-23 20:11:10 UTC (rev 227430)
+++ trunk/Source/JavaScriptCore/ChangeLog 2018-01-23 20:16:56 UTC (rev 227431)
@@ -1,5 +1,30 @@
2018-01-23 Filip Pizlo <fpizlo@apple.com>
+ DFG should always flush `this`
+ https://bugs.webkit.org/show_bug.cgi?id=181999
+
+ Reviewed by Saam Barati and Mark Lam.
+
+ This is going to make it possible to use precise index masking for arguments-on-the-stack
+ accesses with an index adjusted so that 0 is this. Without this change, we would have no way
+ of masking when the argument count is 0, unless we padded the argument area so that there was
+ always an argument slot after `this` and it was always initialized.
+
+ This is neutral on all benchmarks.
+
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::flushImpl):
+ (JSC::DFG::ByteCodeParser::flushForTerminalImpl):
+ (JSC::DFG::ByteCodeParser::flush):
+ (JSC::DFG::ByteCodeParser::flushForTerminal):
+ (JSC::DFG::ByteCodeParser::parse):
+ (JSC::DFG::flushImpl): Deleted.
+ (JSC::DFG::flushForTerminalImpl): Deleted.
+ * dfg/DFGPreciseLocalClobberize.h:
+ (JSC::DFG::PreciseLocalClobberizeAdaptor::readTop):
+
+2018-01-23 Filip Pizlo <fpi...@apple.com>
+
JSC should use a speculation fence on VM entry/exit
https://bugs.webkit.org/show_bug.cgi?id=181991
Modified: trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (227430 => 227431)
--- trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp 2018-01-23 20:11:10 UTC (rev 227430)
+++ trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp 2018-01-23 20:16:56 UTC (rev 227431)
@@ -82,51 +82,6 @@
dataLog(__VA_ARGS__); \
} while (false)
-template <typename F1, typename F2>
-static ALWAYS_INLINE void flushImpl(Graph& graph, InlineCallFrame* inlineCallFrame, const F1& addFlushDirect, const F2& addPhantomLocalDirect)
-{
- int numArguments;
- if (inlineCallFrame) {
- ASSERT(!graph.hasDebuggerEnabled());
- numArguments = inlineCallFrame->argumentsWithFixup.size();
- if (inlineCallFrame->isClosureCall)
- addFlushDirect(remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
- if (inlineCallFrame->isVarargs())
- addFlushDirect(remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
- } else
- numArguments = graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
-
- for (unsigned argument = numArguments; argument-- > 1;)
- addFlushDirect(remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
-
- if (!inlineCallFrame && graph.needsFlushedThis())
- addFlushDirect(remapOperand(inlineCallFrame, virtualRegisterForArgument(0)));
- else
- addPhantomLocalDirect(remapOperand(inlineCallFrame, virtualRegisterForArgument(0)));
-
- if (graph.needsScopeRegister())
- addFlushDirect(graph.m_codeBlock->scopeRegister());
-}
-
-template <typename F1, typename F2>
-static ALWAYS_INLINE void flushForTerminalImpl(Graph& graph, CodeOrigin origin, const F1& addFlushDirect, const F2& addPhantomLocalDirect)
-{
- origin.walkUpInlineStack([&] (CodeOrigin origin) {
- unsigned bytecodeIndex = origin.bytecodeIndex;
- InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
- flushImpl(graph, inlineCallFrame, addFlushDirect, addPhantomLocalDirect);
-
- CodeBlock* codeBlock = graph.baselineCodeBlockFor(inlineCallFrame);
- FullBytecodeLiveness& fullLiveness = graph.livenessFor(codeBlock);
- const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
-
- for (unsigned local = codeBlock->m_numCalleeLocals; local--;) {
- if (livenessAtBytecode[local])
- addPhantomLocalDirect(remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
- }
- });
-}
-
// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
@@ -564,6 +519,47 @@
return findArgumentPositionForLocal(operand);
}
+ template<typename AddFlushDirectFunc>
+ void flushImpl(InlineCallFrame* inlineCallFrame, const AddFlushDirectFunc& addFlushDirect)
+ {
+ int numArguments;
+ if (inlineCallFrame) {
+ ASSERT(!m_graph.hasDebuggerEnabled());
+ numArguments = inlineCallFrame->argumentsWithFixup.size();
+ if (inlineCallFrame->isClosureCall)
+ addFlushDirect(remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::callee)));
+ if (inlineCallFrame->isVarargs())
+ addFlushDirect(remapOperand(inlineCallFrame, VirtualRegister(CallFrameSlot::argumentCount)));
+ } else
+ numArguments = m_graph.baselineCodeBlockFor(inlineCallFrame)->numParameters();
+
+ for (unsigned argument = numArguments; argument--;)
+ addFlushDirect(remapOperand(inlineCallFrame, virtualRegisterForArgument(argument)));
+
+ if (m_graph.needsScopeRegister())
+ addFlushDirect(m_graph.m_codeBlock->scopeRegister());
+ }
+
+ template<typename AddFlushDirectFunc, typename AddPhantomLocalDirectFunc>
+ void flushForTerminalImpl(CodeOrigin origin, const AddFlushDirectFunc& addFlushDirect, const AddPhantomLocalDirectFunc& addPhantomLocalDirect)
+ {
+ origin.walkUpInlineStack(
+ [&] (CodeOrigin origin) {
+ unsigned bytecodeIndex = origin.bytecodeIndex;
+ InlineCallFrame* inlineCallFrame = origin.inlineCallFrame;
+ flushImpl(inlineCallFrame, addFlushDirect);
+
+ CodeBlock* codeBlock = m_graph.baselineCodeBlockFor(inlineCallFrame);
+ FullBytecodeLiveness& fullLiveness = m_graph.livenessFor(codeBlock);
+ const FastBitVector& livenessAtBytecode = fullLiveness.getLiveness(bytecodeIndex);
+
+ for (unsigned local = codeBlock->m_numCalleeLocals; local--;) {
+ if (livenessAtBytecode[local])
+ addPhantomLocalDirect(remapOperand(inlineCallFrame, virtualRegisterForLocal(local)));
+ }
+ });
+ }
+
void flush(VirtualRegister operand)
{
flushDirect(m_inlineStackTop->remapOperand(operand));
@@ -607,8 +603,7 @@
void flush(InlineStackEntry* inlineStackEntry)
{
auto addFlushDirect = [&] (VirtualRegister reg) { flushDirect(reg); };
- auto addPhantomLocalDirect = [&] (VirtualRegister reg) { phantomLocalDirect(reg); };
- flushImpl(m_graph, inlineStackEntry->m_inlineCallFrame, addFlushDirect, addPhantomLocalDirect);
+ flushImpl(inlineStackEntry->m_inlineCallFrame, addFlushDirect);
}
void flushForTerminal()
@@ -615,7 +610,7 @@
{
auto addFlushDirect = [&] (VirtualRegister reg) { flushDirect(reg); };
auto addPhantomLocalDirect = [&] (VirtualRegister reg) { phantomLocalDirect(reg); };
- flushForTerminalImpl(m_graph, currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
+ flushForTerminalImpl(currentCodeOrigin(), addFlushDirect, addPhantomLocalDirect);
}
void flushForReturn()
@@ -6632,7 +6627,7 @@
auto addFlushDirect = [&] (VirtualRegister operand) { insertLivenessPreservingOp(Flush, operand); };
auto addPhantomLocalDirect = [&] (VirtualRegister operand) { insertLivenessPreservingOp(PhantomLocal, operand); };
- flushForTerminalImpl(m_graph, endOrigin.semantic, addFlushDirect, addPhantomLocalDirect);
+ flushForTerminalImpl(endOrigin.semantic, addFlushDirect, addPhantomLocalDirect);
insertionSet.insertNode(block->size(), SpecNone, Unreachable, endOrigin);
insertionSet.execute(block);
Modified: trunk/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h (227430 => 227431)
--- trunk/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h 2018-01-23 20:11:10 UTC (rev 227430)
+++ trunk/Source/JavaScriptCore/dfg/DFGPreciseLocalClobberize.h 2018-01-23 20:16:56 UTC (rev 227431)
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2014-2018 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -108,13 +108,13 @@
auto readFrame = [&] (InlineCallFrame* inlineCallFrame, unsigned numberOfArgumentsToSkip) {
if (!inlineCallFrame) {
// Read the outermost arguments and argument count.
- for (unsigned i = 1 + numberOfArgumentsToSkip; i < static_cast<unsigned>(m_graph.m_codeBlock->numParameters()); i++)
+ for (unsigned i = numberOfArgumentsToSkip; i < static_cast<unsigned>(m_graph.m_codeBlock->numParameters()); i++)
m_read(virtualRegisterForArgument(i));
m_read(VirtualRegister(CallFrameSlot::argumentCount));
return;
}
- for (unsigned i = 1 + numberOfArgumentsToSkip; i < inlineCallFrame->argumentsWithFixup.size(); i++)
+ for (unsigned i = numberOfArgumentsToSkip; i < inlineCallFrame->argumentsWithFixup.size(); i++)
m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
if (inlineCallFrame->isVarargs())
m_read(VirtualRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount));
@@ -239,7 +239,7 @@
default: {
// All of the outermost arguments, except this, are read in sloppy mode.
if (!m_graph.m_codeBlock->isStrictMode()) {
- for (unsigned i = m_graph.m_codeBlock->numParameters(); i-- > 1;)
+ for (unsigned i = m_graph.m_codeBlock->numParameters(); i--;)
m_read(virtualRegisterForArgument(i));
}
@@ -250,7 +250,7 @@
// Read all of the inline arguments and call frame headers that we didn't already capture.
for (InlineCallFrame* inlineCallFrame = m_node->origin.semantic.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->getCallerInlineFrameSkippingTailCalls()) {
if (!inlineCallFrame->isStrictMode()) {
- for (unsigned i = inlineCallFrame->argumentsWithFixup.size(); i-- > 1;)
+ for (unsigned i = inlineCallFrame->argumentsWithFixup.size(); i--;)
m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
}
if (inlineCallFrame->isClosureCall)