- Revision
- 199675
- Author
- [email protected]
- Date
- 2016-04-18 10:13:33 -0700 (Mon, 18 Apr 2016)
Log Message
FTL should pin the tag registers at inline caches
https://bugs.webkit.org/show_bug.cgi?id=156678
Reviewed by Saam Barati.
This is a long-overdue fix to our inline caches. Back when we had LLVM, we couldn't rely on the tags
being pinned to any registers. So, if the inline caches needed tags, they'd have to materialize them.
This removes those materializations. This should reduce the amount of code generated in inline caches
and it should make inline caches faster. The effect appears to be small.
It may be that after this change, we'll even be able to kill the
HaveTagRegisters/DoNotHaveTagRegisters logic.
* bytecode/PolymorphicAccess.cpp:
(JSC::AccessCase::generateWithGuard):
(JSC::AccessCase::generateImpl):
* ftl/FTLLowerDFGToB3.cpp:
(JSC::FTL::DFG::LowerDFGToB3::compilePutById):
(JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstruct):
(JSC::FTL::DFG::LowerDFGToB3::compileTailCall):
(JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargs):
(JSC::FTL::DFG::LowerDFGToB3::compileIn):
(JSC::FTL::DFG::LowerDFGToB3::getById):
* jit/Repatch.cpp:
(JSC::readCallTarget):
(JSC::linkPolymorphicCall):
* jit/ThunkGenerators.cpp:
(JSC::virtualThunkFor):
Modified Paths
Diff
Modified: trunk/Source/JavaScriptCore/ChangeLog (199674 => 199675)
--- trunk/Source/JavaScriptCore/ChangeLog 2016-04-18 17:13:04 UTC (rev 199674)
+++ trunk/Source/JavaScriptCore/ChangeLog 2016-04-18 17:13:33 UTC (rev 199675)
@@ -1,3 +1,35 @@
+2016-04-17 Filip Pizlo <[email protected]>
+
+ FTL should pin the tag registers at inline caches
+ https://bugs.webkit.org/show_bug.cgi?id=156678
+
+ Reviewed by Saam Barati.
+
+ This is a long-overdue fix to our inline caches. Back when we had LLVM, we couldn't rely on the tags
+ being pinned to any registers. So, if the inline caches needed tags, they'd have to materialize them.
+
+ This removes those materializations. This should reduce the amount of code generated in inline caches
+ and it should make inline caches faster. The effect appears to be small.
+
+ It may be that after this change, we'll even be able to kill the
+ HaveTagRegisters/DoNotHaveTagRegisters logic.
+
+ * bytecode/PolymorphicAccess.cpp:
+ (JSC::AccessCase::generateWithGuard):
+ (JSC::AccessCase::generateImpl):
+ * ftl/FTLLowerDFGToB3.cpp:
+ (JSC::FTL::DFG::LowerDFGToB3::compilePutById):
+ (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstruct):
+ (JSC::FTL::DFG::LowerDFGToB3::compileTailCall):
+ (JSC::FTL::DFG::LowerDFGToB3::compileCallOrConstructVarargs):
+ (JSC::FTL::DFG::LowerDFGToB3::compileIn):
+ (JSC::FTL::DFG::LowerDFGToB3::getById):
+ * jit/Repatch.cpp:
+ (JSC::readCallTarget):
+ (JSC::linkPolymorphicCall):
+ * jit/ThunkGenerators.cpp:
+ (JSC::virtualThunkFor):
+
2016-04-18 Yusuke Suzuki <[email protected]>
[ES7] yield star should not return if the inner iterator.throw returns { done: true }
Modified: trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp (199674 => 199675)
--- trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp 2016-04-18 17:13:04 UTC (rev 199674)
+++ trunk/Source/JavaScriptCore/bytecode/PolymorphicAccess.cpp 2016-04-18 17:13:33 UTC (rev 199675)
@@ -610,7 +610,7 @@
jit.load32(
CCallHelpers::Address(baseGPR, DirectArguments::offsetOfLength()),
valueRegs.payloadGPR());
- jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
+ jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
state.succeed();
return;
}
@@ -630,7 +630,7 @@
jit.load32(
CCallHelpers::Address(baseGPR, ScopedArguments::offsetOfTotalLength()),
valueRegs.payloadGPR());
- jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
+ jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
state.succeed();
return;
}
@@ -1126,7 +1126,7 @@
dataLog("Have type: ", type->descriptor(), "\n");
state.failAndRepatch.append(
jit.branchIfNotType(
- valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
+ valueRegs, scratchGPR, type->descriptor(), CCallHelpers::HaveTagRegisters));
} else if (verbose)
dataLog("Don't have type.\n");
@@ -1157,7 +1157,7 @@
dataLog("Have type: ", type->descriptor(), "\n");
state.failAndRepatch.append(
jit.branchIfNotType(
- valueRegs, scratchGPR, type->descriptor(), CCallHelpers::DoNotHaveTagRegisters));
+ valueRegs, scratchGPR, type->descriptor(), CCallHelpers::HaveTagRegisters));
} else if (verbose)
dataLog("Don't have type.\n");
@@ -1361,14 +1361,14 @@
jit.load32(CCallHelpers::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
state.failAndIgnore.append(
jit.branch32(CCallHelpers::LessThan, scratchGPR, CCallHelpers::TrustedImm32(0)));
- jit.boxInt32(scratchGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
+ jit.boxInt32(scratchGPR, valueRegs);
state.succeed();
return;
}
case StringLength: {
jit.load32(CCallHelpers::Address(baseGPR, JSString::offsetOfLength()), valueRegs.payloadGPR());
- jit.boxInt32(valueRegs.payloadGPR(), valueRegs, CCallHelpers::DoNotHaveTagRegisters);
+ jit.boxInt32(valueRegs.payloadGPR(), valueRegs);
state.succeed();
return;
}
Modified: trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp (199674 => 199675)
--- trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp 2016-04-18 17:13:04 UTC (rev 199674)
+++ trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp 2016-04-18 17:13:33 UTC (rev 199675)
@@ -2403,6 +2403,8 @@
B3::PatchpointValue* patchpoint = m_out.patchpoint(Void);
patchpoint->appendSomeRegister(base);
patchpoint->appendSomeRegister(value);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
patchpoint->clobber(RegisterSet::macroScratchRegisters());
// FIXME: If this is a PutByIdFlush, we might want to late-clobber volatile registers.
@@ -4865,6 +4867,8 @@
RefPtr<PatchpointExceptionHandle> exceptionHandle =
preparePatchpointForExceptions(patchpoint);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
patchpoint->clobber(RegisterSet::macroScratchRegisters());
patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
@@ -4954,6 +4958,9 @@
PatchpointValue* patchpoint = m_out.patchpoint(Void);
patchpoint->appendVector(arguments);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+
// Prevent any of the arguments from using the scratch register.
patchpoint->clobberEarly(RegisterSet::macroScratchRegisters());
@@ -5071,6 +5078,9 @@
RefPtr<PatchpointExceptionHandle> exceptionHandle =
preparePatchpointForExceptions(patchpoint);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
+
patchpoint->clobber(RegisterSet::macroScratchRegisters());
patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
@@ -5934,6 +5944,8 @@
UniquedStringImpl* str = bitwise_cast<UniquedStringImpl*>(string->tryGetValueImpl());
B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64);
patchpoint->appendSomeRegister(cell);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
patchpoint->clobber(RegisterSet::macroScratchRegisters());
State* state = &m_ftlState;
@@ -7393,6 +7405,8 @@
B3::PatchpointValue* patchpoint = m_out.patchpoint(Int64);
patchpoint->appendSomeRegister(base);
+ patchpoint->append(m_tagMask, ValueRep::reg(GPRInfo::tagMaskRegister));
+ patchpoint->append(m_tagTypeNumber, ValueRep::reg(GPRInfo::tagTypeNumberRegister));
// FIXME: If this is a GetByIdFlush, we might get some performance boost if we claim that it
// clobbers volatile registers late. It's not necessary for correctness, though, since the
Modified: trunk/Source/JavaScriptCore/jit/Repatch.cpp (199674 => 199675)
--- trunk/Source/JavaScriptCore/jit/Repatch.cpp 2016-04-18 17:13:04 UTC (rev 199674)
+++ trunk/Source/JavaScriptCore/jit/Repatch.cpp 2016-04-18 17:13:33 UTC (rev 199675)
@@ -55,11 +55,6 @@
namespace JSC {
-// Beware: in this code, it is not safe to assume anything about the following registers
-// that would ordinarily have well-known values:
-// - tagTypeNumberRegister
-// - tagMaskRegister
-
static FunctionPtr readCallTarget(CodeBlock* codeBlock, CodeLocationCall call)
{
FunctionPtr result = MacroAssembler::readCallTarget(call);
@@ -795,10 +790,7 @@
// Verify that we have a function and stash the executable in scratchGPR.
#if USE(JSVALUE64)
- // We can't rely on tagMaskRegister being set, so we do this the hard
- // way.
- stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratchGPR);
- slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratchGPR));
+ slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, GPRInfo::tagMaskRegister));
#else
// We would have already checked that the callee is a cell.
#endif
Modified: trunk/Source/JavaScriptCore/jit/ThunkGenerators.cpp (199674 => 199675)
--- trunk/Source/JavaScriptCore/jit/ThunkGenerators.cpp 2016-04-18 17:13:04 UTC (rev 199674)
+++ trunk/Source/JavaScriptCore/jit/ThunkGenerators.cpp 2016-04-18 17:13:33 UTC (rev 199675)
@@ -180,11 +180,9 @@
// the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
- jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);
-
slowCase.append(
jit.branchTest64(
- CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
+ CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister));
#else
slowCase.append(
jit.branch32(