Diff
Modified: branches/jsCStack/Source/_javascript_Core/ChangeLog (161005 => 161006)
--- branches/jsCStack/Source/_javascript_Core/ChangeLog 2013-12-23 19:04:11 UTC (rev 161005)
+++ branches/jsCStack/Source/_javascript_Core/ChangeLog 2013-12-23 19:15:09 UTC (rev 161006)
@@ -1,3 +1,41 @@
+2013-12-23 Filip Pizlo <[email protected]>
+
+ cStack branch doesn't run navier-stokes because closure calls aren't implemented yet
+ https://bugs.webkit.org/show_bug.cgi?id=126141
+
+ Reviewed by Michael Saboff.
+
+ Add a bunch of assertions regarding the sanity of argument counts. This is a great way of
+ making sure that SP/BP point to the right place.
+
+ Used that to diagnose problems with closure calls. Closure call linking expects that SP is
+ set up correctly very early on. The DFG JIT was doing this correctly, but the baseline JIT
+ wasn't: it was setting up T1 as a fake SP and then setting up SP for real very late. This
+ wasn't doing us any good and it was making closure calls fail.
+
+ Also, closure calls were still referring to BP instead of SP and assuming that the caller
+ was adjusting BP. Obviously, that's not what we do anymore.
+
+ These fixes make navier-stokes run. They probably fix other bugs, but I haven't looked at
+ those yet.
+
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compileCurrentBlock):
+ * jit/AssemblyHelpers.cpp:
+ (JSC::AssemblyHelpers::jitAssertArgumentCountSane):
+ * jit/AssemblyHelpers.h:
+ (JSC::AssemblyHelpers::jitAssertArgumentCountSane):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileMainPass):
+ * jit/JITCall.cpp:
+ (JSC::JIT::compileLoadVarargs):
+ (JSC::JIT::compileCallEval):
+ (JSC::JIT::compileCallEvalSlowCase):
+ (JSC::JIT::compileOpCall):
+ (JSC::JIT::compileOpCallSlowCase):
+ * jit/Repatch.cpp:
+ (JSC::linkClosureCall):
+
2013-12-22 Mark Lam <[email protected]>
CStack: Add #if ENABLE(LLINT_C_LOOP) to C loop LLINT only parts of JSStack.
Modified: branches/jsCStack/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp (161005 => 161006)
--- branches/jsCStack/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp 2013-12-23 19:04:11 UTC (rev 161005)
+++ branches/jsCStack/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp 2013-12-23 19:15:09 UTC (rev 161006)
@@ -1467,6 +1467,7 @@
m_jit.jitAssertHasValidCallFrame();
m_jit.jitAssertTagsInPlace();
+ m_jit.jitAssertArgumentCountSane();
for (size_t i = 0; i < m_block->variablesAtHead.numberOfArguments(); ++i) {
m_stream->appendAndLog(
Modified: branches/jsCStack/Source/_javascript_Core/jit/AssemblyHelpers.cpp (161005 => 161006)
--- branches/jsCStack/Source/_javascript_Core/jit/AssemblyHelpers.cpp 2013-12-23 19:04:11 UTC (rev 161005)
+++ branches/jsCStack/Source/_javascript_Core/jit/AssemblyHelpers.cpp 2013-12-23 19:15:09 UTC (rev 161006)
@@ -175,6 +175,13 @@
breakpoint();
checkNull.link(this);
}
+
+void AssemblyHelpers::jitAssertArgumentCountSane()
+{
+ Jump ok = branch32(Below, payloadFor(JSStack::ArgumentCount), TrustedImm32(10000000));
+ breakpoint();
+ ok.link(this);
+}
#endif // !ASSERT_DISABLED
} // namespace JSC
Modified: branches/jsCStack/Source/_javascript_Core/jit/AssemblyHelpers.h (161005 => 161006)
--- branches/jsCStack/Source/_javascript_Core/jit/AssemblyHelpers.h 2013-12-23 19:04:11 UTC (rev 161005)
+++ branches/jsCStack/Source/_javascript_Core/jit/AssemblyHelpers.h 2013-12-23 19:15:09 UTC (rev 161006)
@@ -344,6 +344,7 @@
void jitAssertHasValidCallFrame();
void jitAssertIsNull(GPRReg);
void jitAssertTagsInPlace();
+ void jitAssertArgumentCountSane();
#else
void jitAssertIsInt32(GPRReg) { }
void jitAssertIsJSInt32(GPRReg) { }
@@ -353,6 +354,7 @@
void jitAssertHasValidCallFrame() { }
void jitAssertIsNull(GPRReg) { }
void jitAssertTagsInPlace() { }
+ void jitAssertArgumentCountSane() { }
#endif
// These methods convert between doubles, and doubles boxed and JSValues.
Modified: branches/jsCStack/Source/_javascript_Core/jit/JIT.cpp (161005 => 161006)
--- branches/jsCStack/Source/_javascript_Core/jit/JIT.cpp 2013-12-23 19:04:11 UTC (rev 161005)
+++ branches/jsCStack/Source/_javascript_Core/jit/JIT.cpp 2013-12-23 19:15:09 UTC (rev 161006)
@@ -135,6 +135,7 @@
void JIT::privateCompileMainPass()
{
jitAssertTagsInPlace();
+ jitAssertArgumentCountSane();
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
unsigned instructionCount = m_codeBlock->instructions().size();
Modified: branches/jsCStack/Source/_javascript_Core/jit/JITCall.cpp (161005 => 161006)
--- branches/jsCStack/Source/_javascript_Core/jit/JITCall.cpp 2013-12-23 19:04:11 UTC (rev 161005)
+++ branches/jsCStack/Source/_javascript_Core/jit/JITCall.cpp 2013-12-23 19:15:09 UTC (rev 161006)
@@ -119,11 +119,13 @@
if (canOptimize)
end.link(this);
+
+ addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
void JIT::compileCallEval(Instruction* instruction)
{
- addPtr(TrustedImm32(JSStack::CallerFrameAndPCSize * static_cast<int>(sizeof(Register))), regT1, stackPointerRegister);
+ addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
callOperationNoExceptionCheck(operationCallEval, regT1);
// Recalculate newCallFrame and put in regT1 for possible use in slow case
addPtr(TrustedImm32(-JSStack::CallerFrameAndPCSize * static_cast<int>(sizeof(Register))), stackPointerRegister, regT1);
@@ -141,8 +143,7 @@
{
linkSlowCase(iter);
- emitGetFromCallFrameHeader64(JSStack::Callee, regT0, regT1);
- addPtr(TrustedImm32(JSStack::CallerFrameAndPCSize * static_cast<int>(sizeof(Register))), regT1, stackPointerRegister);
+ load64(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
checkStackPointerAlignment();
@@ -184,16 +185,16 @@
done.link(this);
}
- addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
- store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- } // regT1 holds newCallFrame with ArgumentCount initialized.
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+ store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
+ } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
- store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
- store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
+ store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
if (opcodeID == op_call_eval) {
compileCallEval(instruction);
@@ -211,8 +212,7 @@
m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT2);
- store64(regT2, Address(regT1, JSStack::ScopeChain * sizeof(Register)));
- addPtr(TrustedImm32(JSStack::CallerFrameAndPCSize * static_cast<int>(sizeof(Register))), regT1, stackPointerRegister);
+ store64(regT2, Address(MacroAssembler::stackPointerRegister, JSStack::ScopeChain * sizeof(Register) - sizeof(CallerFrameAndPC)));
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
@@ -233,8 +233,6 @@
linkSlowCase(iter);
- addPtr(TrustedImm32(JSStack::CallerFrameAndPCSize * static_cast<int>(sizeof(Register))), regT1, stackPointerRegister);
-
ThunkGenerator generator = linkThunkGeneratorFor(
opcodeID == op_construct ? CodeForConstruct : CodeForCall,
RegisterPreservationNotRequired);
Modified: branches/jsCStack/Source/_javascript_Core/jit/Repatch.cpp (161005 => 161006)
--- branches/jsCStack/Source/_javascript_Core/jit/Repatch.cpp 2013-12-23 19:04:11 UTC (rev 161005)
+++ branches/jsCStack/Source/_javascript_Core/jit/Repatch.cpp 2013-12-23 19:15:09 UTC (rev 161006)
@@ -1342,6 +1342,15 @@
CCallHelpers::JumpList slowPath;
+ ptrdiff_t offsetToFrame = -sizeof(CallerFrameAndPC);
+
+ if (!ASSERT_DISABLED) {
+ CCallHelpers::Jump okArgumentCount = stubJit.branch32(
+ CCallHelpers::Below, CCallHelpers::Address(CCallHelpers::stackPointerRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ArgumentCount) + offsetToFrame + PayloadOffset), CCallHelpers::TrustedImm32(10000000));
+ stubJit.breakpoint();
+ okArgumentCount.link(&stubJit);
+ }
+
#if USE(JSVALUE64)
// We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister
// being set. So we do this the hard way.
@@ -1367,18 +1376,18 @@
stubJit.loadPtr(
CCallHelpers::Address(calleeGPR, JSFunction::offsetOfScopeChain()),
GPRInfo::returnValueGPR);
-
+
#if USE(JSVALUE64)
stubJit.store64(
GPRInfo::returnValueGPR,
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain)));
+ CCallHelpers::Address(MacroAssembler::stackPointerRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + offsetToFrame));
#else
stubJit.storePtr(
GPRInfo::returnValueGPR,
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ CCallHelpers::Address(MacroAssembler::stackPointerRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) + offsetToFrame));
stubJit.store32(
CCallHelpers::TrustedImm32(JSValue::CellTag),
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ CCallHelpers::Address(MacroAssembler::stackPointerRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) + offsetToFrame));
#endif
AssemblyHelpers::Call call = stubJit.nearCall();
@@ -1391,8 +1400,6 @@
#endif
stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation.executableAddress()), GPRInfo::regT2);
- // FIXME: CStack - Think the restoreReturnAddressBeforeReturn() instruction should be a poke
- stubJit.breakpoint();
stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT2);
AssemblyHelpers::Jump slow = stubJit.jump();