Diff
Modified: trunk/Source/_javascript_Core/ChangeLog (88603 => 88604)
--- trunk/Source/_javascript_Core/ChangeLog 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/ChangeLog 2011-06-11 20:07:48 UTC (rev 88604)
@@ -1,3 +1,39 @@
+2011-06-11 Gavin Barraclough <[email protected]>
+
+ Rubber-stamped by Geoff Garen.
+
+ https://bugs.webkit.org/show_bug.cgi?id=62503
+ Remove JIT_OPTIMIZE_* switches
+
+ The alternative code paths are untested, and not well maintained.
+ These were useful when there was more churn in the JIT, but now
+ are a maintenance overhead. Time to move on; removing them.
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::visitAggregate):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileSlowCases):
+ (JSC::JIT::privateCompile):
+ (JSC::JIT::linkConstruct):
+ * jit/JIT.h:
+ * jit/JITCall.cpp:
+ * jit/JITCall32_64.cpp:
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ (JSC::JIT::privateCompileCTINativeCall):
+ * jit/JITOpcodes32_64.cpp:
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ (JSC::JIT::privateCompileCTINativeCall):
+ (JSC::JIT::softModulo):
+ * jit/JITPropertyAccess.cpp:
+ * jit/JITPropertyAccess32_64.cpp:
+ * jit/JITStubs.cpp:
+ (JSC::DEFINE_STUB_FUNCTION):
+ * runtime/Lookup.cpp:
+ (JSC::setUpStaticFunctionSlot):
+ * runtime/Lookup.h:
+ * wtf/Platform.h:
+
2011-06-10 Gavin Barraclough <[email protected]>
Reviewed by Sam Weinig.
Modified: trunk/Source/_javascript_Core/bytecode/CodeBlock.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/bytecode/CodeBlock.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/bytecode/CodeBlock.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -1479,11 +1479,9 @@
visitor.append(&m_functionExprs[i]);
for (size_t i = 0; i < m_functionDecls.size(); ++i)
visitor.append(&m_functionDecls[i]);
-#if ENABLE(JIT_OPTIMIZE_CALL)
for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i)
if (callLinkInfo(i).isLinked())
visitor.append(&callLinkInfo(i).callee);
-#endif
#if ENABLE(INTERPRETER)
for (size_t size = m_propertyAccessInstructions.size(), i = 0; i < size; ++i)
visitStructures(visitor, &m_instructions[m_propertyAccessInstructions[i]]);
Modified: trunk/Source/_javascript_Core/jit/JIT.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JIT.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JIT.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -448,9 +448,7 @@
emitJumpSlowToHot(jump(), 0);
}
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
-#endif
ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
#ifndef NDEBUG
@@ -569,22 +567,18 @@
for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i) {
StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
}
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
}
-#endif
unsigned methodCallCount = m_methodCallCompilationInfo.size();
m_codeBlock->addMethodCallLinkInfos(methodCallCount);
for (unsigned i = 0; i < methodCallCount; ++i) {
@@ -599,8 +593,6 @@
return patchBuffer.finalizeCode();
}
-#if ENABLE(JIT_OPTIMIZE_CALL)
-
void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
RepatchBuffer repatchBuffer(callerCodeBlock);
@@ -634,7 +626,6 @@
// patch the call so we do not continue to try to link.
repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
}
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
} // namespace JSC
Modified: trunk/Source/_javascript_Core/jit/JIT.h (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JIT.h 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JIT.h 2011-06-11 20:07:48 UTC (rev 88604)
@@ -328,10 +328,8 @@
void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag);
void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, unsigned virtualRegisterIndex);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
void compileGetByIdHotPath();
void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
-#endif
void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset);
@@ -569,10 +567,8 @@
void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes);
#endif
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
void compileGetByIdHotPath(int baseVReg, Identifier*);
void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
-#endif
void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset);
void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch);
Modified: trunk/Source/_javascript_Core/jit/JITCall.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JITCall.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JITCall.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -102,67 +102,6 @@
sampleCodeBlock(m_codeBlock);
}
-#if !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- // Handle eval
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee, regT0);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
- }
-
- emitGetVirtualRegister(callee, regT0);
-
- // Check for JSFunctions.
- emitJumpSlowCaseIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
-{
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.addArgument(regT0);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
-
- sampleCodeBlock(m_codeBlock);
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
int callee = instruction[1].u.operand;
@@ -254,10 +193,6 @@
sampleCodeBlock(m_codeBlock);
}
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_CALL)
-
} // namespace JSC
#endif // USE(JSVALUE64)
Modified: trunk/Source/_javascript_Core/jit/JITCall32_64.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JITCall32_64.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JITCall32_64.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -183,67 +183,6 @@
compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
-#if !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
- }
-
- emitLoad(callee, regT1, regT0);
-
- emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
- storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(TrustedImm32(argCount), regT1);
-
- emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, callee);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
-
- sampleCodeBlock(m_codeBlock);
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
int callee = instruction[1].u.operand;
@@ -338,10 +277,6 @@
sampleCodeBlock(m_codeBlock);
}
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_CALL)
-
} // namespace JSC
#endif // USE(JSVALUE32_64)
Modified: trunk/Source/_javascript_Core/jit/JITOpcodes.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JITOpcodes.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JITOpcodes.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -43,7 +43,6 @@
void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
{
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
// (2) The second function provides fast property access for string length
Label stringLengthBegin = align();
@@ -60,7 +59,6 @@
emitFastArithIntToImmNoCheck(regT0, regT0);
ret();
-#endif
// (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
@@ -148,24 +146,18 @@
Label nativeCallThunk = privateCompileCTINativeCall(globalData);
Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
LinkBuffer patchBuffer(*m_globalData, this, m_globalData->executableAllocator);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
-#endif
patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
@@ -178,9 +170,7 @@
trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
-#endif
}
JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
@@ -266,9 +256,8 @@
restoreReturnAddressBeforeReturn(regT3);
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
+#error "JIT not supported on this platform."
UNUSED_PARAM(executableOffsetToFunction);
breakpoint();
#endif
Modified: trunk/Source/_javascript_Core/jit/JITOpcodes32_64.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JITOpcodes32_64.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JITOpcodes32_64.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -46,7 +46,6 @@
Label softModBegin = align();
softModulo();
#endif
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
// (1) This function provides fast property access for string length
Label stringLengthBegin = align();
@@ -63,11 +62,9 @@
move(TrustedImm32(JSValue::Int32Tag), regT1);
ret();
-#endif
JumpList callLinkFailures;
// (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
-#if ENABLE(JIT_OPTIMIZE_CALL)
// VirtualCallLink Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallLinkBegin = align();
@@ -94,8 +91,6 @@
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
jump(regT0);
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
-
// VirtualCall Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallBegin = align();
@@ -152,24 +147,18 @@
Label nativeCallThunk = privateCompileCTINativeCall(globalData);
Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
LinkBuffer patchBuffer(*m_globalData, this, m_globalData->executableAllocator);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
-#endif
patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
patchBuffer.link(callCompileCconstruct, FunctionPtr(cti_op_construct_jitCompile));
@@ -180,13 +169,9 @@
trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
-#endif
#if ENABLE(JIT_USE_SOFT_MODULO)
trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
#endif
@@ -296,9 +281,8 @@
restoreReturnAddressBeforeReturn(regT3);
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
+#error "JIT not supported on this platform."
UNUSED_PARAM(executableOffsetToFunction);
breakpoint();
#endif // CPU(X86)
@@ -435,9 +419,8 @@
nativeCall = call();
restoreReturnAddressBeforeReturn(regT3);
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
+#error "JIT not supported on this platform."
breakpoint();
#endif // CPU(X86)
@@ -1800,7 +1783,7 @@
#if CPU(ARM)
elementSizeByShift = 3;
#else
-#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
+#error "JIT_USE_SOFT_MODULO not yet supported on this platform."
#endif
relativeTableJump(regT1, elementSizeByShift);
@@ -1816,7 +1799,7 @@
m_assembler.it(ARMv7Assembler::ConditionCS);
m_assembler.mov(regT2, regT1);
#else
-#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
+#error "JIT_USE_SOFT_MODULO not yet supported on this platform."
#endif
}
Modified: trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -255,63 +255,6 @@
stubCall.call(currentInstruction[1].u.operand);
}
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- unsigned resultVReg = currentInstruction[1].u.operand;
- unsigned baseVReg = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(baseVReg, regT0);
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.call(resultVReg);
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- unsigned baseVReg = currentInstruction[1].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- unsigned valueVReg = currentInstruction[3].u.operand;
- unsigned direct = currentInstruction[8].u.operand;
-
- emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
-
- JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic, cti_op_put_by_id_generic);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(regT1);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
// Assert that the following instruction is a get_by_id.
@@ -376,14 +319,6 @@
m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
-
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
unsigned resultVReg = currentInstruction[1].u.operand;
@@ -1010,10 +945,6 @@
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
#endif // USE(JSVALUE64)
void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
Modified: trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -100,94 +100,6 @@
stubCall.call(dst);
}
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
// Assert that the following instruction is a get_by_id.
@@ -253,14 +165,6 @@
m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
-
JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
JSInterfaceJIT jit;
@@ -1039,10 +943,6 @@
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
{
ASSERT(sizeof(JSValue) == 8);
Modified: trunk/Source/_javascript_Core/jit/JITStubs.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/jit/JITStubs.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/jit/JITStubs.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -801,8 +801,6 @@
{
}
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
{
// The interpreter checks for recursion here; I do not believe this can occur in CTI.
@@ -942,8 +940,6 @@
JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
}
-#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
#ifndef NDEBUG
extern "C" {
@@ -1427,8 +1423,6 @@
return JSValue::encode(result);
}
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
DEFINE_STUB_FUNCTION(void, op_put_by_id)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1821,8 +1815,6 @@
return JSValue::encode(result);
}
-#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
DEFINE_STUB_FUNCTION(void, op_check_has_instance)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2083,7 +2075,6 @@
return callFrame;
}
-#if ENABLE(JIT_OPTIMIZE_CALL)
DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2151,7 +2142,6 @@
return codePtr.executableAddress();
}
-#endif // !ENABLE(JIT_OPTIMIZE_CALL)
DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
{
Modified: trunk/Source/_javascript_Core/runtime/Lookup.cpp (88603 => 88604)
--- trunk/Source/_javascript_Core/runtime/Lookup.cpp 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/runtime/Lookup.cpp 2011-06-11 20:07:48 UTC (rev 88604)
@@ -78,7 +78,7 @@
if (!location) {
JSFunction* function;
JSGlobalObject* globalObject = asGlobalObject(thisObj->getAnonymousValue(0).asCell());
-#if ENABLE(JIT) && ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#if ENABLE(JIT)
if (entry->generator())
function = new (exec) JSFunction(exec, globalObject, globalObject->functionStructure(), entry->functionLength(), propertyName, exec->globalData().getHostFunction(entry->function(), entry->generator()));
else
Modified: trunk/Source/_javascript_Core/runtime/Lookup.h (88603 => 88604)
--- trunk/Source/_javascript_Core/runtime/Lookup.h 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/runtime/Lookup.h 2011-06-11 20:07:48 UTC (rev 88604)
@@ -77,7 +77,7 @@
unsigned char attributes() const { return m_attributes; }
-#if ENABLE(JIT) && ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#if ENABLE(JIT)
ThunkGenerator generator() const { ASSERT(m_attributes & Function); return m_u.function.generator; }
#endif
NativeFunction function() const { ASSERT(m_attributes & Function); return m_u.function.functionValue; }
Modified: trunk/Source/_javascript_Core/wtf/Platform.h (88603 => 88604)
--- trunk/Source/_javascript_Core/wtf/Platform.h 2011-06-11 18:09:14 UTC (rev 88603)
+++ trunk/Source/_javascript_Core/wtf/Platform.h 2011-06-11 20:07:48 UTC (rev 88604)
@@ -1042,33 +1042,14 @@
#if CPU(SH4) && PLATFORM(QT)
#define ENABLE_JIT 1
-#define ENABLE_YARR 1
-#define ENABLE_YARR_JIT 1
-#define WTF_USE_JIT_STUB_ARGUMENT_REGISTER 1
-#define ENABLE_ASSEMBLER 1
#endif
/* Configure the JIT */
-#if ENABLE(JIT)
- #if CPU(ARM)
- #if !defined(ENABLE_JIT_USE_SOFT_MODULO) && WTF_ARM_ARCH_AT_LEAST(5)
- #define ENABLE_JIT_USE_SOFT_MODULO 1
- #endif
- #endif
-
- #ifndef ENABLE_JIT_OPTIMIZE_CALL
- #define ENABLE_JIT_OPTIMIZE_CALL 1
- #endif
- #ifndef ENABLE_JIT_OPTIMIZE_NATIVE_CALL
- #define ENABLE_JIT_OPTIMIZE_NATIVE_CALL 1
- #endif
- #ifndef ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS
- #define ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS 1
- #endif
- #ifndef ENABLE_JIT_OPTIMIZE_METHOD_CALLS
- #define ENABLE_JIT_OPTIMIZE_METHOD_CALLS 1
- #endif
+#if CPU(ARM)
+#if !defined(ENABLE_JIT_USE_SOFT_MODULO) && WTF_ARM_ARCH_AT_LEAST(5)
+#define ENABLE_JIT_USE_SOFT_MODULO 1
#endif
+#endif
#if CPU(X86) && COMPILER(MSVC)
#define JSC_HOST_CALL __fastcall