Diff
Modified: trunk/Source/_javascript_Core/ChangeLog (249879 => 249880)
--- trunk/Source/_javascript_Core/ChangeLog 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/ChangeLog 2019-09-14 19:36:29 UTC (rev 249880)
@@ -1,3 +1,74 @@
+2019-09-14 Yusuke Suzuki <[email protected]>
+
+ Retire x86 32bit JIT support
+ https://bugs.webkit.org/show_bug.cgi?id=201790
+
+ Reviewed by Mark Lam.
+
+ Now, Xcode no longer has the ability to build 32-bit binaries, so we cannot even test it on macOS.
+ Fedora has stopped shipping an x86 32-bit kernel. Our x86/x86_64 JIT requires SSE2, so any such relatively modern CPU
+ can use the JIT by switching from x86 to x86_64. And these CPUs are modern enough to run CLoop at high speed.
+ WebKit already disabled the x86 JIT by default even while the implementation existed. So, literally, it is not tested.
+
+ While x86 32-bit has become less useful, the x86 32-bit JIT backend is very complicated and is a major maintenance burden.
+ This is due to its very small number of registers, which scatters a lot of isX86 / CPU(X86) special cases across Baseline, DFG, and Yarr.
+
+ This patch retires x86 JIT support from _javascript_Core and CSS JIT. We still keep MacroAssembler and GPRInfo / FPRInfo,
+ MachineContext information since they are useful even though JIT is not supported.
+
+ * dfg/DFGArrayMode.cpp:
+ (JSC::DFG::ArrayMode::refine const):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::handleIntrinsicCall):
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ * dfg/DFGFixupPhase.cpp:
+ (JSC::DFG::FixupPhase::fixupNode):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::compileExceptionHandlers):
+ * dfg/DFGOSRExitCompilerCommon.cpp:
+ (JSC::DFG::osrWriteBarrier):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compileArithDiv):
+ (JSC::DFG::SpeculativeJIT::compileArithMod):
+ (JSC::DFG::SpeculativeJIT::compileCreateRest):
+ (JSC::DFG::SpeculativeJIT::compileGetDirectPname):
+ * dfg/DFGSpeculativeJIT.h:
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::emitCall):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGThunks.cpp:
+ (JSC::DFG::osrExitGenerationThunkGenerator):
+ * ftl/FTLThunks.cpp:
+ (JSC::FTL::slowPathCallThunkGenerator):
+ * jit/AssemblyHelpers.cpp:
+ (JSC::AssemblyHelpers::callExceptionFuzz):
+ (JSC::AssemblyHelpers::debugCall):
+ * jit/AssemblyHelpers.h:
+ (JSC::AssemblyHelpers::emitComputeButterflyIndexingMask):
+ * jit/CCallHelpers.h:
+ (JSC::CCallHelpers::setupArgumentsImpl):
+ (JSC::CCallHelpers::prepareForTailCallSlow):
+ * jit/CallFrameShuffler.cpp:
+ (JSC::CallFrameShuffler::prepareForTailCall):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileExceptionHandlers):
+ * jit/JITArithmetic32_64.cpp:
+ (JSC::JIT::emit_op_mod):
+ (JSC::JIT::emitSlow_op_mod):
+ * jit/SlowPathCall.h:
+ (JSC::JITSlowPathCall::call):
+ * jit/ThunkGenerators.cpp:
+ (JSC::nativeForGenerator):
+ (JSC::arityFixupGenerator):
+ * wasm/WasmAirIRGenerator.cpp:
+ (JSC::Wasm::AirIRGenerator::emitModOrDiv):
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::YarrGenerator::generateDotStarEnclosure):
+ (JSC::Yarr::YarrGenerator::generateEnter):
+ (JSC::Yarr::YarrGenerator::generateReturn):
+ (JSC::Yarr::YarrGenerator::compile):
+ * yarr/YarrJIT.h:
+
2019-09-13 Mark Lam <[email protected]>
jsc -d stopped working.
Modified: trunk/Source/_javascript_Core/dfg/DFGArrayMode.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGArrayMode.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGArrayMode.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -303,8 +303,6 @@
return ArrayMode(type, Array::NonArray, Array::OutOfBounds, Array::AsIs, action());
return ArrayMode(Array::Generic, action());
}
- if (isX86() && is32Bit() && isScopedArgumentsSpeculation(base))
- return ArrayMode(Array::Generic, action());
return withType(type);
}
Modified: trunk/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -2282,13 +2282,6 @@
}
case ArrayPushIntrinsic: {
-#if USE(JSVALUE32_64)
- if (isX86()) {
- if (argumentCountIncludingThis > 2)
- return false;
- }
-#endif
-
if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX)
return false;
@@ -2316,12 +2309,6 @@
}
case ArraySliceIntrinsic: {
-#if USE(JSVALUE32_64)
- if (isX86()) {
- // There aren't enough registers for this to be done easily.
- return false;
- }
-#endif
if (argumentCountIncludingThis < 1)
return false;
@@ -5426,14 +5413,7 @@
auto bytecode = currentInstruction->as<OpStrcat>();
int startOperand = bytecode.m_src.offset();
int numOperands = bytecode.m_count;
-#if CPU(X86)
- // X86 doesn't have enough registers to compile MakeRope with three arguments. The
- // StrCat we emit here may be turned into a MakeRope. Rather than try to be clever,
- // we just make StrCat dumber on this processor.
- const unsigned maxArguments = 2;
-#else
const unsigned maxArguments = 3;
-#endif
Node* operands[AdjacencyList::Size];
unsigned indexInOperands = 0;
for (unsigned i = 0; i < AdjacencyList::Size; ++i)
Modified: trunk/Source/_javascript_Core/dfg/DFGFixupPhase.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGFixupPhase.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGFixupPhase.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -1747,10 +1747,6 @@
case HasOwnProperty: {
fixEdge<ObjectUse>(node->child1());
-#if CPU(X86)
- // We don't have enough registers to do anything interesting on x86 and mips.
- fixEdge<UntypedUse>(node->child2());
-#else
if (node->child2()->shouldSpeculateString())
fixEdge<StringUse>(node->child2());
else if (node->child2()->shouldSpeculateSymbol())
@@ -1757,7 +1753,6 @@
fixEdge<SymbolUse>(node->child2());
else
fixEdge<UntypedUse>(node->child2());
-#endif
break;
}
Modified: trunk/Source/_javascript_Core/dfg/DFGJITCompiler.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGJITCompiler.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGJITCompiler.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -154,11 +154,6 @@
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
-#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
-#endif
m_calls.append(CallLinkRecord(call(OperationPtrTag), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame)));
jumpToExceptionHandler(vm());
@@ -173,11 +168,6 @@
move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
-#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
-#endif
m_calls.append(CallLinkRecord(call(OperationPtrTag), FunctionPtr<OperationPtrTag>(lookupExceptionHandler)));
jumpToExceptionHandler(vm());
Modified: trunk/Source/_javascript_Core/dfg/DFGOSRExitCompilerCommon.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGOSRExitCompilerCommon.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGOSRExitCompilerCommon.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -262,19 +262,10 @@
{
AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.barrierBranchWithoutFence(owner);
- // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
-#if CPU(X86)
- jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
-#endif
-
jit.setupArguments<decltype(operationOSRWriteBarrier)>(owner);
jit.move(MacroAssembler::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationOSRWriteBarrier)), scratch);
jit.call(scratch, OperationPtrTag);
-#if CPU(X86)
- jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
-#endif
-
ownerIsRememberedOrInEden.link(&jit);
}
Modified: trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -5099,7 +5099,7 @@
{
switch (node->binaryUseKind()) {
case Int32Use: {
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86_64)
SpeculateInt32Operand op1(this, node->child1());
SpeculateInt32Operand op2(this, node->child2());
GPRTemporary eax(this, X86Registers::eax);
@@ -5362,7 +5362,7 @@
}
}
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86_64)
if (node->child2()->isInt32Constant()) {
int32_t divisor = node->child2()->asInt32();
if (divisor && divisor != -1) {
@@ -5403,7 +5403,7 @@
#endif
SpeculateInt32Operand op2(this, node->child2());
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86_64)
GPRTemporary eax(this, X86Registers::eax);
GPRTemporary edx(this, X86Registers::edx);
GPRReg op1GPR = op1.gpr();
@@ -7780,7 +7780,6 @@
{
ASSERT(node->op() == CreateRest);
-#if !CPU(X86)
if (m_jit.graph().isWatchingHavingABadTimeWatchpoint(node)) {
SpeculateStrictInt32Operand arrayLength(this, node->child1());
GPRTemporary arrayResult(this);
@@ -7825,7 +7824,6 @@
cellResult(arrayResultGPR, node);
return;
}
-#endif // !CPU(X86)
SpeculateStrictInt32Operand arrayLength(this, node->child1());
GPRTemporary argumentsStart(this);
@@ -13389,16 +13387,6 @@
GPRReg baseGPR = base.gpr();
GPRReg propertyGPR = property.gpr();
-#if CPU(X86)
- // Not enough registers on X86 for this code, so always use the slow path.
- speculate(node, indexEdge);
- flushRegisters();
- JSValueRegsFlushedCallResult result(this);
- JSValueRegs resultRegs = result.regs();
- callOperation(operationGetByValCell, resultRegs, baseGPR, CCallHelpers::CellValue(propertyGPR));
- m_jit.exceptionCheck();
- jsValueResult(resultRegs, node);
-#else
Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
SpeculateStrictInt32Operand index(this, indexEdge);
SpeculateCellOperand enumerator(this, enumeratorEdge);
@@ -13445,7 +13433,6 @@
addSlowPathGenerator(slowPathCall(slowPath, this, operationGetByValCell, resultRegs, baseGPR, CCallHelpers::CellValue(propertyGPR)));
jsValueResult(resultRegs, node);
-#endif
}
void SpeculativeJIT::compileExtractCatchLocal(Node* node)
Modified: trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.h (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.h 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.h 2019-09-14 19:36:29 UTC (rev 249880)
@@ -1044,20 +1044,10 @@
#endif
}
-#if CPU(X86)
+#if CPU(ARM_THUMB2) && !CPU(ARM_HARDFP)
JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, FPRReg result)
{
JITCompiler::Call call = appendCall(function);
- if (result != InvalidFPRReg) {
- m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
- m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
- }
- return call;
- }
-#elif CPU(ARM_THUMB2) && !CPU(ARM_HARDFP)
- JITCompiler::Call appendCallSetResult(const FunctionPtr<CFunctionPtrTag> function, FPRReg result)
- {
- JITCompiler::Call call = appendCall(function);
if (result != InvalidFPRReg)
m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
return call;
Modified: trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT32_64.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -864,9 +864,6 @@
JITCompiler::Jump done = m_jit.jump();
JITCompiler::Label slowPath = m_jit.label();
- if (isX86())
- m_jit.pop(JITCompiler::selectScratchGPR(calleePayloadGPR));
-
callOperation(operationLinkDirectCall, info, calleePayloadGPR);
m_jit.exceptionCheck();
m_jit.jump().linkTo(mainPath, &m_jit);
@@ -2707,47 +2704,6 @@
}
case PutByValWithThis: {
-#if CPU(X86)
- // We don't have enough registers on X86 to do this
- // without setting up the call frame incrementally.
- unsigned index = 0;
- m_jit.poke(GPRInfo::callFrameRegister, index++);
-
- {
- JSValueOperand base(this, m_jit.graph().varArgChild(node, 0));
- GPRReg baseTag = base.tagGPR();
- GPRReg basePayload = base.payloadGPR();
-
- JSValueOperand thisValue(this, m_jit.graph().varArgChild(node, 1));
- GPRReg thisValueTag = thisValue.tagGPR();
- GPRReg thisValuePayload = thisValue.payloadGPR();
-
- JSValueOperand property(this, m_jit.graph().varArgChild(node, 2));
- GPRReg propertyTag = property.tagGPR();
- GPRReg propertyPayload = property.payloadGPR();
-
- m_jit.poke(basePayload, index++);
- m_jit.poke(baseTag, index++);
-
- m_jit.poke(thisValuePayload, index++);
- m_jit.poke(thisValueTag, index++);
-
- m_jit.poke(propertyPayload, index++);
- m_jit.poke(propertyTag, index++);
-
- flushRegisters();
- }
-
- JSValueOperand value(this, m_jit.graph().varArgChild(node, 3));
- GPRReg valueTag = value.tagGPR();
- GPRReg valuePayload = value.payloadGPR();
- m_jit.poke(valuePayload, index++);
- m_jit.poke(valueTag, index++);
-
- flushRegisters();
- appendCall(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis);
- m_jit.exceptionCheck();
-#else
static_assert(GPRInfo::numberOfRegisters >= 8, "We are assuming we have enough registers to make this call without incrementally setting up the arguments.");
JSValueOperand base(this, m_jit.graph().varArgChild(node, 0));
@@ -2766,7 +2722,6 @@
callOperation(m_jit.isStrictModeFor(node->origin.semantic) ? operationPutByValWithThisStrict : operationPutByValWithThis,
NoResult, baseRegs, thisRegs, propertyRegs, valueRegs);
m_jit.exceptionCheck();
-#endif // CPU(X86)
noResult(node);
break;
@@ -3866,23 +3821,7 @@
break;
case HasOwnProperty: {
-#if CPU(X86)
- ASSERT(node->child2().useKind() == UntypedUse);
SpeculateCellOperand object(this, node->child1());
- JSValueOperand key(this, node->child2());
- GPRTemporary result(this, Reuse, object);
-
- JSValueRegs keyRegs = key.jsValueRegs();
- GPRReg objectGPR = object.gpr();
- GPRReg resultGPR = result.gpr();
-
- speculateObject(node->child1());
-
- flushRegisters();
- callOperation(operationHasOwnProperty, resultGPR, objectGPR, keyRegs);
- booleanResult(resultGPR, node);
-#else
- SpeculateCellOperand object(this, node->child1());
GPRTemporary uniquedStringImpl(this);
GPRTemporary temp(this);
GPRTemporary hash(this);
@@ -3980,7 +3919,6 @@
done.link(&m_jit);
booleanResult(resultGPR, node);
-#endif // CPU(X86)
break;
}
Modified: trunk/Source/_javascript_Core/dfg/DFGThunks.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/dfg/DFGThunks.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/dfg/DFGThunks.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -76,11 +76,7 @@
jit.storePtr(MacroAssembler::TrustedImmPtr(scratchSize), MacroAssembler::Address(GPRInfo::regT0));
// Set up one argument.
-#if CPU(X86)
- jit.poke(GPRInfo::callFrameRegister, 0);
-#else
jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
-#endif
MacroAssembler::Call functionCall = jit.call(OperationPtrTag);
Modified: trunk/Source/_javascript_Core/ftl/FTLThunks.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/ftl/FTLThunks.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/ftl/FTLThunks.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -181,7 +181,7 @@
size_t currentOffset = key.offset() + sizeof(void*);
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86_64)
currentOffset += sizeof(void*);
#endif
Modified: trunk/Source/_javascript_Core/jit/AssemblyHelpers.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/jit/AssemblyHelpers.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/jit/AssemblyHelpers.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -250,11 +250,7 @@
}
// Set up one argument.
-#if CPU(X86)
- poke(GPRInfo::callFrameRegister, 0);
-#else
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
-#endif
move(TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
call(GPRInfo::nonPreservedNonReturnGPR, OperationPtrTag);
@@ -932,11 +928,6 @@
move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
-#elif CPU(X86)
- poke(GPRInfo::callFrameRegister, 0);
- poke(TrustedImmPtr(argument), 1);
- poke(TrustedImmPtr(buffer), 2);
- GPRReg scratch = GPRInfo::regT0;
#else
#error "JIT not supported on this platform."
#endif
Modified: trunk/Source/_javascript_Core/jit/AssemblyHelpers.h (249879 => 249880)
--- trunk/Source/_javascript_Core/jit/AssemblyHelpers.h 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/jit/AssemblyHelpers.h 2019-09-14 19:36:29 UTC (rev 249880)
@@ -481,7 +481,7 @@
}
}
-#if CPU(X86_64) || CPU(X86)
+#if CPU(X86_64)
static constexpr size_t prologueStackPointerDelta()
{
// Prologue only saves the framePointerRegister
@@ -519,7 +519,7 @@
{
push(address);
}
-#endif // CPU(X86_64) || CPU(X86)
+#endif // CPU(X86_64)
#if CPU(ARM_THUMB2) || CPU(ARM64)
static constexpr size_t prologueStackPointerDelta()
@@ -1627,12 +1627,6 @@
{
ASSERT(scratchGPR != resultGPR);
Jump done;
- if (isX86() && !isX86_64()) {
- Jump nonZero = branchTest32(NonZero, vectorLengthGPR);
- move(TrustedImm32(0), resultGPR);
- done = jump();
- nonZero.link(this);
- }
// If vectorLength == 0 then clz will return 32 on both ARM and x86. On 64-bit systems, we can then do a 64-bit right shift on a 32-bit -1 to get a 0 mask for zero vectorLength. On 32-bit ARM, shift masks with 0xff, which means it will still create a 0 mask.
countLeadingZeros32(vectorLengthGPR, scratchGPR);
move(TrustedImm32(-1), resultGPR);
Modified: trunk/Source/_javascript_Core/jit/CCallHelpers.h (249879 => 249880)
--- trunk/Source/_javascript_Core/jit/CCallHelpers.h 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/jit/CCallHelpers.h 2019-09-14 19:36:29 UTC (rev 249880)
@@ -412,45 +412,9 @@
}
#else // USE(JSVALUE64)
-#if CPU(X86)
+#if CPU(ARM_THUMB2) || CPU(MIPS)
template<typename OperationType, unsigned numGPRArgs, unsigned numGPRSources, unsigned numFPRArgs, unsigned numFPRSources, unsigned numCrossSources, unsigned extraGPRArgs, unsigned extraPoke, typename... Args>
- ALWAYS_INLINE void setupArgumentsImpl(ArgCollection<numGPRArgs, numGPRSources, numFPRArgs, numFPRSources, numCrossSources, extraGPRArgs, extraPoke> argSourceRegs, FPRReg arg, Args... args)
- {
- static_assert(std::is_same<CURRENT_ARGUMENT_TYPE, double>::value, "We should only be passing FPRRegs to a double");
- pokeForArgument(arg, numGPRArgs, numFPRArgs, numCrossSources, extraGPRArgs, extraPoke);
- setupArgumentsImpl<OperationType>(argSourceRegs.addStackArg(arg).addPoke(), args...);
- }
-
- template<typename OperationType, unsigned numGPRArgs, unsigned numGPRSources, unsigned numFPRArgs, unsigned numFPRSources, unsigned numCrossSources, unsigned extraGPRArgs, unsigned extraPoke, typename... Args>
- ALWAYS_INLINE std::enable_if_t<sizeof(CURRENT_ARGUMENT_TYPE) <= 4>
- setupArgumentsImpl(ArgCollection<numGPRArgs, numGPRSources, numFPRArgs, numFPRSources, numCrossSources, extraGPRArgs, extraPoke> argSourceRegs, GPRReg arg, Args... args)
- {
- pokeForArgument(arg, numGPRArgs, numFPRArgs, numCrossSources, extraGPRArgs, extraPoke);
- setupArgumentsImpl<OperationType>(argSourceRegs.addGPRArg(), args...);
- }
-
- template<typename OperationType, unsigned numGPRArgs, unsigned numGPRSources, unsigned numFPRArgs, unsigned numFPRSources, unsigned numCrossSources, unsigned extraGPRArgs, unsigned extraPoke, typename... Args>
- ALWAYS_INLINE std::enable_if_t<std::is_same<CURRENT_ARGUMENT_TYPE, EncodedJSValue>::value>
- setupArgumentsImpl(ArgCollection<numGPRArgs, numGPRSources, numFPRArgs, numFPRSources, numCrossSources, extraGPRArgs, extraPoke> argSourceRegs, CellValue payload, Args... args)
- {
- pokeForArgument(payload.gpr(), numGPRArgs, numFPRArgs, numCrossSources, extraGPRArgs, extraPoke);
- pokeForArgument(TrustedImm32(JSValue::CellTag), numGPRArgs, numFPRArgs, numCrossSources, extraGPRArgs, extraPoke + 1);
- setupArgumentsImpl<OperationType>(argSourceRegs.addGPRArg().addPoke(), args...);
- }
-
- template<typename OperationType, unsigned numGPRArgs, unsigned numGPRSources, unsigned numFPRArgs, unsigned numFPRSources, unsigned numCrossSources, unsigned extraGPRArgs, unsigned extraPoke, typename... Args>
- ALWAYS_INLINE std::enable_if_t<std::is_same<CURRENT_ARGUMENT_TYPE, EncodedJSValue>::value>
- setupArgumentsImpl(ArgCollection<numGPRArgs, numGPRSources, numFPRArgs, numFPRSources, numCrossSources, extraGPRArgs, extraPoke> argSourceRegs, JSValueRegs arg, Args... args)
- {
- pokeForArgument(arg.payloadGPR(), numGPRArgs, numFPRArgs, numCrossSources, extraGPRArgs, extraPoke);
- pokeForArgument(arg.tagGPR(), numGPRArgs, numFPRArgs, numCrossSources, extraGPRArgs, extraPoke + 1);
- setupArgumentsImpl<OperationType>(argSourceRegs.addGPRArg().addPoke(), args...);
- }
-
-#elif CPU(ARM_THUMB2) || CPU(MIPS)
-
- template<typename OperationType, unsigned numGPRArgs, unsigned numGPRSources, unsigned numFPRArgs, unsigned numFPRSources, unsigned numCrossSources, unsigned extraGPRArgs, unsigned extraPoke, typename... Args>
void setupArgumentsImpl(ArgCollection<numGPRArgs, numGPRSources, numFPRArgs, numFPRSources, numCrossSources, extraGPRArgs, extraPoke> argSourceRegs, FPRReg arg, Args... args)
{
static_assert(std::is_same<CURRENT_ARGUMENT_TYPE, double>::value, "We should only be passing FPRRegs to a double");
@@ -669,7 +633,7 @@
ALWAYS_INLINE void setupArgumentsImpl(ArgCollection<numGPRArgs, numGPRSources, numFPRArgs, numFPRSources, numCrossSources, extraGPRArgs, extraPoke> argSourceRegs)
{
static_assert(FunctionTraits<OperationType>::arity == numGPRArgs + numFPRArgs, "One last sanity check");
-#if USE(JSVALUE64) || CPU(X86)
+#if USE(JSVALUE64)
static_assert(FunctionTraits<OperationType>::cCallArity() == numGPRArgs + numFPRArgs + extraPoke, "Check the CCall arity");
#endif
setupStubArgs<numGPRSources, GPRReg>(clampArrayToSize<numGPRSources, GPRReg>(argSourceRegs.gprDestinations), clampArrayToSize<numGPRSources, GPRReg>(argSourceRegs.gprSources));
@@ -812,7 +776,7 @@
#elif CPU(MIPS)
loadPtr(Address(framePointerRegister, sizeof(void*)), returnAddressRegister);
subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
-#elif CPU(X86) || CPU(X86_64)
+#elif CPU(X86_64)
loadPtr(Address(framePointerRegister, sizeof(void*)), tempGPR);
push(tempGPR);
subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
Modified: trunk/Source/_javascript_Core/jit/CallFrameShuffler.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/jit/CallFrameShuffler.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/jit/CallFrameShuffler.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -377,19 +377,7 @@
m_oldFrameBase = MacroAssembler::stackPointerRegister;
m_oldFrameOffset = numLocals();
m_newFrameBase = acquireGPR();
-#if CPU(X86)
- // We load the frame pointer manually, but we need to ask the
- // algorithm to move the return PC for us (it'd probably
- // require a write to the danger zone). Since it'd be awkward
- // to ask for half a value move, we ask that the whole thing
- // be moved for us.
- addNew(VirtualRegister { 0 },
- ValueRecovery::displacedInJSStack(VirtualRegister(0), DataFormatJS));
-
- // sp will point to head0 and we will move it up half a slot
- // manually
- m_newFrameOffset = 0;
-#elif CPU(ARM_THUMB2) || CPU(MIPS)
+#if CPU(ARM_THUMB2) || CPU(MIPS)
// We load the frame pointer and link register
// manually. We could ask the algorithm to load them for us,
// and it would allow us to use the link register as an extra
@@ -476,13 +464,7 @@
prepareAny();
-#if CPU(X86)
if (verbose)
- dataLog(" Simulating pop of the call frame register\n");
- m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*)), MacroAssembler::stackPointerRegister);
-#endif
-
- if (verbose)
dataLog("Ready for tail call!\n");
}
Modified: trunk/Source/_javascript_Core/jit/JIT.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/jit/JIT.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/jit/JIT.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -945,12 +945,6 @@
move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
-
-#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
-#endif
m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandlerFromCallerFrame)));
jumpToExceptionHandler(vm());
}
@@ -964,12 +958,6 @@
// lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
-
-#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
-#endif
m_calls.append(CallRecord(call(OperationPtrTag), std::numeric_limits<unsigned>::max(), FunctionPtr<OperationPtrTag>(lookupExceptionHandler)));
jumpToExceptionHandler(vm());
}
Modified: trunk/Source/_javascript_Core/jit/JITArithmetic32_64.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/jit/JITArithmetic32_64.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/jit/JITArithmetic32_64.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -334,53 +334,15 @@
void JIT::emit_op_mod(const Instruction* currentInstruction)
{
-#if CPU(X86)
- auto bytecode = instruction->as<OpMod>();
- int dst = bytecode.m_dst.offset();
- int op1 = bytecode.m_lhs.offset();
- int op2 = bytecode.m_rhs.offset();
-
- // Make sure registers are correct for x86 IDIV instructions.
- ASSERT(regT0 == X86Registers::eax);
- ASSERT(regT1 == X86Registers::edx);
- ASSERT(regT2 == X86Registers::ecx);
- ASSERT(regT3 == X86Registers::ebx);
-
- emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
- addSlowCase(branchIfNotInt32(regT1));
- addSlowCase(branchIfNotInt32(regT0));
-
- move(regT3, regT0);
- addSlowCase(branchTest32(Zero, regT2));
- Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
- addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
- denominatorNotNeg1.link(this);
- x86ConvertToDoubleWord32();
- x86Div32(regT2);
- Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
- addSlowCase(branchTest32(Zero, regT1));
- numeratorPositive.link(this);
- emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
-#else
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
slowPathCall.call();
-#endif
}
-void JIT::emitSlow_op_mod(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&)
{
-#if CPU(X86)
- linkAllSlowCases(iter);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
- slowPathCall.call();
-#else
- UNUSED_PARAM(currentInstruction);
- UNUSED_PARAM(iter);
// We would have really useful assertions here if it wasn't for the compiler's
// insistence on attribute noreturn.
// RELEASE_ASSERT_NOT_REACHED();
-#endif
}
/* ------------------------------ END: OP_MOD ------------------------------ */
Modified: trunk/Source/_javascript_Core/jit/SlowPathCall.h (249879 => 249880)
--- trunk/Source/_javascript_Core/jit/SlowPathCall.h 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/jit/SlowPathCall.h 2019-09-14 19:36:29 UTC (rev 249880)
@@ -49,11 +49,7 @@
m_jit->sampleInstruction(&m_jit->m_codeBlock->instructions()[m_jit->m_bytecodeOffset], true);
#endif
m_jit->updateTopCallFrame();
-#if CPU(X86) && USE(JSVALUE32_64)
- m_jit->addPtr(MacroAssembler::TrustedImm32(-8), MacroAssembler::stackPointerRegister);
- m_jit->push(JIT::TrustedImm32(JIT::TrustedImmPtr(m_pc)));
- m_jit->push(JIT::callFrameRegister);
-#elif CPU(X86_64) && OS(WINDOWS)
+#if CPU(X86_64) && OS(WINDOWS)
m_jit->addPtr(MacroAssembler::TrustedImm32(-16), MacroAssembler::stackPointerRegister);
m_jit->move(MacroAssembler::stackPointerRegister, JIT::argumentGPR0);
m_jit->move(JIT::callFrameRegister, JIT::argumentGPR1);
@@ -65,9 +61,7 @@
JIT::Call call = m_jit->call(OperationPtrTag);
m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, FunctionPtr<OperationPtrTag>(m_slowPathFunction)));
-#if CPU(X86) && USE(JSVALUE32_64)
- m_jit->addPtr(MacroAssembler::TrustedImm32(16), MacroAssembler::stackPointerRegister);
-#elif CPU(X86_64) && OS(WINDOWS)
+#if CPU(X86_64) && OS(WINDOWS)
m_jit->pop(JIT::regT0); // vPC
m_jit->pop(JIT::regT1); // callFrame register
#endif
Modified: trunk/Source/_javascript_Core/jit/ThunkGenerators.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/jit/ThunkGenerators.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/jit/ThunkGenerators.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -268,24 +268,7 @@
jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm.topCallFrame);
-#if CPU(X86)
- // Calling convention: f(ecx, edx, ...);
- // Host function signature: f(ExecState*);
- jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
-
- jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
-
- // call the function
- jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::regT1);
- if (thunkFunctionType == ThunkFunctionType::JSFunction) {
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction), JSEntryPtrTag);
- } else
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, InternalFunction::offsetOfNativeFunctionFor(kind)), JSEntryPtrTag);
-
- jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
-
-#elif CPU(X86_64)
+#if CPU(X86_64)
#if !OS(WINDOWS)
// Calling convention: f(edi, esi, edx, ecx, ...);
// Host function signature: f(ExecState*);
@@ -382,22 +365,14 @@
jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm.topCallFrame);
-#if CPU(X86) && USE(JSVALUE32_64)
- jit.subPtr(JSInterfaceJIT::TrustedImm32(4), JSInterfaceJIT::stackPointerRegister);
- jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
- jit.push(JSInterfaceJIT::regT0);
-#else
#if OS(WINDOWS)
// Allocate space on stack for the 4 parameter registers.
jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
-#endif
jit.move(JSInterfaceJIT::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationVMHandleException)), JSInterfaceJIT::regT3);
jit.call(JSInterfaceJIT::regT3, OperationPtrTag);
-#if CPU(X86) && USE(JSVALUE32_64)
- jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
-#elif OS(WINDOWS)
+#if OS(WINDOWS)
jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
@@ -523,9 +498,6 @@
# endif
jit.ret();
#else // USE(JSVALUE64) section above, USE(JSVALUE32_64) section below.
-# if CPU(X86)
- jit.pop(JSInterfaceJIT::regT4);
-# endif
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
jit.load32(JSInterfaceJIT::addressFor(CallFrameSlot::argumentCount), JSInterfaceJIT::argumentGPR2);
jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);
@@ -580,9 +552,6 @@
done.link(&jit);
-# if CPU(X86)
- jit.push(JSInterfaceJIT::regT4);
-# endif
jit.ret();
#endif // End of USE(JSVALUE32_64) section.
Modified: trunk/Source/_javascript_Core/wasm/WasmAirIRGenerator.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/wasm/WasmAirIRGenerator.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/wasm/WasmAirIRGenerator.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -2344,7 +2344,7 @@
return;
}
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86_64)
Tmp eax(X86Registers::eax);
Tmp edx(X86Registers::edx);
Modified: trunk/Source/_javascript_Core/yarr/YarrJIT.cpp (249879 => 249880)
--- trunk/Source/_javascript_Core/yarr/YarrJIT.cpp 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/yarr/YarrJIT.cpp 2019-09-14 19:36:29 UTC (rev 249880)
@@ -54,7 +54,6 @@
static const RegisterID returnRegister = ARMRegisters::r0;
static const RegisterID returnRegister2 = ARMRegisters::r1;
-#define HAVE_INITIAL_START_REG
#elif CPU(ARM64)
// Argument registers
static const RegisterID input = ARM64Registers::x0;
@@ -80,7 +79,6 @@
static const RegisterID returnRegister2 = ARM64Registers::x1;
const TrustedImm32 surrogateTagMask = TrustedImm32(0xfffffc00);
-#define HAVE_INITIAL_START_REG
#define JIT_UNICODE_EXPRESSIONS
#elif CPU(MIPS)
static const RegisterID input = MIPSRegisters::a0;
@@ -95,18 +93,6 @@
static const RegisterID returnRegister = MIPSRegisters::v0;
static const RegisterID returnRegister2 = MIPSRegisters::v1;
-#define HAVE_INITIAL_START_REG
-#elif CPU(X86)
- static const RegisterID input = X86Registers::eax;
- static const RegisterID index = X86Registers::edx;
- static const RegisterID length = X86Registers::ecx;
- static const RegisterID output = X86Registers::edi;
-
- static const RegisterID regT0 = X86Registers::ebx;
- static const RegisterID regT1 = X86Registers::esi;
-
- static const RegisterID returnRegister = X86Registers::eax;
- static const RegisterID returnRegister2 = X86Registers::edx;
#elif CPU(X86_64)
#if !OS(WINDOWS)
// Argument registers
@@ -152,7 +138,6 @@
const TrustedImm32 supplementaryPlanesBase = TrustedImm32(0x10000);
const TrustedImm32 trailingSurrogateTag = TrustedImm32(0xdc00);
const TrustedImm32 surrogateTagMask = TrustedImm32(0xfffffc00);
-#define HAVE_INITIAL_START_REG
#define JIT_UNICODE_EXPRESSIONS
#endif
@@ -2021,10 +2006,6 @@
const RegisterID character = regT0;
const RegisterID matchPos = regT1;
-#ifndef HAVE_INITIAL_START_REG
- const RegisterID initialStart = character;
-#endif
-
JumpList foundBeginningNewLine;
JumpList saveStartIndex;
JumpList foundEndingNewLine;
@@ -2039,9 +2020,6 @@
ASSERT(!m_pattern.m_body->m_hasFixedSize);
getMatchStart(matchPos);
-#ifndef HAVE_INITIAL_START_REG
- loadFromFrame(m_pattern.m_initialStartValueFrameLocation, initialStart);
-#endif
saveStartIndex.append(branch32(BelowOrEqual, matchPos, initialStart));
Label findBOLLoop(this);
sub32(TrustedImm32(1), matchPos);
@@ -2051,9 +2029,6 @@
load16(BaseIndex(input, matchPos, TimesTwo, 0), character);
matchCharacterClass(character, foundBeginningNewLine, m_pattern.newlineCharacterClass());
-#ifndef HAVE_INITIAL_START_REG
- loadFromFrame(m_pattern.m_initialStartValueFrameLocation, initialStart);
-#endif
branch32(Above, matchPos, initialStart).linkTo(findBOLLoop, this);
saveStartIndex.append(jump());
@@ -3753,24 +3728,6 @@
// rcx is the pointer to the allocated space for result in x64 Windows.
push(X86Registers::ecx);
#endif
-#elif CPU(X86)
- push(X86Registers::ebp);
- move(stackPointerRegister, X86Registers::ebp);
- // TODO: do we need spill registers to fill the output pointer if there are no sub captures?
- push(X86Registers::ebx);
- push(X86Registers::edi);
- push(X86Registers::esi);
- // load output into edi (2 = saved ebp + return address).
- #if COMPILER(MSVC)
- loadPtr(Address(X86Registers::ebp, 2 * sizeof(void*)), input);
- loadPtr(Address(X86Registers::ebp, 3 * sizeof(void*)), index);
- loadPtr(Address(X86Registers::ebp, 4 * sizeof(void*)), length);
- if (compileMode == IncludeSubpatterns)
- loadPtr(Address(X86Registers::ebp, 5 * sizeof(void*)), output);
- #else
- if (compileMode == IncludeSubpatterns)
- loadPtr(Address(X86Registers::ebp, 2 * sizeof(void*)), output);
- #endif
#elif CPU(ARM64)
tagReturnAddress();
if (m_decodeSurrogatePairs) {
@@ -3828,11 +3785,6 @@
if (m_pattern.m_saveInitialStartValue)
pop(X86Registers::ebx);
pop(X86Registers::ebp);
-#elif CPU(X86)
- pop(X86Registers::esi);
- pop(X86Registers::edi);
- pop(X86Registers::ebx);
- pop(X86Registers::ebp);
#elif CPU(ARM64)
if (m_decodeSurrogatePairs)
popPair(framePointerRegister, linkRegister);
@@ -3941,13 +3893,8 @@
}
#endif
- if (m_pattern.m_saveInitialStartValue) {
-#ifdef HAVE_INITIAL_START_REG
+ if (m_pattern.m_saveInitialStartValue)
move(index, initialStart);
-#else
- storeToFrame(index, m_pattern.m_initialStartValueFrameLocation);
-#endif
- }
generate();
if (m_disassembler)
Modified: trunk/Source/_javascript_Core/yarr/YarrJIT.h (249879 => 249880)
--- trunk/Source/_javascript_Core/yarr/YarrJIT.h 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/_javascript_Core/yarr/YarrJIT.h 2019-09-14 19:36:29 UTC (rev 249880)
@@ -32,11 +32,7 @@
#include "Yarr.h"
#include "YarrPattern.h"
-#if CPU(X86) && !COMPILER(MSVC)
-#define YARR_CALL __attribute__ ((regparm (3)))
-#else
#define YARR_CALL
-#endif
namespace JSC {
Modified: trunk/Source/WTF/ChangeLog (249879 => 249880)
--- trunk/Source/WTF/ChangeLog 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/WTF/ChangeLog 2019-09-14 19:36:29 UTC (rev 249880)
@@ -1,3 +1,12 @@
+2019-09-14 Yusuke Suzuki <[email protected]>
+
+ Retire x86 32bit JIT support
+ https://bugs.webkit.org/show_bug.cgi?id=201790
+
+ Reviewed by Mark Lam.
+
+ * wtf/Platform.h:
+
2019-09-13 Jonathan Bedard <[email protected]>
iOS 13: Some SPI targets 13.1
Modified: trunk/Source/WTF/wtf/Platform.h (249879 => 249880)
--- trunk/Source/WTF/wtf/Platform.h 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/WTF/wtf/Platform.h 2019-09-14 19:36:29 UTC (rev 249880)
@@ -809,7 +809,7 @@
/* If possible, try to enable a disassembler. This is optional. We proceed in two
steps: first we try to find some disassembler that we can use, and then we
decide if the high-level disassembler API can be enabled. */
-#if !defined(USE_UDIS86) && ENABLE(JIT) && (CPU(X86) || CPU(X86_64)) && !USE(CAPSTONE)
+#if !defined(USE_UDIS86) && ENABLE(JIT) && CPU(X86_64) && !USE(CAPSTONE)
#define USE_UDIS86 1
#endif
@@ -823,7 +823,7 @@
#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT)
/* Enable the DFG JIT on X86_64. */
-#if (CPU(X86) || CPU(X86_64)) && (OS(DARWIN) || OS(LINUX) || OS(FREEBSD) || OS(HURD) || OS(WINDOWS))
+#if CPU(X86_64) && (OS(DARWIN) || OS(LINUX) || OS(FREEBSD) || OS(HURD) || OS(WINDOWS))
#define ENABLE_DFG_JIT 1
#endif
/* Enable the DFG JIT on ARMv7. Only tested on iOS, Linux, and FreeBSD. */
@@ -1047,7 +1047,7 @@
#endif
#endif
-#if ENABLE(DFG_JIT) && HAVE(MACHINE_CONTEXT) && (CPU(X86) || CPU(X86_64) || CPU(ARM64))
+#if ENABLE(DFG_JIT) && HAVE(MACHINE_CONTEXT) && (CPU(X86_64) || CPU(ARM64))
#define ENABLE_SIGNAL_BASED_VM_TRAPS 1
#endif
Modified: trunk/Source/WebCore/ChangeLog (249879 => 249880)
--- trunk/Source/WebCore/ChangeLog 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/WebCore/ChangeLog 2019-09-14 19:36:29 UTC (rev 249880)
@@ -1,3 +1,14 @@
+2019-09-14 Yusuke Suzuki <[email protected]>
+
+ Retire x86 32bit JIT support
+ https://bugs.webkit.org/show_bug.cgi?id=201790
+
+ Reviewed by Mark Lam.
+
+ * cssjit/FunctionCall.h:
+ (WebCore::FunctionCall::callAndBranchOnBooleanReturnValue):
+ (WebCore::FunctionCall::swapArguments):
+
2019-09-14 Zalan Bujtas <[email protected]>
[LFC] FormattingContext::Geometry::inFlowPositionedPositionOffset should not read containing block's width
Modified: trunk/Source/WebCore/cssjit/FunctionCall.h (249879 => 249880)
--- trunk/Source/WebCore/cssjit/FunctionCall.h 2019-09-14 18:52:02 UTC (rev 249879)
+++ trunk/Source/WebCore/cssjit/FunctionCall.h 2019-09-14 19:36:29 UTC (rev 249880)
@@ -74,7 +74,7 @@
JSC::MacroAssembler::Jump callAndBranchOnBooleanReturnValue(JSC::MacroAssembler::ResultCondition condition)
{
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86_64)
return callAndBranchOnCondition(condition, JSC::MacroAssembler::TrustedImm32(0xff));
#elif CPU(ARM64) || CPU(ARM)
return callAndBranchOnCondition(condition, JSC::MacroAssembler::TrustedImm32(-1));
@@ -97,7 +97,7 @@
JSC::MacroAssembler::RegisterID a = m_firstArgument;
JSC::MacroAssembler::RegisterID b = m_secondArgument;
// x86_64 can swap without a temporary register. On other architectures, we need to allocate a temporary register to swap the values.
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86_64)
m_assembler.swap(a, b);
#elif CPU(ARM64) || CPU(ARM_THUMB2)
m_assembler.move(a, tempRegister);