Modified: branches/safari-605-branch/Source/_javascript_Core/ftl/FTLLowerDFGToB3.cpp (227915 => 227916)
--- branches/safari-605-branch/Source/_javascript_Core/ftl/FTLLowerDFGToB3.cpp 2018-01-31 19:21:57 UTC (rev 227915)
+++ branches/safari-605-branch/Source/_javascript_Core/ftl/FTLLowerDFGToB3.cpp 2018-01-31 19:21:59 UTC (rev 227916)
@@ -31,6 +31,7 @@
#include "AirCode.h"
#include "AirGenerationContext.h"
#include "AllowMacroScratchRegisterUsage.h"
+#include "AllowMacroScratchRegisterUsageIf.h"
#include "AtomicsObject.h"
#include "B3CheckValue.h"
#include "B3FenceValue.h"
@@ -12008,6 +12009,10 @@
LBasicBlock lastNext = m_out.insertNewBlocksBefore(continuation);
PatchpointValue* patchpoint = m_out.patchpoint(pointerType());
+ if (isARM64()) {
+ // emitAllocateWithNonNullAllocator uses the scratch registers on ARM.
+ patchpoint->clobber(RegisterSet::macroScratchRegisters());
+ }
patchpoint->effects.terminal = true;
if (actualAllocator.isConstant())
patchpoint->numGPScratchRegisters++;
@@ -12021,6 +12026,7 @@
patchpoint->setGenerator(
[=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ AllowMacroScratchRegisterUsageIf allowScratchIf(jit, isARM64());
CCallHelpers::JumpList jumpToSlowPath;
GPRReg allocatorGPR;
Modified: branches/safari-605-branch/Source/_javascript_Core/jit/AssemblyHelpers.cpp (227915 => 227916)
--- branches/safari-605-branch/Source/_javascript_Core/jit/AssemblyHelpers.cpp 2018-01-31 19:21:57 UTC (rev 227915)
+++ branches/safari-605-branch/Source/_javascript_Core/jit/AssemblyHelpers.cpp 2018-01-31 19:21:59 UTC (rev 227916)
@@ -584,14 +584,16 @@
void AssemblyHelpers::emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
{
- // NOTE: This is carefully written so that we can call it while we disallow scratch
- // register usage.
-
if (Options::forceGCSlowPaths()) {
slowPath.append(jump());
return;
}
-
+
+    // NOTE: some invariants of this function:
+    // - When going to the slow path, we must leave resultGPR with zero in it.
+    // - We *cannot* use RegisterSet::macroScratchRegisters on x86.
+    // - We *can* use RegisterSet::macroScratchRegisters on ARM.
+
Jump popPath;
Jump done;
@@ -600,19 +602,11 @@
#else
loadPtr(&vm().threadLocalCacheData, scratchGPR);
#endif
- if (!isX86())
- load32(Address(scratchGPR, ThreadLocalCache::offsetOfSizeInData()), resultGPR);
if (allocator.isConstant()) {
- if (isX86())
- slowPath.append(branch32(BelowOrEqual, Address(scratchGPR, ThreadLocalCache::offsetOfSizeInData()), TrustedImm32(allocator.allocator().offset())));
- else
- slowPath.append(branch32(BelowOrEqual, resultGPR, TrustedImm32(allocator.allocator().offset())));
+ slowPath.append(branch32(BelowOrEqual, Address(scratchGPR, ThreadLocalCache::offsetOfSizeInData()), TrustedImm32(allocator.allocator().offset())));
addPtr(TrustedImm32(ThreadLocalCache::offsetOfFirstAllocatorInData() + allocator.allocator().offset()), scratchGPR, allocatorGPR);
} else {
- if (isX86())
- slowPath.append(branch32(BelowOrEqual, Address(scratchGPR, ThreadLocalCache::offsetOfSizeInData()), allocatorGPR));
- else
- slowPath.append(branch32(BelowOrEqual, resultGPR, allocatorGPR));
+ slowPath.append(branch32(BelowOrEqual, Address(scratchGPR, ThreadLocalCache::offsetOfSizeInData()), allocatorGPR));
addPtr(TrustedImm32(ThreadLocalCache::offsetOfFirstAllocatorInData()), allocatorGPR);
addPtr(scratchGPR, allocatorGPR);
}
@@ -622,35 +616,20 @@
if (allocator.isConstant())
add32(TrustedImm32(-allocator.allocator().cellSize(vm().heap)), resultGPR, scratchGPR);
else {
- if (isX86()) {
- move(resultGPR, scratchGPR);
- sub32(Address(allocatorGPR, LocalAllocator::offsetOfCellSize()), scratchGPR);
- } else {
- load32(Address(allocatorGPR, LocalAllocator::offsetOfCellSize()), scratchGPR);
- sub32(resultGPR, scratchGPR, scratchGPR);
- }
+ move(resultGPR, scratchGPR);
+ sub32(Address(allocatorGPR, LocalAllocator::offsetOfCellSize()), scratchGPR);
}
negPtr(resultGPR);
store32(scratchGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfRemaining()));
Address payloadEndAddr = Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfPayloadEnd());
- if (isX86())
- addPtr(payloadEndAddr, resultGPR);
- else {
- loadPtr(payloadEndAddr, scratchGPR);
- addPtr(scratchGPR, resultGPR);
- }
-
+ addPtr(payloadEndAddr, resultGPR);
+
done = jump();
popPath.link(this);
loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()), resultGPR);
- if (isX86())
- xorPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
- else {
- loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), scratchGPR);
- xorPtr(scratchGPR, resultGPR);
- }
+ xorPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
slowPath.append(branchTestPtr(Zero, resultGPR));
// The object is half-allocated: we have what we know is a fresh object, but