Diff
Modified: trunk/Source/_javascript_Core/ChangeLog (192496 => 192497)
--- trunk/Source/_javascript_Core/ChangeLog 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/ChangeLog 2015-11-17 00:44:28 UTC (rev 192497)
@@ -1,3 +1,47 @@
+2015-11-16 Benjamin Poulain <bpoul...@apple.com>
+
+ [JSC] Add support for the extra registers that can be clobbered by Specials
+ https://bugs.webkit.org/show_bug.cgi?id=151246
+
+ Reviewed by Geoffrey Garen.
+
+ Specials can clobber arbitrary registers. This was not handled correctly by Air
+ and nothing was preventing us from re-allocating those registers.
+
+ This patch adds support for the extra clobbered registers in the two register allocators.
+
+ I also fixed the re-spilling FIXME of the iterated allocator because the test might
+ not always converge without it. Since we are at maximum register pressure at the patch point,
+ we could always be spilling the return value, which would loop forever.
+
+ To fix the re-spilling, I just kept a Set of every value spilled or filled so far. When selecting
+ a spill candidate, we never pick a Tmp from that set.
+
+ * b3/air/AirGenerate.cpp:
+ (JSC::B3::Air::generate):
+ * b3/air/AirHandleCalleeSaves.cpp:
+ (JSC::B3::Air::handleCalleeSaves):
+ * b3/air/AirInst.h:
+ * b3/air/AirInstInlines.h:
+ (JSC::B3::Air::Inst::forEachDefAndExtraClobberedTmp):
+ * b3/air/AirIteratedRegisterCoalescing.cpp:
+ (JSC::B3::Air::IteratedRegisterCoalescingAllocator::IteratedRegisterCoalescingAllocator):
+ (JSC::B3::Air::IteratedRegisterCoalescingAllocator::build):
+ (JSC::B3::Air::IteratedRegisterCoalescingAllocator::addEdges):
+ (JSC::B3::Air::IteratedRegisterCoalescingAllocator::selectSpill):
+ (JSC::B3::Air::addSpillAndFillToProgram):
+ (JSC::B3::Air::iteratedRegisterCoalescingOnType):
+ (JSC::B3::Air::iteratedRegisterCoalescing):
+ * b3/air/AirSpillEverything.cpp:
+ (JSC::B3::Air::spillEverything):
+ * b3/testb3.cpp:
+ (JSC::B3::testSimplePatchpointWithoutOuputClobbersGPArgs):
+ (JSC::B3::testSimplePatchpointWithOuputClobbersGPArgs):
+ (JSC::B3::testSimplePatchpointWithoutOuputClobbersFPArgs):
+ (JSC::B3::testSimplePatchpointWithOuputClobbersFPArgs):
+ (JSC::B3::run):
+ * jit/RegisterSet.h:
+
2015-11-16 Benjamin Poulain <benja...@webkit.org>
Build fix after r192492
Modified: trunk/Source/_javascript_Core/b3/air/AirGenerate.cpp (192496 => 192497)
--- trunk/Source/_javascript_Core/b3/air/AirGenerate.cpp 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/b3/air/AirGenerate.cpp 2015-11-17 00:44:28 UTC (rev 192497)
@@ -67,8 +67,10 @@
eliminateDeadCode(code);
- // This is where we would have a real register allocator. Then, we could use spillEverything()
- // in place of the register allocator only for testing.
+ // Register allocation for all the Tmps that do not have a corresponding machine register.
+ // After this phase, every Tmp has a reg.
+ //
+ // For debugging, you can use spillEverything() to put everything to the stack between each Inst.
iteratedRegisterCoalescing(code);
// Prior to this point the prologue and epilogue is implicit. This makes it explicit. It also
Modified: trunk/Source/_javascript_Core/b3/air/AirHandleCalleeSaves.cpp (192496 => 192497)
--- trunk/Source/_javascript_Core/b3/air/AirHandleCalleeSaves.cpp 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/b3/air/AirHandleCalleeSaves.cpp 2015-11-17 00:44:28 UTC (rev 192497)
@@ -30,6 +30,7 @@
#include "AirCode.h"
#include "AirInsertionSet.h"
+#include "AirInstInlines.h"
#include "AirPhaseScope.h"
namespace JSC { namespace B3 { namespace Air {
@@ -47,6 +48,9 @@
// At first we just record all used regs.
usedCalleeSaves.set(tmp.reg());
});
+
+ if (inst.hasSpecial())
+ usedCalleeSaves.merge(inst.extraClobberedRegs());
}
}
Modified: trunk/Source/_javascript_Core/b3/air/AirInst.h (192496 => 192497)
--- trunk/Source/_javascript_Core/b3/air/AirInst.h 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/b3/air/AirInst.h 2015-11-17 00:44:28 UTC (rev 192497)
@@ -126,6 +126,10 @@
// extraClobberedRegs() only works if hasSpecial() returns true.
const RegisterSet& extraClobberedRegs();
+ // Iterate over the Defs and the extra clobbered registers.
+ template<typename Functor>
+ void forEachDefAndExtraClobberedTmp(Arg::Type, const Functor& functor);
+
// Use this to report which registers are live. This should be done just before codegen. Note
// that for efficiency, reportUsedRegisters() only works if hasSpecial() returns true.
void reportUsedRegisters(const RegisterSet&);
Modified: trunk/Source/_javascript_Core/b3/air/AirInstInlines.h (192496 => 192497)
--- trunk/Source/_javascript_Core/b3/air/AirInstInlines.h 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/b3/air/AirInstInlines.h 2015-11-17 00:44:28 UTC (rev 192497)
@@ -89,6 +89,26 @@
return args[0].special()->extraClobberedRegs(*this);
}
+template<typename Functor>
+inline void Inst::forEachDefAndExtraClobberedTmp(Arg::Type type, const Functor& functor)
+{
+ forEachTmp([&] (Tmp& tmpArg, Arg::Role role, Arg::Type argType) {
+ if (argType == type && Arg::isDef(role))
+ functor(tmpArg);
+ });
+
+ if (!hasSpecial())
+ return;
+
+ const RegisterSet& clobberedRegisters = extraClobberedRegs();
+ clobberedRegisters.forEach([functor, type] (Reg reg) {
+ if (reg.isGPR() == (type == Arg::GP)) {
+ Tmp registerTmp(reg);
+ functor(registerTmp);
+ }
+ });
+}
+
inline void Inst::reportUsedRegisters(const RegisterSet& usedRegisters)
{
ASSERT(hasSpecial());
Modified: trunk/Source/_javascript_Core/b3/air/AirIteratedRegisterCoalescing.cpp (192496 => 192497)
--- trunk/Source/_javascript_Core/b3/air/AirIteratedRegisterCoalescing.cpp 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/b3/air/AirIteratedRegisterCoalescing.cpp 2015-11-17 00:44:28 UTC (rev 192497)
@@ -130,8 +130,9 @@
template<Arg::Type type>
class IteratedRegisterCoalescingAllocator {
public:
- IteratedRegisterCoalescingAllocator(Code& code)
- : m_numberOfRegisters(regsInPriorityOrder(type).size())
+ IteratedRegisterCoalescingAllocator(Code& code, const HashSet<Tmp>& unspillableTmp)
+ : m_unspillableTmp(unspillableTmp)
+ , m_numberOfRegisters(regsInPriorityOrder(type).size())
{
initializeDegrees(code);
@@ -144,20 +145,17 @@
void build(Inst& inst, const Liveness<Tmp>::LocalCalc& localCalc)
{
- // All the Def()s interfere with eachother.
- inst.forEachTmp([&] (Tmp& arg, Arg::Role role, Arg::Type argType) {
- if (argType != type)
- return;
+ inst.forEachDefAndExtraClobberedTmp(type, [&] (Tmp& arg) {
+ // All the Def()s interfere with each other and with all the extra clobbered Tmps.
+ // We should not use forEachDefAndExtraClobberedTmp() here since colored Tmps
+ // do not need interference edges in our implementation.
+ inst.forEachTmp([&] (Tmp& otherArg, Arg::Role role, Arg::Type otherArgType) {
+ if (otherArgType != type)
+ return;
- if (Arg::isDef(role)) {
- inst.forEachTmp([&] (Tmp& otherArg, Arg::Role role, Arg::Type) {
- if (argType != type)
- return;
-
- if (Arg::isDef(role))
- addEdge(arg, otherArg);
- });
- }
+ if (Arg::isDef(role))
+ addEdge(arg, otherArg);
+ });
});
if (MoveInstHelper<type>::mayBeCoalescable(inst)) {
@@ -282,12 +280,10 @@
void addEdges(Inst& inst, const HashSet<Tmp>& liveTmp)
{
// All the Def()s interfere with everthing live.
- inst.forEachTmp([&] (Tmp& arg, Arg::Role role, Arg::Type argType) {
- if (argType == type && Arg::isDef(role)) {
- for (const Tmp& liveTmp : liveTmp) {
- if (liveTmp.isGP() == (type == Arg::GP))
- addEdge(arg, liveTmp);
- }
+ inst.forEachDefAndExtraClobberedTmp(type, [&] (Tmp& arg) {
+ for (const Tmp& liveTmp : liveTmp) {
+ if (liveTmp.isGP() == (type == Arg::GP))
+ addEdge(arg, liveTmp);
}
});
}
@@ -573,10 +569,13 @@
void selectSpill()
{
// FIXME: we should select a good candidate based on all the information we have.
- // FIXME: we should never select a spilled tmp as we would never converge.
-
auto iterator = m_spillWorklist.begin();
+ while (iterator != m_spillWorklist.end() && m_unspillableTmp.contains(*iterator))
+ ++iterator;
+
+ RELEASE_ASSERT_WITH_MESSAGE(iterator != m_spillWorklist.end(), "It is not possible to color the Air graph with the number of available registers.");
+
auto victimIterator = iterator;
unsigned maxDegree = m_degrees[AbsoluteTmpHelper<type>::absoluteIndex(*iterator)];
@@ -584,6 +583,9 @@
for (;iterator != m_spillWorklist.end(); ++iterator) {
unsigned tmpDegree = m_degrees[AbsoluteTmpHelper<type>::absoluteIndex(*iterator)];
if (tmpDegree > maxDegree) {
+ if (m_unspillableTmp.contains(*iterator))
+ continue;
+
victimIterator = iterator;
maxDegree = tmpDegree;
}
@@ -749,6 +751,7 @@
};
typedef SimpleClassHashTraits<InterferenceEdge> InterferenceEdgeHashTraits;
+ const HashSet<Tmp>& m_unspillableTmp;
unsigned m_numberOfRegisters { 0 };
// The interference graph.
@@ -901,8 +904,11 @@
}
template<Arg::Type type>
-static void addSpillAndFillToProgram(Code& code, const HashSet<Tmp>& spilledTmp)
+static void addSpillAndFillToProgram(Code& code, const HashSet<Tmp>& spilledTmp, HashSet<Tmp>& unspillableTmp)
{
+ // All the spilled values become unspillable.
+ unspillableTmp.add(spilledTmp.begin(), spilledTmp.end());
+
// Allocate stack slot for each spilled value.
HashMap<Tmp, StackSlot*> stackSlots;
for (Tmp tmp : spilledTmp) {
@@ -947,6 +953,9 @@
Tmp newTmp = code.newTmp(type);
insertionSet.insert(instIndex, move, inst.origin, arg, newTmp);
tmp = newTmp;
+
+ // Any new Fill() should never be spilled.
+ unspillableTmp.add(tmp);
}
if (Arg::isDef(role))
insertionSet.insert(instIndex + 1, move, inst.origin, tmp, arg);
@@ -957,10 +966,10 @@
}
template<Arg::Type type>
-static void iteratedRegisterCoalescingOnType(Code& code)
+static void iteratedRegisterCoalescingOnType(Code& code, HashSet<Tmp>& unspillableTmps)
{
while (true) {
- IteratedRegisterCoalescingAllocator<type> allocator(code);
+ IteratedRegisterCoalescingAllocator<type> allocator(code, unspillableTmps);
Liveness<Tmp> liveness(code);
for (BasicBlock* block : code) {
Liveness<Tmp>::LocalCalc localCalc(liveness, block);
@@ -976,7 +985,7 @@
assignRegisterToTmpInProgram(code, allocator);
return;
}
- addSpillAndFillToProgram<type>(code, allocator.spilledTmp());
+ addSpillAndFillToProgram<type>(code, allocator.spilledTmp(), unspillableTmps);
}
}
@@ -987,10 +996,13 @@
bool gpIsColored = false;
bool fpIsColored = false;
+ HashSet<Tmp> unspillableGPs;
+ HashSet<Tmp> unspillableFPs;
+
// First we run both allocator together as long as they both spill.
while (!gpIsColored && !fpIsColored) {
- IteratedRegisterCoalescingAllocator<Arg::GP> gpAllocator(code);
- IteratedRegisterCoalescingAllocator<Arg::FP> fpAllocator(code);
+ IteratedRegisterCoalescingAllocator<Arg::GP> gpAllocator(code, unspillableGPs);
+ IteratedRegisterCoalescingAllocator<Arg::FP> fpAllocator(code, unspillableFPs);
// Liveness Analysis can be prohibitively expensive. It is shared
// between the two allocators to avoid doing it twice.
@@ -1014,19 +1026,19 @@
assignRegisterToTmpInProgram(code, gpAllocator);
gpIsColored = true;
} else
- addSpillAndFillToProgram<Arg::GP>(code, gpAllocator.spilledTmp());
+ addSpillAndFillToProgram<Arg::GP>(code, gpAllocator.spilledTmp(), unspillableGPs);
if (fpAllocator.spilledTmp().isEmpty()) {
assignRegisterToTmpInProgram(code, fpAllocator);
fpIsColored = true;
} else
- addSpillAndFillToProgram<Arg::FP>(code, fpAllocator.spilledTmp());
+ addSpillAndFillToProgram<Arg::FP>(code, fpAllocator.spilledTmp(), unspillableFPs);
};
if (!gpIsColored)
- iteratedRegisterCoalescingOnType<Arg::GP>(code);
+ iteratedRegisterCoalescingOnType<Arg::GP>(code, unspillableGPs);
if (!fpIsColored)
- iteratedRegisterCoalescingOnType<Arg::FP>(code);
+ iteratedRegisterCoalescingOnType<Arg::FP>(code, unspillableFPs);
}
} } } // namespace JSC::B3::Air
Modified: trunk/Source/_javascript_Core/b3/air/AirSpillEverything.cpp (192496 => 192497)
--- trunk/Source/_javascript_Core/b3/air/AirSpillEverything.cpp 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/b3/air/AirSpillEverything.cpp 2015-11-17 00:44:28 UTC (rev 192497)
@@ -58,11 +58,12 @@
// Gotta account for dead assignments to registers. These may happen because the input
// code is suboptimal.
- inst.forEachArg(
- [&] (Arg& arg, Arg::Role role, Arg::Type) {
- if (Arg::isDef(role) && arg.isReg())
- registerSet.set(arg.reg());
- });
+ auto updateRegisterSet = [®isterSet] (const Tmp& tmp) {
+ if (tmp.isReg())
+ registerSet.set(tmp.reg());
+ };
+ inst.forEachDefAndExtraClobberedTmp(Arg::GP, updateRegisterSet);
+ inst.forEachDefAndExtraClobberedTmp(Arg::FP, updateRegisterSet);
};
for (unsigned instIndex = block->size(); instIndex--;) {
Modified: trunk/Source/_javascript_Core/b3/testb3.cpp (192496 => 192497)
--- trunk/Source/_javascript_Core/b3/testb3.cpp 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/b3/testb3.cpp 2015-11-17 00:44:28 UTC (rev 192497)
@@ -2760,6 +2760,154 @@
CHECK(compileAndRun<int>(proc, 1, 2) == 3);
}
+void testSimplePatchpointWithoutOuputClobbersGPArgs()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* const1 = root->appendNew<Const64Value>(proc, Origin(), 42);
+ Value* const2 = root->appendNew<Const64Value>(proc, Origin(), 13);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ patchpoint->clobber(RegisterSet(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1));
+ patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ CHECK(params.reps.size() == 2);
+ CHECK(params.reps[0].isGPR());
+ CHECK(params.reps[1].isGPR());
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), params.reps[0].gpr());
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), params.reps[1].gpr());
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), GPRInfo::argumentGPR0);
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), GPRInfo::argumentGPR1);
+ });
+
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), arg1, arg2);
+ root->appendNew<ControlValue>(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 3);
+}
+
+void testSimplePatchpointWithOuputClobbersGPArgs()
+{
+ // We can't predict where the output will be but we want to be sure it is not
+ // one of the clobbered registers, which is a bit hard to test.
+ //
+ // What we do is force the hand of our register allocator by clobbering absolutely
+ // everything but 1. The only valid allocation is to give it to the result and
+ // spill everything else.
+
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+ Value* const1 = root->appendNew<Const64Value>(proc, Origin(), 42);
+ Value* const2 = root->appendNew<Const64Value>(proc, Origin(), 13);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Int64, Origin());
+
+ RegisterSet clobberAll = RegisterSet::allGPRs();
+ clobberAll.exclude(RegisterSet::stackRegisters());
+ clobberAll.exclude(RegisterSet::reservedHardwareRegisters());
+ clobberAll.clear(GPRInfo::argumentGPR2);
+ patchpoint->clobber(clobberAll);
+
+ patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ CHECK(params.reps.size() == 3);
+ CHECK(params.reps[0].isGPR());
+ CHECK(params.reps[1].isGPR());
+ CHECK(params.reps[2].isGPR());
+ jit.move(params.reps[1].gpr(), params.reps[0].gpr());
+ jit.add64(params.reps[2].gpr(), params.reps[0].gpr());
+
+ clobberAll.forEach([&] (Reg reg) {
+ jit.move(CCallHelpers::TrustedImm32(0x00ff00ff), reg.gpr());
+ });
+ });
+
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), patchpoint,
+ root->appendNew<Value>(proc, Add, Origin(), arg1, arg2));
+ root->appendNew<ControlValue>(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<int>(proc, 1, 2) == 58);
+}
+
+void testSimplePatchpointWithoutOuputClobbersFPArgs()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* const1 = root->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+ Value* const2 = root->appendNew<ConstDoubleValue>(proc, Origin(), 13.1);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Void, Origin());
+ patchpoint->clobber(RegisterSet(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1));
+ patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ CHECK(params.reps.size() == 2);
+ CHECK(params.reps[0].isFPR());
+ CHECK(params.reps[1].isFPR());
+ jit.moveZeroToDouble(params.reps[0].fpr());
+ jit.moveZeroToDouble(params.reps[1].fpr());
+ jit.moveZeroToDouble(FPRInfo::argumentFPR0);
+ jit.moveZeroToDouble(FPRInfo::argumentFPR1);
+ });
+
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), arg1, arg2);
+ root->appendNew<ControlValue>(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<double>(proc, 1.5, 2.5) == 4);
+}
+
+void testSimplePatchpointWithOuputClobbersFPArgs()
+{
+ Procedure proc;
+ BasicBlock* root = proc.addBlock();
+ Value* arg1 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR0);
+ Value* arg2 = root->appendNew<ArgumentRegValue>(proc, Origin(), FPRInfo::argumentFPR1);
+ Value* const1 = root->appendNew<ConstDoubleValue>(proc, Origin(), 42.5);
+ Value* const2 = root->appendNew<ConstDoubleValue>(proc, Origin(), 13.1);
+
+ PatchpointValue* patchpoint = root->appendNew<PatchpointValue>(proc, Double, Origin());
+
+ RegisterSet clobberAll = RegisterSet::allFPRs();
+ clobberAll.exclude(RegisterSet::stackRegisters());
+ clobberAll.exclude(RegisterSet::reservedHardwareRegisters());
+ clobberAll.clear(FPRInfo::argumentFPR2);
+ patchpoint->clobber(clobberAll);
+
+ patchpoint->append(ConstrainedValue(const1, ValueRep::SomeRegister));
+ patchpoint->append(ConstrainedValue(const2, ValueRep::SomeRegister));
+
+ patchpoint->setGenerator(
+ [&] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+ CHECK(params.reps.size() == 3);
+ CHECK(params.reps[0].isFPR());
+ CHECK(params.reps[1].isFPR());
+ CHECK(params.reps[2].isFPR());
+ jit.addDouble(params.reps[1].fpr(), params.reps[2].fpr(), params.reps[0].fpr());
+
+ clobberAll.forEach([&] (Reg reg) {
+ jit.moveZeroToDouble(reg.fpr());
+ });
+ });
+
+ Value* result = root->appendNew<Value>(proc, Add, Origin(), patchpoint,
+ root->appendNew<Value>(proc, Add, Origin(), arg1, arg2));
+ root->appendNew<ControlValue>(proc, Return, Origin(), result);
+
+ CHECK(compileAndRun<double>(proc, 1.5, 2.5) == 59.6);
+}
+
void testPatchpointCallArg()
{
Procedure proc;
@@ -4516,6 +4664,10 @@
RUN(testComplex(4, 384));
RUN(testSimplePatchpoint());
+ RUN(testSimplePatchpointWithoutOuputClobbersGPArgs());
+ RUN(testSimplePatchpointWithOuputClobbersGPArgs());
+ RUN(testSimplePatchpointWithoutOuputClobbersFPArgs());
+ RUN(testSimplePatchpointWithOuputClobbersFPArgs());
RUN(testPatchpointCallArg());
RUN(testPatchpointFixedRegister());
RUN(testPatchpointAny());
Modified: trunk/Source/_javascript_Core/jit/RegisterSet.h (192496 => 192497)
--- trunk/Source/_javascript_Core/jit/RegisterSet.h 2015-11-17 00:26:57 UTC (rev 192496)
+++ trunk/Source/_javascript_Core/jit/RegisterSet.h 2015-11-17 00:44:28 UTC (rev 192497)
@@ -45,8 +45,8 @@
setMany(regs...);
}
- static RegisterSet stackRegisters();
- static RegisterSet reservedHardwareRegisters();
+ JS_EXPORT_PRIVATE static RegisterSet stackRegisters();
+ JS_EXPORT_PRIVATE static RegisterSet reservedHardwareRegisters();
static RegisterSet runtimeRegisters();
static RegisterSet specialRegisters(); // The union of stack, reserved hardware, and runtime registers.
static RegisterSet calleeSaveRegisters();
@@ -59,8 +59,8 @@
#endif
static RegisterSet volatileRegistersForJSCall();
static RegisterSet stubUnavailableRegisters(); // The union of callee saves and special registers.
- static RegisterSet allGPRs();
- static RegisterSet allFPRs();
+ JS_EXPORT_PRIVATE static RegisterSet allGPRs();
+ JS_EXPORT_PRIVATE static RegisterSet allFPRs();
static RegisterSet allRegisters();
static RegisterSet registersToNotSaveForJSCall();