Title: [194334] trunk/Source/JavaScriptCore
Revision
194334
Author
[email protected]
Date
2015-12-21 10:56:54 -0800 (Mon, 21 Dec 2015)

Log Message

FTL B3 should do vararg calls
https://bugs.webkit.org/show_bug.cgi?id=152468

Reviewed by Benjamin Poulain.

This adds FTL->B3 lowering of all kinds of varargs calls - forwarding or not, tail or not,
and construct or not. Like all other such lowerings, all of the code is in one place in
FTLLower.

I removed code for varargs and exception spill slots from the B3 path, since it won't need
it. The plan is to rely on B3 doing the spilling for us by using some combination of early
clobber and late use.

This adds ValueRep::emitRestore(), a helpful method for emitting code to restore any ValueRep
into any 64-bit Reg (FPR or GPR).

I wrote new tests for vararg calls, because I wasn't sure which of the existing ones we can
run. These are short-running tests, so I'm not worried about bloating our test suite.

* b3/B3ValueRep.cpp:
(JSC::B3::ValueRep::dump):
(JSC::B3::ValueRep::emitRestore):
* b3/B3ValueRep.h:
* ftl/FTLLowerDFGToLLVM.cpp:
(JSC::FTL::DFG::LowerDFGToLLVM::lower):
(JSC::FTL::DFG::LowerDFGToLLVM::compileCallOrConstructVarargs):
(JSC::FTL::DFG::LowerDFGToLLVM::compileInvalidationPoint):
* ftl/FTLState.h:
* tests/stress/varargs-no-forward.js: Added.
* tests/stress/varargs-simple.js: Added.
* tests/stress/varargs-two-level.js: Added.

Modified Paths

Added Paths

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (194333 => 194334)


--- trunk/Source/JavaScriptCore/ChangeLog	2015-12-21 18:40:10 UTC (rev 194333)
+++ trunk/Source/JavaScriptCore/ChangeLog	2015-12-21 18:56:54 UTC (rev 194334)
@@ -1,3 +1,37 @@
+2015-12-21  Filip Pizlo  <[email protected]>
+
+        FTL B3 should do vararg calls
+        https://bugs.webkit.org/show_bug.cgi?id=152468
+
+        Reviewed by Benjamin Poulain.
+
+        This adds FTL->B3 lowering of all kinds of varargs calls - forwarding or not, tail or not,
+        and construct or not. Like all other such lowerings, all of the code is in one place in
+        FTLLower.
+
+        I removed code for varargs and exception spill slots from the B3 path, since it won't need
+        it. The plan is to rely on B3 doing the spilling for us by using some combination of early
+        clobber and late use.
+
+        This adds ValueRep::emitRestore(), a helpful method for emitting code to restore any ValueRep
+        into any 64-bit Reg (FPR or GPR).
+
+        I wrote new tests for vararg calls, because I wasn't sure which of the existing ones we can
+        run. These are short-running tests, so I'm not worried about bloating our test suite.
+
+        * b3/B3ValueRep.cpp:
+        (JSC::B3::ValueRep::dump):
+        (JSC::B3::ValueRep::emitRestore):
+        * b3/B3ValueRep.h:
+        * ftl/FTLLowerDFGToLLVM.cpp:
+        (JSC::FTL::DFG::LowerDFGToLLVM::lower):
+        (JSC::FTL::DFG::LowerDFGToLLVM::compileCallOrConstructVarargs):
+        (JSC::FTL::DFG::LowerDFGToLLVM::compileInvalidationPoint):
+        * ftl/FTLState.h:
+        * tests/stress/varargs-no-forward.js: Added.
+        * tests/stress/varargs-simple.js: Added.
+        * tests/stress/varargs-two-level.js: Added.
+
 2015-12-18  Mark Lam  <[email protected]>
 
         Add unary operator tests to compare JIT and LLINT results.

Modified: trunk/Source/JavaScriptCore/b3/B3ValueRep.cpp (194333 => 194334)


--- trunk/Source/JavaScriptCore/b3/B3ValueRep.cpp	2015-12-21 18:40:10 UTC (rev 194333)
+++ trunk/Source/JavaScriptCore/b3/B3ValueRep.cpp	2015-12-21 18:56:54 UTC (rev 194334)
@@ -28,6 +28,8 @@
 
 #if ENABLE(B3_JIT)
 
+#include "AssemblyHelpers.h"
+
 namespace JSC { namespace B3 {
 
 void ValueRep::dump(PrintStream& out) const
@@ -55,6 +57,49 @@
     RELEASE_ASSERT_NOT_REACHED();
 }
 
+void ValueRep::emitRestore(AssemblyHelpers& jit, Reg reg)
+{
+    if (reg.isGPR()) {
+        switch (kind()) {
+        case Register:
+            if (isGPR())
+                jit.move(gpr(), reg.gpr());
+            else
+                jit.moveDoubleTo64(fpr(), reg.gpr());
+            break;
+        case Stack:
+            jit.load64(AssemblyHelpers::Address(GPRInfo::callFrameRegister, offsetFromFP()), reg.gpr());
+            break;
+        case Constant:
+            jit.move(AssemblyHelpers::TrustedImm64(value()), reg.gpr());
+            break;
+        default:
+            RELEASE_ASSERT_NOT_REACHED();
+            break;
+        }
+        return;
+    }
+    
+    switch (kind()) {
+    case Register:
+        if (isGPR())
+            jit.move64ToDouble(gpr(), reg.fpr());
+        else
+            jit.moveDouble(fpr(), reg.fpr());
+        break;
+    case Stack:
+        jit.loadDouble(AssemblyHelpers::Address(GPRInfo::callFrameRegister, offsetFromFP()), reg.fpr());
+        break;
+    case Constant:
+        jit.move(AssemblyHelpers::TrustedImm64(value()), jit.scratchRegister());
+        jit.move64ToDouble(jit.scratchRegister(), reg.fpr());
+        break;
+    default:
+        RELEASE_ASSERT_NOT_REACHED();
+        break;
+    }
+}
+
 } } // namespace JSC::B3
 
 namespace WTF {

Modified: trunk/Source/JavaScriptCore/b3/B3ValueRep.h (194333 => 194334)


--- trunk/Source/JavaScriptCore/b3/B3ValueRep.h	2015-12-21 18:40:10 UTC (rev 194333)
+++ trunk/Source/JavaScriptCore/b3/B3ValueRep.h	2015-12-21 18:56:54 UTC (rev 194334)
@@ -34,8 +34,12 @@
 #include "Reg.h"
 #include <wtf/PrintStream.h>
 
-namespace JSC { namespace B3 {
+namespace JSC {
 
+class AssemblyHelpers;
+
+namespace B3 {
+
 // We use this class to describe value representations at stackmaps. It's used both to force a
 // representation and to get the representation. When the B3 client forces a representation, we say
 // that it's an input. When B3 tells the client what representation it picked, we say that it's an
@@ -216,6 +220,10 @@
 
     JS_EXPORT_PRIVATE void dump(PrintStream&) const;
 
+    // This has a simple contract: it emits code to restore the value into the given register. This
+    // will work even if it requires moving bits between a GPR and an FPR.
+    void emitRestore(AssemblyHelpers&, Reg);
+
 private:
     Kind m_kind;
     union U {

Modified: trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp (194333 => 194334)


--- trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp	2015-12-21 18:40:10 UTC (rev 194333)
+++ trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToLLVM.cpp	2015-12-21 18:56:54 UTC (rev 194334)
@@ -58,6 +58,7 @@
 #include "ScopedArguments.h"
 #include "ScopedArgumentsTable.h"
 #include "ScratchRegisterAllocator.h"
+#include "SetupVarargsFrame.h"
 #include "VirtualRegister.h"
 #include "Watchdog.h"
 #include <atomic>
@@ -205,8 +206,8 @@
 #endif // FTL_USE_B3
 
         auto preOrder = m_graph.blocksInPreOrder();
-        
-        // If we have any CallVarargs then we need to have a spill slot for it.
+
+#if !FTL_USES_B3
         bool hasVarargs = false;
         size_t maxNumberOfCatchSpills = 0;
         for (DFG::BasicBlock* block : preOrder) {
@@ -270,10 +271,8 @@
             }
         }
 
-#if FTL_USES_B3
-        UNUSED_PARAM(hasVarargs);
-        // FIXME
-#else
+        // B3 doesn't need the varargs spill slot because we just use call arg area size as a way to
+        // request spill slots.
         if (hasVarargs) {
             LValue varargsSpillSlots = m_out.alloca(
                 arrayType(m_out.int64, JSCallVarargs::numSpillSlotsNeeded()));
@@ -284,6 +283,7 @@
                 m_out.int32Zero, varargsSpillSlots);
         }
 
+        // B3 doesn't need the exception spill slot because we just use the call arg area to request spill slots.
         if (m_graph.m_hasExceptionHandlers && maxNumberOfCatchSpills) {
             RegisterSet volatileRegisters = RegisterSet::volatileRegistersForJSCall();
             maxNumberOfCatchSpills = std::min(volatileRegisters.numberOfSetRegisters(), maxNumberOfCatchSpills);
@@ -296,7 +296,7 @@
                 m_out.constInt64(m_ftlState.exceptionHandlingSpillSlotStackmapID),
                 m_out.int32Zero, exceptionHandlingVolatileRegistersSpillSlots);
         }
-#endif
+#endif // !FTL_USES_B3
         
         // We should not create any alloca's after this point, since they will cease to
         // be mem2reg candidates.
@@ -5076,32 +5076,263 @@
     
     void compileCallOrConstructVarargs()
     {
-#if FTL_USES_B3
-        if (verboseCompilationEnabled() || !verboseCompilationEnabled())
-            CRASH();
-#else
+        Node* node = m_node;
         LValue jsCallee = lowJSValue(m_node->child1());
         LValue thisArg = lowJSValue(m_node->child3());
         
         LValue jsArguments = nullptr;
+        bool forwarding = false;
         
-        switch (m_node->op()) {
+        switch (node->op()) {
         case CallVarargs:
         case TailCallVarargs:
         case TailCallVarargsInlinedCaller:
         case ConstructVarargs:
-            jsArguments = lowJSValue(m_node->child2());
+            jsArguments = lowJSValue(node->child2());
             break;
         case CallForwardVarargs:
         case TailCallForwardVarargs:
         case TailCallForwardVarargsInlinedCaller:
         case ConstructForwardVarargs:
+            forwarding = true;
             break;
         default:
-            DFG_CRASH(m_graph, m_node, "bad node type");
+            DFG_CRASH(m_graph, node, "bad node type");
             break;
         }
         
+#if FTL_USES_B3
+        // FIXME: Need a story for exceptions.
+        // https://bugs.webkit.org/show_bug.cgi?id=151686
+
+        PatchpointValue* patchpoint = m_out.patchpoint(Int64);
+
+        // Append the forms of the arguments that we will use before any clobbering happens.
+        patchpoint->append(jsCallee, ValueRep::reg(GPRInfo::regT0));
+        if (jsArguments)
+            patchpoint->append(jsArguments, ValueRep::SomeRegister);
+        patchpoint->append(thisArg, ValueRep::SomeRegister);
+
+        if (!forwarding) {
+            // Now append them again for after clobbering. Note that the compiler may ask us to use a
+            // different register for the late for the post-clobbering version of the value. This gives
+            // the compiler a chance to spill these values without having to burn any callee-saves.
+            patchpoint->append(jsCallee, ValueRep::LateColdAny);
+            patchpoint->append(jsArguments, ValueRep::LateColdAny);
+            patchpoint->append(thisArg, ValueRep::LateColdAny);
+        }
+
+        patchpoint->clobber(RegisterSet::macroScratchRegisters());
+        patchpoint->clobberLate(RegisterSet::volatileRegistersForJSCall());
+        patchpoint->resultConstraint = ValueRep::reg(GPRInfo::returnValueGPR);
+
+        // This is the minimum amount of call arg area stack space that all JS->JS calls always have.
+        unsigned minimumJSCallAreaSize =
+            sizeof(CallerFrameAndPC) +
+            WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(EncodedJSValue));
+
+        m_proc.requestCallArgAreaSize(minimumJSCallAreaSize);
+        
+        CodeOrigin codeOrigin = codeOriginDescriptionOfCallSite();
+        State* state = &m_ftlState;
+        patchpoint->setGenerator(
+            [=] (CCallHelpers& jit, const StackmapGenerationParams& params) {
+                AllowMacroScratchRegisterUsage allowScratch(jit);
+                CallSiteIndex callSiteIndex =
+                    state->jitCode->common.addUniqueCallSiteIndex(codeOrigin);
+
+                // FIXME: We would ask the OSR exit descriptor to prepare and then we would modify
+                // the OSRExit data structure inside the OSRExitHandle to link it up to this call.
+                // Also, the exception checks JumpList should be linked to somewhere.
+                // https://bugs.webkit.org/show_bug.cgi?id=151686
+                CCallHelpers::JumpList exceptions;
+
+                jit.store32(
+                    CCallHelpers::TrustedImm32(callSiteIndex.bits()),
+                    CCallHelpers::tagFor(VirtualRegister(JSStack::ArgumentCount)));
+
+                CallLinkInfo* callLinkInfo = jit.codeBlock()->addCallLinkInfo();
+                CallVarargsData* data = node->callVarargsData();
+
+                unsigned argIndex = 1;
+                GPRReg calleeGPR = params[argIndex++].gpr();
+                ASSERT(calleeGPR == GPRInfo::regT0);
+                GPRReg argumentsGPR = jsArguments ? params[argIndex++].gpr() : InvalidGPRReg;
+                GPRReg thisGPR = params[argIndex++].gpr();
+
+                B3::ValueRep calleeLateRep;
+                B3::ValueRep argumentsLateRep;
+                B3::ValueRep thisLateRep;
+                if (!forwarding) {
+                    // If we're not forwarding then we'll need callee, arguments, and this after we
+                    // have potentially clobbered calleeGPR, argumentsGPR, and thisGPR. Our technique
+                    // for this is to supply all of those operands as late uses in addition to
+                    // specifying them as early uses. It's possible that the late use uses a spill
+                    // while the early use uses a register, and it's possible for the late and early
+                    // uses to use different registers. We do know that the late uses interfere with
+                    // all volatile registers and so won't use those, but the early uses may use
+                    // volatile registers and in the case of calleeGPR, it's pinned to regT0 so it
+                    // definitely will.
+                    //
+                    // Note that we have to be super careful with these. It's possible that these
+                    // use a shuffling of the registers used for calleeGPR, argumentsGPR, and
+                    // thisGPR. If that happens and we do for example:
+                    //
+                    //     calleeLateRep.emitRestore(jit, calleeGPR);
+                    //     argumentsLateRep.emitRestore(jit, calleeGPR);
+                    //
+                    // Then we might end up with garbage if calleeLateRep.gpr() == argumentsGPR and
+                    // argumentsLateRep.gpr() == calleeGPR.
+                    //
+                    // We do a variety of things to prevent this from happening. For example, we use
+                    // argumentsLateRep before needing the other two and after we've already stopped
+                    // using the *GPRs. Also, we pin calleeGPR to regT0, and rely on the fact that
+                    // the *LateReps cannot use volatile registers (so they cannot be regT0, so
+                    // calleeGPR != argumentsLateRep.gpr() and calleeGPR != thisLateRep.gpr()).
+                    //
+                    // An alternative would have been to just use early uses and early-clobber all
+                    // volatile registers. But that would force callee, arguments, and this into
+                    // callee-save registers even if we have to spill them. We don't want spilling to
+                    // use up three callee-saves.
+                    //
+                    // TL;DR: The way we use LateReps here is dangerous and barely works but achieves
+                    // some desirable performance properties, so don't mistake the cleverness for
+                    // elegance.
+                    calleeLateRep = params[argIndex++];
+                    argumentsLateRep = params[argIndex++];
+                    thisLateRep = params[argIndex++];
+                }
+
+                // Get some scratch registers.
+                RegisterSet usedRegisters;
+                usedRegisters.merge(RegisterSet::stackRegisters());
+                usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
+                usedRegisters.merge(RegisterSet::calleeSaveRegisters());
+                usedRegisters.set(calleeGPR);
+                if (argumentsGPR != InvalidGPRReg)
+                    usedRegisters.set(argumentsGPR);
+                usedRegisters.set(thisGPR);
+                if (calleeLateRep.isReg())
+                    usedRegisters.set(calleeLateRep.reg());
+                if (argumentsLateRep.isReg())
+                    usedRegisters.set(argumentsLateRep.reg());
+                if (thisLateRep.isReg())
+                    usedRegisters.set(thisLateRep.reg());
+                ScratchRegisterAllocator allocator(usedRegisters);
+                GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+                GPRReg scratchGPR2 = allocator.allocateScratchGPR();
+                GPRReg scratchGPR3 = forwarding ? allocator.allocateScratchGPR() : InvalidGPRReg;
+                RELEASE_ASSERT(!allocator.numberOfReusedRegisters());
+
+                auto callWithExceptionCheck = [&] (void* callee) {
+                    jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
+                    jit.call(GPRInfo::nonPreservedNonArgumentGPR);
+                    exceptions.append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
+                };
+
+                auto adjustStack = [&] (GPRReg amount) {
+                    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), amount, CCallHelpers::stackPointerRegister);
+                };
+
+                unsigned originalStackHeight = params.proc().frameSize();
+
+                if (forwarding) {
+                    jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
+                    
+                    CCallHelpers::JumpList slowCase;
+                    emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
+
+                    CCallHelpers::Jump done = jit.jump();
+                    slowCase.link(&jit);
+                    jit.setupArgumentsExecState();
+                    callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
+                    jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
+                    
+                    done.link(&jit);
+
+                    adjustStack(scratchGPR2);
+                } else {
+                    jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR1);
+                    jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
+                    callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));
+
+                    jit.move(GPRInfo::returnValueGPR, scratchGPR1);
+                    jit.move(CCallHelpers::TrustedImm32(originalStackHeight / sizeof(EncodedJSValue)), scratchGPR2);
+                    argumentsLateRep.emitRestore(jit, argumentsGPR);
+                    emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
+                    jit.addPtr(CCallHelpers::TrustedImm32(-minimumJSCallAreaSize), scratchGPR2, CCallHelpers::stackPointerRegister);
+                    jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
+                    callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));
+                    
+                    adjustStack(GPRInfo::returnValueGPR);
+
+                    calleeLateRep.emitRestore(jit, GPRInfo::regT0);
+
+                    // This may not emit code if thisGPR got a callee-save. Also, we're guaranteed
+                    // that thisGPR != GPRInfo::regT0 because regT0 interferes with it.
+                    thisLateRep.emitRestore(jit, thisGPR);
+                }
+                
+                jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(JSStack::Callee));
+                jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));
+                
+                CallLinkInfo::CallType callType;
+                if (node->op() == ConstructVarargs || node->op() == ConstructForwardVarargs)
+                    callType = CallLinkInfo::ConstructVarargs;
+                else if (node->op() == TailCallVarargs || node->op() == TailCallForwardVarargs)
+                    callType = CallLinkInfo::TailCallVarargs;
+                else
+                    callType = CallLinkInfo::CallVarargs;
+                
+                bool isTailCall = CallLinkInfo::callModeFor(callType) == CallMode::Tail;
+                
+                CCallHelpers::DataLabelPtr targetToCheck;
+                CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
+                    CCallHelpers::NotEqual, GPRInfo::regT0, targetToCheck,
+                    CCallHelpers::TrustedImmPtr(0));
+                
+                CCallHelpers::Call fastCall;
+                CCallHelpers::Jump done;
+                
+                if (isTailCall) {
+                    jit.prepareForTailCallSlow();
+                    fastCall = jit.nearTailCall();
+                } else {
+                    fastCall = jit.nearCall();
+                    done = jit.jump();
+                }
+                
+                slowPath.link(&jit);
+                
+                jit.move(CCallHelpers::TrustedImmPtr(callLinkInfo), GPRInfo::regT2);
+                CCallHelpers::Call slowCall = jit.nearCall();
+                
+                if (isTailCall)
+                    jit.abortWithReason(JITDidReturnFromTailCall);
+                else
+                    done.link(&jit);
+                
+                callLinkInfo->setUpCall(callType, node->origin.semantic, GPRInfo::regT0);
+                
+                jit.addPtr(
+                    CCallHelpers::TrustedImm32(-originalStackHeight),
+                    GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
+                
+                jit.addLinkTask(
+                    [=] (LinkBuffer& linkBuffer) {
+                        MacroAssemblerCodePtr linkCall =
+                            linkBuffer.vm().getCTIStub(linkCallThunkGenerator).code();
+                        linkBuffer.link(slowCall, FunctionPtr(linkCall.executableAddress()));
+                        
+                        callLinkInfo->setCallLocations(
+                            linkBuffer.locationOfNearCall(slowCall),
+                            linkBuffer.locationOf(targetToCheck),
+                            linkBuffer.locationOfNearCall(fastCall));
+                    });
+            });
+
+        setJSValue(patchpoint);
+#else
         unsigned stackmapID = m_stackmapIDs++;
         
         StackmapArgumentList arguments;
@@ -5115,15 +5346,15 @@
 
         arguments.insert(0, m_out.constInt32(2 + !!jsArguments));
         arguments.insert(0, constNull(m_out.ref8));
-        arguments.insert(0, m_out.constInt32(sizeOfICFor(m_node)));
+        arguments.insert(0, m_out.constInt32(sizeOfICFor(node)));
         arguments.insert(0, m_out.constInt64(stackmapID));
         
         LValue call = m_out.call(m_out.int64, m_out.patchpointInt64Intrinsic(), arguments);
         setInstructionCallingConvention(call, LLVMCCallConv);
         
-        m_ftlState.jsCallVarargses.append(JSCallVarargs(stackmapID, m_node, codeOriginDescriptionOfCallSite()));
+        m_ftlState.jsCallVarargses.append(JSCallVarargs(stackmapID, node, codeOriginDescriptionOfCallSite()));
 
-        switch (m_node->op()) {
+        switch (node->op()) {
         case TailCallVarargs:
         case TailCallForwardVarargs:
             m_out.unreachable();
@@ -5495,7 +5726,7 @@
         DFG_ASSERT(m_graph, m_node, m_origin.exitOK);
         
 #if FTL_USES_B3
-        B3::PatchpointValue* patchpoint = m_out.patchpoint(Void);
+        PatchpointValue* patchpoint = m_out.patchpoint(Void);
         OSRExitDescriptor* descriptor = appendOSRExitDescriptor(noValue(), nullptr);
         NodeOrigin origin = m_origin;
         patchpoint->appendColdAnys(buildExitArguments(descriptor, origin.forExit, noValue()));

Modified: trunk/Source/JavaScriptCore/ftl/FTLState.h (194333 => 194334)


--- trunk/Source/JavaScriptCore/ftl/FTLState.h	2015-12-21 18:40:10 UTC (rev 194333)
+++ trunk/Source/JavaScriptCore/ftl/FTLState.h	2015-12-21 18:56:54 UTC (rev 194334)
@@ -85,8 +85,6 @@
     B3::PatchpointValue* handleStackOverflowExceptionValue { nullptr };
     B3::PatchpointValue* handleExceptionValue { nullptr };
     B3::StackSlotValue* capturedValue { nullptr };
-    B3::StackSlotValue* varargsSpillSlotsValue { nullptr };
-    B3::StackSlotValue* exceptionHandlingSpillSlotValue { nullptr };
 #else // FTL_USES_B3
     unsigned handleStackOverflowExceptionStackmapID { UINT_MAX };
     unsigned handleExceptionStackmapID { UINT_MAX };

Added: trunk/Source/JavaScriptCore/tests/stress/varargs-no-forward.js (0 => 194334)


--- trunk/Source/JavaScriptCore/tests/stress/varargs-no-forward.js	                        (rev 0)
+++ trunk/Source/JavaScriptCore/tests/stress/varargs-no-forward.js	2015-12-21 18:56:54 UTC (rev 194334)
@@ -0,0 +1,18 @@
+function foo(a, b, c) {
+    return a + b * 2 + c * 3;
+}
+
+noInline(foo);
+
+function baz(args) {
+    return foo.apply(this, args);
+}
+
+noInline(baz);
+
+for (var i = 0; i < 10000; ++i) {
+    var result = baz([5, 6, 7]);
+    if (result != 5 + 6 * 2 + 7 * 3)
+        throw "Error: bad result: " + result;
+}
+

Added: trunk/Source/JavaScriptCore/tests/stress/varargs-simple.js (0 => 194334)


--- trunk/Source/JavaScriptCore/tests/stress/varargs-simple.js	                        (rev 0)
+++ trunk/Source/JavaScriptCore/tests/stress/varargs-simple.js	2015-12-21 18:56:54 UTC (rev 194334)
@@ -0,0 +1,18 @@
+function foo(a, b, c) {
+    return a + b * 2 + c * 3;
+}
+
+noInline(foo);
+
+function baz() {
+    return foo.apply(this, arguments);
+}
+
+noInline(baz);
+
+for (var i = 0; i < 10000; ++i) {
+    var result = baz(5, 6, 7);
+    if (result != 5 + 6 * 2 + 7 * 3)
+        throw "Error: bad result: " + result;
+}
+

Added: trunk/Source/JavaScriptCore/tests/stress/varargs-two-level.js (0 => 194334)


--- trunk/Source/JavaScriptCore/tests/stress/varargs-two-level.js	                        (rev 0)
+++ trunk/Source/JavaScriptCore/tests/stress/varargs-two-level.js	2015-12-21 18:56:54 UTC (rev 194334)
@@ -0,0 +1,22 @@
+function foo(a, b, c) {
+    return a + b * 2 + c * 3;
+}
+
+noInline(foo);
+
+function bar() {
+    return foo.apply(this, arguments);
+}
+
+function baz() {
+    return bar.apply(this, arguments);
+}
+
+noInline(baz);
+
+for (var i = 0; i < 10000; ++i) {
+    var result = baz(5, 6, 7);
+    if (result != 5 + 6 * 2 + 7 * 3)
+        throw "Error: bad result: " + result;
+}
+
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to