Title: [161234] branches/jsCStack/Source/JavaScriptCore

Revision: 161234
Author: [email protected]
Date: 2014-01-02 15:28:36 -0800 (Thu, 02 Jan 2014)

Log Message

CStack LLInt 32-bit should build
https://bugs.webkit.org/show_bug.cgi?id=126409

Not yet reviewed.
        
And the stuff we do to make it build should at least try to be correct.

* llint/LowLevelInterpreter32_64.asm:
* llint/LowLevelInterpreter64.asm:

Modified Paths

branches/jsCStack/Source/JavaScriptCore/ChangeLog
branches/jsCStack/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
branches/jsCStack/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm

Diff

Modified: branches/jsCStack/Source/JavaScriptCore/ChangeLog (161233 => 161234)


--- branches/jsCStack/Source/JavaScriptCore/ChangeLog	2014-01-02 23:24:34 UTC (rev 161233)
+++ branches/jsCStack/Source/JavaScriptCore/ChangeLog	2014-01-02 23:28:36 UTC (rev 161234)
@@ -1,3 +1,15 @@
+2014-01-02  Filip Pizlo  <[email protected]>
+
+        CStack LLInt 32-bit should build
+        https://bugs.webkit.org/show_bug.cgi?id=126409
+
+        Not yet reviewed.
+        
+        And the stuff we do to make it build should at least try to be correct.
+
+        * llint/LowLevelInterpreter32_64.asm:
+        * llint/LowLevelInterpreter64.asm:
+
 2014-01-02  Mark Lam  <[email protected]>
 
         CStack: Interpreter::executeCall() should check for exceptions after calling callToNativeFunction().

Modified: branches/jsCStack/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm (161233 => 161234)


--- branches/jsCStack/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm	2014-01-02 23:24:34 UTC (rev 161233)
+++ branches/jsCStack/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm	2014-01-02 23:28:36 UTC (rev 161234)
@@ -145,13 +145,12 @@
     move t1, cfr
 end
 
-macro doCallToJavaScript(makeCall, doReturn)
+macro doCallToJavaScript(makeCall)
     if X86
         const entry = t4
-        const vmTopCallFrame = t2
+        const vm = t2
         const protoCallFrame = t5
 
-        const extraStackSpace = 28
         const previousCFR = t0
         const previousPC = t1
         const temp1 = t0 # Same as previousCFR
@@ -160,9 +159,8 @@
         const temp4 = t3
     elsif ARM or ARMv7_TRADITIONAL
         const entry = a0
-        const vmTopCallFrame = a1
+        const vm = a1
         const protoCallFrame = a2
-        const topOfStack = a3
 
         const extraStackSpace = 16
         const previousCFR = t3
@@ -173,9 +171,8 @@
         const temp4 = t4
     elsif ARMv7
         const entry = a0
-        const vmTopCallFrame = a1
+        const vm = a1
         const protoCallFrame = a2
-        const topOfStack = a3
 
         const extraStackSpace = 28
         const previousCFR = t3
@@ -216,45 +213,73 @@
         loadp [sp], previousPC
         move cfr, previousCFR
     end
-    functionPrologue(extraStackSpace)
     if X86
-        loadp extraStackSpace+20[sp], entry
-        loadp extraStackSpace+24[sp], vmTopCallFrame
-        loadp extraStackSpace+28[sp], protoCallFrame
-        loadp extraStackSpace+32[sp], cfr
+        loadp 20[sp], entry
+        loadp 24[sp], vm
+        loadp 28[sp], protoCallFrame
     else
         move cfr, previousCFR
         move topOfStack, cfr
     end
+    callToJavaScriptPrologue()
+    
+    checkStackPointerAlignment(temp2, 0xbad0dc01)
 
-    subp (CallFrameHeaderSlots-1)*8, cfr
-    storep 0, ArgumentCount+4[cfr]
+    # The jsStackLimit was previously computed in VMEntryScope using an
+    # estimated stackPointerAtVMEntry value. Adjust the jsStackLimit by
+    # the delta between the actual stackPointerAtVMEntry and the estimate
+    # that we used previously.
+    subp VM::stackPointerAtVMEntry[vm], sp, temp2
+    subp VM::m_jsStackLimit[vm], temp2, temp2
+    storep temp2, VM::m_jsStackLimit[vm]
+    storep sp, VM::stackPointerAtVMEntry[vm]
+
+    # The stack reserved zone ensures that we have adequate space for the
+    # VMEntrySentinelFrame. Proceed with allocating and initializing the
+    # sentinel frame.
+    move sp, cfr
+    subp CallFrameHeaderSlots * 8, cfr
     storep 0, ArgumentCount[cfr]
-    storep 0, Callee+4[cfr]
-    storep vmTopCallFrame, Callee[cfr]
-    loadp [vmTopCallFrame], temp4
-    storep 0, ScopeChain+4[cfr]
-    storep temp4, ScopeChain[cfr]
-    storep 0, CodeBlock+4[cfr]
+    storep vm, Callee[cfr]
+    loadp VM::topCallFrame[vm], temp2
+    storep temp2, ScopeChain[cfr]
     storep 1, CodeBlock[cfr]
     storep previousPC, ReturnPC[cfr]
     storep previousCFR, CallerFrame[cfr]
-    move cfr, temp1
 
     loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2
     addp CallFrameHeaderSlots, temp2, temp2
     lshiftp 3, temp2
-    subp temp2, cfr
-    storep temp1, CallerFrame[cfr]
+    subp cfr, temp2, temp1
 
+    # Ensure that we have enough additional stack capacity for the incoming args,
+    # and the frame for the JS code we're executing. We need to do this check
+    # before we start copying the args from the protoCallFrame below.
+    bpaeq temp1, VM::m_jsStackLimit[vm], .stackHeightOK
+
+    move cfr, sp
+
+    if C_LOOP
+    # FIXME: Need to call stack check here to see if we can grow the stack.
+    # Will need to preserve registers so that we can recover if we do not end
+    # up throwing a StackOverflowError.
+    end
+
+    storep 0, VM::stackPointerAtVMEntry[vm]
+    cCall2(_llint_throw_stack_overflow_error, vm, protoCallFrame)
+    callToJavaScriptEpilogue()
+    ret
+
+.stackHeightOK:
+    move temp1, sp
     move 5, temp1
 
 .copyHeaderLoop:
     subi 1, temp1
-    loadp [protoCallFrame, temp1, 8], temp3
-    storep temp3, CodeBlock[cfr, temp1, 8]
-    loadp 4[protoCallFrame, temp1, 8], temp3
-    storep temp3, CodeBlock+4[cfr, temp1, 8]
+    loadi TagOffset[protoCallFrame, temp1, 8], temp3
+    storei temp3, TagOffset + CodeBlock[sp, temp1, 8]
+    loadi PayloadOffset[protoCallFrame, temp1, 8], temp3
+    storei temp3, PayloadOffset + CodeBlock[sp, temp1, 8]
     btinz temp1, .copyHeaderLoop
 
     loadi ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
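
The comments in the hunk above compress the key reasoning of the new entry
sequence. A minimal C sketch of what the limit rebasing and the capacity
check amount to, assuming a downward-growing stack; the VM field names come
from the diff, while the helper names and the header-slot constant are
illustrative:

    #include <stddef.h>

    enum { CallFrameHeaderSlots = 6 };  /* illustrative; see the interpreter headers */

    struct VM {
        char* stackPointerAtVMEntry;  /* estimate recorded by VMEntryScope */
        char* m_jsStackLimit;         /* limit computed from that estimate */
    };

    /* Shift the limit by (estimate - actual) so the usable stack depth is
       unchanged, then record the actual entry stack pointer. */
    static void rebaseStackLimit(struct VM* vm, char* actualSP)
    {
        ptrdiff_t delta = vm->stackPointerAtVMEntry - actualSP;
        vm->m_jsStackLimit -= delta;
        vm->stackPointerAtVMEntry = actualSP;
    }

    /* The check done before copying arguments: the prospective stack
       pointer (cfr minus the full frame size) must stay at or above the
       limit, mirroring "bpaeq temp1, VM::m_jsStackLimit[vm]". */
    static int stackHeightOK(const struct VM* vm, char* cfr, size_t paddedArgCount)
    {
        size_t frameBytes = (paddedArgCount + CallFrameHeaderSlots) * 8;
        return cfr - frameBytes >= vm->m_jsStackLimit;
    }
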
@@ -263,12 +288,10 @@
     subi 1, temp3
 
     bieq temp2, temp3, .copyArgs
-    move 0, temp1
-    move UndefinedTag, temp4
 .fillExtraArgsLoop:
     subi 1, temp3
-    storep temp1, ThisArgumentOffset+8+PayloadOffset[cfr, temp3, 8]
-    storep temp4, ThisArgumentOffset+8+TagOffset[cfr, temp3, 8]
+    storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, temp3, 8]
+    storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, temp3, 8]
     bineq temp2, temp3, .fillExtraArgsLoop
 
 .copyArgs:
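
The paired loadi/storei instructions in this hunk reflect the JSVALUE32_64
encoding: each 64-bit stack slot is a 32-bit payload plus a 32-bit tag. A
sketch of that layout on a little-endian target (the concrete tag values
live in runtime/JSCJSValue.h and are elided here):

    #include <stdint.h>

    /* One JSVALUE32_64 stack slot: payload at PayloadOffset (0) and tag
       at TagOffset (4) on little-endian targets. */
    struct EncodedSlot {
        int32_t payload;
        int32_t tag;
    };

    /* What each .fillExtraArgsLoop iteration does to one slot. */
    static void storeUndefined(struct EncodedSlot* slot, int32_t undefinedTag)
    {
        slot->tag = undefinedTag;  /* UndefinedTag, from JSCJSValue.h */
        slot->payload = 0;
    }
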
@@ -277,29 +300,33 @@
 .copyArgsLoop:
     btiz temp2, .copyArgsDone
     subi 1, temp2
-    loadp PayloadOffset[temp1, temp2, 8], temp3
-    loadp TagOffset[temp1, temp2, 8], temp4
-    storep temp3, ThisArgumentOffset+8+PayloadOffset[cfr, temp2, 8]
-    storep temp4, ThisArgumentOffset+8+TagOffset[cfr, temp2, 8]
+    loadi TagOffset[temp1, temp2, 8], temp3
+    storei temp3, ThisArgumentOffset + 8 + TagOffset[sp, temp2, 8]
+    loadi PayloadOffset[temp1, temp2, 8], temp3
+    storei temp3, ThisArgumentOffset + 8 + PayloadOffset[sp, temp2, 8]
     jmp .copyArgsLoop
 
 .copyArgsDone:
-    if X86
-        loadp extraStackSpace+24[sp], vmTopCallFrame
-    end
-    storep cfr, [vmTopCallFrame]
+    storep sp, VM::topCallFrame[vm]
 
+    checkStackPointerAlignment(temp3, 0xbad0dc02)
+
     makeCall(entry, temp1)
 
+    checkStackPointerAlignment(temp3, 0xbad0dc03)
+
     bpeq CodeBlock[cfr], 1, .calleeFramePopped
     loadp CallerFrame[cfr], cfr
 
 .calleeFramePopped:
-    loadp Callee[cfr], temp3 # VM.topCallFrame
-    loadp ScopeChain[cfr], temp4
-    storep temp4, [temp3]
+    loadp Callee[cfr], temp2 # VM
+    loadp ScopeChain[cfr], temp3 # previous topCallFrame
+    storep temp3, VM::topCallFrame[temp2]
 
-    doReturn(extraStackSpace)
+    checkStackPointerAlignment(temp3, 0xbad0dc04)
+
+    callToJavaScriptEpilogue()
+    ret
 end
 
 macro makeJavaScriptCall(entry, temp)
@@ -330,8 +357,8 @@
     loadp CallerFrame + PayloadOffset[cfr], cfr
 
     loadp Callee + PayloadOffset[cfr], t3 # VM.topCallFrame
-    loadp ScopeChain + PayloadOffset[cfr], t6
-    storep t6, [t3]
+    loadp ScopeChain + PayloadOffset[cfr], t5
+    storep t5, [t3]
 
     callToJavaScriptEpilogue()
     ret
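
For context: the fully converted exit path (.calleeFramePopped, earlier in
this diff) treats the sentinel frame's Callee slot as the VM itself, while
this epilogue still reads it as a pointer to VM.topCallFrame. Both
conventions, sketched in C (struct and helper names are illustrative):

    struct VM { void* topCallFrame; };

    /* Converted path: the Callee slot holds the VM*, and the ScopeChain
       slot holds the topCallFrame saved on entry. */
    static void restoreViaVM(struct VM* vm, void* previousTopCallFrame)
    {
        vm->topCallFrame = previousTopCallFrame;
    }

    /* This path: the Callee slot still holds &vm->topCallFrame directly. */
    static void restoreViaPointer(void** topCallFrameSlot, void* previousTopCallFrame)
    {
        *topCallFrameSlot = previousTopCallFrame;
    }
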
@@ -498,14 +525,30 @@
     loadi PayloadOffset + ArgumentCount[cfr], t0
     biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
     cCall2(slowPath, cfr, PC)   # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error
-    btiz t0, .isArityFixupNeeded
+    btiz t0, .noError
     move t1, cfr   # t1 contains caller frame
     jmp _llint_throw_from_slow_path_trampoline
 
-.isArityFixupNeeded:
+.noError:
+    # t1 points to ArityCheckData.
+    loadp CommonSlowPaths::ArityCheckData::thunkToCall[t1], t2
+    btpz t2, .proceedInline
+    
+    loadp CommonSlowPaths::ArityCheckData::returnPC[t1], t5
+    loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t0
+    call t2
+    if ASSERT_ENABLED
+        loadp ReturnPC[cfr], t0
+        loadp [t0], t0
+    end
+    jmp .continue
+
+.proceedInline:
+    loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[t1], t1
     btiz t1, .continue
 
-    // Move frame up "t1" slots
+    // Move frame up "t1 * 2" slots
+    lshiftp 1, t1
     negi t1
     move cfr, t3
     loadi PayloadOffset + ArgumentCount[cfr], t2
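
The new .proceedInline path slides the whole frame toward lower addresses
by paddedStackSpace slots so the missing parameters can be filled with
undefined; the copy loop works in 4-byte words on this 32-bit build, hence
the "t1 * 2" doubling. Roughly, in C (a sketch only: memmove stands in for
the word-by-word loop, and cfr/sp are rebased in the next hunk):

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Move a frame of 'frameWordCount' 4-byte words down by
       'paddedStackSpace' 8-byte slots, i.e. paddedStackSpace * 2 words. */
    static uint32_t* slideFrameForArityFixup(uint32_t* frame, size_t frameWordCount,
                                             size_t paddedStackSpace)
    {
        uint32_t* newFrame = frame - paddedStackSpace * 2;
        memmove(newFrame, frame, frameWordCount * sizeof(uint32_t));
        return newFrame;  /* the caller must rebase cfr and sp by the same delta */
    }
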
@@ -530,6 +573,7 @@
 
     lshiftp 3, t1
     addp t1, cfr
+    addp t1, sp
 .continue:
     # Reload CodeBlock and PC, since the slow_path clobbered it.
     loadp CodeBlock[cfr], t1
@@ -537,7 +581,6 @@
     jmp doneLabel
 end
 
-
 macro branchIfException(label)
     loadp ScopeChain[cfr], t3
     andp MarkedBlockMask, t3
@@ -550,6 +593,7 @@
 
 _llint_op_enter:
     traceExecution()
+    checkStackPointerAlignment(t2, 0xdead00e1)
     loadp CodeBlock[cfr], t2                // t2<CodeBlock> = cfr.CodeBlock
     loadi CodeBlock::m_numVars[t2], t2      // t2<size_t> = t2<CodeBlock>.m_numVars
     btiz t2, .opEnterDone
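
checkStackPointerAlignment is used throughout these changes as a debug
tripwire. Roughly what it does, in C (an approximation: the real macro is
offlineasm, and the 16-byte figure assumes the usual stack alignment):

    #include <stdint.h>
    #include <stdlib.h>

    /* Crash if sp is not 16-byte aligned; 'tag' (e.g. 0xdead00e1)
       identifies the failing check site in the crash state. */
    static void checkStackPointerAlignment(uintptr_t sp, uint32_t tag)
    {
        if (sp & 0xf) {
            (void)tag;  /* in the real macro, the tag is left in a register */
            abort();    /* the real macro uses a break/trap instruction */
        }
    }
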
@@ -1858,12 +1902,11 @@
     storei t0, ScopeChain + PayloadOffset[t3]
     loadi 12[PC], t2
     storei PC, ArgumentCount + TagOffset[cfr]
-    storep cfr, CallerFrame[t3]
     storei t2, ArgumentCount + PayloadOffset[t3]
     storei CellTag, Callee + TagOffset[t3]
     storei CellTag, ScopeChain + TagOffset[t3]
-    move t3, cfr
-    callTargetFunction(t1)
+    addp CallerFrameAndPCSize, t3
+    callTargetFunction(t1, t3)
 
 .opCallSlow:
     slowPathForCall(slowPath)
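
The reworked call sequence above no longer links the callee frame by hand
(the "storep cfr, CallerFrame[t3]" is gone) and no longer moves cfr itself.
Instead, t3 is advanced past the CallerFrameAndPC pair before the call, so
the call instruction and the callee's prologue populate those two slots,
as in a native calling convention. Schematically (4-byte slots per this
32-bit build; a sketch, not the authoritative layout):

    t3 after addp  ->  callee frame header (CodeBlock, ScopeChain, ...)
    t3 - 4         ->  return PC, stored by the call itself
    t3 - 8         ->  caller's cfr, stored by the callee's prologue
    (CallerFrameAndPCSize covers these last two slots)
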
@@ -2056,6 +2099,8 @@
 
 
 macro nativeCallTrampoline(executableOffsetToFunction)
+
+    functionPrologue()
     storep 0, CodeBlock[cfr]
     loadp CallerFrame[cfr], t0
     loadi ScopeChain + PayloadOffset[t0], t1
@@ -2066,15 +2111,12 @@
         andp MarkedBlockMask, t3
         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
         storep cfr, VM::topCallFrame[t3]
-        peek 0, t1
-        storep t1, ReturnPC[cfr]
         move cfr, t2  # t2 = ecx
-        subp 16 - 4, sp
+        subp 16 - 8, sp
         loadi Callee + PayloadOffset[cfr], t1
         loadp JSFunction::m_executable[t1], t1
-        move t0, cfr
         call executableOffsetToFunction[t1]
-        addp 16 - 4, sp
+        addp 16 - 8, sp
         loadp ScopeChain[cfr], t3
         andp MarkedBlockMask, t3
         loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
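
The 16 - 4 => 16 - 8 change is alignment bookkeeping. With the new
functionPrologue(), 8 bytes are already on the stack at this point (the
4-byte return PC plus the 4-byte saved frame pointer), so the worked
arithmetic, assuming the ABI's 16-byte call-site alignment, is:

    pushed on entry:     4 (return PC) + 4 (saved ebp) = 8 bytes
    extra reservation:   16 - 8 = 8 bytes
    total displacement:  8 + 8 = 16 bytes  ->  alignment preserved

The old code reserved 16 - 4 because only the return PC had been pushed.
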
@@ -2118,11 +2160,12 @@
     else
         error
     end
+    
+    functionEpilogue()
     bineq VM::m_exception + TagOffset[t3], EmptyValueTag, .handleException
     ret
 
 .handleException:
-    preserveReturnAddressAfterCall(t1) # This is really only needed on X86
     storep cfr, VM::topCallFrame[t3]
     restoreStackPointerAfterCall()
     jmp _llint_throw_from_slow_path_trampoline

Modified: branches/jsCStack/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm (161233 => 161234)


--- branches/jsCStack/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm	2014-01-02 23:24:34 UTC (rev 161233)
+++ branches/jsCStack/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm	2014-01-02 23:28:36 UTC (rev 161234)
@@ -134,7 +134,7 @@
     # VMEntrySentinelFrame. Proceed with allocating and initializing the
     # sentinel frame.
     move sp, cfr
-    subp CallFrameHeaderSlots*8, cfr
+    subp CallFrameHeaderSlots * 8, cfr
     storep 0, ArgumentCount[cfr]
     storep vm, Callee[cfr]
     loadp VM::topCallFrame[vm], temp2
@@ -172,8 +172,8 @@
 
 .copyHeaderLoop:
     subi 1, temp1
-    loadp [protoCallFrame, temp1, 8], temp3
-    storep temp3, CodeBlock[sp, temp1, 8]
+    loadq [protoCallFrame, temp1, 8], temp3
+    storeq temp3, CodeBlock[sp, temp1, 8]
     btinz temp1, .copyHeaderLoop
 
     loadi ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2
@@ -185,7 +185,7 @@
     move ValueUndefined, temp1
 .fillExtraArgsLoop:
     subi 1, temp3
-    storep temp1, ThisArgumentOffset+8[sp, temp3, 8]
+    storeq temp1, ThisArgumentOffset + 8[sp, temp3, 8]
     bineq temp2, temp3, .fillExtraArgsLoop
 
 .copyArgs:
@@ -194,8 +194,8 @@
 .copyArgsLoop:
     btiz temp2, .copyArgsDone
     subi 1, temp2
-    loadp [temp1, temp2, 8], temp3
-    storep temp3, ThisArgumentOffset+8[sp, temp2, 8]
+    loadq [temp1, temp2, 8], temp3
+    storeq temp3, ThisArgumentOffset + 8[sp, temp2, 8]
     jmp .copyArgsLoop
 
 .copyArgsDone:
@@ -257,8 +257,8 @@
     loadp CallerFrame[cfr], cfr
 
     loadp Callee[cfr], t3 # VM
-    loadp ScopeChain[cfr], t6 # previous topCallFrame
-    storep t6, VM::topCallFrame[t3]
+    loadp ScopeChain[cfr], t5 # previous topCallFrame
+    storep t5, VM::topCallFrame[t3]
 
     callToJavaScriptEpilogue()
     ret
@@ -429,7 +429,6 @@
     jmp doneLabel
 end
 
-
 macro branchIfException(label)
     loadp ScopeChain[cfr], t3
     andp MarkedBlockMask, t3
@@ -1874,10 +1873,7 @@
 
 
 _llint_throw_during_call_trampoline:
-if C_LOOP
-else
-    pop t2
-end
+    preserveReturnAddressAfterCall(t2)
     jmp _llint_throw_from_slow_path_trampoline
 
 