Revision: 16649
Author:   [email protected]
Date:     Wed Sep 11 12:39:00 2013 UTC
Log:      Pass PC offset into runtime when compiling for OSR.

[email protected]
BUG=

Review URL: https://codereview.chromium.org/23842004
http://code.google.com/p/v8/source/detail?r=16649

Modified:
 /branches/bleeding_edge/src/arm/builtins-arm.cc
 /branches/bleeding_edge/src/compiler.cc
 /branches/bleeding_edge/src/compiler.h
 /branches/bleeding_edge/src/ia32/builtins-ia32.cc
 /branches/bleeding_edge/src/mips/builtins-mips.cc
 /branches/bleeding_edge/src/optimizing-compiler-thread.cc
 /branches/bleeding_edge/src/optimizing-compiler-thread.h
 /branches/bleeding_edge/src/runtime-profiler.cc
 /branches/bleeding_edge/src/runtime.cc
 /branches/bleeding_edge/src/runtime.h
 /branches/bleeding_edge/src/x64/builtins-x64.cc

=======================================
--- /branches/bleeding_edge/src/arm/builtins-arm.cc Mon Sep 9 16:34:40 2013 UTC
+++ /branches/bleeding_edge/src/arm/builtins-arm.cc Wed Sep 11 12:39:00 2013 UTC
@@ -966,13 +966,22 @@


 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  // Lookup the function in the JavaScript frame and push it as an
-  // argument to the on-stack replacement function.
+  // Lookup the function in the JavaScript frame.
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
+    // Lookup and calculate pc offset.
+    __ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+    __ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+    __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ sub(r1, r1, r2);
+    __ SmiTag(r1);
+
+    // Pass both function and pc offset as arguments.
     __ push(r0);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+    __ push(r1);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
   }

   // If the code object is null, just return to the unoptimized code.
=======================================
--- /branches/bleeding_edge/src/compiler.cc     Tue Sep 10 14:30:36 2013 UTC
+++ /branches/bleeding_edge/src/compiler.cc     Wed Sep 11 12:39:00 2013 UTC
@@ -1118,166 +1118,6 @@
   return (status == OptimizingCompiler::SUCCEEDED) ? info->code()
                                                    : Handle<Code>::null();
 }
-
-
-static uint32_t CurrentPcOffset(Isolate* isolate,
-                                Handle<JSFunction> function,
-                                Handle<Code> unoptimized) {
-  JavaScriptFrameIterator it(isolate);
-  JavaScriptFrame* frame = it.frame();
-  ASSERT(frame->function() == *function);
-  ASSERT(frame->LookupCode() == *unoptimized);
-  ASSERT(unoptimized->contains(frame->pc()));
-
-  // Use linear search of the unoptimized code's back edge table to find
-  // the AST id matching the PC.
- return static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start());
-}
-
-
-static bool IsSuitableForOnStackReplacement(Isolate* isolate,
-                                            Handle<JSFunction> function,
-                                            Handle<Code> unoptimized) {
-  // Keep track of whether we've succeeded in optimizing.
-  if (!unoptimized->optimizable()) return false;
-  // If we are trying to do OSR when there are already optimized
-  // activations of the function, it means (a) the function is directly or
-  // indirectly recursive and (b) an optimized invocation has been
-  // deoptimized so that we are currently in an unoptimized activation.
-  // Check for optimized activations of this function.
-  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
-    JavaScriptFrame* frame = it.frame();
- if (frame->is_optimized() && frame->function() == *function) return false;
-  }
-
-  return true;
-}
-
-
-Handle<Code> Compiler::CompileForOnStackReplacement(
-      Handle<JSFunction> function) {
-  Isolate* isolate = function->GetIsolate();
-  Handle<Code> unoptimized(function->shared()->code(), isolate);
-
-  Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
-  if (FLAG_trace_osr) {
-    PrintF("[OSR - restored original interrupt calls in ");
-    function->PrintName();
-    PrintF("]\n");
-  }
-
-  if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
-    // Find the PC offset in unoptimized code and translate to an AST id.
-    uint32_t pc_offset = CurrentPcOffset(isolate, function, unoptimized);
-    BailoutId ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
-    ASSERT(!ast_id.IsNone());
-    if (FLAG_trace_osr) {
-      PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
-      function->PrintName();
-      PrintF("]\n");
-    }
-
-    // Attempt OSR compilation.
-    Handle<Code> result = JSFunction::CompileOsr(
-        function, ast_id, CLEAR_EXCEPTION);
-
-    if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
-      // OSR compilation succeeded.
-      DeoptimizationInputData* data =
-          DeoptimizationInputData::cast(result->deoptimization_data());
-      if (FLAG_trace_osr) {
-        PrintF("[OSR - entry, offset %d in optimized code]\n",
-            data->OsrPcOffset()->value());
-      }
-      ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
-      return result;
-    }
-  }
-
-  if (FLAG_trace_osr) {
-    PrintF("[OSR - attempt failed for ");
-    function->PrintName();
-    PrintF("]\n");
-  }
-  return Handle<Code>::null();
-}
-
-
-Handle<Code> Compiler::CompileForConcurrentOSR(Handle<JSFunction> function) {
-  Isolate* isolate = function->GetIsolate();
-  Handle<Code> unoptimized(function->shared()->code(), isolate);
-
-  uint32_t pc_offset = CurrentPcOffset(isolate, function, unoptimized);
-
-  if (isolate->optimizing_compiler_thread()->
-          IsQueuedForOSR(function, pc_offset)) {
- // Still waiting for the optimizing compiler thread to finish. Carry on.
-    if (FLAG_trace_osr) {
-      PrintF("[COSR - polling recompile tasks for ");
-      function->PrintName();
-      PrintF("]\n");
-    }
-    return Handle<Code>::null();
-  }
-
-  OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
- FindReadyOSRCandidate(function, pc_offset);
-
-  if (compiler != NULL) {
-    BailoutId ast_id = compiler->info()->osr_ast_id();
-
-    if (FLAG_trace_osr) {
-      PrintF("[COSR - optimization complete for ");
-      function->PrintName();
-      PrintF(", restoring interrupt calls]\n");
-    }
-    Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
-
-    // TODO(titzer): don't install the OSR code into the function.
-    Handle<Code> result = InstallOptimizedCode(compiler);
-
-    isolate->optimizing_compiler_thread()->RemoveStaleOSRCandidates();
-
-    if (result.is_null()) {
-      if (FLAG_trace_osr) {
-        PrintF("[COSR - optimization failed for ");
-        function->PrintName();
-        PrintF("]\n");
-      }
-      return Handle<Code>::null();
-    }
- // Check the result matches our expectations, and don't use it otherwise.
-    if (result->kind() == Code::OPTIMIZED_FUNCTION) {
-      DeoptimizationInputData* data =
-          DeoptimizationInputData::cast(result->deoptimization_data());
-
-      if (data->OsrPcOffset()->value() >= 0) {
-        ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
-        if (FLAG_trace_osr) {
- PrintF("[COSR - entry at AST id %d, offset %d in optimized code]\n",
-                 ast_id.ToInt(), data->OsrPcOffset()->value());
-        }
-        return result;
-      }
-    }
-    return Handle<Code>::null();
-  }
-
-  if (!IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
-    if (FLAG_trace_osr) {
-      PrintF("[COSR - ");
-      function->PrintName();
-      PrintF(" is unsuitable, restoring interrupt calls]\n");
-    }
-    Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
-    return Handle<Code>::null();
-  }
-
-  if (!RecompileConcurrent(function, pc_offset)) {
-    Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
-  }
-  return Handle<Code>::null();
-}


Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
=======================================
--- /branches/bleeding_edge/src/compiler.h      Tue Sep 10 11:09:22 2013 UTC
+++ /branches/bleeding_edge/src/compiler.h      Wed Sep 11 12:39:00 2013 UTC
@@ -627,10 +627,6 @@

   static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info);

- static Handle<Code> CompileForOnStackReplacement(Handle<JSFunction> function);
-
-  static Handle<Code> CompileForConcurrentOSR(Handle<JSFunction> function);
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static bool MakeCodeForLiveEdit(CompilationInfo* info);
 #endif
=======================================
--- /branches/bleeding_edge/src/ia32/builtins-ia32.cc Mon Sep 9 16:34:40 2013 UTC
+++ /branches/bleeding_edge/src/ia32/builtins-ia32.cc Wed Sep 11 12:39:00 2013 UTC
@@ -1327,14 +1327,21 @@


 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // Lookup the function in the JavaScript frame.
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // Pass the function to optimize as the argument to the on-stack
-  // replacement runtime function.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
+    // Lookup and calculate pc offset.
+    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+    __ mov(ebx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+    __ sub(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ sub(edx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+    __ SmiTag(edx);
+
+    // Pass both function and pc offset as arguments.
     __ push(eax);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+    __ push(edx);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
   }

   Label skip;
=======================================
--- /branches/bleeding_edge/src/mips/builtins-mips.cc Mon Sep 9 16:34:40 2013 UTC
+++ /branches/bleeding_edge/src/mips/builtins-mips.cc Wed Sep 11 12:39:00 2013 UTC
@@ -1000,13 +1000,22 @@


 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  // Lookup the function in the JavaScript frame and push it as an
-  // argument to the on-stack replacement function.
+  // Lookup the function in the JavaScript frame.
   __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
+    // Lookup and calculate pc offset.
+    __ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+    __ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+    __ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Subu(a1, a1, a2);
+    __ SmiTag(a1);
+
+    // Pass both function and pc offset as arguments.
     __ push(a0);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+    __ push(a1);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
   }

   // If the code object is null, just return to the unoptimized code.
=======================================
--- /branches/bleeding_edge/src/optimizing-compiler-thread.cc Wed Sep 4 12:55:59 2013 UTC
+++ /branches/bleeding_edge/src/optimizing-compiler-thread.cc Wed Sep 11 12:39:00 2013 UTC
@@ -234,14 +234,18 @@
 OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
     Handle<JSFunction> function, uint32_t osr_pc_offset) {
   ASSERT(!IsOptimizerThread());
-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-  for (int i = 0; i < ready_for_osr_.length(); i++) {
- if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
-      osr_hits_++;
-      return ready_for_osr_.Remove(i);
+  OptimizingCompiler* result = NULL;
+  { LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+    for (int i = 0; i < ready_for_osr_.length(); i++) {
+ if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+        osr_hits_++;
+        result = ready_for_osr_.Remove(i);
+        break;
+      }
     }
   }
-  return NULL;
+  RemoveStaleOSRCandidates();
+  return result;
 }


@@ -256,6 +260,18 @@
   }
   return false;
 }
+
+
+bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
+  ASSERT(!IsOptimizerThread());
+  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+  for (int i = 0; i < osr_candidates_.length(); i++) {
+    if (*osr_candidates_[i]->info()->closure() == function) {
+      return true;
+    }
+  }
+  return false;
+}


 void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
=======================================
--- /branches/bleeding_edge/src/optimizing-compiler-thread.h Wed Sep 4 12:55:59 2013 UTC
+++ /branches/bleeding_edge/src/optimizing-compiler-thread.h Wed Sep 11 12:39:00 2013 UTC
@@ -71,9 +71,7 @@
                                             uint32_t osr_pc_offset);
   bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);

-  // Remove the oldest OSR candidates that are ready so that we
-  // only have |limit| left waiting.
-  void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
+  bool IsQueuedForOSR(JSFunction* function);

   inline bool IsQueueAvailable() {
     // We don't need a barrier since we have a data dependency right
@@ -96,6 +94,10 @@
  private:
   enum StopFlag { CONTINUE, STOP, FLUSH };

+  // Remove the oldest OSR candidates that are ready so that we
+  // only have |limit| left waiting.
+  void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
+
   void FlushInputQueue(bool restore_function_code);
   void FlushOutputQueue(bool restore_function_code);
   void CompileNext();
=======================================
--- /branches/bleeding_edge/src/runtime-profiler.cc Wed Sep 4 12:55:59 2013 UTC +++ /branches/bleeding_edge/src/runtime-profiler.cc Wed Sep 11 12:39:00 2013 UTC
@@ -138,8 +138,17 @@
     }
     PrintF("]\n");
   }
+

if (FLAG_concurrent_recompilation && !isolate_->bootstrapper()->IsActive()) {
+    if (FLAG_concurrent_osr &&
+        isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
+      // Do not attempt regular recompilation if we already queued this for OSR.
+      // TODO(yangguo): This is necessary so that we don't install optimized
+      // code on a function that is already optimized, since OSR and regular
+      // recompilation race. This goes away as soon as OSR becomes one-shot.
+      return;
+    }
     ASSERT(!function->IsMarkedForInstallingRecompiledCode());
     ASSERT(!function->IsInRecompileQueue());
     function->MarkForConcurrentRecompilation();
@@ -223,6 +232,8 @@
     // output queue so that it does not unnecessarily keep objects alive.
     isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions();
   }
+
+  DisallowHeapAllocation no_gc;

   // Run through the JavaScript frames and collect them. If we already
   // have a sample of the function, we mark it for optimizations
=======================================
--- /branches/bleeding_edge/src/runtime.cc      Wed Sep 11 10:46:49 2013 UTC
+++ /branches/bleeding_edge/src/runtime.cc      Wed Sep 11 12:39:00 2013 UTC
@@ -8577,40 +8577,127 @@
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   return Smi::FromInt(function->shared()->opt_count());
 }
+
+
+static bool IsSuitableForOnStackReplacement(Isolate* isolate,
+                                            Handle<JSFunction> function,
+                                            Handle<Code> unoptimized) {
+  // Keep track of whether we've succeeded in optimizing.
+  if (!unoptimized->optimizable()) return false;
+  // If we are trying to do OSR when there are already optimized
+  // activations of the function, it means (a) the function is directly or
+  // indirectly recursive and (b) an optimized invocation has been
+  // deoptimized so that we are currently in an unoptimized activation.
+  // Check for optimized activations of this function.
+  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+ if (frame->is_optimized() && frame->function() == *function) return false;
+  }
+
+  return true;
+}


 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  ASSERT(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
+  Handle<Code> unoptimized(function->shared()->code(), isolate);
+
+#ifdef DEBUG
+  JavaScriptFrameIterator it(isolate);
+  JavaScriptFrame* frame = it.frame();
+  ASSERT_EQ(frame->function(), *function);
+  ASSERT_EQ(frame->LookupCode(), *unoptimized);
+  ASSERT(unoptimized->contains(frame->pc()));
+
+  ASSERT(pc_offset ==
+         static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start()));
+#endif  // DEBUG

   // We're not prepared to handle a function with arguments object.
   ASSERT(!function->shared()->uses_arguments());

-  // If the optimization attempt succeeds, return the code object which
-  // the unoptimized code can jump into.
-  Handle<Code> code =
-      (FLAG_concurrent_recompilation && FLAG_concurrent_osr)
-          ? Compiler::CompileForConcurrentOSR(function)
-          : Compiler::CompileForOnStackReplacement(function);
-  if (!code.is_null()) {
-#if DEBUG
-    ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+  Handle<Code> result = Handle<Code>::null();
+  BailoutId ast_id = BailoutId::None();
+
+  if (FLAG_concurrent_recompilation && FLAG_concurrent_osr) {
+    if (isolate->optimizing_compiler_thread()->
+            IsQueuedForOSR(function, pc_offset)) {
+ // Still waiting for the optimizing compiler thread to finish. Carry on.
+      if (FLAG_trace_osr) {
+        PrintF("[COSR - polling recompile tasks for ");
+        function->PrintName();
+        PrintF("]\n");
+      }
+      return NULL;
+    }
+
+    OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()->
+        FindReadyOSRCandidate(function, pc_offset);
+
+    if (compiler == NULL) {
+ if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
+          Compiler::RecompileConcurrent(function, pc_offset)) {
+        if (function->IsMarkedForLazyRecompilation() ||
+            function->IsMarkedForConcurrentRecompilation()) {
+          // Prevent regular recompilation if we queue this for OSR.
+          // TODO(yangguo): remove this as soon as OSR becomes one-shot.
+          function->ReplaceCode(function->shared()->code());
+        }
+        return NULL;
+      }
+      // Fall through to the end in case of failure.
+    } else {
+      // TODO(titzer): don't install the OSR code into the function.
+      ast_id = compiler->info()->osr_ast_id();
+      result = Compiler::InstallOptimizedCode(compiler);
+    }
+ } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
+    ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
+    ASSERT(!ast_id.IsNone());
+    if (FLAG_trace_osr) {
+      PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
+      function->PrintName();
+      PrintF("]\n");
+    }
+    // Attempt OSR compilation.
+    result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
+  }
+
+  // Revert the patched interrupt now, regardless of whether OSR succeeds.
+  Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
+
+  // Check whether we ended up with usable optimized code.
+  if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
     DeoptimizationInputData* data =
-        DeoptimizationInputData::cast(code->deoptimization_data());
-    ASSERT(!BailoutId(data->OsrAstId()->value()).IsNone());
-#endif
-    // TODO(titzer): this is a massive hack to make the deopt counts
-    // match. Fix heuristics for reenabling optimizations!
-    function->shared()->increment_deopt_count();
-    return *code;
-  } else {
-    if (function->IsMarkedForLazyRecompilation() ||
-        function->IsMarkedForConcurrentRecompilation()) {
-      function->ReplaceCode(function->shared()->code());
+        DeoptimizationInputData::cast(result->deoptimization_data());
+
+    if (data->OsrPcOffset()->value() >= 0) {
+      ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
+      if (FLAG_trace_osr) {
+        PrintF("[OSR - entry at AST id %d, offset %d in optimized code]\n",
+               ast_id.ToInt(), data->OsrPcOffset()->value());
+      }
+      // TODO(titzer): this is a massive hack to make the deopt counts
+      // match. Fix heuristics for reenabling optimizations!
+      function->shared()->increment_deopt_count();
+      return *result;
     }
-    return NULL;
   }
+
+  if (FLAG_trace_osr) {
+    PrintF("[OSR - optimization failed for ");
+    function->PrintName();
+    PrintF("]\n");
+  }
+
+  if (function->IsMarkedForLazyRecompilation() ||
+      function->IsMarkedForConcurrentRecompilation()) {
+    function->ReplaceCode(function->shared()->code());
+  }
+  return NULL;
 }


=======================================
--- /branches/bleeding_edge/src/runtime.h       Sun Sep  8 19:05:29 2013 UTC
+++ /branches/bleeding_edge/src/runtime.h       Wed Sep 11 12:39:00 2013 UTC
@@ -100,7 +100,7 @@
   F(NeverOptimizeFunction, 1, 1) \
   F(GetOptimizationStatus, -1, 1) \
   F(GetOptimizationCount, 1, 1) \
-  F(CompileForOnStackReplacement, 1, 1) \
+  F(CompileForOnStackReplacement, 2, 1) \
   F(SetAllocationTimeout, 2, 1) \
   F(AllocateInNewSpace, 1, 1) \
   F(AllocateInOldPointerSpace, 1, 1) \
=======================================
--- /branches/bleeding_edge/src/x64/builtins-x64.cc Mon Sep 9 16:34:40 2013 UTC
+++ /branches/bleeding_edge/src/x64/builtins-x64.cc Wed Sep 11 12:39:00 2013 UTC
@@ -1408,14 +1408,21 @@


 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // Lookup the function in the JavaScript frame.
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // Pass the function to optimize as the argument to the on-stack
-  // replacement runtime function.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
+    // Lookup and calculate pc offset.
+    __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+    __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+    __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
+    __ Integer32ToSmi(rdx, rdx);
+
+    // Pass both function and pc offset as arguments.
     __ push(rax);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+    __ push(rdx);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
   }

   Label skip;

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.

Reply via email to