Revision: 10503
Author:   [email protected]
Date:     Wed Jan 25 07:11:59 2012
Log: When preparing the heap for breakpoints, make sure not to flush away the non-optimized code for inlined functions.

Debug::PrepareForBreakPoints was not fully populating the active_functions list.

[email protected]
TEST=test/mjsunit/regress/regress-debug-code-recompilation.js

Review URL: https://chromiumcodereview.appspot.com/9290013
http://code.google.com/p/v8/source/detail?r=10503
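
For context, the shape of the fix: instead of collecting the active
functions into a sorted list and binary-searching it during the heap walk,
Debug::PrepareForBreakPoints now tags the non-optimized code of every
function with an activation (including functions inlined into optimized
frames) by storing a marker in the code object's new gc_metadata field,
and only flushes untagged code back to lazy-compile. A standalone sketch
of that mark/sweep/clear idiom (generic C++ with hypothetical names, not
the V8 implementation -- see the debug.cc hunk below for the real code):

  #include <cassert>
  #include <vector>

  struct Code {
    void* gc_metadata = nullptr;  // plays the role of Code::gc_metadata
    bool flushed = false;         // stands in for set_code(*lazy_compile)
  };

  int main() {
    static int kMarker;                 // stands in for the hole value
    void* marker = &kMarker;

    std::vector<Code> heap(4);
    std::vector<Code*> active = {&heap[0], &heap[2]};  // "on-stack" code

    for (Code* c : active) c->gc_metadata = marker;    // 1. mark active code
    for (Code& c : heap)                               // 2. flush the rest
      if (c.gc_metadata != marker) c.flushed = true;
    for (Code* c : active) c->gc_metadata = nullptr;   // 3. clear the marks

    assert(!heap[0].flushed && heap[1].flushed);
    assert(!heap[2].flushed && heap[3].flushed);
    return 0;
  }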

Added:
 /branches/bleeding_edge/test/mjsunit/regress/regress-debug-code-recompilation.js
Modified:
 /branches/bleeding_edge/src/debug.cc
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/objects-inl.h
 /branches/bleeding_edge/src/objects.h

=======================================
--- /dev/null
+++ /branches/bleeding_edge/test/mjsunit/regress/regress-debug-code-recompilation.js Wed Jan 25 07:11:59 2012
@@ -0,0 +1,47 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --hydrogen-filter=Debug.setBreakPoint --expose-debug-as debug
+
+Debug = debug.Debug
+
+function f() {a=1;b=2};
+function g() {
+  a=1;
+  b=2;
+}
+
+bp = Debug.setBreakPoint(f, 0, 0);
+Debug.clearBreakPoint(bp);
+%OptimizeFunctionOnNextCall(Debug.setBreakPoint);
+bp = Debug.setBreakPoint(f, 0, 0);
+Debug.clearBreakPoint(bp);
+bp = Debug.setBreakPoint(f, 0, 0);
+Debug.clearBreakPoint(bp);
+%OptimizeFunctionOnNextCall(Debug.setBreakPoint);
+bp = Debug.setBreakPoint(f, 0, 0);
+Debug.clearBreakPoint(bp);
=======================================
--- /branches/bleeding_edge/src/debug.cc        Mon Jan 16 01:46:21 2012
+++ /branches/bleeding_edge/src/debug.cc        Wed Jan 25 07:11:59 2012
@@ -1756,6 +1756,135 @@
 #endif
   return result;
 }
+
+
+static void CollectActiveFunctionsFromThread(
+    Isolate* isolate,
+    ThreadLocalTop* top,
+    List<Handle<JSFunction> >* active_functions,
+    Object* active_code_marker) {
+  // Find all non-optimized code functions with activation frames
+  // on the stack. This includes functions which have optimized
+  // activations (including inlined functions) on the stack as the
+  // non-optimized code is needed for the lazy deoptimization.
+  for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+    if (frame->is_optimized()) {
+      List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
+      frame->GetFunctions(&functions);
+      for (int i = 0; i < functions.length(); i++) {
+        JSFunction* function = functions[i];
+        active_functions->Add(Handle<JSFunction>(function));
+        function->shared()->code()->set_gc_metadata(active_code_marker);
+      }
+    } else if (frame->function()->IsJSFunction()) {
+      JSFunction* function = JSFunction::cast(frame->function());
+      ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+      active_functions->Add(Handle<JSFunction>(function));
+      function->shared()->code()->set_gc_metadata(active_code_marker);
+    }
+  }
+}
+
+
+static void RedirectActivationsToRecompiledCodeOnThread(
+    Isolate* isolate,
+    ThreadLocalTop* top) {
+  for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+
+    if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
+
+    JSFunction* function = JSFunction::cast(frame->function());
+
+    ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+
+    Handle<Code> frame_code(frame->LookupCode());
+    if (frame_code->has_debug_break_slots()) continue;
+
+    Handle<Code> new_code(function->shared()->code());
+    if (new_code->kind() != Code::FUNCTION ||
+        !new_code->has_debug_break_slots()) {
+      continue;
+    }
+
+    intptr_t delta = frame->pc() - frame_code->instruction_start();
+    int debug_break_slot_count = 0;
+    int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
+    for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
+      // Check if the pc in the new code with debug break
+      // slots is before this slot.
+      RelocInfo* info = it.rinfo();
+      int debug_break_slot_bytes =
+          debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+      intptr_t new_delta =
+          info->pc() -
+          new_code->instruction_start() -
+          debug_break_slot_bytes;
+      if (new_delta > delta) {
+        break;
+      }
+
+      // Passed a debug break slot in the full code with debug
+      // break slots.
+      debug_break_slot_count++;
+    }
+    int debug_break_slot_bytes =
+        debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+    if (FLAG_trace_deopt) {
+      PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+             "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
+             "for debugging, "
+             "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
+             reinterpret_cast<intptr_t>(
+                 frame_code->instruction_start()),
+             reinterpret_cast<intptr_t>(
+                 frame_code->instruction_start()) +
+             frame_code->instruction_size(),
+             frame_code->instruction_size(),
+             reinterpret_cast<intptr_t>(new_code->instruction_start()),
+             reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+             new_code->instruction_size(),
+             new_code->instruction_size(),
+             reinterpret_cast<intptr_t>(frame->pc()),
+             reinterpret_cast<intptr_t>(new_code->instruction_start()) +
+             delta + debug_break_slot_bytes);
+    }
+
+    // Patch the return address to return into the code with
+    // debug break slots.
+    frame->set_pc(
+        new_code->instruction_start() + delta + debug_break_slot_bytes);
+  }
+}
+
+
+class ActiveFunctionsCollector : public ThreadVisitor {
+ public:
+  explicit ActiveFunctionsCollector(List<Handle<JSFunction> >* active_functions,
+                                    Object* active_code_marker)
+      : active_functions_(active_functions),
+        active_code_marker_(active_code_marker) { }
+
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    CollectActiveFunctionsFromThread(isolate,
+                                     top,
+                                     active_functions_,
+                                     active_code_marker_);
+  }
+
+ private:
+  List<Handle<JSFunction> >* active_functions_;
+  Object* active_code_marker_;
+};
+
+
+class ActiveFunctionsRedirector : public ThreadVisitor {
+ public:
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    RedirectActivationsToRecompiledCodeOnThread(isolate, top);
+  }
+};


 void Debug::PrepareForBreakPoints() {
@@ -1776,71 +1905,59 @@
       // debug break slots.
       isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);

-      // Ensure no GC in this scope as we are comparing raw pointer
-      // values and performing a heap iteration.
+      // Ensure no GC in this scope as we are going to use gc_metadata
+      // field in the Code object to mark active functions.
       AssertNoAllocation no_allocation;

-      // Find all non-optimized code functions with activation frames
-      // on the stack. This includes functions which have optimized
-      // activations (including inlined functions) on the stack as the
-      // non-optimized code is needed for the lazy deoptimization.
-      for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
-        JavaScriptFrame* frame = it.frame();
-        if (frame->is_optimized()) {
-          List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
-          frame->GetFunctions(&functions);
-          for (int i = 0; i < functions.length(); i++) {
-            if (!functions[i]->shared()->code()->has_debug_break_slots()) {
-              active_functions.Add(Handle<JSFunction>(functions[i]));
-            }
-          }
-        } else if (frame->function()->IsJSFunction()) {
-          JSFunction* function = JSFunction::cast(frame->function());
-          ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
-          if (!frame->LookupCode()->has_debug_break_slots() ||
-              !function->shared()->code()->has_debug_break_slots()) {
-            active_functions.Add(Handle<JSFunction>(function));
-          }
-        }
-      }
-
-      // Sort the functions on the object pointer value to prepare for
-      // the binary search below.
-      active_functions.Sort(HandleObjectPointerCompare<JSFunction>);
-
-      // Scan the heap for all non-optimized functions which has no
-      // debug break slots.
+      Object* active_code_marker = isolate_->heap()->the_hole_value();
+
+      CollectActiveFunctionsFromThread(isolate_,
+                                       isolate_->thread_local_top(),
+                                       &active_functions,
+                                       active_code_marker);
+      ActiveFunctionsCollector active_functions_collector(&active_functions,
+                                                          active_code_marker);
+      isolate_->thread_manager()->IterateArchivedThreads(
+          &active_functions_collector);
+
+      // Scan the heap for all non-optimized functions which have no
+      // debug break slots and are not active or inlined into an active
+      // function and mark them for lazy compilation.
       HeapIterator iterator;
       HeapObject* obj = NULL;
       while (((obj = iterator.next()) != NULL)) {
         if (obj->IsJSFunction()) {
           JSFunction* function = JSFunction::cast(obj);
-          if (function->shared()->allows_lazy_compilation() &&
-              function->shared()->script()->IsScript() &&
+          SharedFunctionInfo* shared = function->shared();
+          if (shared->allows_lazy_compilation() &&
+              shared->script()->IsScript() &&
               function->code()->kind() == Code::FUNCTION &&
-              !function->code()->has_debug_break_slots()) {
-            bool has_activation =
-                SortedListBSearch<Handle<JSFunction> >(
-                    active_functions,
-                    Handle<JSFunction>(function),
-                    HandleObjectPointerCompare<JSFunction>) != -1;
-            if (!has_activation) {
-              function->set_code(*lazy_compile);
-              function->shared()->set_code(*lazy_compile);
-            }
+              !function->code()->has_debug_break_slots() &&
+              shared->code()->gc_metadata() != active_code_marker) {
+            function->set_code(*lazy_compile);
+            function->shared()->set_code(*lazy_compile);
           }
         }
       }
-    }
-
-    // Now the non-GC scope is left, and the sorting of the functions
-    // in active_function is not ensured any more. The code below does
-    // not rely on it.
+
+      // Clear gc_metadata field.
+      for (int i = 0; i < active_functions.length(); i++) {
+        Handle<JSFunction> function = active_functions[i];
+        function->shared()->code()->set_gc_metadata(Smi::FromInt(0));
+      }
+    }

     // Now recompile all functions with activation frames and
     // patch the return address to run in the new compiled code.
     for (int i = 0; i < active_functions.length(); i++) {
       Handle<JSFunction> function = active_functions[i];
+
+      if (function->code()->kind() == Code::FUNCTION &&
+          function->code()->has_debug_break_slots()) {
+        // Nothing to do. Function code already had debug break slots.
+        continue;
+      }
+
       Handle<SharedFunctionInfo> shared(function->shared());
       // If recompilation is not possible just skip it.
       if (shared->is_toplevel() ||
@@ -1851,9 +1968,6 @@

       // Make sure that the shared full code is compiled with debug
       // break slots.
-      if (function->code() == *lazy_compile) {
-        function->set_code(shared->code());
-      }
       if (!shared->code()->has_debug_break_slots()) {
         // Try to compile the full code with debug break slots. If it
         // fails just keep the current code.
@@ -1872,70 +1986,17 @@
           continue;
         }
       }
-      Handle<Code> new_code(shared->code());
-
-      // Find the function and patch the return address.
-      for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
-        JavaScriptFrame* frame = it.frame();
-        // If the current frame is for this function in its
-        // non-optimized form rewrite the return address to continue
-        // in the newly compiled full code with debug break slots.
-        if (!frame->is_optimized() &&
-            frame->function()->IsJSFunction() &&
-            frame->function() == *function) {
-          ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
-          Handle<Code> frame_code(frame->LookupCode());
-          if (frame_code->has_debug_break_slots()) continue;
-          intptr_t delta = frame->pc() - frame_code->instruction_start();
-          int debug_break_slot_count = 0;
-          int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
-          for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
-            // Check if the pc in the new code with debug break
-            // slots is before this slot.
-            RelocInfo* info = it.rinfo();
-            int debug_break_slot_bytes =
-                debug_break_slot_count * Assembler::kDebugBreakSlotLength;
-            intptr_t new_delta =
-                info->pc() -
-                new_code->instruction_start() -
-                debug_break_slot_bytes;
-            if (new_delta > delta) {
-              break;
-            }
-
-            // Passed a debug break slot in the full code with debug
-            // break slots.
-            debug_break_slot_count++;
-          }
-          int debug_break_slot_bytes =
-              debug_break_slot_count * Assembler::kDebugBreakSlotLength;
-          if (FLAG_trace_deopt) {
-            PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
-                   "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
-                   "for debugging, "
- "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
-                   reinterpret_cast<intptr_t>(
-                       frame_code->instruction_start()),
-                   reinterpret_cast<intptr_t>(
-                       frame_code->instruction_start()) +
-                       frame_code->instruction_size(),
-                   frame_code->instruction_size(),
-                   reinterpret_cast<intptr_t>(new_code->instruction_start()),
-                   reinterpret_cast<intptr_t>(new_code->instruction_start()) +
-                       new_code->instruction_size(),
-                   new_code->instruction_size(),
-                   reinterpret_cast<intptr_t>(frame->pc()),
-                   reinterpret_cast<intptr_t>(new_code->instruction_start()) +
-                       delta + debug_break_slot_bytes);
-          }
-
-          // Patch the return address to return into the code with
-          // debug break slots.
-          frame->set_pc(
-              new_code->instruction_start() + delta + debug_break_slot_bytes);
-        }
-      }
-    }
+
+      // Keep function code in sync with shared function info.
+      function->set_code(shared->code());
+    }
+
+    RedirectActivationsToRecompiledCodeOnThread(isolate_,
+                                                isolate_->thread_local_top());
+
+    ActiveFunctionsRedirector active_functions_redirector;
+    isolate_->thread_manager()->IterateArchivedThreads(
+          &active_functions_redirector);
   }
 }

=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Jan 25 06:27:58 2012
+++ /branches/bleeding_edge/src/heap.cc Wed Jan 25 07:11:59 2012
@@ -3340,7 +3340,7 @@
   }
   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code->set_next_code_flushing_candidate(undefined_value());
+  code->set_gc_metadata(Smi::FromInt(0));
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Wed Jan 25 06:22:59 2012
+++ /branches/bleeding_edge/src/mark-compact.cc Wed Jan 25 07:11:59 2012
@@ -710,16 +710,17 @@
       SharedFunctionInfo* candidate) {
     Code* code = candidate->code();
     return reinterpret_cast<SharedFunctionInfo**>(
-        code->address() + Code::kNextCodeFlushingCandidateOffset);
+        code->address() + Code::kGCMetadataOffset);
   }

   static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
-    return *GetNextCandidateField(candidate);
+    return reinterpret_cast<SharedFunctionInfo*>(
+        candidate->code()->gc_metadata());
   }

   static void SetNextCandidate(SharedFunctionInfo* candidate,
                                SharedFunctionInfo* next_candidate) {
-    *GetNextCandidateField(candidate) = next_candidate;
+    candidate->code()->set_gc_metadata(next_candidate);
   }

   Isolate* isolate_;
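
Note on the mark-compact.cc hunk above: the code-flushing candidate list
is an intrusive singly linked list, and after this change the next-pointer
lives in the repurposed gc_metadata slot rather than in a dedicated
next_code_flushing_candidate field. A minimal standalone illustration of
the same intrusive-list idiom (generic C++ with hypothetical types, not
the V8 code):

  #include <cassert>
  #include <cstddef>

  struct Code { void* gc_metadata = nullptr; };
  struct SharedFunctionInfo { Code code; };

  // The list is threaded through each candidate's code metadata slot.
  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* c) {
    return static_cast<SharedFunctionInfo*>(c->code.gc_metadata);
  }

  static void SetNextCandidate(SharedFunctionInfo* c,
                               SharedFunctionInfo* next) {
    c->code.gc_metadata = next;
  }

  int main() {
    SharedFunctionInfo a, b;
    SetNextCandidate(&a, &b);                 // link a -> b
    assert(GetNextCandidate(&a) == &b);       // traverse one step
    assert(GetNextCandidate(&b) == nullptr);  // end of list
    return 0;
  }
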
=======================================
--- /branches/bleeding_edge/src/objects-inl.h   Wed Jan 18 09:01:57 2012
+++ /branches/bleeding_edge/src/objects-inl.h   Wed Jan 25 07:11:59 2012
@@ -4042,8 +4042,7 @@
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
 ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
 ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, next_code_flushing_candidate,
-          Object, kNextCodeFlushingCandidateOffset)
+ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)


 byte* Code::instruction_start()  {
=======================================
--- /branches/bleeding_edge/src/objects.h       Fri Jan 20 08:17:08 2012
+++ /branches/bleeding_edge/src/objects.h       Wed Jan 25 07:11:59 2012
@@ -4050,11 +4050,10 @@
   // [deoptimization_data]: Array containing data for deopt.
   DECL_ACCESSORS(deoptimization_data, FixedArray)

-  // [code_flushing_candidate]: Field only used during garbage
-  // collection to hold code flushing candidates. The contents of this
+  // [gc_metadata]: Field used to hold GC related metadata. The contents of this
   // field does not have to be traced during garbage collection since
   // it is only used by the garbage collector itself.
-  DECL_ACCESSORS(next_code_flushing_candidate, Object)
+  DECL_ACCESSORS(gc_metadata, Object)

   // Unchecked accessors to be used during GC.
   inline ByteArray* unchecked_relocation_info();
@@ -4278,10 +4277,8 @@
   static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
   static const int kDeoptimizationDataOffset =
       kHandlerTableOffset + kPointerSize;
-  static const int kNextCodeFlushingCandidateOffset =
-      kDeoptimizationDataOffset + kPointerSize;
-  static const int kFlagsOffset =
-      kNextCodeFlushingCandidateOffset + kPointerSize;
+  static const int kGCMetadataOffset = kDeoptimizationDataOffset + kPointerSize;
+  static const int kFlagsOffset = kGCMetadataOffset + kPointerSize;

   static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
   static const int kKindSpecificFlagsSize = 2 * kIntSize;
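
Because each offset in objects.h is defined in terms of the previous one,
kGCMetadataOffset takes exactly the slot that kNextCodeFlushingCandidateOffset
used to occupy, and every later offset (kFlagsOffset onwards) is unchanged;
the field is repurposed, not relocated. A compile-time check of that
chained-offset pattern (generic C++ with hypothetical values, not the real
V8 constants):

  #include <cstddef>

  constexpr std::size_t kPointerSize = sizeof(void*);
  constexpr std::size_t kDeoptimizationDataOffset = 24;  // hypothetical value
  constexpr std::size_t kOldNextCandidateOffset =
      kDeoptimizationDataOffset + kPointerSize;
  constexpr std::size_t kGCMetadataOffset =
      kDeoptimizationDataOffset + kPointerSize;
  static_assert(kGCMetadataOffset == kOldNextCandidateOffset,
                "renamed field reuses the same slot; layout is unchanged");

  int main() { return 0; }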
