Revision: 11213
Author:   [email protected]
Date:     Tue Apr  3 00:32:19 2012
Log: Make progress in incremental marking if scavenge is delaying mark-sweep.

[email protected]

Review URL: https://chromiumcodereview.appspot.com/9965054
http://code.google.com/p/v8/source/detail?r=11213

Modified:
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/incremental-marking.cc
 /branches/bleeding_edge/src/incremental-marking.h
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/test/cctest/test-heap.cc

=======================================
--- /branches/bleeding_edge/src/heap.cc Tue Mar 27 05:19:50 2012
+++ /branches/bleeding_edge/src/heap.cc Tue Apr  3 00:32:19 2012
@@ -145,7 +145,6 @@
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
-      idle_notification_will_schedule_next_gc_(false),
       mark_sweeps_since_idle_round_started_(0),
       ms_count_at_last_idle_notification_(0),
       gc_count_at_last_idle_gc_(0),
@@ -504,11 +503,17 @@
       !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() &&
       FLAG_incremental_marking_steps) {
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
-    }
-    collector = SCAVENGER;
-    collector_reason = "incremental marking delaying mark-sweep";
+    // Make progress in incremental marking.
+    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+    if (!incremental_marking()->IsComplete()) {
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+      }
+      collector = SCAVENGER;
+      collector_reason = "incremental marking delaying mark-sweep";
+    }
   }

   bool next_gc_likely_to_collect_more = false;
@@ -4817,10 +4822,8 @@


 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
-  // This flag prevents incremental marking from requesting GC via stack guard
-  idle_notification_will_schedule_next_gc_ = true;
-  incremental_marking()->Step(step_size);
-  idle_notification_will_schedule_next_gc_ = false;
+  incremental_marking()->Step(step_size,
+                              IncrementalMarking::NO_GC_VIA_STACK_GUARD);

   if (incremental_marking()->IsComplete()) {
     bool uncommit = false;
=======================================
--- /branches/bleeding_edge/src/heap.h  Fri Mar 23 06:33:11 2012
+++ /branches/bleeding_edge/src/heap.h  Tue Apr  3 00:32:19 2012
@@ -1569,10 +1569,6 @@
   // The roots that have an index less than this are always in old space.
   static const int kOldSpaceRoots = 0x20;

-  bool idle_notification_will_schedule_next_gc() {
-    return idle_notification_will_schedule_next_gc_;
-  }
-
   uint32_t HashSeed() {
     uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
     ASSERT(FLAG_randomize_hashes || seed == 0);
@@ -2033,7 +2029,6 @@
   unsigned int last_idle_notification_gc_count_;
   bool last_idle_notification_gc_count_init_;

-  bool idle_notification_will_schedule_next_gc_;
   int mark_sweeps_since_idle_round_started_;
   int ms_count_at_last_idle_notification_;
   unsigned int gc_count_at_last_idle_gc_;
=======================================
--- /branches/bleeding_edge/src/incremental-marking.cc Fri Mar 23 06:33:11 2012
+++ /branches/bleeding_edge/src/incremental-marking.cc Tue Apr  3 00:32:19 2012
@@ -743,7 +743,7 @@
 }


-void IncrementalMarking::MarkingComplete() {
+void IncrementalMarking::MarkingComplete(CompletionAction action) {
   state_ = COMPLETE;
   // We will set the stack guard to request a GC now. This will mean the rest
   // of the GC gets performed as soon as possible (we can't do a GC here in a
@@ -754,13 +754,14 @@
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Complete (normal).\n");
   }
-  if (!heap_->idle_notification_will_schedule_next_gc()) {
+  if (action == GC_VIA_STACK_GUARD) {
     heap_->isolate()->stack_guard()->RequestGC();
   }
 }


-void IncrementalMarking::Step(intptr_t allocated_bytes) {
+void IncrementalMarking::Step(intptr_t allocated_bytes,
+                              CompletionAction action) {
   if (heap_->gc_state() != Heap::NOT_IN_GC ||
       !FLAG_incremental_marking ||
       !FLAG_incremental_marking_steps ||
@@ -833,7 +834,7 @@
       Marking::MarkBlack(obj_mark_bit);
       MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
     }
-    if (marking_deque_.IsEmpty()) MarkingComplete();
+    if (marking_deque_.IsEmpty()) MarkingComplete(action);
   }

   allocated_ = 0;
=======================================
--- /branches/bleeding_edge/src/incremental-marking.h Wed Jan 11 01:39:37 2012
+++ /branches/bleeding_edge/src/incremental-marking.h Tue Apr  3 00:32:19 2012
@@ -46,6 +46,11 @@
     COMPLETE
   };

+  enum CompletionAction {
+    GC_VIA_STACK_GUARD,
+    NO_GC_VIA_STACK_GUARD
+  };
+
   explicit IncrementalMarking(Heap* heap);

   void TearDown();
@@ -82,7 +87,7 @@

   void Abort();

-  void MarkingComplete();
+  void MarkingComplete(CompletionAction action);

   // It's hard to know how much work the incremental marker should do to make
   // progress in the face of the mutator creating new work for it. We start
@@ -102,10 +107,11 @@
   static const intptr_t kMaxAllocationMarkingFactor = 1000;

   void OldSpaceStep(intptr_t allocated) {
-    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+         GC_VIA_STACK_GUARD);
   }

-  void Step(intptr_t allocated);
+  void Step(intptr_t allocated, CompletionAction action);

   inline void RestartIfNotMarking() {
     if (state_ == COMPLETE) {
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Mon Apr  2 01:32:31 2012
+++ /branches/bleeding_edge/src/spaces.cc       Tue Apr  3 00:32:19 2012
@@ -1234,13 +1234,15 @@
         allocation_info_.limit + inline_allocation_limit_step_,
         high);
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated);
+    heap()->incremental_marking()->Step(
+        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = new_top;
     return AllocateRaw(size_in_bytes);
   } else if (AddFreshPage()) {
     // Switched to new page. Try allocating again.
     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated);
+    heap()->incremental_marking()->Step(
+        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = to_space_.page_low();
     return AllocateRaw(size_in_bytes);
   } else {
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Wed Mar  7 09:52:16 2012
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Tue Apr  3 00:32:19 2012
@@ -1521,16 +1521,12 @@

   while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
          !marking->IsStopped()) {
-    marking->Step(MB);
+    // Discard any pending GC requests otherwise we will get GC when we enter
+    // code below.
+    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
   }

   CHECK(marking->IsMarking());
-
-  // Discard any pending GC requests otherwise we will get GC when we enter
-  // code below.
-  if (ISOLATE->stack_guard()->IsGCRequest()) {
-    ISOLATE->stack_guard()->Continue(GC_REQUEST);
-  }

   {
     v8::HandleScope scope;

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to