Reviewers: Michael Starzinger,

Message:
Thanks, I uploaded new patch set.


https://chromiumcodereview.appspot.com/9965054/diff/1/src/heap.cc
File src/heap.cc (left):

https://chromiumcodereview.appspot.com/9965054/diff/1/src/heap.cc#oldcode4821
src/heap.cc:4821: idle_notification_will_schedule_next_gc_ = true;
On 2012/04/02 09:01:46, Michael Starzinger wrote:
This was the last site that changed the above flag. Can we get rid of
the flag
completely?

Done.

https://chromiumcodereview.appspot.com/9965054/diff/1/src/incremental-marking.h
File src/incremental-marking.h (right):

https://chromiumcodereview.appspot.com/9965054/diff/1/src/incremental-marking.h#newcode49
src/incremental-marking.h:49: enum Finalizer {
On 2012/04/02 09:01:46, Michael Starzinger wrote:
The name "finalizer" feels wrong. Can we change this to something like
...
enum CompletionAction {
   NO_COMPLETION_ACTION,
   GC_VIA_STACK_GUARD
};

Done.

https://chromiumcodereview.appspot.com/9965054/diff/1/src/incremental-marking.h#newcode113
src/incremental-marking.h:113: void Step(intptr_t allocated, Finalizer
finalizer = GC_VIA_STACK_GUARD);
On 2012/04/02 09:01:46, Michael Starzinger wrote:
There are only three other call-sites, could we get along without a
default
argument?

Done.

Description:
Make progress in incremental marking if scavenge is delaying mark-sweep.

[email protected]

Please review this at https://chromiumcodereview.appspot.com/9965054/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files:
  M src/heap.h
  M src/heap.cc
  M src/incremental-marking.h
  M src/incremental-marking.cc
  M src/spaces.cc
  M test/cctest/test-heap.cc


Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index 4ce1816673a39368e57931c0f2a934594b5c0efc..48e8b56762dfad7733eaa701597e59cb713202b6 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -145,7 +145,6 @@ Heap::Heap()
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
-      idle_notification_will_schedule_next_gc_(false),
       mark_sweeps_since_idle_round_started_(0),
       ms_count_at_last_idle_notification_(0),
       gc_count_at_last_idle_gc_(0),
@@ -504,11 +503,17 @@ bool Heap::CollectGarbage(AllocationSpace space,
       !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() &&
       FLAG_incremental_marking_steps) {
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+    // Make progress in incremental marking.
+    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+    if (!incremental_marking()->IsComplete()) {
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+      }
+      collector = SCAVENGER;
+      collector_reason = "incremental marking delaying mark-sweep";
     }
-    collector = SCAVENGER;
-    collector_reason = "incremental marking delaying mark-sweep";
   }

   bool next_gc_likely_to_collect_more = false;
@@ -4817,10 +4822,8 @@ void Heap::EnsureHeapIsIterable() {


 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
-  // This flag prevents incremental marking from requesting GC via stack guard
-  idle_notification_will_schedule_next_gc_ = true;
-  incremental_marking()->Step(step_size);
-  idle_notification_will_schedule_next_gc_ = false;
+  incremental_marking()->Step(step_size,
+                              IncrementalMarking::NO_GC_VIA_STACK_GUARD);

   if (incremental_marking()->IsComplete()) {
     bool uncommit = false;
Index: src/heap.h
diff --git a/src/heap.h b/src/heap.h
index 2bd037f15b1e32d7b4e4c182b43638a2cc434fb4..0391e0e526416264300f9188e49d427f78dbbdcb 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1569,10 +1569,6 @@ class Heap {
   // The roots that have an index less than this are always in old space.
   static const int kOldSpaceRoots = 0x20;

-  bool idle_notification_will_schedule_next_gc() {
-    return idle_notification_will_schedule_next_gc_;
-  }
-
   uint32_t HashSeed() {
     uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
     ASSERT(FLAG_randomize_hashes || seed == 0);
@@ -2033,7 +2029,6 @@ class Heap {
   unsigned int last_idle_notification_gc_count_;
   bool last_idle_notification_gc_count_init_;

-  bool idle_notification_will_schedule_next_gc_;
   int mark_sweeps_since_idle_round_started_;
   int ms_count_at_last_idle_notification_;
   unsigned int gc_count_at_last_idle_gc_;
Index: src/incremental-marking.cc
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 8fe89b4a98c4500ed5527fd602ab55793e21896a..f3255e21ebf75022ce40b774f9cc5578fc0e2adf 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -743,7 +743,7 @@ void IncrementalMarking::Finalize() {
 }


-void IncrementalMarking::MarkingComplete() {
+void IncrementalMarking::MarkingComplete(CompletionAction action) {
   state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
@@ -754,13 +754,14 @@ void IncrementalMarking::MarkingComplete() {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Complete (normal).\n");
   }
-  if (!heap_->idle_notification_will_schedule_next_gc()) {
+  if (action == GC_VIA_STACK_GUARD) {
     heap_->isolate()->stack_guard()->RequestGC();
   }
 }


-void IncrementalMarking::Step(intptr_t allocated_bytes) {
+void IncrementalMarking::Step(intptr_t allocated_bytes,
+                              CompletionAction action) {
   if (heap_->gc_state() != Heap::NOT_IN_GC ||
       !FLAG_incremental_marking ||
       !FLAG_incremental_marking_steps ||
@@ -833,7 +834,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
       Marking::MarkBlack(obj_mark_bit);
       MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
     }
-    if (marking_deque_.IsEmpty()) MarkingComplete();
+    if (marking_deque_.IsEmpty()) MarkingComplete(action);
   }

   allocated_ = 0;
Index: src/incremental-marking.h
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index 4f8fa6b1275b0995a919503972c6ed8b360daaa5..8cbe6c18e7bd5631709982770ae35dfd02f6426c 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -46,6 +46,11 @@ class IncrementalMarking {
     COMPLETE
   };

+  enum CompletionAction {
+    GC_VIA_STACK_GUARD,
+    NO_GC_VIA_STACK_GUARD
+  };
+
   explicit IncrementalMarking(Heap* heap);

   void TearDown();
@@ -82,7 +87,7 @@ class IncrementalMarking {

   void Abort();

-  void MarkingComplete();
+  void MarkingComplete(CompletionAction action);

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
@@ -102,10 +107,11 @@ class IncrementalMarking {
   static const intptr_t kMaxAllocationMarkingFactor = 1000;

   void OldSpaceStep(intptr_t allocated) {
-    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+         GC_VIA_STACK_GUARD);
   }

-  void Step(intptr_t allocated);
+  void Step(intptr_t allocated, CompletionAction action);

   inline void RestartIfNotMarking() {
     if (state_ == COMPLETE) {
Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index a404b1e75ee5cc6537753aae7716e8c2fbd57b23..6144464304898fdb3fe814cceeadc87f5e48881d 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1234,13 +1234,15 @@ MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
         allocation_info_.limit + inline_allocation_limit_step_,
         high);
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated);
+    heap()->incremental_marking()->Step(
+        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = new_top;
     return AllocateRaw(size_in_bytes);
   } else if (AddFreshPage()) {
     // Switched to new page. Try allocating again.
     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated);
+    heap()->incremental_marking()->Step(
+        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = to_space_.page_low();
     return AllocateRaw(size_in_bytes);
   } else {
Index: test/cctest/test-heap.cc
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 999e2c66518b010561440292419606b8816b08d6..67792312e9b9073ef9e295e4e1d60c117e79abea 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1521,17 +1521,11 @@ TEST(InstanceOfStubWriteBarrier) {

   while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
          !marking->IsStopped()) {
-    marking->Step(MB);
+    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
   }

   CHECK(marking->IsMarking());

-  // Discard any pending GC requests otherwise we will get GC when we enter
-  // code below.
-  if (ISOLATE->stack_guard()->IsGCRequest()) {
-    ISOLATE->stack_guard()->Continue(GC_REQUEST);
-  }
-
   {
     v8::HandleScope scope;
     v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();


--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to