Revision: 24567
Author:   [email protected]
Date:     Mon Oct 13 16:27:55 2014 UTC
Log: Check if there is still time before finalizing an incremental collection.

BUG=
[email protected], [email protected]

Review URL: https://codereview.chromium.org/629903003
https://code.google.com/p/v8/source/detail?r=24567

Modified:
 /branches/bleeding_edge/src/heap/gc-idle-time-handler.cc
 /branches/bleeding_edge/src/heap/gc-idle-time-handler.h
 /branches/bleeding_edge/src/heap/heap.cc
 /branches/bleeding_edge/src/heap/heap.h
 /branches/bleeding_edge/src/heap/incremental-marking.cc
 /branches/bleeding_edge/src/heap/incremental-marking.h
 /branches/bleeding_edge/src/heap/mark-compact.cc
 /branches/bleeding_edge/src/heap/mark-compact.h
 /branches/bleeding_edge/test/cctest/test-heap.cc
/branches/bleeding_edge/test/unittests/heap/gc-idle-time-handler-unittest.cc

=======================================
--- /branches/bleeding_edge/src/heap/gc-idle-time-handler.cc Thu Oct 2 07:42:11 2014 UTC
+++ /branches/bleeding_edge/src/heap/gc-idle-time-handler.cc Mon Oct 13 16:27:55 2014 UTC
@@ -63,6 +63,9 @@

 size_t GCIdleTimeHandler::EstimateMarkCompactTime(
     size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
+  // TODO(hpayer): Be more precise about the type of mark-compact event. It
+  // makes a huge difference if it is incremental or non-incremental and if
+  // compaction is happening.
   if (mark_compact_speed_in_bytes_per_ms == 0) {
mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
   }
@@ -71,7 +74,7 @@
 }


-bool GCIdleTimeHandler::DoScavenge(
+bool GCIdleTimeHandler::ShouldDoScavenge(
size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
     size_t scavenge_speed_in_bytes_per_ms,
     size_t new_space_allocation_throughput_in_bytes_per_ms) {
@@ -108,6 +111,15 @@
   }
   return false;
 }
+
+
+bool GCIdleTimeHandler::ShouldDoMarkCompact(
+    size_t idle_time_in_ms, size_t size_of_objects,
+    size_t mark_compact_speed_in_bytes_per_ms) {
+  return idle_time_in_ms >=
+         EstimateMarkCompactTime(size_of_objects,
+                                 mark_compact_speed_in_bytes_per_ms);
+}


 // The following logic is implemented by the controller:
@@ -128,10 +140,11 @@
 // that this currently may trigger a full garbage collection.
 GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
                                             HeapState heap_state) {
-  if (DoScavenge(idle_time_in_ms, heap_state.new_space_capacity,
-                 heap_state.used_new_space_size,
-                 heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
+  if (ShouldDoScavenge(
+          idle_time_in_ms, heap_state.new_space_capacity,
+          heap_state.used_new_space_size,
+          heap_state.scavenge_speed_in_bytes_per_ms,
+          heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
     return GCIdleTimeAction::Scavenge();
   }

@@ -148,10 +161,8 @@
   }

   if (heap_state.incremental_marking_stopped) {
-    size_t estimated_time_in_ms =
-        EstimateMarkCompactTime(heap_state.size_of_objects,
- heap_state.mark_compact_speed_in_bytes_per_ms);
-    if (idle_time_in_ms >= estimated_time_in_ms ||
+    if (ShouldDoMarkCompact(idle_time_in_ms, heap_state.size_of_objects,
+ heap_state.mark_compact_speed_in_bytes_per_ms) ||
         (heap_state.size_of_objects < kSmallHeapSize &&
          heap_state.contexts_disposed > 0)) {
// If there are no more than two GCs left in this idle round and we are
=======================================
--- /branches/bleeding_edge/src/heap/gc-idle-time-handler.h Thu Oct 2 07:21:53 2014 UTC
+++ /branches/bleeding_edge/src/heap/gc-idle-time-handler.h Mon Oct 13 16:27:55 2014 UTC
@@ -155,7 +155,11 @@
   static size_t EstimateMarkCompactTime(
       size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);

-  static bool DoScavenge(
+  static bool ShouldDoMarkCompact(size_t idle_time_in_ms,
+                                  size_t size_of_objects,
+ size_t mark_compact_speed_in_bytes_per_ms);
+
+  static bool ShouldDoScavenge(
size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
       size_t scavenger_speed_in_bytes_per_ms,
       size_t new_space_allocation_throughput_in_bytes_per_ms);
=======================================
--- /branches/bleeding_edge/src/heap/heap.cc    Wed Oct  8 11:51:57 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap.cc    Mon Oct 13 16:27:55 2014 UTC
@@ -4268,11 +4268,14 @@
 }


-void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
-  incremental_marking()->Step(step_size,
- IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
-
-  if (incremental_marking()->IsComplete()) {
+void Heap::TryFinalizeIdleIncrementalMarking(
+    size_t idle_time_in_ms, size_t size_of_objects,
+    size_t mark_compact_speed_in_bytes_per_ms) {
+  if (incremental_marking()->IsComplete() ||
+      (mark_compact_collector()->IsMarkingDequeEmpty() &&
+       gc_idle_time_handler_.ShouldDoMarkCompact(
+           idle_time_in_ms, size_of_objects,
+           mark_compact_speed_in_bytes_per_ms))) {
     bool uncommit = false;
     if (gc_count_at_last_idle_gc_ == gc_count_) {
       // No GC since the last full GC, the mutator is probably not active.
@@ -4332,16 +4335,28 @@
       gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);

   bool result = false;
+  int actual_time_in_ms = 0;
   switch (action.type) {
     case DONE:
       result = true;
       break;
-    case DO_INCREMENTAL_MARKING:
+    case DO_INCREMENTAL_MARKING: {
       if (incremental_marking()->IsStopped()) {
         incremental_marking()->Start();
       }
-      AdvanceIdleIncrementalMarking(action.parameter);
+      incremental_marking()->Step(action.parameter,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+                                  IncrementalMarking::FORCE_MARKING,
+                                  IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+      actual_time_in_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+      int remaining_idle_time_in_ms = idle_time_in_ms - actual_time_in_ms;
+      if (remaining_idle_time_in_ms > 0) {
+        TryFinalizeIdleIncrementalMarking(
+            remaining_idle_time_in_ms, heap_state.size_of_objects,
+            heap_state.mark_compact_speed_in_bytes_per_ms);
+      }
       break;
+    }
     case DO_FULL_GC: {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
       const char* message = contexts_disposed_
@@ -4361,20 +4376,20 @@
       break;
   }

-  int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
-  if (actual_time_ms <= idle_time_in_ms) {
+  actual_time_in_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+  if (actual_time_in_ms <= idle_time_in_ms) {
     if (action.type != DONE && action.type != DO_NOTHING) {
       isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
-          idle_time_in_ms - actual_time_ms);
+          idle_time_in_ms - actual_time_in_ms);
     }
   } else {
     isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
-        actual_time_ms - idle_time_in_ms);
+        actual_time_in_ms - idle_time_in_ms);
   }

   if (FLAG_trace_idle_notification) {
PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
-           idle_time_in_ms, actual_time_ms);
+           idle_time_in_ms, actual_time_in_ms);
     action.Print();
     PrintF("]\n");
   }
=======================================
--- /branches/bleeding_edge/src/heap/heap.h     Wed Oct  8 14:48:48 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap.h     Mon Oct 13 16:27:55 2014 UTC
@@ -1949,7 +1949,9 @@

   void SelectScavengingVisitorsTable();

-  void AdvanceIdleIncrementalMarking(intptr_t step_size);
+  void TryFinalizeIdleIncrementalMarking(
+      size_t idle_time_in_ms, size_t size_of_objects,
+      size_t mark_compact_speed_in_bytes_per_ms);

   bool WorthActivatingIncrementalMarking();

=======================================
--- /branches/bleeding_edge/src/heap/incremental-marking.cc Wed Oct 8 11:51:57 2014 UTC
+++ /branches/bleeding_edge/src/heap/incremental-marking.cc Mon Oct 13 16:27:55 2014 UTC
@@ -27,6 +27,7 @@
       should_hurry_(false),
       marking_speed_(0),
       allocated_(0),
+      idle_marking_delay_counter_(0),
       no_marking_scope_depth_(0),
       unscanned_bytes_of_large_object_(0) {}

@@ -891,24 +892,27 @@
 }


-void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
-                              bool force_marking) {
+intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
+                                  CompletionAction action,
+                                  ForceMarkingAction marking,
+                                  ForceCompletionAction completion) {
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
       !FLAG_incremental_marking_steps ||
       (state_ != SWEEPING && state_ != MARKING)) {
-    return;
+    return 0;
   }

   allocated_ += allocated_bytes;

-  if (!force_marking && allocated_ < kAllocatedThreshold &&
+ if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
       write_barriers_invoked_since_last_step_ <
           kWriteBarriersInvokedThreshold) {
-    return;
+    return 0;
   }

-  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
+  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

+  intptr_t bytes_processed = 0;
   {
     HistogramTimerScope incremental_marking_scope(
         heap_->isolate()->counters()->gc_incremental_marking());
@@ -929,7 +933,6 @@
     write_barriers_invoked_since_last_step_ = 0;

     bytes_scanned_ += bytes_to_process;
-    intptr_t bytes_processed = 0;

     if (state_ == SWEEPING) {
       if (heap_->mark_compact_collector()->sweeping_in_progress() &&
@@ -942,7 +945,14 @@
       }
     } else if (state_ == MARKING) {
       bytes_processed = ProcessMarkingDeque(bytes_to_process);
-      if (marking_deque_.IsEmpty()) MarkingComplete(action);
+      if (marking_deque_.IsEmpty()) {
+        if (completion == FORCE_COMPLETION ||
+            IsIdleMarkingDelayCounterLimitReached()) {
+          MarkingComplete(action);
+        } else {
+          IncrementIdleMarkingDelayCounter();
+        }
+      }
     }

     steps_count_++;
@@ -958,6 +968,7 @@
     // process the marking deque.
     heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
   }
+  return bytes_processed;
 }


@@ -977,5 +988,20 @@
 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
 }
+
+
+bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
+  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
+}
+
+
+void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
+  idle_marking_delay_counter_++;
+}
+
+
+void IncrementalMarking::ClearIdleMarkingDelayCounter() {
+  idle_marking_delay_counter_ = 0;
+}
 }
 }  // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/heap/incremental-marking.h Tue Sep 16 12:48:59 2014 UTC
+++ /branches/bleeding_edge/src/heap/incremental-marking.h Mon Oct 13 16:27:55 2014 UTC
@@ -20,6 +20,10 @@

   enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

+  enum ForceMarkingAction { FORCE_MARKING, DO_NOT_FORCE_MARKING };
+
+  enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
+
   explicit IncrementalMarking(Heap* heap);

   static void Initialize();
@@ -83,10 +87,15 @@
   static const intptr_t kMarkingSpeedAccelleration = 2;
   static const intptr_t kMaxMarkingSpeed = 1000;

+  // This is the upper bound for how many times we allow finalization of
+  // incremental marking to be postponed.
+  static const size_t kMaxIdleMarkingDelayCounter = 3;
+
   void OldSpaceStep(intptr_t allocated);

-  void Step(intptr_t allocated, CompletionAction action,
-            bool force_marking = false);
+  intptr_t Step(intptr_t allocated, CompletionAction action,
+                ForceMarkingAction marking = DO_NOT_FORCE_MARKING,
+                ForceCompletionAction completion = FORCE_COMPLETION);

   inline void RestartIfNotMarking() {
     if (state_ == COMPLETE) {
@@ -164,6 +173,10 @@
   void NotifyIncompleteScanOfObject(int unscanned_bytes) {
     unscanned_bytes_of_large_object_ = unscanned_bytes;
   }
+
+  void ClearIdleMarkingDelayCounter();
+
+  bool IsIdleMarkingDelayCounterLimitReached();

  private:
   int64_t SpaceLeftInOldSpace();
@@ -195,6 +208,8 @@

   INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

+  void IncrementIdleMarkingDelayCounter();
+
   Heap* heap_;

   State state_;
@@ -213,6 +228,7 @@
   intptr_t bytes_scanned_;
   intptr_t allocated_;
   intptr_t write_barriers_invoked_since_last_step_;
+  size_t idle_marking_delay_counter_;

   int no_marking_scope_depth_;

=======================================
--- /branches/bleeding_edge/src/heap/mark-compact.cc Thu Oct 9 14:01:59 2014 UTC
+++ /branches/bleeding_edge/src/heap/mark-compact.cc Mon Oct 13 16:27:55 2014 UTC
@@ -862,6 +862,8 @@
     Deoptimizer::DeoptimizeMarkedCode(isolate());
     have_code_to_deoptimize_ = false;
   }
+
+  heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
 }


@@ -1935,6 +1937,11 @@
   MarkBit mark_bit = Marking::MarkBitFrom(site);
   SetMark(site, mark_bit);
 }
+
+
+bool MarkCompactCollector::IsMarkingDequeEmpty() {
+  return marking_deque_.IsEmpty();
+}


 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
=======================================
--- /branches/bleeding_edge/src/heap/mark-compact.h Thu Oct 2 08:24:48 2014 UTC +++ /branches/bleeding_edge/src/heap/mark-compact.h Mon Oct 13 16:27:55 2014 UTC
@@ -657,6 +657,8 @@
   // to artificially keep AllocationSites alive for a time.
   void MarkAllocationSite(AllocationSite* site);

+  bool IsMarkingDequeEmpty();
+
  private:
   class SweeperTask;

=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Tue Sep 30 10:29:32 2014 UTC +++ /branches/bleeding_edge/test/cctest/test-heap.cc Mon Oct 13 16:27:55 2014 UTC
@@ -2181,6 +2181,48 @@
   CHECK_EQ(0, f->shared()->opt_count());
   CHECK_EQ(0, f->shared()->code()->profiler_ticks());
 }
+
+
+TEST(IdleNotificationFinishMarking) {
+  i::FLAG_allow_natives_syntax = true;
+  CcTest::InitializeVM();
+  SimulateFullSpace(CcTest::heap()->old_pointer_space());
+  IncrementalMarking* marking = CcTest::heap()->incremental_marking();
+  marking->Abort();
+  marking->Start();
+
+  CHECK_EQ(CcTest::heap()->gc_count(), 0);
+
+  // TODO(hpayer): We cannot write proper unit test right now for heap.
+  // The ideal test would call kMaxIdleMarkingDelayCounter to test the
+  // marking delay counter.
+
+  // Perform a huge incremental marking step but don't complete marking.
+  intptr_t bytes_processed = 0;
+  do {
+    bytes_processed =
+        marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+                      IncrementalMarking::FORCE_MARKING,
+                      IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+    CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
+  } while (bytes_processed);
+
+  // The next invocations of incremental marking are not going to complete
+  // marking
+  // since the completion threshold is not reached
+ for (size_t i = 0; i < IncrementalMarking::kMaxIdleMarkingDelayCounter - 2;
+       i++) {
+    marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+                  IncrementalMarking::FORCE_MARKING,
+                  IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+    CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
+  }
+
+  // The next idle notification has to finish incremental marking.
+  const int kShortIdleTimeInMs = 1;
+  CcTest::isolate()->IdleNotification(kShortIdleTimeInMs);
+  CHECK_EQ(CcTest::heap()->gc_count(), 1);
+}


 // Test that HAllocateObject will always return an object in new-space.
=======================================
--- /branches/bleeding_edge/test/unittests/heap/gc-idle-time-handler-unittest.cc Thu Oct 2 07:21:53 2014 UTC +++ /branches/bleeding_edge/test/unittests/heap/gc-idle-time-handler-unittest.cc Mon Oct 13 16:27:55 2014 UTC
@@ -112,7 +112,7 @@
 TEST_F(GCIdleTimeHandlerTest, DoScavengeEmptyNewSpace) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   int idle_time_in_ms = 16;
-  EXPECT_FALSE(GCIdleTimeHandler::DoScavenge(
+  EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
       idle_time_in_ms, heap_state.new_space_capacity,
heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
       heap_state.new_space_allocation_throughput_in_bytes_per_ms));
@@ -123,7 +123,7 @@
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.used_new_space_size = kNewSpaceCapacity;
   int idle_time_in_ms = 16;
-  EXPECT_TRUE(GCIdleTimeHandler::DoScavenge(
+  EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
       idle_time_in_ms, heap_state.new_space_capacity,
heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
       heap_state.new_space_allocation_throughput_in_bytes_per_ms));
@@ -135,7 +135,7 @@
   heap_state.used_new_space_size = kNewSpaceCapacity;
   heap_state.scavenge_speed_in_bytes_per_ms = 0;
   int idle_time_in_ms = 16;
-  EXPECT_FALSE(GCIdleTimeHandler::DoScavenge(
+  EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
       idle_time_in_ms, heap_state.new_space_capacity,
heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
       heap_state.new_space_allocation_throughput_in_bytes_per_ms));
@@ -147,7 +147,7 @@
   heap_state.used_new_space_size = kNewSpaceCapacity;
   heap_state.scavenge_speed_in_bytes_per_ms = 1 * KB;
   int idle_time_in_ms = 16;
-  EXPECT_FALSE(GCIdleTimeHandler::DoScavenge(
+  EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
       idle_time_in_ms, heap_state.new_space_capacity,
heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
       heap_state.new_space_allocation_throughput_in_bytes_per_ms));
@@ -159,11 +159,24 @@
   heap_state.used_new_space_size = kNewSpaceCapacity;
   heap_state.scavenge_speed_in_bytes_per_ms = kNewSpaceCapacity;
   int idle_time_in_ms = 16;
-  EXPECT_TRUE(GCIdleTimeHandler::DoScavenge(
+  EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
       idle_time_in_ms, heap_state.new_space_capacity,
heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
       heap_state.new_space_allocation_throughput_in_bytes_per_ms));
 }
+
+
+TEST_F(GCIdleTimeHandlerTest, ShouldDoMarkCompact) {
+  size_t idle_time_in_ms = 16;
+ EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_in_ms, 0, 0));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DontDoMarkCompact) {
+  size_t idle_time_in_ms = 1;
+  EXPECT_FALSE(GCIdleTimeHandler::ShouldDoMarkCompact(
+      idle_time_in_ms, kSizeOfObjects, kMarkingSpeed));
+}


 TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to