Revision: 22003
Author:   [email protected]
Date:     Wed Jun 25 09:36:45 2014 UTC
Log:      Grow heap slower if GC freed many global handles.

BUG=263503
LOG=Y
[email protected]

Review URL: https://codereview.chromium.org/352763002
http://code.google.com/p/v8/source/detail?r=22003

Modified:
 /branches/bleeding_edge/src/global-handles.cc
 /branches/bleeding_edge/src/global-handles.h
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h

=======================================
--- /branches/bleeding_edge/src/global-handles.cc Tue Jun 3 08:12:43 2014 UTC
+++ /branches/bleeding_edge/src/global-handles.cc Wed Jun 25 09:36:45 2014 UTC
@@ -611,21 +611,21 @@
 }


-bool GlobalHandles::PostGarbageCollectionProcessing(
+int GlobalHandles::PostGarbageCollectionProcessing(
     GarbageCollector collector, GCTracer* tracer) {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
   ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count_;
-  bool next_gc_likely_to_collect_more = false;
+  int freed_nodes = 0;
   if (collector == SCAVENGER) {
     for (int i = 0; i < new_space_nodes_.length(); ++i) {
       Node* node = new_space_nodes_[i];
       ASSERT(node->is_in_new_space_list());
       if (!node->IsRetainer()) {
         // Free nodes do not have weak callbacks. Do not use them to compute
-        // the next_gc_likely_to_collect_more.
+        // the freed_nodes.
         continue;
       }
       // Skip dependent handles. Their weak callbacks might expect to be
@@ -641,29 +641,29 @@
           // PostGarbageCollection processing.  The current node might
           // have been deleted in that round, so we need to bail out (or
           // restart the processing).
-          return next_gc_likely_to_collect_more;
+          return freed_nodes;
         }
       }
       if (!node->IsRetainer()) {
-        next_gc_likely_to_collect_more = true;
+        freed_nodes++;
       }
     }
   } else {
     for (NodeIterator it(this); !it.done(); it.Advance()) {
       if (!it.node()->IsRetainer()) {
         // Free nodes do not have weak callbacks. Do not use them to compute
-        // the next_gc_likely_to_collect_more.
+        // the freed_nodes.
         continue;
       }
       it.node()->clear_partially_dependent();
       if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
         if (initial_post_gc_processing_count != post_gc_processing_count_) {
           // See the comment above.
-          return next_gc_likely_to_collect_more;
+          return freed_nodes;
         }
       }
       if (!it.node()->IsRetainer()) {
-        next_gc_likely_to_collect_more = true;
+        freed_nodes++;
       }
     }
   }
@@ -686,7 +686,7 @@
     }
   }
   new_space_nodes_.Rewind(last);
-  return next_gc_likely_to_collect_more;
+  return freed_nodes;
 }


=======================================
--- /branches/bleeding_edge/src/global-handles.h Tue Jun 3 08:12:43 2014 UTC +++ /branches/bleeding_edge/src/global-handles.h Wed Jun 25 09:36:45 2014 UTC
@@ -155,9 +155,9 @@
   static bool IsWeak(Object** location);

   // Process pending weak handles.
-  // Returns true if next major GC is likely to collect more garbage.
-  bool PostGarbageCollectionProcessing(GarbageCollector collector,
-                                       GCTracer* tracer);
+  // Returns the number of freed nodes.
+  int PostGarbageCollectionProcessing(GarbageCollector collector,
+                                      GCTracer* tracer);

   // Iterates over all strong handles.
   void IterateStrongRoots(ObjectVisitor* v);
=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Jun 25 09:31:25 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Wed Jun 25 09:36:45 2014 UTC
@@ -61,7 +61,6 @@
 // Will be 4 * reserved_semispace_size_ to ensure that young
 // generation can be aligned to its size.
       maximum_committed_(0),
-      old_space_growing_factor_(4),
       survived_since_last_expansion_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
@@ -90,7 +89,6 @@
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
-      size_of_old_gen_at_last_old_space_gc_(0),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
@@ -1056,7 +1054,7 @@
     GarbageCollector collector,
     GCTracer* tracer,
     const v8::GCCallbackFlags gc_callback_flags) {
-  bool next_gc_likely_to_collect_more = false;
+  int freed_global_handles = 0;

   if (collector != SCAVENGER) {
     PROFILE(isolate_, CodeMovingGCEvent());
@@ -1096,12 +1094,11 @@
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
     sweep_generation_++;
-
-    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
-
+    // Temporarily set the limit for the case when
+    // PostGarbageCollectionProcessing allocates and triggers GC. The real
+    // limit is set after PostGarbageCollectionProcessing.
     old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
@@ -1120,7 +1117,7 @@
   gc_post_processing_depth_++;
   { AllowHeapAllocation allow_allocation;
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-    next_gc_likely_to_collect_more =
+    freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(
             collector, tracer);
   }
@@ -1135,6 +1132,9 @@
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
+    old_generation_allocation_limit_ =
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+                                     freed_global_handles);
   }

   { GCCallbacksScope scope(this);
@@ -1153,7 +1153,7 @@
   }
 #endif

-  return next_gc_likely_to_collect_more;
+  return freed_global_handles > 0;
 }


@@ -4988,12 +4988,6 @@
           AllocationMemento::kSize));

   code_range_size_ = code_range_size * MB;
-
-  // We set the old generation growing factor to 2 to grow the heap slower on
-  // memory-constrained devices.
-  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
-    old_space_growing_factor_ = 2;
-  }

   configured_ = true;
   return true;
@@ -5061,6 +5055,47 @@
   return amount_of_external_allocated_memory_
       - amount_of_external_allocated_memory_at_last_global_gc_;
 }
+
+
+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                            int freed_global_handles) {
+  const int kMaxHandles = 1000;
+  const int kMinHandles = 100;
+  double min_factor = 1.1;
+  double max_factor = 4;
+  // We set the old generation growing factor to 2 to grow the heap slower on
+  // memory-constrained devices.
+  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+    max_factor = 2;
+  }
+  // If there are many freed global handles, then the next full GC will
+  // likely collect a lot of garbage. Choose the heap growing factor
+  // depending on freed global handles.
+  // TODO(ulan, hpayer): Take into account mutator utilization.
+  double factor;
+  if (freed_global_handles <= kMinHandles) {
+    factor = max_factor;
+  } else if (freed_global_handles >= kMaxHandles) {
+    factor = min_factor;
+  } else {
+    // Compute factor using linear interpolation between points
+    // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+    factor = max_factor -
+             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+             (kMaxHandles - kMinHandles);
+  }
+
+  if (FLAG_stress_compaction ||
+      mark_compact_collector()->reduce_memory_footprint_) {
+    factor = min_factor;
+  }
+
+  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit += new_space_.Capacity();
+  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+  return Min(limit, halfway_to_the_max);
+}


 void Heap::EnableInlineAllocation() {
=======================================
--- /branches/bleeding_edge/src/heap.h  Wed Jun 25 09:31:25 2014 UTC
+++ /branches/bleeding_edge/src/heap.h  Wed Jun 25 09:36:45 2014 UTC
@@ -1077,15 +1077,8 @@
   static const int kMaxExecutableSizeHugeMemoryDevice =
       700 * kPointerMultiplier;

-  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
-    intptr_t limit = FLAG_stress_compaction
-        ? old_gen_size + old_gen_size / 10
-        : old_gen_size * old_space_growing_factor_;
-    limit = Max(limit, kMinimumOldGenerationAllocationLimit);
-    limit += new_space_.Capacity();
-    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-    return Min(limit, halfway_to_the_max);
-  }
+  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                        int freed_global_handles);

   // Indicates whether inline bump-pointer allocation has been disabled.
   bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1514,11 +1507,6 @@
   intptr_t max_executable_size_;
   intptr_t maximum_committed_;

-  // The old space growing factor is used in the old space heap growing
-  // strategy. The new old space size is the current old space size times
-  // old_space_growing_factor_.
-  int old_space_growing_factor_;
-
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
   int survived_since_last_expansion_;
@@ -1598,9 +1586,6 @@
   // generation and on every allocation in large object space.
   intptr_t old_generation_allocation_limit_;

-  // Used to adjust the limits that control the timing of the next GC.
-  intptr_t size_of_old_gen_at_last_old_space_gc_;
-
   // Indicates that an allocation has failed in the old generation since the
   // last GC.
   bool old_gen_exhausted_;

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to