Author: [EMAIL PROTECTED]
Date: Wed Oct 29 02:27:59 2008
New Revision: 632

Modified:
    branches/bleeding_edge/src/heap.cc
    branches/bleeding_edge/src/heap.h
    branches/bleeding_edge/src/spaces.cc

Log:
Check the growth of the old generation before expanding the paged
spaces (during normal allocation) and when allocating large objects.
If the promotion limit is reached, fail allocation to trigger a
garbage collection.
Review URL: http://codereview.chromium.org/8657

Modified: branches/bleeding_edge/src/heap.cc
==============================================================================
--- branches/bleeding_edge/src/heap.cc  (original)
+++ branches/bleeding_edge/src/heap.cc  Wed Oct 29 02:27:59 2008
@@ -64,7 +64,12 @@
  MapSpace* Heap::map_space_ = NULL;
  LargeObjectSpace* Heap::lo_space_ = NULL;

-int Heap::promoted_space_limit_ = 0;
+static const int kMinimumPromotionLimit = 2*MB;
+static const int kMinimumAllocationLimit = 8*MB;
+
+int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
+int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
+
  int Heap::old_gen_exhausted_ = false;

  int Heap::amount_of_external_allocated_memory_ = 0;
@@ -138,8 +143,7 @@
    }

    // Is enough data promoted to justify a global GC?
-  if (PromotedSpaceSize() + PromotedExternalMemorySize()
-      > promoted_space_limit_) {
+  if (OldGenerationPromotionLimitReached()) {
      Counters::gc_compactor_caused_by_promoted_data.Increment();
      return MARK_COMPACTOR;
    }
@@ -360,9 +364,11 @@
    if (collector == MARK_COMPACTOR) {
      MarkCompact(tracer);

-    int promoted_space_size = PromotedSpaceSize();
-    promoted_space_limit_ =
-        promoted_space_size + Max(2 * MB, (promoted_space_size/100) * 35);
+    int old_gen_size = PromotedSpaceSize();
+    old_gen_promotion_limit_ =
+        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+    old_gen_allocation_limit_ =
+        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3);
      old_gen_exhausted_ = false;

      // If we have used the mark-compact collector to collect the new
@@ -2291,7 +2297,8 @@
    PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
           title, gc_count_);
    PrintF("mark-compact GC : %d\n", mc_count_);
-  PrintF("promoted_space_limit_ %d\n", promoted_space_limit_);
+  PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
+  PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);

    PrintF("\n");
    PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());

Modified: branches/bleeding_edge/src/heap.h
==============================================================================
--- branches/bleeding_edge/src/heap.h   (original)
+++ branches/bleeding_edge/src/heap.h   Wed Oct 29 02:27:59 2008
@@ -744,6 +744,20 @@
    // Allocate unitialized fixed array (pretenure == NON_TENURE).
    static Object* AllocateRawFixedArray(int length);

+  // True if we have reached the allocation limit in the old generation that
+  // should force the next GC (caused normally) to be a full one.
+  static bool OldGenerationPromotionLimitReached() {
+    return (PromotedSpaceSize() + PromotedExternalMemorySize())
+           > old_gen_promotion_limit_;
+  }
+
+  // True if we have reached the allocation limit in the old generation that
+  // should artificially cause a GC right now.
+  static bool OldGenerationAllocationLimitReached() {
+    return (PromotedSpaceSize() + PromotedExternalMemorySize())
+           > old_gen_allocation_limit_;
+  }
+
   private:
    static int semispace_size_;
    static int initial_semispace_size_;
@@ -785,8 +799,15 @@
    static bool disallow_allocation_failure_;
  #endif  // DEBUG

-  // Promotion limit that trigger a global GC
-  static int promoted_space_limit_;
+  // Limit that triggers a global GC on the next (normally caused) GC.   
This
+  // is checked when we have already decided to do a GC to help determine
+  // which collector to invoke.
+  static int old_gen_promotion_limit_;
+
+  // Limit that triggers a global GC as soon as is reasonable.  This is
+  // checked before expanding a paged space in the old generation and on
+  // every allocation in large object space.
+  static int old_gen_allocation_limit_;

    // The amount of external memory registered through the API kept alive
    // by global handles

Modified: branches/bleeding_edge/src/spaces.cc
==============================================================================
--- branches/bleeding_edge/src/spaces.cc        (original)
+++ branches/bleeding_edge/src/spaces.cc        Wed Oct 29 02:27:59 2008
@@ -1530,8 +1530,14 @@
      return HeapObject::cast(result);
    }

-  // Free list allocation failed and there is no next page.  Try to expand
-  // the space and allocate in the new next page.
+  // Free list allocation failed and there is no next page.  Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (Heap::OldGenerationAllocationLimitReached()) {
+    return NULL;
+  }
+
+  // Try to expand the space and allocate in the new next page.
    ASSERT(!current_page->next_page()->is_valid());
    if (Expand(current_page)) {
      return AllocateInNextPage(current_page, size_in_bytes);
@@ -2009,8 +2015,14 @@
      }
    }

-  // Free list allocation failed and there is no next page.  Try to expand
-  // the space and allocate in the new next page.
+  // Free list allocation failed and there is no next page.  Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (Heap::OldGenerationAllocationLimitReached()) {
+    return NULL;
+  }
+
+  // Try to expand the space and allocate in the new next page.
    ASSERT(!current_page->next_page()->is_valid());
    if (Expand(current_page)) {
      return AllocateInNextPage(current_page, size_in_bytes);
@@ -2236,6 +2248,13 @@
                                                int object_size,
                                                Executability executable) {
    ASSERT(0 < object_size && object_size <= requested_size);
+
+  // Check if we want to force a GC before growing the old space further.
+  // If so, fail the allocation.
+  if (Heap::OldGenerationAllocationLimitReached()) {
+    return Failure::RetryAfterGC(requested_size, identity());
+  }
+
    size_t chunk_size;
    LargeObjectChunk* chunk =
        LargeObjectChunk::New(requested_size, &chunk_size, executable);

--~--~---------~--~----~------------~-------~--~----~
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
-~----------~----~----~----~------~----~------~--~---

Reply via email to