Revision: 11513
Author:   [email protected]
Date:     Fri May  4 02:36:46 2012
Log: Use correct size of promoted space for setting promotion and allocation limits.

Review URL: https://chromiumcodereview.appspot.com/10376008
http://code.google.com/p/v8/source/detail?r=11513

Modified:
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/incremental-marking-inl.h
 /branches/bleeding_edge/src/incremental-marking.cc
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/spaces.cc

=======================================
--- /branches/bleeding_edge/src/heap.cc Mon Apr 30 07:41:12 2012
+++ /branches/bleeding_edge/src/heap.cc Fri May  4 02:36:46 2012
@@ -805,7 +805,7 @@

     UpdateSurvivalRateTrend(start_new_space_size);

-    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();

     if (high_survival_rate_during_scavenges &&
         IsStableOrIncreasingSurvivalTrend()) {
@@ -5808,16 +5808,6 @@
     }
   }
 }
-
-
-intptr_t Heap::PromotedSpaceSize() {
-  return old_pointer_space_->Size()
-      + old_data_space_->Size()
-      + code_space_->Size()
-      + map_space_->Size()
-      + cell_space_->Size()
-      + lo_space_->Size();
-}


 intptr_t Heap::PromotedSpaceSizeOfObjects() {
=======================================
--- /branches/bleeding_edge/src/heap.h  Mon Apr 30 07:41:12 2012
+++ /branches/bleeding_edge/src/heap.h  Fri May  4 02:36:46 2012
@@ -1342,7 +1342,7 @@
PretenureFlag pretenure);

   inline intptr_t PromotedTotalSize() {
-    return PromotedSpaceSize() + PromotedExternalMemorySize();
+    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
   }

   // True if we have reached the allocation limit in the old generation that
@@ -1362,19 +1362,6 @@
   static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
   static const intptr_t kMinimumAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
-
-  // When we sweep lazily we initially guess that there is no garbage on the
-  // heap and set the limits for the next GC accordingly. As we sweep we find
-  // out that some of the pages contained garbage and we have to adjust
-  // downwards the size of the heap. This means the limits that control the
-  // timing of the next GC also need to be adjusted downwards.
-  void LowerOldGenLimits(intptr_t adjustment) {
-    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
-    old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
-    old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-  }

   intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
     const int divisor = FLAG_stress_compaction ? 10 : 3;
@@ -1468,7 +1455,7 @@
     intptr_t adjusted_allocation_limit =
         old_gen_allocation_limit_ - new_space_.Capacity() / 5;

-    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+    if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;

     return false;
   }
@@ -1506,7 +1493,6 @@
   GCTracer* tracer() { return tracer_; }

   // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSize();
   intptr_t PromotedSpaceSizeOfObjects();

   double total_regexp_code_generated() { return total_regexp_code_generated_; }
=======================================
--- /branches/bleeding_edge/src/incremental-marking-inl.h Wed Jan 18 01:21:07 2012
+++ /branches/bleeding_edge/src/incremental-marking-inl.h Fri May  4 02:36:46 2012
@@ -100,7 +100,7 @@
   int64_t old_bytes_rescanned = bytes_rescanned_;
   bytes_rescanned_ = old_bytes_rescanned + obj_size;
   if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
       // If we have queued twice the heap size for rescanning then we are
       // going around in circles, scanning the same objects again and again
       // as the program mutates the heap faster than we can incrementally
=======================================
--- /branches/bleeding_edge/src/incremental-marking.cc Tue Apr 17 03:37:41 2012
+++ /branches/bleeding_edge/src/incremental-marking.cc Fri May  4 02:36:46 2012
@@ -951,7 +951,7 @@


 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
 }

 } }  // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Fri May  4 02:16:38 2012
+++ /branches/bleeding_edge/src/mark-compact.cc Fri May  4 02:36:46 2012
@@ -3829,7 +3829,7 @@
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;

-  intptr_t old_space_size = heap()->PromotedSpaceSize();
+  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
   intptr_t space_left =
       Min(heap()->OldGenPromotionLimit(old_space_size),
           heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Mon Apr 30 08:02:43 2012
+++ /branches/bleeding_edge/src/spaces.cc       Fri May  4 02:36:46 2012
@@ -2294,8 +2294,6 @@
   } else {
     first_unswept_page_ = p;
   }
-
-  heap()->LowerOldGenLimits(freed_bytes);

   heap()->FreeQueuedChunks();

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to