Reviewers: Michael Starzinger,
Message:
Please take a look.
Description:
Use correct size of promoted space for setting promotion and allocation
limits.
Please review this at https://chromiumcodereview.appspot.com/10376008/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files:
M src/heap.h
M src/heap.cc
M src/incremental-marking-inl.h
M src/incremental-marking.cc
M src/mark-compact.cc
M src/spaces.cc
Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index e2e0e9eff3d47c89a5e7158d4dd62bf8a32fe80e..ad28c1ef442b313a16db572a44ed7e46b5d9223a 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -805,7 +805,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+ size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
@@ -5810,16 +5810,6 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
-intptr_t Heap::PromotedSpaceSize() {
- return old_pointer_space_->Size()
- + old_data_space_->Size()
- + code_space_->Size()
- + map_space_->Size()
- + cell_space_->Size()
- + lo_space_->Size();
-}
-
-
intptr_t Heap::PromotedSpaceSizeOfObjects() {
return old_pointer_space_->SizeOfObjects()
+ old_data_space_->SizeOfObjects()
Index: src/heap.h
diff --git a/src/heap.h b/src/heap.h
index b91416fb08f7fb4b1dbd239d14ea485c05b0cf61..37380156827e3bfd4d3c00222604d23300c90319 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1342,7 +1342,7 @@ class Heap {
PretenureFlag pretenure);
inline intptr_t PromotedTotalSize() {
- return PromotedSpaceSize() + PromotedExternalMemorySize();
+ return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
// True if we have reached the allocation limit in the old generation that
@@ -1363,18 +1363,6 @@ class Heap {
static const intptr_t kMinimumAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
- // When we sweep lazily we initially guess that there is no garbage on the
- // heap and set the limits for the next GC accordingly. As we sweep we find
- // out that some of the pages contained garbage and we have to adjust
- // downwards the size of the heap. This means the limits that control the
- // timing of the next GC also need to be adjusted downwards.
- void LowerOldGenLimits(intptr_t adjustment) {
- size_of_old_gen_at_last_old_space_gc_ -= adjustment;
- old_gen_promotion_limit_ =
- OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
- old_gen_allocation_limit_ =
- OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
- }
intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 10 : 3;
@@ -1468,7 +1456,7 @@ class Heap {
intptr_t adjusted_allocation_limit =
old_gen_allocation_limit_ - new_space_.Capacity() / 5;
- if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+ if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
return false;
}
@@ -1506,7 +1494,6 @@ class Heap {
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
- intptr_t PromotedSpaceSize();
intptr_t PromotedSpaceSizeOfObjects();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
Index: src/incremental-marking-inl.h
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
index 3e3d6c43fda9c1c97c7a30238d0add88bd8c1224..5ce003f31d4c0d27e81a66df22ad269f11810ca4 100644
--- a/src/incremental-marking-inl.h
+++ b/src/incremental-marking-inl.h
@@ -100,7 +100,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
int64_t old_bytes_rescanned = bytes_rescanned_;
bytes_rescanned_ = old_bytes_rescanned + obj_size;
if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
- if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+ if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
// If we have queued twice the heap size for rescanning then we are
// going around in circles, scanning the same objects again and again
// as the program mutates the heap faster than we can incrementally
Index: src/incremental-marking.cc
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 2413b67803f090557280955d5196c590287f6007..5b58c9d7877bb8d211da7a4375fd0d1569b55c79 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -951,7 +951,7 @@ void IncrementalMarking::ResetStepCounters() {
int64_t IncrementalMarking::SpaceLeftInOldSpace() {
- return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+ return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}
} } // namespace v8::internal
Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 9818da757f9badeee91041cab0c44652e246119e..aeb65b5324b699a294399e0cce6e01789ced3836 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -3835,7 +3835,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
bool lazy_sweeping_active = false;
bool unused_page_present = false;
- intptr_t old_space_size = heap()->PromotedSpaceSize();
+ intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
intptr_t space_left =
Min(heap()->OldGenPromotionLimit(old_space_size),
heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index a5d61ebb59c6f09ae7f4f523c7d801224f7ed7fd..a0c8f2cba168c6bc06bc0fe04879e0e4cd8d8c54 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2295,8 +2295,6 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
first_unswept_page_ = p;
}
- heap()->LowerOldGenLimits(freed_bytes);
-
heap()->FreeQueuedChunks();
return IsSweepingComplete();
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev