Reviewers: Hannes Payer,
Description:
Fix logic for doing incremental marking steps on tenured allocation.
[email protected]
BUG=
Please review this at https://codereview.chromium.org/1040233003/
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Affected files (+45, -25 lines):
M src/heap/incremental-marking.h
M src/heap/incremental-marking.cc
M src/heap/spaces.h
M src/heap/spaces.cc
Index: src/heap/incremental-marking.cc
diff --git a/src/heap/incremental-marking.cc
b/src/heap/incremental-marking.cc
index 749fa5ef54934d7c2ed3945d8381b4cfd3c2cdcb..79e5590a6c0304dad076fb674cad3f6d0c403137 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -838,7 +838,7 @@ void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
// in principle possible.
Start(PREVENT_COMPACTION);
} else {
- Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+ Step(allocated * kOldSpaceAllocationMarkingFactor, GC_VIA_STACK_GUARD);
}
}
@@ -914,8 +914,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
ForceMarkingAction marking,
ForceCompletionAction completion) {
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
- !FLAG_incremental_marking_steps ||
- (state_ != SWEEPING && state_ != MARKING)) {
+ !CanDoSteps()) {
return 0;
}
Index: src/heap/incremental-marking.h
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 7d41cfef4156dc73f46abb3180f95baf9f625a3b..ecb063c95e535def1d88e25a44285fa190bef04d 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -50,7 +50,10 @@ class IncrementalMarking {
INLINE(bool IsMarking()) { return state() >= MARKING; }
- inline bool IsMarkingIncomplete() { return state() == MARKING; }
+ inline bool CanDoSteps() {
+ return FLAG_incremental_marking_steps &&
+ (state() == MARKING || state() == SWEEPING);
+ }
inline bool IsComplete() { return state() == COMPLETE; }
@@ -104,6 +107,8 @@ class IncrementalMarking {
// But if we are promoting a lot of data we need to mark faster to keep up
// with the data that is entering the old space through promotion.
static const intptr_t kFastMarking = 3;
+ static const intptr_t kOldSpaceAllocationMarkingFactor =
+ kFastMarking / kInitialMarkingSpeed;
// After this many steps we increase the marking/allocating factor.
static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
// This is how much we increase the marking/allocating factor by.
Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index d372083a4a63aee1e48dbc4a18950a9ffa79e39e..06fde3c7b09e2b7acb91f82500b22c1fbb66e193 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2358,6 +2358,15 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
}
+void PagedSpace::SetTopAndLimit(Address top, Address limit) {
+ DCHECK(top == limit ||
+ Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(top);
+ allocation_info_.set_limit(limit);
+}
+
+
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2375,9 +2384,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
- owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
- old_linear_size);
-
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
@@ -2400,7 +2406,11 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
- const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+ // An old-space step will mark more data per byte allocated, because old space
+ // allocation is more serious. We don't want the pause to be bigger, so we
+ // do marking after a smaller amount of allocation.
+ const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+                        IncrementalMarking::kOldSpaceAllocationMarkingFactor;
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
@@ -2412,9 +2422,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
owner_->Free(new_node->address() + size_in_bytes, bytes_left);
DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
} else if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
+ owner_->heap()->incremental_marking()->CanDoSteps()) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
@@ -2422,15 +2432,26 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
new_node_size - size_in_bytes - linear_size);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes +
linear_size);
- } else if (bytes_left > 0) {
- // Normally we give the rest of the node to the allocator as its new
- // linear allocation area.
- owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes +
+ linear_size);
} else {
- // TODO(gc) Try not freeing linear allocation region when bytes_left
- // are zero.
- owner_->SetTopAndLimit(NULL, NULL);
+ if (owner_->heap()->incremental_marking()->CanDoSteps()) {
+ owner_->heap()->incremental_marking()->OldSpaceStep(new_node_size);
+ } else if (new_node_size > kThreshold) {
+ // When we give big chunks to the old space allocator we do a little
+ // step in case the incremental marker wants to start.
+ owner_->heap()->incremental_marking()->OldSpaceStep(kThreshold);
+ }
+ if (bytes_left > 0) {
+ // Normally we give the rest of the node to the allocator as its new
+ // linear allocation area.
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
+ } else {
+ // TODO(gc) Try not freeing linear allocation region when bytes_left
+ // are zero.
+ owner_->SetTopAndLimit(NULL, NULL);
+ }
}
return new_node;
@@ -3147,5 +3168,6 @@ void Page::Print() {
}
#endif // DEBUG
+
}
} // namespace v8::internal
Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index
0272d59944149009fe3c01a6941052520772a1ec..2f90dbc9d4412bc0695a529b4760fd8e0ad49e3f
100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -1779,13 +1779,7 @@ class PagedSpace : public Space {
void ResetFreeList() { free_list_.Reset(); }
// Set space allocation info.
- void SetTopAndLimit(Address top, Address limit) {
- DCHECK(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(top);
- allocation_info_.set_limit(limit);
- }
+ void SetTopAndLimit(Address top, Address limit);
// Empty space allocation info, returning unused area to free list.
void EmptyAllocationInfo() {
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.