Reviewers: Hannes Payer,
Description:
Improve allocation accounting for incremental marking.

Add an assertion that allocated_bytes >= 0 in IncrementalMarking::Step and
then make it pass. We were not being diligent in maintaining
top_on_previous_step_, and as a result inaccurate, and even negative, values
of allocated_bytes were being reported to Step.
BUG=
[email protected]
Please review this at https://codereview.chromium.org/1274453002/
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Affected files (+27, -13 lines):
M src/heap/incremental-marking.cc
M src/heap/spaces.h
M src/heap/spaces.cc
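
The bookkeeping the description refers to can be sketched with a small,
self-contained C++ toy model (ToySpace, Allocate, Reset and Step below are
illustrative stand-ins, not V8's NewSpace or IncrementalMarking): a step is
fed the distance the allocation top has moved since the previous step, and
that distance is only guaranteed non-negative if the marker is re-based every
time the top moves backwards, e.g. when the to-space is reset.

#include <cassert>
#include <cstdint>

// Toy model of the allocation accounting (not V8 code).
class ToySpace {
 public:
  // Bump-pointer allocation: the top only moves forward here.
  void Allocate(std::intptr_t size) { top_ += size; }

  // Models NewSpace::ResetAllocationInfo(): the top jumps backwards, so the
  // bytes allocated so far are reported and the marker is re-based.
  void Reset() {
    Step();
    top_ = 0;
    top_on_previous_step_ = top_;  // without this, the next Step() would see
                                   // a negative delta and the assert would fire
  }

  // Models IncrementalMarking::Step(allocated_bytes, ...) together with the
  // caller's maintenance of top_on_previous_step_.
  void Step() {
    std::intptr_t allocated_bytes = top_ - top_on_previous_step_;
    assert(allocated_bytes >= 0);  // the DCHECK this CL adds
    top_on_previous_step_ = top_;
  }

 private:
  std::intptr_t top_ = 0;
  std::intptr_t top_on_previous_step_ = 0;
};

int main() {
  ToySpace space;
  space.Allocate(64);
  space.Step();   // reports 64
  space.Allocate(32);
  space.Reset();  // reports 32, then re-bases the marker to the new top
  space.Allocate(16);
  space.Step();   // reports 16 rather than a stale, negative delta
}
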
Index: src/heap/incremental-marking.cc
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 947c961af8993d6efc959d4d1c7765a1c2e352a1..bf69daea53e77868e51ceac26874544f6d4bf476 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -907,6 +907,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                   CompletionAction action,
                                   ForceMarkingAction marking,
                                   ForceCompletionAction completion) {
+  DCHECK(allocated_bytes >= 0);
+
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
       !FLAG_incremental_marking_steps ||
       (state_ != SWEEPING && state_ != MARKING)) {
Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index ba0ddb6e38f87d1ac4180061fca72865bb6c8719..28ef9470bb57409bdda12688d43394ae01bd76be 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1396,6 +1396,7 @@ void NewSpace::UpdateAllocationInfo() {
 void NewSpace::ResetAllocationInfo() {
+  Address old_top = allocation_info_.top();
   to_space_.Reset();
   UpdateAllocationInfo();
   pages_used_ = 0;
@@ -1404,6 +1405,12 @@ void NewSpace::ResetAllocationInfo() {
   while (it.has_next()) {
     Bitmap::Clear(it.next());
   }
+  if (top_on_previous_step_) {
+    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
+    top_on_previous_step_ = allocation_info_.top();
+  }
 }
@@ -1482,13 +1489,15 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
       return false;
     }
-    // Do a step for the bytes allocated on the last page.
-    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated,
-                                        IncrementalMarking::GC_VIA_STACK_GUARD);
-    old_top = allocation_info_.top();
-    top_on_previous_step_ = old_top;
+    if (top_on_previous_step_) {
+      // Do a step for the bytes allocated on the last page.
+      int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+      heap()->incremental_marking()->Step(
+          bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+      top_on_previous_step_ = allocation_info_.top();
+    }
+    old_top = allocation_info_.top();
     high = to_space_.page_high();
     filler_size = Heap::GetFillToAlign(old_top, alignment);
     aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -1500,12 +1509,14 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
     // Either the limit has been lowered because linear allocation was disabled
     // or because incremental marking wants to get a chance to do a step. Set
     // the new limit accordingly.
-    Address new_top = old_top + aligned_size_in_bytes;
-    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated,
-                                        IncrementalMarking::GC_VIA_STACK_GUARD);
+    if (top_on_previous_step_) {
+      Address new_top = old_top + aligned_size_in_bytes;
+      int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+      heap()->incremental_marking()->Step(
+          bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+      top_on_previous_step_ = new_top;
+    }
     UpdateInlineAllocationLimit(aligned_size_in_bytes);
-    top_on_previous_step_ = new_top;
   }
   return true;
 }
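
The call sites touched in spaces.cc now share one shape: only do a step when
top_on_previous_step_ is non-null (i.e. inline allocation stepping is actually
enabled), and re-base the marker immediately afterwards. Here is a standalone
sketch of that pattern, with illustrative names (StepState, ReportStep,
MaybeStep) rather than the real heap()->incremental_marking() plumbing:

#include <cstdint>
#include <cstdio>

using Address = std::uintptr_t;

struct StepState {
  Address top_on_previous_step = 0;  // 0 means stepping is disabled
};

// Stand-in for heap()->incremental_marking()->Step(bytes, GC_VIA_STACK_GUARD).
static void ReportStep(int bytes_allocated) {
  std::printf("step: %d bytes\n", bytes_allocated);
}

// The guarded-step pattern used at each call site.
static void MaybeStep(StepState* state, Address current_top) {
  if (state->top_on_previous_step) {
    int bytes_allocated =
        static_cast<int>(current_top - state->top_on_previous_step);
    ReportStep(bytes_allocated);
    state->top_on_previous_step = current_top;  // re-base for the next delta
  }
}

int main() {
  StepState state;
  MaybeStep(&state, 128);            // stepping disabled: nothing reported
  state.top_on_previous_step = 128;  // stepping enabled from here on
  MaybeStep(&state, 192);            // reports 64
}
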
Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index e9b23e58befee2a4c62f44382a9f629339cfdb83..493f8e6d6a96580b87575b69f15a265fa1995b87 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -2366,7 +2366,8 @@ class NewSpace : public Space {
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        inline_allocation_limit_step_(0) {}
+        inline_allocation_limit_step_(0),
+        top_on_previous_step_(0) {}
   // Sets up the new space using the given chunk.
   bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2548,7 +2549,7 @@ class NewSpace : public Space {
   void LowerInlineAllocationLimit(intptr_t step) {
     inline_allocation_limit_step_ = step;
     UpdateInlineAllocationLimit(0);
-    top_on_previous_step_ = allocation_info_.top();
+    top_on_previous_step_ = step ? allocation_info_.top() : 0;
   }
   // Get the extent of the inactive semispace (for use as a marking stack,
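
The spaces.h change gives top_on_previous_step_ explicit on/off semantics: a
zero step means no incremental-marking steps on allocation, so the marker is
cleared rather than left pinned to a top that may later move backwards. A
simplified stand-in of that reading (ToyNewSpace is not the real class):

#include <cstdint>

using Address = std::uintptr_t;

// Simplified stand-in for the relevant NewSpace members.
struct ToyNewSpace {
  std::intptr_t inline_allocation_limit_step = 0;
  Address top = 0;
  Address top_on_previous_step = 0;

  void LowerInlineAllocationLimit(std::intptr_t step) {
    inline_allocation_limit_step = step;
    // step == 0 disables stepping, so clear the marker; a non-zero step
    // enables it and bases the next delta on the current top.
    top_on_previous_step = step ? top : 0;
  }
};

int main() {
  ToyNewSpace space;
  space.top = 256;
  space.LowerInlineAllocationLimit(0);     // stepping off: marker cleared
  space.LowerInlineAllocationLimit(1024);  // stepping on: marker set to top
}
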