Revision: 9932
Author: [email protected]
Date: Wed Nov 9 05:48:43 2011
Log: Ensure that promotion queue does not overlap with objects
relocated to ToSpace.
[email protected]
Review URL: http://codereview.chromium.org/8477030
http://code.google.com/p/v8/source/detail?r=9932
Modified:
/branches/bleeding_edge/src/heap-inl.h
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/heap.h
/branches/bleeding_edge/src/spaces-inl.h
/branches/bleeding_edge/src/spaces.cc
/branches/bleeding_edge/src/spaces.h
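
Background for the diff below: during a scavenge the promotion queue lives in the unused tail of to-space and grows downward, while relocated objects are bump-allocated upward in the same semispace. Once both reach the same page, pushing new queue entries could overwrite freshly relocated objects. The change tracks the allocation limit on the queue's head page (SetNewLimit / guard_) and spills entries to a heap-allocated emergency stack before a collision can happen. The following is a minimal standalone sketch of that idea only; it is not V8 code, and all names in it (DownwardQueue, Insert, Remove, SetNewLimit, emergency_) are invented for illustration.

// Sketch: a queue growing downward from the end of a buffer while an
// allocator grows upward in the same buffer; entries spill to a side stack
// before the two regions can overlap.
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

class DownwardQueue {
 public:
  DownwardQueue(intptr_t* buffer_start, intptr_t* buffer_end)
      : limit_(buffer_start), front_(buffer_end), rear_(buffer_end) {}

  // The allocator calls this whenever its top advances into the queue's
  // region (compare PromotionQueue::SetNewLimit in the diff).
  void SetNewLimit(intptr_t* new_limit) { limit_ = new_limit; }

  void Insert(intptr_t target, intptr_t size) {
    // Once spilling has started, or if the next entry would dip below the
    // allocator's top, keep the entry on the side stack instead of
    // overwriting relocated data.
    if (!emergency_.empty() || rear_ - 2 < limit_) {
      emergency_.push_back(std::make_pair(target, size));
      return;
    }
    *(--rear_) = target;
    *(--rear_) = size;
  }

  bool IsEmpty() const { return front_ == rear_ && emergency_.empty(); }

  // FIFO for the in-buffer part, LIFO for spilled entries; the order between
  // the two does not matter for a scavenge-style worklist.
  std::pair<intptr_t, intptr_t> Remove() {
    assert(!IsEmpty());
    if (front_ == rear_) {
      std::pair<intptr_t, intptr_t> e = emergency_.back();
      emergency_.pop_back();
      return e;
    }
    intptr_t target = *(--front_);
    intptr_t size = *(--front_);
    return std::make_pair(target, size);
  }

 private:
  intptr_t* limit_;  // Highest address the upward-growing allocator has used.
  intptr_t* front_;  // Removal cursor, starts at the buffer end.
  intptr_t* rear_;   // Insertion cursor, starts at the buffer end.
  std::vector<std::pair<intptr_t, intptr_t> > emergency_;
};

int main() {
  intptr_t buffer[8];
  DownwardQueue q(buffer, buffer + 8);
  q.Insert(0x1000, 16);
  q.Insert(0x2000, 24);
  q.SetNewLimit(buffer + 4);  // Allocator has advanced up to the queue's rear.
  q.Insert(0x3000, 32);       // Spills to the emergency stack.
  while (!q.IsEmpty()) q.Remove();
  return 0;
}
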
=======================================
--- /branches/bleeding_edge/src/heap-inl.h Thu Nov 3 07:17:05 2011
+++ /branches/bleeding_edge/src/heap-inl.h Wed Nov 9 05:48:43 2011
@@ -40,12 +40,30 @@
namespace internal {
void PromotionQueue::insert(HeapObject* target, int size) {
+ if (emergency_stack_ != NULL) {
+ emergency_stack_->Add(Entry(target, size));
+ return;
+ }
+
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
rear_ =
reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
- }
+ ActivateGuardIfOnTheSamePage();
+ }
+
+ if (guard_) {
+ ASSERT(GetHeadPage() ==
+ Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
+
+ if ((rear_ - 2) < limit_) {
+ RelocateQueueHead();
+ emergency_stack_->Add(Entry(target, size));
+ return;
+ }
+ }
+
*(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size;
// Assert no overflow into live objects.
@@ -54,6 +72,13 @@
reinterpret_cast<Address>(rear_));
#endif
}
+
+
+void PromotionQueue::ActivateGuardIfOnTheSamePage() {
+ guard_ = guard_ ||
+ heap_->new_space()->active_space()->current_page()->address() ==
+ GetHeadPage()->address();
+}
int Heap::MaxObjectSizeInPagedSpace() {
=======================================
--- /branches/bleeding_edge/src/heap.cc Tue Nov 8 04:42:02 2011
+++ /branches/bleeding_edge/src/heap.cc Wed Nov 9 05:48:43 2011
@@ -143,6 +143,7 @@
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
+ promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
@@ -986,6 +987,41 @@
UNREACHABLE();
}
}
+
+
+void PromotionQueue::Initialize() {
+ // Assumes that a NewSpacePage exactly fits a number of promotion queue
+ // entries (where each is a pair of intptr_t). This allows us to simplify
+ // the test for when to switch pages.
+ ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+ == 0);
+ limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+ front_ = rear_ =
+ reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+ emergency_stack_ = NULL;
+ guard_ = false;
+}
+
+
+void PromotionQueue::RelocateQueueHead() {
+ ASSERT(emergency_stack_ == NULL);
+
+ Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+ intptr_t* head_start = rear_;
+ intptr_t* head_end =
+ Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
+
+ int entries_count = (head_end - head_start) / kEntrySizeInWords;
+
+ emergency_stack_ = new List<Entry>(2 * entries_count);
+
+ while (head_start != head_end) {
+ int size = *(head_start++);
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+ emergency_stack_->Add(Entry(obj, size));
+ }
+ rear_ = head_end;
+}
void Heap::Scavenge() {
@@ -1036,7 +1072,7 @@
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceStart();
- promotion_queue_.Initialize(new_space_.ToSpaceEnd());
+ promotion_queue_.Initialize();
#ifdef DEBUG
store_buffer()->Clean();
@@ -1076,10 +1112,11 @@
&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
+ promotion_queue_.Destroy();
+
LiveObjectList::UpdateReferencesForScavengeGC();
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
incremental_marking()->UpdateMarkingDequeAfterScavenge();
@@ -1486,6 +1523,7 @@
}
}
MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+ heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
*slot = MigrateObject(heap, object, HeapObject::cast(result),
object_size);
=======================================
--- /branches/bleeding_edge/src/heap.h Mon Nov 7 02:35:24 2011
+++ /branches/bleeding_edge/src/heap.h Wed Nov 9 05:48:43 2011
@@ -282,24 +282,58 @@
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
public:
- PromotionQueue() : front_(NULL), rear_(NULL) { }
-
- void Initialize(Address start_address) {
- // Assumes that a NewSpacePage exactly fits a number of promotion queue
- // entries (where each is a pair of intptr_t). This allows us to simplify
- // the test for when to switch pages.
- ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
- == 0);
- ASSERT(NewSpacePage::IsAtEnd(start_address));
- front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
+ PromotionQueue(Heap* heap)
+ : front_(NULL),
+ rear_(NULL),
+ limit_(NULL),
+ emergency_stack_(0),
+ heap_(heap) { }
+
+ void Initialize();
+
+ void Destroy() {
+ ASSERT(is_empty());
+ delete emergency_stack_;
+ emergency_stack_ = NULL;
}
- bool is_empty() { return front_ == rear_; }
+ inline void ActivateGuardIfOnTheSamePage();
+
+ Page* GetHeadPage() {
+ return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+ }
+
+ void SetNewLimit(Address limit) {
+ if (!guard_) {
+ return;
+ }
+
+ ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
+ limit_ = reinterpret_cast<intptr_t*>(limit);
+
+ if (limit_ <= rear_) {
+ return;
+ }
+
+ RelocateQueueHead();
+ }
+
+ bool is_empty() {
+ return (front_ == rear_) &&
+ (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+ }
inline void insert(HeapObject* target, int size);
void remove(HeapObject** target, int* size) {
ASSERT(!is_empty());
+ if (front_ == rear_) {
+ Entry e = emergency_stack_->RemoveLast();
+ *target = e.obj_;
+ *size = e.size_;
+ return;
+ }
+
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
NewSpacePage* front_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
@@ -318,6 +352,23 @@
// The front of the queue is higher in the memory page chain than the rear.
intptr_t* front_;
intptr_t* rear_;
+ intptr_t* limit_;
+
+ bool guard_;
+
+ static const int kEntrySizeInWords = 2;
+
+ struct Entry {
+ Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
+
+ HeapObject* obj_;
+ int size_;
+ };
+ List<Entry>* emergency_stack_;
+
+ Heap* heap_;
+
+ void RelocateQueueHead();
DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
=======================================
--- /branches/bleeding_edge/src/spaces-inl.h Thu Oct 13 04:50:00 2011
+++ /branches/bleeding_edge/src/spaces-inl.h Wed Nov 9 05:48:43 2011
@@ -293,30 +293,12 @@
// -----------------------------------------------------------------------------
// NewSpace
-MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes) {
+
+
+MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top;
if (allocation_info_.limit - old_top < size_in_bytes) {
- Address new_top = old_top + size_in_bytes;
- Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
- // Incremental marking has lowered the limit to get a
- // chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
- high);
- int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
- top_on_previous_step_ = new_top;
- return AllocateRawInternal(size_in_bytes);
- } else if (AddFreshPage()) {
- // Switched to new page. Try allocating again.
- int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
- top_on_previous_step_ = to_space_.page_low();
- return AllocateRawInternal(size_in_bytes);
- } else {
- return Failure::RetryAfterGC();
- }
+ return SlowAllocateRaw(size_in_bytes);
}
Object* obj = HeapObject::FromAddress(allocation_info_.top);
=======================================
--- /branches/bleeding_edge/src/spaces.cc Tue Oct 25 06:27:46 2011
+++ /branches/bleeding_edge/src/spaces.cc Wed Nov 9 05:48:43 2011
@@ -1012,14 +1012,47 @@
// Failed to get a new page in to-space.
return false;
}
+
// Clear remainder of current page.
- int remaining_in_page =
- static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
+ Address limit = NewSpacePage::FromLimit(top)->body_limit();
+ if (heap()->gc_state() == Heap::SCAVENGE) {
+ heap()->promotion_queue()->SetNewLimit(limit);
+ heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
+ }
+
+ int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page);
pages_used_++;
UpdateAllocationInfo();
+
return true;
}
+
+
+MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
+ Address old_top = allocation_info_.top;
+ Address new_top = old_top + size_in_bytes;
+ Address high = to_space_.page_high();
+ if (allocation_info_.limit < high) {
+ // Incremental marking has lowered the limit to get a
+ // chance to do a step.
+ allocation_info_.limit = Min(
+ allocation_info_.limit + inline_allocation_limit_step_,
+ high);
+ int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+ heap()->incremental_marking()->Step(bytes_allocated);
+ top_on_previous_step_ = new_top;
+ return AllocateRaw(size_in_bytes);
+ } else if (AddFreshPage()) {
+ // Switched to new page. Try allocating again.
+ int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+ heap()->incremental_marking()->Step(bytes_allocated);
+ top_on_previous_step_ = to_space_.page_low();
+ return AllocateRaw(size_in_bytes);
+ } else {
+ return Failure::RetryAfterGC();
+ }
+}
#ifdef DEBUG
@@ -1904,7 +1937,7 @@
// marking. The most reliable way to ensure that there is linear space is
// to do the allocation, then rewind the limit.
ASSERT(bytes <= InitialCapacity());
- MaybeObject* maybe = AllocateRawInternal(bytes);
+ MaybeObject* maybe = AllocateRaw(bytes);
Object* object = NULL;
if (!maybe->ToObject(&object)) return false;
HeapObject* allocation = HeapObject::cast(object);
=======================================
--- /branches/bleeding_edge/src/spaces.h Tue Oct 25 06:34:52 2011
+++ /branches/bleeding_edge/src/spaces.h Wed Nov 9 05:48:43 2011
@@ -2140,9 +2140,7 @@
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
- MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
- return AllocateRawInternal(size_in_bytes);
- }
+ MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
@@ -2268,8 +2266,7 @@
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- // Implementation of AllocateRaw.
- MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes);
+ MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
friend class SemiSpaceIterator;
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev