Author: [email protected]
Date: Tue May 12 06:02:15 2009
New Revision: 1915
Modified:
branches/bleeding_edge/src/heap.cc
branches/bleeding_edge/src/spaces-inl.h
branches/bleeding_edge/src/spaces.cc
branches/bleeding_edge/src/spaces.h
Log:
Revert r1900, r1897, and r1895, which are all GC changes. The changes
to the page iterator lead to occasional crashes in tests.
Review URL: http://codereview.chromium.org/113262
Modified: branches/bleeding_edge/src/heap.cc
==============================================================================
--- branches/bleeding_edge/src/heap.cc (original)
+++ branches/bleeding_edge/src/heap.cc Tue May 12 06:02:15 2009
@@ -538,7 +538,7 @@
// Shared state read by the scavenge collector and set by ScavengeObject.
-static Address promoted_rear = NULL;
+static Address promoted_top = NULL;
#ifdef DEBUG
@@ -554,34 +554,24 @@
}
}
};
-
-
-static void VerifyNonPointerSpacePointers() {
- // Verify that there are no pointers to new space in spaces where we
- // do not expect them.
- VerifyNonPointerSpacePointersVisitor v;
- HeapObjectIterator code_it(Heap::code_space());
- while (code_it.has_next()) {
- HeapObject* object = code_it.next();
- if (object->IsCode()) {
- Code::cast(object)->ConvertICTargetsFromAddressToObject();
- object->Iterate(&v);
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
- } else {
- // If we find non-code objects in code space (e.g., free list
- // nodes) we want to verify them as well.
- object->Iterate(&v);
- }
- }
-
- HeapObjectIterator data_it(Heap::old_data_space());
- while (data_it.has_next()) data_it.next()->Iterate(&v);
-}
#endif
void Heap::Scavenge() {
#ifdef DEBUG
- if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
+ if (FLAG_enable_slow_asserts) {
+ VerifyNonPointerSpacePointersVisitor v;
+ HeapObjectIterator it(code_space_);
+ while (it.has_next()) {
+ HeapObject* object = it.next();
+ if (object->IsCode()) {
+ Code::cast(object)->ConvertICTargetsFromAddressToObject();
+ }
+ object->Iterate(&v);
+ if (object->IsCode()) {
+ Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+ }
+ }
+ }
#endif
gc_state_ = SCAVENGE;
@@ -606,70 +596,72 @@
new_space_.Flip();
new_space_.ResetAllocationInfo();
- // We need to sweep newly copied objects which can be either in the
- // to space or promoted to the old generation. For to-space
- // objects, we treat the bottom of the to space as a queue. Newly
- // copied and unswept objects lie between a 'front' mark and the
- // allocation pointer.
+ // We need to sweep newly copied objects which can be in either the to space
+ // or the old space. For to space objects, we use a mark. Newly copied
+ // objects lie between the mark and the allocation top. For objects
+ // promoted to old space, we write their addresses downward from the top of
+ // the new space. Sweeping newly promoted objects requires an allocation
+ // pointer and a mark. Note that the allocation pointer 'top' actually
+ // moves downward from the high address in the to space.
//
- // Promoted objects can go into various old-generation spaces, and
- // can be allocated internally in the spaces (from the free list).
- // We treat the top of the to space as a queue of addresses of
- // promoted objects. The addresses of newly promoted and unswept
- // objects lie between a 'front' mark and a 'rear' mark that is
- // updated as a side effect of promoting an object.
- //
- // There is guaranteed to be enough room at the top of the to space
- // for the addresses of promoted objects: every object promoted
- // frees up its size in bytes from the top of the new space, and
- // objects are at least one pointer in size.
- Address new_space_front = new_space_.ToSpaceLow();
- Address promoted_front = new_space_.ToSpaceHigh();
- promoted_rear = new_space_.ToSpaceHigh();
+ // There is guaranteed to be enough room at the top of the to space for the
+ // addresses of promoted objects: every object promoted frees up its size in
+ // bytes from the top of the new space, and objects are at least one pointer
+ // in size. Using the new space to record promoted addresses makes the
+ // scavenge collector agnostic to the allocation strategy (eg, linear or
+ // free-list) used in old space.
+ Address new_mark = new_space_.ToSpaceLow();
+ Address promoted_mark = new_space_.ToSpaceHigh();
+ promoted_top = new_space_.ToSpaceHigh();
ScavengeVisitor scavenge_visitor;
// Copy roots.
IterateRoots(&scavenge_visitor);
- // Copy objects reachable from weak pointers.
- GlobalHandles::IterateWeakRoots(&scavenge_visitor);
-
- // Copy objects reachable from the old generation. By definition,
- // there are no intergenerational pointers in code or data spaces.
+ // Copy objects reachable from the old generation. By definition, there
+ // are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
- do {
- ASSERT(new_space_front <= new_space_.top());
- ASSERT(promoted_front >= promoted_rear);
-
- // The addresses new_space_front and new_space_.top() define a
- // queue of unprocessed copied objects. Process them until the
- // queue is empty.
- while (new_space_front < new_space_.top()) {
- HeapObject* object = HeapObject::FromAddress(new_space_front);
- object->Iterate(&scavenge_visitor);
- new_space_front += object->Size();
- }
+ bool has_processed_weak_pointers = false;
+
+ while (true) {
+ ASSERT(new_mark <= new_space_.top());
+ ASSERT(promoted_mark >= promoted_top);
+
+ // Copy objects reachable from newly copied objects.
+ while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
+ // Sweep newly copied objects in the to space. The allocation pointer
+ // can change during sweeping.
+ Address previous_top = new_space_.top();
+ SemiSpaceIterator new_it(new_space(), new_mark);
+ while (new_it.has_next()) {
+ new_it.next()->Iterate(&scavenge_visitor);
+ }
+ new_mark = previous_top;
- // The addresses promoted_front and promoted_rear define a queue
- // of unprocessed addresses of promoted objects. Process them
- // until the queue is empty.
- while (promoted_front > promoted_rear) {
- promoted_front -= kPointerSize;
- HeapObject* object =
- HeapObject::cast(Memory::Object_at(promoted_front));
- object->Iterate(&scavenge_visitor);
- UpdateRSet(object);
+ // Sweep newly copied objects in the old space. The promotion 'top'
+ // pointer could change during sweeping.
+ previous_top = promoted_top;
+ for (Address current = promoted_mark - kPointerSize;
+ current >= previous_top;
+ current -= kPointerSize) {
+ HeapObject* object = HeapObject::cast(Memory::Object_at(current));
+ object->Iterate(&scavenge_visitor);
+ UpdateRSet(object);
+ }
+ promoted_mark = previous_top;
}
- // Take another spin if there are now unswept objects in new space
- // (there are currently no more unswept promoted objects).
- } while (new_space_front < new_space_.top());
+ if (has_processed_weak_pointers) break; // We are done.
+ // Copy objects reachable from weak pointers.
+ GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+ has_processed_weak_pointers = true;
+ }
// Set age mark.
- new_space_.set_age_mark(new_space_.top());
+ new_space_.set_age_mark(new_mark);
LOG(ResourceEvent("scavenge", "end"));
@@ -890,8 +882,8 @@
if (target_space == Heap::old_pointer_space_) {
// Record the object's address at the top of the to space, to allow
// it to be swept by the scavenger.
- promoted_rear -= kPointerSize;
- Memory::Object_at(promoted_rear) = *p;
+ promoted_top -= kPointerSize;
+ Memory::Object_at(promoted_top) = *p;
} else {
#ifdef DEBUG
// Objects promoted to the data space should not have pointers to
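For readers following the promoted_top bookkeeping above: the reverted code treats
the unused high end of the to space as a downward-growing stack of promoted-object
addresses, which the promotion path pushes onto and Scavenge later sweeps between a
mark and the current top. Below is a minimal standalone sketch of that mechanism;
the class name, the plain byte buffer, and the std:: types are illustrative
stand-ins, not V8 code.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative only: a downward-growing stack of addresses kept in the unused
// high end of a buffer, mirroring how the reverted code records promoted
// objects at new_space_.ToSpaceHigh() and walks promoted_top downward.
class PromotedAddressStack {
 public:
  explicit PromotedAddressStack(uint8_t* space_high)
      : top_(space_high), mark_(space_high) {}

  // Mirrors "promoted_top -= kPointerSize; Memory::Object_at(promoted_top) = *p".
  void Push(void* object_address) {
    top_ -= sizeof(void*);
    *reinterpret_cast<void**>(top_) = object_address;
  }

  // Sweeps the entries recorded since the last sweep, highest address first,
  // like the "for (Address current = promoted_mark - kPointerSize; ...)" loop.
  template <typename Visitor>
  void SweepNewEntries(Visitor visit) {
    uint8_t* previous_top = top_;
    for (uint8_t* current = mark_ - sizeof(void*); current >= previous_top;
         current -= sizeof(void*)) {
      visit(*reinterpret_cast<void**>(current));
    }
    mark_ = previous_top;  // Only entries pushed after this remain unswept.
  }

 private:
  uint8_t* top_;   // Next free slot; moves downward as addresses are pushed.
  uint8_t* mark_;  // High end of the not-yet-swept region.
};

int main() {
  std::vector<uint8_t> buffer(256);  // stand-in for the to space
  PromotedAddressStack promoted(buffer.data() + buffer.size());
  int a = 1, b = 2;  // stand-ins for promoted heap objects
  promoted.Push(&a);
  promoted.Push(&b);
  int visited = 0;
  promoted.SweepNewEntries([&](void*) { ++visited; });
  assert(visited == 2);
  return 0;
}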
Modified: branches/bleeding_edge/src/spaces-inl.h
==============================================================================
--- branches/bleeding_edge/src/spaces-inl.h (original)
+++ branches/bleeding_edge/src/spaces-inl.h Tue May 12 06:02:15 2009
@@ -64,16 +64,15 @@
// PageIterator
bool PageIterator::has_next() {
- return prev_page_ != stop_page_;
+ return cur_page_ != stop_page_;
}
Page* PageIterator::next() {
ASSERT(has_next());
- prev_page_ = (prev_page_ == NULL)
- ? space_->first_page_
- : prev_page_->next_page();
- return prev_page_;
+ Page* result = cur_page_;
+ cur_page_ = cur_page_->next_page();
+ return result;
}
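The has_next()/next() pair restored here gives the iterator a simple contract:
cur_page_ is always the next page to hand out, and iteration ends when it reaches
stop_page_. A small self-contained sketch of that contract over a toy singly
linked page list follows; Page and ToyPageIterator below are simplified
stand-ins, not the real V8 classes.

#include <cassert>

// Simplified stand-in for a page in a singly linked list (illustration only).
struct Page {
  Page* next_page_;
  Page* next_page() const { return next_page_; }
};

// Mirrors the restored contract: cur_page_ is the next page to return,
// stop_page_ is one past the last page to visit.
class ToyPageIterator {
 public:
  ToyPageIterator(Page* first, Page* stop)
      : cur_page_(first), stop_page_(stop) {}
  bool has_next() const { return cur_page_ != stop_page_; }
  Page* next() {
    assert(has_next());
    Page* result = cur_page_;
    cur_page_ = cur_page_->next_page();
    return result;
  }

 private:
  Page* cur_page_;   // next page to return
  Page* stop_page_;  // page where to stop
};

int main() {
  // Three pages linked first -> second -> third -> null.
  Page third{nullptr};
  Page second{&third};
  Page first{&second};

  // Stopping at the null page walks the whole list, in order.
  ToyPageIterator it(&first, nullptr);
  assert(it.next() == &first);
  assert(it.next() == &second);
  assert(it.next() == &third);
  assert(!it.has_next());
  return 0;
}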
Modified: branches/bleeding_edge/src/spaces.cc
==============================================================================
--- branches/bleeding_edge/src/spaces.cc (original)
+++ branches/bleeding_edge/src/spaces.cc Tue May 12 06:02:15 2009
@@ -111,17 +111,17 @@
// -----------------------------------------------------------------------------
// PageIterator
-PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
- prev_page_ = NULL;
+PageIterator::PageIterator(PagedSpace* space, Mode mode) {
+ cur_page_ = space->first_page_;
switch (mode) {
case PAGES_IN_USE:
- stop_page_ = space->AllocationTopPage();
+ stop_page_ = space->AllocationTopPage()->next_page();
break;
case PAGES_USED_BY_MC:
- stop_page_ = space->MCRelocationTopPage();
+ stop_page_ = space->MCRelocationTopPage()->next_page();
break;
case ALL_PAGES:
- stop_page_ = space->last_page_;
+ stop_page_ = Page::FromAddress(NULL);
break;
default:
UNREACHABLE();
@@ -496,11 +496,8 @@
accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- // Sequentially initialize remembered sets in the newly allocated
- // pages and cache the current last page in the space.
for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
p->ClearRSet();
- last_page_ = p;
}
// Use first_page_ for allocation.
@@ -679,11 +676,9 @@
MemoryAllocator::SetNextPage(last_page, p);
- // Sequentially clear remembered set of new pages and and cache the
- // new last page in the space.
+ // Clear remembered set of new pages.
while (p->is_valid()) {
p->ClearRSet();
- last_page_ = p;
p = p->next_page();
}
@@ -728,12 +723,10 @@
Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
MemoryAllocator::SetNextPage(last_page_to_keep, p);
- // Since pages are only freed in whole chunks, we may have kept more
- // than pages_to_keep. Count the extra pages and cache the new last
- // page in the space.
+ // Since pages are only freed in whole chunks, we may have kept more than
+ // pages_to_keep.
while (p->is_valid()) {
pages_to_keep++;
- last_page_ = p;
p = p->next_page();
}
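With the constructor restored above, the page range is half-open: cur_page_ starts
at first_page_ and stop_page_ is one past the last page to visit, i.e. next_page()
of the allocation-top page (PAGES_IN_USE, and analogously PAGES_USED_BY_MC) or the
null page (ALL_PAGES), which is why the cached last_page_ field can be dropped. A
tiny sketch of how those stop pages fall out, again using a hypothetical three-page
list rather than a real PagedSpace:

#include <cassert>

// Minimal stand-in for a page in a singly linked list (illustration only).
struct Page {
  Page* next_page_;
  Page* next_page() const { return next_page_; }
};

int main() {
  // first -> top -> unused -> null; `top` is the page holding the allocation
  // pointer, `unused` is a committed page beyond it.
  Page unused{nullptr};
  Page top{&unused};
  Page first{&top};

  // PAGES_IN_USE: stop one past the allocation-top page, as in the restored
  // constructor (stop_page_ = space->AllocationTopPage()->next_page()).
  Page* stop_in_use = top.next_page();

  // ALL_PAGES: stop at the null page, so the whole list is walked without
  // needing a cached last page.
  Page* stop_all = nullptr;

  int in_use = 0;
  for (Page* cur = &first; cur != stop_in_use; cur = cur->next_page()) in_use++;
  int all = 0;
  for (Page* cur = &first; cur != stop_all; cur = cur->next_page()) all++;

  assert(in_use == 2);  // first and top
  assert(all == 3);     // first, top, and unused
  return 0;
}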
Modified: branches/bleeding_edge/src/spaces.h
==============================================================================
--- branches/bleeding_edge/src/spaces.h (original)
+++ branches/bleeding_edge/src/spaces.h Tue May 12 06:02:15 2009
@@ -511,22 +511,11 @@
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
-// allocation pointer (space top). There are some caveats.
-//
-// (1) If the space top changes upward during iteration (because of
-// allocating new objects), the iterator does not iterate objects
-// above the original space top. The caller must create a new
-// iterator starting from the old top in order to visit these new
-// objects.
-//
-// (2) If new objects are allocated below the original allocation top
-// (e.g., free-list allocation in paged spaces), the new objects
-// may or may not be iterated depending on their position with
-// respect to the current point of iteration.
-//
-// (3) The space top should not change downward during iteration,
-// otherwise the iterator will return not-necessarily-valid
-// objects.
+// allocation pointer (space top). If the space top changes during
+// iteration (because of allocating new objects), the iterator does
+// not iterate new objects. The caller function must create a new
+// iterator starting from the old top in order to visit these new
+// objects. Heap::Scavenge() is such an example.
class HeapObjectIterator: public ObjectIterator {
public:
@@ -570,35 +559,17 @@
// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a paged space.
+// A PageIterator iterates pages in a space.
//
// The PageIterator class provides three modes for iterating pages in a space:
-// PAGES_IN_USE iterates pages containing allocated objects.
-// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
-// mark-compact collection.
+// PAGES_IN_USE iterates pages that are in use by the allocator;
+// PAGES_USED_BY_GC iterates pages that hold relocated objects during a
+// mark-compact collection;
// ALL_PAGES iterates all pages in the space.
-//
-// There are some caveats.
-//
-// (1) If the space expands during iteration, new pages will not be
-// returned by the iterator in any mode.
-//
-// (2) If new objects are allocated during iteration, they will appear
-// in pages returned by the iterator. Allocation may cause the
-// allocation pointer or MC allocation pointer in the last page to
-// change between constructing the iterator and iterating the last
-// page.
-//
-// (3) The space should not shrink during iteration, otherwise the
-// iterator will return deallocated pages.
class PageIterator BASE_EMBEDDED {
public:
- enum Mode {
- PAGES_IN_USE,
- PAGES_USED_BY_MC,
- ALL_PAGES
- };
+ enum Mode {PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES};
PageIterator(PagedSpace* space, Mode mode);
@@ -606,9 +577,8 @@
inline Page* next();
private:
- PagedSpace* space_;
- Page* prev_page_; // Previous page returned.
- Page* stop_page_; // Page to stop at (last page returned by the iterator).
+ Page* cur_page_; // next page to return
+ Page* stop_page_; // page where to stop
};
@@ -838,10 +808,6 @@
// The first page in this space.
Page* first_page_;
-
- // The last page in this space. Initially set in Setup, updated in
- // Expand and Shrink.
- Page* last_page_;
// Normal allocation information.
AllocationInfo allocation_info_;
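The restored HeapObjectIterator comment above leaves it to the caller to pick up
objects allocated above the original top, and the reverted Heap::Scavenge() does
exactly that: remember a mark, iterate up to the previous top, then start a fresh
pass from the old top until no new objects appear. A minimal sketch of that
fixed-point pattern, with a std::vector standing in for the semispace (names and
types here are illustrative only):

#include <cassert>
#include <vector>

int main() {
  std::vector<int> objects = {3, 2, 1};  // stand-ins for newly copied objects
  size_t mark = 0;                       // everything below `mark` is processed

  while (mark < objects.size()) {
    size_t previous_top = objects.size();
    for (size_t i = mark; i < previous_top; ++i) {
      // "Processing" an object may allocate new ones above previous_top,
      // just as scavenging can copy more objects past the iterator's top.
      if (objects[i] > 1) objects.push_back(objects[i] - 1);
    }
    // A fresh pass (a new iterator in the real code) starts at the old top.
    mark = previous_top;
  }

  assert(mark == objects.size());  // fixed point: nothing left unprocessed
  return 0;
}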