Revision: 9953
Author: [email protected]
Date: Thu Nov 10 05:24:00 2011
Log: Fix Heap::Shrink to ensure that it does not free pages that are
still in use.
Heap::Shrink is called from EnsureFromSpaceIsCommitted at the very start of
the GC. At that moment the live bytes counts on pages are in an inconsistent
state: some pages might already have been swept but not yet reached by the
incremental marker (or the incremental marker might not be in progress), and
so have their live bytes count set to 0. Thus we can't rely on LiveBytes
alone to determine which pages can be released to the OS.
[email protected]
BUG=100414
Review URL: http://codereview.chromium.org/8507038
http://code.google.com/p/v8/source/detail?r=9953
Modified:
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/spaces.cc
/branches/bleeding_edge/src/spaces.h
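
For context, the lines removed from spaces.cc below show the pre-patch
behavior of PagedSpace::ReleaseAllUnusedPages; a minimal sketch of why it
was unsafe:

  // Pre-patch sketch: release every page whose live bytes count reads zero.
  // At the start of GC this count can be stale; an already-swept page
  // reports LiveBytes() == 0 even though its memory is owned by the free
  // list, or has been reserved but not yet initialized.
  PageIterator it(this);
  while (it.has_next()) {
    Page* page = it.next();
    if (page->LiveBytes() == 0) {
      ReleasePage(page);  // unsafe: may release a page still in use
    }
  }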
=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Nov 9 07:40:08 2011
+++ /branches/bleeding_edge/src/heap.cc Thu Nov 10 05:24:00 2011
@@ -5609,8 +5609,11 @@
void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
space->ReleaseAllUnusedPages();
+ }
}
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Wed Nov 9 04:47:15 2011
+++ /branches/bleeding_edge/src/mark-compact.cc Thu Nov 10 05:24:00 2011
@@ -3547,7 +3547,7 @@
case LAZY_CONSERVATIVE: {
freed_bytes += SweepConservatively(space, p);
if (freed_bytes >= newspace_size && p != space->LastPage()) {
- space->SetPagesToSweep(p->next_page(), space->anchor());
+ space->SetPagesToSweep(p->next_page());
lazy_sweeping_active = true;
}
break;
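
A note on the SetPagesToSweep change above: the explicit last page is gone
because the unswept region now always runs from first_unswept_page_ to the
space's anchor sentinel, as the spaces.cc hunks below show. A self-contained
sketch of that traversal, using simplified stand-in types rather than V8's
Page and PagedSpace:

  #include <cstdint>

  struct Node { Node* next; int64_t freeable; };  // stand-in for Page

  struct Sweeper {              // stand-in for PagedSpace's sweeping state
    Node* anchor;               // sentinel closing the circular page list
    Node* first_unswept;        // 0 once sweeping is complete

    // Mirrors the patched AdvanceSweeper: sweep until the byte budget is
    // met or the anchor comes around, then record where to resume.
    bool Advance(int64_t budget) {
      if (first_unswept == 0) return true;
      int64_t freed = 0;
      Node* p = first_unswept;
      do {
        Node* next = p->next;
        freed += p->freeable;   // stand-in for conservative sweeping
        p = next;
      } while (p != anchor && freed < budget);
      first_unswept = (p == anchor) ? 0 : p;
      return first_unswept == 0;
    }
  };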
=======================================
--- /branches/bleeding_edge/src/spaces.cc Wed Nov 9 05:48:43 2011
+++ /branches/bleeding_edge/src/spaces.cc Thu Nov 10 05:24:00 2011
@@ -658,8 +658,7 @@
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)),
- last_unswept_page_(Page::FromAddress(NULL)) {
+ first_unswept_page_(Page::FromAddress(NULL)) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                * Page::kObjectAreaSize;
accounting_stats_.Clear();
@@ -754,6 +753,21 @@
void PagedSpace::ReleasePage(Page* page) {
ASSERT(page->LiveBytes() == 0);
+
+ // Adjust the list of unswept pages if the page is its head.
+ if (first_unswept_page_ == page) {
+ first_unswept_page_ = page->next_page();
+ if (first_unswept_page_ == anchor()) {
+ first_unswept_page_ = Page::FromAddress(NULL);
+ }
+ }
+
+ if (page->WasSwept()) {
+ intptr_t size = free_list_.EvictFreeListItems(page);
+ accounting_stats_.AllocateBytes(size);
+ ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
+ }
+
page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
@@ -771,8 +785,26 @@
PageIterator it(this);
while (it.has_next()) {
Page* page = it.next();
- if (page->LiveBytes() == 0) {
- ReleasePage(page);
+ if (!page->WasSwept()) {
+ if (page->LiveBytes() == 0) ReleasePage(page);
+ } else {
+ HeapObject* obj = HeapObject::FromAddress(page->body());
+ if (obj->IsFreeSpace() &&
+ FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
+ // Sometimes we allocate memory from free list but don't
+ // immediately initialize it (e.g. see PagedSpace::ReserveSpace
+ // called from Heap::ReserveSpace that can cause GC before
+ // reserved space is actually initialized).
+ // Thus we can't simply assume that obj represents a valid
+ // node still owned by a free list.
+ // Instead we should verify that the page is fully covered
+ // by free list items.
+ FreeList::SizeStats sizes;
+ free_list_.CountFreeListItems(page, &sizes);
+ if (sizes.Total() == Page::kObjectAreaSize) {
+ ReleasePage(page);
+ }
+ }
}
}
heap()->FreeQueuedChunks();
@@ -1870,12 +1902,49 @@
}
-void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) {
- sizes[0] = CountFreeListItemsInList(small_list_, p);
- sizes[1] = CountFreeListItemsInList(medium_list_, p);
- sizes[2] = CountFreeListItemsInList(large_list_, p);
- sizes[3] = CountFreeListItemsInList(huge_list_, p);
-}
+void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
+ sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
+ if (sizes->huge_size_ < Page::kObjectAreaSize) {
+ sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
+ sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
+ sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
+ } else {
+ sizes->small_size_ = 0;
+ sizes->medium_size_ = 0;
+ sizes->large_size_ = 0;
+ }
+}
+
+
+static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
+ intptr_t sum = 0;
+ while (*n != NULL) {
+ if (Page::FromAddress((*n)->address()) == p) {
+ FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+ sum += free_space->Size();
+ *n = (*n)->next();
+ } else {
+ n = (*n)->next_address();
+ }
+ }
+ return sum;
+}
+
+
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+ intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
+
+ if (sum < Page::kObjectAreaSize) {
+ sum += EvictFreeListItemsInList(&small_list_, p) +
+ EvictFreeListItemsInList(&medium_list_, p) +
+ EvictFreeListItemsInList(&large_list_, p);
+ }
+
+ available_ -= sum;
+
+ return sum;
+}
+
#ifdef DEBUG
intptr_t FreeList::SumFreeList(FreeListNode* cur) {
@@ -1963,7 +2032,6 @@
// Stop lazy sweeping and clear marking bits for unswept pages.
if (first_unswept_page_ != NULL) {
- Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
// Do not use ShouldBeSweptLazily predicate here.
@@ -1977,9 +2045,9 @@
}
}
p = p->next_page();
- } while (p != last);
- }
- first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
+ } while (p != anchor());
+ }
+ first_unswept_page_ = Page::FromAddress(NULL);
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
@@ -2020,7 +2088,6 @@
if (IsSweepingComplete()) return true;
intptr_t freed_bytes = 0;
- Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
Page* next_page = p->next_page();
@@ -2032,10 +2099,10 @@
freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
}
p = next_page;
- } while (p != last && freed_bytes < bytes_to_sweep);
-
- if (p == last) {
- last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
+ } while (p != anchor() && freed_bytes < bytes_to_sweep);
+
+ if (p == anchor()) {
+ first_unswept_page_ = Page::FromAddress(NULL);
} else {
first_unswept_page_ = p;
}
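
A note on EvictFreeListItemsInList above: it walks each list through a
pointer-to-pointer cursor, so unlinking a node and advancing share one code
path and the list head needs no special case. A standalone illustration of
the idiom with a simplified node type (not V8's FreeListNode):

  #include <cstdio>

  struct Node {
    Node* next;
    int tag;  // stand-in for "which page owns this node"
  };

  // Remove every node whose tag matches, returning how many were removed.
  // '*n' always names the link to rewrite, so removing the head node is
  // handled by the same assignment as any other node.
  static int EvictMatching(Node** n, int tag) {
    int removed = 0;
    while (*n != NULL) {
      if ((*n)->tag == tag) {
        *n = (*n)->next;   // splice the node out by rewriting the link
        ++removed;
      } else {
        n = &(*n)->next;   // advance the cursor to the next link
      }
    }
    return removed;
  }

  int main() {
    Node c = { NULL, 1 };
    Node b = { &c, 2 };
    Node a = { &b, 1 };
    Node* head = &a;
    int removed = EvictMatching(&head, 1);  // unlinks 'a' and 'c'
    std::printf("removed %d, head tag %d\n", removed, head->tag);  // 2, 2
    return 0;
  }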
=======================================
--- /branches/bleeding_edge/src/spaces.h Wed Nov 9 05:48:43 2011
+++ /branches/bleeding_edge/src/spaces.h Thu Nov 10 05:24:00 2011
@@ -1347,8 +1347,6 @@
// 'wasted_bytes'. The size should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
- void MarkNodes();
-
#ifdef DEBUG
void Zap();
static intptr_t SumFreeList(FreeListNode* node);
@@ -1357,7 +1355,20 @@
bool IsVeryLong();
#endif
- void CountFreeListItems(Page* p, intptr_t* sizes);
+ struct SizeStats {
+ intptr_t Total() {
+ return small_size_ + medium_size_ + large_size_ + huge_size_;
+ }
+
+ intptr_t small_size_;
+ intptr_t medium_size_;
+ intptr_t large_size_;
+ intptr_t huge_size_;
+ };
+
+ void CountFreeListItems(Page* p, SizeStats* sizes);
+
+ intptr_t EvictFreeListItems(Page* p);
private:
// The size range of blocks, in bytes.
@@ -1541,9 +1552,8 @@
!p->WasSweptPrecisely();
}
- void SetPagesToSweep(Page* first, Page* last) {
+ void SetPagesToSweep(Page* first) {
first_unswept_page_ = first;
- last_unswept_page_ = last;
}
bool AdvanceSweeper(intptr_t bytes_to_sweep);
@@ -1556,16 +1566,18 @@
Page* LastPage() { return anchor_.prev_page(); }
bool IsFragmented(Page* p) {
- intptr_t sizes[4];
- free_list_.CountFreeListItems(p, sizes);
+ FreeList::SizeStats sizes;
+ free_list_.CountFreeListItems(p, &sizes);
intptr_t ratio;
intptr_t ratio_threshold;
if (identity() == CODE_SPACE) {
- ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize;
+ ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
+ Page::kObjectAreaSize;
ratio_threshold = 10;
} else {
- ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize;
+ ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
+ Page::kObjectAreaSize;
ratio_threshold = 15;
}
@@ -1573,19 +1585,23 @@
PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d
(%.2f%%) %s\n",
reinterpret_cast<void*>(p),
identity(),
- static_cast<int>(sizes[0]),
- static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[1]),
- static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[2]),
- static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[3]),
- static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize,
+ static_cast<int>(sizes.small_size_),
+ static_cast<double>(sizes.small_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.medium_size_),
+ static_cast<double>(sizes.medium_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.large_size_),
+ static_cast<double>(sizes.large_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.huge_size_),
+ static_cast<double>(sizes.huge_size_ * 100) /
+ Page::kObjectAreaSize,
(ratio > ratio_threshold) ? "[fragmented]" : "");
}
return (ratio > ratio_threshold) ||
- (FLAG_always_compact && sizes[3] != Page::kObjectAreaSize);
+ (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize);
}
void EvictEvacuationCandidatesFromFreeLists();
@@ -1617,7 +1633,6 @@
bool was_swept_conservatively_;
Page* first_unswept_page_;
- Page* last_unswept_page_;
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS.
@@ -2333,8 +2348,6 @@
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact();
-
- void MarkFreeListNodes() { free_list_.MarkNodes(); }
protected:
void ResetFreeList() {
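
A closing note on the spaces.h change: replacing the bare sizes[4] array
with named SizeStats fields makes the fragmentation ratios in IsFragmented
self-describing. A standalone sketch of the non-CODE_SPACE branch of that
computation; object_area_size here stands in for Page::kObjectAreaSize:

  #include <cstdint>

  struct SizeStats {
    int64_t small_size_, medium_size_, large_size_, huge_size_;
    int64_t Total() {
      return small_size_ + medium_size_ + large_size_ + huge_size_;
    }
  };

  // Mirrors the non-CODE_SPACE branch of PagedSpace::IsFragmented: free
  // space in the small list is weighted 5x the medium list before being
  // compared against the 15% threshold.
  bool LooksFragmented(const SizeStats& sizes, int64_t object_area_size) {
    int64_t ratio =
        (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / object_area_size;
    return ratio > 15;
  }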