Reviewers: Erik Corry,
Description:
Fix numerous bugs introduced by reducing Page::kMaxHeapObjectSize.
This is just a temporary solution. We should revisit free-list issue later.
Please review this at http://codereview.chromium.org/5999010/
SVN Base: https://v8.googlecode.com/svn/branches/experimental/gc
Affected files:
M src/serialize.cc
M src/spaces.h
M src/spaces.cc
M test/cctest/test-spaces.cc
Index: src/serialize.cc
diff --git a/src/serialize.cc b/src/serialize.cc
index 00a601ffd95ff15f0131d56e555fca6cd4b0144c..cc1cbdf8b2474d5c8b28efcbd5b01bf322a508ac 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -577,8 +577,7 @@ Deserializer::Deserializer(SnapshotByteSource* source) : source_(source) {
Address Deserializer::Allocate(int space_index, Space* space, int size) {
Address address;
if (!SpaceIsLarge(space_index)) {
- ASSERT(!SpaceIsPaged(space_index) ||
- size <= Page::kPageSize - Page::kObjectStartOffset);
+ ASSERT(!SpaceIsPaged(space_index) || size <= Page::kMaxHeapObjectSize);
MaybeObject* maybe_new_allocation;
if (space_index == NEW_SPACE) {
maybe_new_allocation =
@@ -593,7 +592,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
high_water_[space_index] = address + size;
} else {
ASSERT(SpaceIsLarge(space_index));
- ASSERT(size > Page::kPageSize - Page::kObjectStartOffset);
+ ASSERT(size > Page::kMaxHeapObjectSize);
LargeObjectSpace* lo_space =
reinterpret_cast<LargeObjectSpace*>(space);
Object* new_allocation;
if (space_index == kLargeData) {
Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 739154ca55c23474685ad77457b336df08d1e774..e61e7127aa8cee04570f65011b7174893f3a0f03 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1773,7 +1773,23 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
// and allocated object address.
// Memory above the allocation watermark was not swept and
// might contain garbage pointers to new space.
- ASSERT(obj->address() == p->AllocationWatermark());
+ if (obj->address() != p->AllocationWatermark()) {
+ // TODO(gc) this is waste of time. we should enable linear allocation
+ // at least from above watermark
+ HeapObject* filler =
+ HeapObject::FromAddress(p->AllocationWatermark());
+ while (filler->address() < obj->address()) {
+ Address next_filler = filler->address() + filler->Size();
+ if (filler->Size() > ByteArray::kHeaderSize) {
+ for (Address slot = filler->address() + ByteArray::kHeaderSize;
+ slot < next_filler;
+ slot += kPointerSize) {
+ Memory::Address_at(slot) = 0;
+ }
+ }
+ filler = HeapObject::FromAddress(next_filler);
+ }
+ }
p->SetAllocationWatermark(obj->address() + size_in_bytes);
}
@@ -1803,10 +1819,7 @@ void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
    static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
- if (free_size > 0) {
- int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
- accounting_stats_.WasteBytes(wasted_bytes);
- }
+ if (free_size > 0) AddToFreeList(allocation_info_.top, free_size);
}
Index: src/spaces.h
diff --git a/src/spaces.h b/src/spaces.h
index 4d4ef4be7ee4c656e5d915cd25db2bd7e5806456..ec64e6e98d13797d0c8274e9df1e4bba5a5db651 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -314,7 +314,10 @@ class Page : public MemoryChunk {
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
// Maximum object size that fits in a page.
- static const int kMaxHeapObjectSize = kObjectAreaSize >> 4;
+ static const int kMaxHeapObjectSize =
+ OBJECT_POINTER_ALIGN(kObjectAreaSize >> 4);
+
+ STATIC_ASSERT((kMaxHeapObjectSize & kObjectAlignmentMask) == 0);
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
static const int kDirtyFlagOffset = 2 * kPointerSize;
@@ -1738,6 +1741,24 @@ class OldSpace : public PagedSpace {
return page->ObjectAreaEnd();
}
+ void AddToFreeList(Address start, int size_in_bytes) {
+ // TODO(gc) instead of putting large chunks into free list try to
+ // reuse them for linear allocation.
+ int wasted_bytes = 0;
+
+ while (size_in_bytes >= Page::kMaxHeapObjectSize) {
+ wasted_bytes += free_list_.Free(start, Page::kMaxHeapObjectSize);
+ start += Page::kMaxHeapObjectSize;
+ size_in_bytes -= Page::kMaxHeapObjectSize;
+ }
+
+ if (size_in_bytes > 0) {
+ wasted_bytes += free_list_.Free(start, size_in_bytes);
+ }
+
+ accounting_stats_.WasteBytes(wasted_bytes);
+ }
+
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
@@ -1745,10 +1766,7 @@ class OldSpace : public PagedSpace {
void Free(Address start, int size_in_bytes, bool add_to_freelist) {
accounting_stats_.DeallocateBytes(size_in_bytes);
- if (add_to_freelist) {
- int wasted_bytes = free_list_.Free(start, size_in_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- }
+ if (add_to_freelist) AddToFreeList(start, size_in_bytes);
}
virtual void DeallocateBlock(Address start,
Index: test/cctest/test-spaces.cc
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index d20ea2cc97d44c15b5735352ce0fdaba7472cbc4..debc3db071d6759dfccff4655ffe382de49d0e20 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -168,8 +168,8 @@ TEST(OldSpace) {
CHECK(s->Setup());
- while (s->Available() > 0) {
- s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+ while (s->Available() > Page::kMaxHeapObjectSize) {
+ s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectChecked();
}
s->TearDown();
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev