Revision: 6443
Author: [email protected]
Date: Mon Jan 24 06:34:54 2011
Log: * Complete new store buffer on ia32. The store buffer now covers
all the places it needs and can be verified against a scan of old
space.
Review URL: http://codereview.chromium.org/6309012
http://code.google.com/p/v8/source/detail?r=6443
Modified:
/branches/experimental/gc/src/heap-inl.h
/branches/experimental/gc/src/heap.cc
/branches/experimental/gc/src/heap.h
/branches/experimental/gc/src/ia32/ic-ia32.cc
/branches/experimental/gc/src/mark-compact.cc
/branches/experimental/gc/src/objects-inl.h
/branches/experimental/gc/src/serialize.cc
/branches/experimental/gc/src/serialize.h
/branches/experimental/gc/src/spaces.cc
/branches/experimental/gc/src/spaces.h
/branches/experimental/gc/src/store-buffer.cc
/branches/experimental/gc/src/store-buffer.h
/branches/experimental/gc/src/utils.h
/branches/experimental/gc/src/x64/full-codegen-x64.cc
/branches/experimental/gc/src/x64/stub-cache-x64.cc
=======================================
--- /branches/experimental/gc/src/heap-inl.h Fri Jan 7 06:11:14 2011
+++ /branches/experimental/gc/src/heap-inl.h Mon Jan 24 06:34:54 2011
@@ -277,13 +277,15 @@
void Heap::RecordWrite(Address address, int offset) {
- StoreBuffer::Mark(address + offset);
+ if (!InNewSpace(address)) StoreBuffer::Mark(address + offset);
}
void Heap::RecordWrites(Address address, int start, int len) {
- for (int i = 0; i < len; i++) {
- StoreBuffer::Mark(address + start + i);
+ if (!InNewSpace(address)) {
+ for (int i = 0; i < len; i += kPointerSize) {
+ StoreBuffer::Mark(address + start + i);
+ }
}
}
@@ -330,32 +332,23 @@
}
-void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size) {
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
+void Heap::CopyBlockToOldSpaceAndUpdateWriteBarrier(Address dst,
+ Address src,
+ int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
- Page* page = Page::FromAddress(dst);
- uint32_t marks = page->GetRegionMarks();
-
for (int remaining = byte_size / kPointerSize;
remaining > 0;
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
if (Heap::InNewSpace(Memory::Object_at(dst))) {
- marks |= page->GetRegionMaskForAddress(dst);
+ StoreBuffer::Mark(dst);
}
dst += kPointerSize;
src += kPointerSize;
}
-
- page->SetRegionMarks(marks);
-#else
- CopyBlock(dst, src, byte_size);
-#endif
}
@@ -380,21 +373,6 @@
memmove(dst, src, byte_size);
}
}
-
-
-void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
- ASSERT((dst >= (src + byte_size)) ||
- ((OffsetFrom(src) - OffsetFrom(dst)) >= kPointerSize));
-
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
- CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
-#else
- MoveBlock(dst, src, byte_size);
-#endif
-}
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
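Editorial sketch (not part of the patch): the shape of the new barrier above, on toy types. An address range stands in for Heap::InNewSpace and a plain vector for the store buffer; only the filtering and the "record the slot address" idea are modelled.

  #include <cstdint>
  #include <vector>

  typedef uintptr_t Address;
  const int kPointerSize = sizeof(void*);

  struct ToyHeap {
    Address new_space_start, new_space_end;   // model of the young generation
    std::vector<Address> store_buffer;        // remembered slot addresses

    bool InNewSpace(Address a) const {
      return a >= new_space_start && a < new_space_end;
    }

    // Mirrors Heap::RecordWrite above: stores into new-space objects are
    // filtered out; otherwise the slot address (not the value) is recorded.
    void RecordWrite(Address object, int offset) {
      if (!InNewSpace(object)) store_buffer.push_back(object + offset);
    }

    // Mirrors Heap::RecordWrites: one entry per pointer-sized cell, treating
    // len as a byte count the way the i += kPointerSize loop above does.
    void RecordWrites(Address object, int start, int len) {
      if (InNewSpace(object)) return;
      for (int i = 0; i < len; i += kPointerSize) {
        store_buffer.push_back(object + start + i);
      }
    }
  };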
=======================================
--- /branches/experimental/gc/src/heap.cc Fri Jan 7 06:11:14 2011
+++ /branches/experimental/gc/src/heap.cc Mon Jan 24 06:34:54 2011
@@ -991,6 +991,10 @@
Address new_space_front = new_space_.ToSpaceLow();
promotion_queue.Initialize(new_space_.ToSpaceHigh());
+#ifdef DEBUG
+ StoreBuffer::Clean();
+#endif
+
ScavengeVisitor scavenge_visitor;
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
@@ -3941,6 +3945,8 @@
void Heap::Verify() {
ASSERT(HasBeenSetup());
+ StoreBuffer::Verify();
+
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
@@ -4053,20 +4059,21 @@
bool Heap::IteratePointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback copy_object_func) {
- Address slot_address = start;
bool pointers_to_new_space_found = false;
- while (slot_address < end) {
+ for (Address slot_address = start;
+ slot_address < end;
+ slot_address += kPointerSize) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
copy_object_func(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
+ StoreBuffer::Mark(reinterpret_cast<Address>(slot));
pointers_to_new_space_found = true;
}
}
- slot_address += kPointerSize;
}
return pointers_to_new_space_found;
}
@@ -4171,10 +4178,6 @@
Address end,
ObjectSlotCallback callback) {
Address slot_address = start;
- Page* page = Page::FromAddress(start);
-
- uint32_t marks = page->GetRegionMarks();
-
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InFromSpace(*slot)) {
@@ -4182,13 +4185,11 @@
callback(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
- marks |= page->GetRegionMaskForAddress(slot_address);
+ StoreBuffer::Mark(reinterpret_cast<Address>(slot));
}
}
slot_address += kPointerSize;
}
-
- page->SetRegionMarks(marks);
}
@@ -4198,71 +4199,159 @@
Address area_end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback copy_object_func) {
-#ifndef ENABLE_CARDMARKING_WRITE_BARRIER
ASSERT(marks == Page::kAllRegionsDirtyMarks);
visit_dirty_region(area_start, area_end, copy_object_func);
return Page::kAllRegionsDirtyMarks;
-#else
- uint32_t newmarks = 0;
- uint32_t mask = 1;
-
- if (area_start >= area_end) {
- return newmarks;
- }
-
- Address region_start = area_start;
-
- // area_start does not necessarily coincide with start of the first region.
- // Thus to calculate the beginning of the next region we have to align
- // area_start by Page::kRegionSize.
- Address second_region =
- reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
- ~Page::kRegionAlignmentMask);
-
- // Next region might be beyond area_end.
- Address region_end = Min(second_region, area_end);
-
- if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
- newmarks |= mask;
- }
- }
- mask <<= 1;
-
- // Iterate subsequent regions which fully lay inside [area_start, area_end[.
- region_start = region_end;
- region_end = region_start + Page::kRegionSize;
-
- while (region_end <= area_end) {
- if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
- newmarks |= mask;
- }
+}
+
+
+#ifdef DEBUG
+static void CheckStoreBuffer(Object** current,
+ Object** limit,
+ Object**** store_buffer_position,
+ Object*** store_buffer_top) {
+ for ( ; current < limit; current++) {
+ Object* o = *current;
+ if (reinterpret_cast<uintptr_t>(o) == kFreeListZapValue) {
+ Object*** zap_checker = *store_buffer_position;
+ while (*zap_checker < current) {
+ zap_checker++;
+ if (zap_checker >= store_buffer_top) break;
+ }
+ if (zap_checker < store_buffer_top) {
+ // Objects in the free list shouldn't be in the store buffer.
+ ASSERT(*zap_checker != current);
+ }
+ continue;
+ }
+ // We have to check that the pointer does not point into new space
+ // without trying to cast it to a heap object since the hash field of
+ // a string can contain values like 1 and 3 which are tagged null
+ // pointers.
+ if (!Heap::InNewSpace(o)) continue;
+ while (**store_buffer_position < current &&
+ *store_buffer_position < store_buffer_top) {
+ (*store_buffer_position)++;
+ }
+ if (**store_buffer_position != current ||
+ *store_buffer_position == store_buffer_top) {
+ Object** obj_start = current;
+ while (!(*obj_start)->IsMap()) obj_start--;
+ UNREACHABLE();
+ }
+ }
+}
+
+
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer(
+ ExpectedPageWatermarkState watermark_state) {
+ OldSpace* space = old_pointer_space();
+ PageIterator pages(space, PageIterator::PAGES_IN_USE);
+
+ space->free_list()->Zap();
+
+ StoreBuffer::SortUniq();
+
+ while (pages.has_next()) {
+ Page* page = pages.next();
+ Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+
+ // Do not try to visit pointers beyond page allocation watermark.
+ // Page can contain garbage pointers there.
+ Address end;
+
+ if (watermark_state == WATERMARK_SHOULD_BE_VALID ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
+ }
+
+ Object*** store_buffer_position = StoreBuffer::Start();
+ Object*** store_buffer_top = StoreBuffer::Top();
+
+ Object** limit = reinterpret_cast<Object**>(end);
+ CheckStoreBuffer(current, limit, &store_buffer_position, store_buffer_top);
+ }
+}
+
+
+void Heap::MapSpaceCheckStoreBuffer(
+ ExpectedPageWatermarkState watermark_state) {
+ MapSpace* space = map_space();
+ PageIterator pages(space, PageIterator::PAGES_IN_USE);
+
+ space->free_list()->Zap();
+
+ StoreBuffer::SortUniq();
+
+ while (pages.has_next()) {
+ Page* page = pages.next();
+
+ // Do not try to visit pointers beyond page allocation watermark.
+ // Page can contain garbage pointers there.
+ Address end;
+
+ if (watermark_state == WATERMARK_SHOULD_BE_VALID ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
}
- region_start = region_end;
- region_end = region_start + Page::kRegionSize;
-
- mask <<= 1;
- }
-
- if (region_start != area_end) {
- // A small piece of area left uniterated because area_end does not coincide
- // with region end. Check whether region covering last part of area is
- // dirty.
- if (marks & mask) {
- if (visit_dirty_region(region_start, area_end, copy_object_func)) {
- newmarks |= mask;
- }
+
+ Address map_aligned_current = page->ObjectAreaStart();
+
+ ASSERT(map_aligned_current == MapStartAlign(map_aligned_current));
+ ASSERT(end == MapEndAlign(end));
+
+ Object*** store_buffer_position = StoreBuffer::Start();
+ Object*** store_buffer_top = StoreBuffer::Top();
+
+ for ( ; map_aligned_current < end; map_aligned_current += Map::kSize) {
+ ASSERT(!Heap::InNewSpace(Memory::Object_at(map_aligned_current)));
+ ASSERT(Memory::Object_at(map_aligned_current)->IsMap());
+
+ Object** current = reinterpret_cast<Object**>(
+ map_aligned_current + Map::kPointerFieldsBeginOffset);
+ Object** limit = reinterpret_cast<Object**>(
+ map_aligned_current + Map::kPointerFieldsEndOffset);
+
+ CheckStoreBuffer(current,
+ limit,
+ &store_buffer_position,
+ store_buffer_top);
}
}
-
- return newmarks;
-#endif
+}
+
+
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+ LargeObjectIterator it(lo_space());
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ // We only have code, sequential strings, or fixed arrays in large
+ // object space, and only fixed arrays can possibly contain pointers to
+ // the young generation.
+ if (object->IsFixedArray()) {
+ Object*** store_buffer_position = StoreBuffer::Start();
+ Object*** store_buffer_top = StoreBuffer::Top();
+ Object** current = reinterpret_cast<Object**>(object->address());
+ Object** limit =
+ reinterpret_cast<Object**>(object->address() + object->Size());
+ CheckStoreBuffer(current,
+ limit,
+ &store_buffer_position,
+ store_buffer_top);
+ }
+ }
}
+#endif
+
void Heap::IterateDirtyRegions(
PagedSpace* space,
@@ -4270,36 +4359,32 @@
ObjectSlotCallback copy_object_func,
ExpectedPageWatermarkState expected_page_watermark_state) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
-
- while (it.has_next()) {
- Page* page = it.next();
- uint32_t marks = page->GetRegionMarks();
-
- if (marks != Page::kAllRegionsCleanMarks) {
- Address start = page->ObjectAreaStart();
-
- // Do not try to visit pointers beyond page allocation watermark.
- // Page can contain garbage pointers there.
- Address end;
-
- if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
- page->IsWatermarkValid()) {
- end = page->AllocationWatermark();
- } else {
- end = page->CachedAllocationWatermark();
- }
-
- ASSERT(space == old_pointer_space_ ||
- (space == map_space_ &&
- ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
-
- page->SetRegionMarks(IterateDirtyRegions(marks,
- start,
- end,
- visit_dirty_region,
- copy_object_func));
- }
+ PageIterator pages(space, PageIterator::PAGES_IN_USE);
+
+ while (pages.has_next()) {
+ Page* page = pages.next();
+ Address start = page->ObjectAreaStart();
+
+ // Do not try to visit pointers beyond page allocation watermark.
+ // Page can contain garbage pointers there.
+ Address end;
+
+ if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
+ }
+
+ ASSERT(space == old_pointer_space_ ||
+ (space == map_space_ &&
+ ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
+
+ IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+ start,
+ end,
+ visit_dirty_region,
+ copy_object_func);
// Mark page watermark as invalid to maintain watermark validity invariant.
// See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
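Editorial sketch (not part of the patch): the invariant that CheckStoreBuffer above enforces, reduced to self-contained C++. Toy types only; a binary search over the sorted buffer stands in for the moving store_buffer_position cursor, and zapped free-list cells are skipped as in the real code.

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <utility>
  #include <vector>

  typedef uintptr_t Address;
  const uintptr_t kFreeListZapValue = 0xfeed1eaf;  // zap constant from spaces.h below

  // Every old-space slot holding a new-space pointer must be in the sorted,
  // de-duplicated store buffer.  slots holds (slot address, slot contents).
  void CheckSlots(const std::vector<std::pair<Address, Address> >& slots,
                  const std::vector<Address>& sorted_store_buffer,
                  Address new_space_start, Address new_space_end) {
    for (size_t i = 0; i < slots.size(); i++) {
      Address slot = slots[i].first;
      Address value = slots[i].second;
      if (value == kFreeListZapValue) continue;            // free-list garbage
      if (value < new_space_start || value >= new_space_end) continue;
      assert(std::binary_search(sorted_store_buffer.begin(),
                                sorted_store_buffer.end(), slot));
    }
  }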
=======================================
--- /branches/experimental/gc/src/heap.h Fri Jan 7 06:11:14 2011
+++ /branches/experimental/gc/src/heap.h Mon Jan 24 06:34:54 2011
@@ -933,6 +933,12 @@
// Verify the heap is in its normal state before or after a GC.
static void Verify();
+ static void OldPointerSpaceCheckStoreBuffer(
+ ExpectedPageWatermarkState watermark_state);
+ static void MapSpaceCheckStoreBuffer(
+ ExpectedPageWatermarkState watermark_state);
+ static void LargeObjectSpaceCheckStoreBuffer();
+
// Report heap statistics.
static void ReportHeapStatistics(const char* title);
static void ReportCodeStatistics(const char* title);
@@ -1085,18 +1091,14 @@
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
+ static inline void CopyBlockToOldSpaceAndUpdateWriteBarrier(Address dst,
+ Address src,
+ int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size);
- static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
- Address src,
- int byte_size);
-
// Check new space expansion criteria and expand semispaces if it was hit.
static void CheckNewSpaceExpansionCriteria();
=======================================
--- /branches/experimental/gc/src/ia32/ic-ia32.cc Wed Jan 5 06:17:18 2011
+++ /branches/experimental/gc/src/ia32/ic-ia32.cc Mon Jan 24 06:34:54 2011
@@ -1723,9 +1723,7 @@
Address encoded_offsets_address = test_instruction_address + 1;
int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
int delta_to_map_check = -(encoded_offsets & 0xFFFF);
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
int delta_to_record_write = encoded_offsets >> 16;
-#endif
// Patch the map to check. The map address is the last 4 bytes of
// the 7-byte operand-immediate compare instruction.
@@ -1744,7 +1742,6 @@
(offset == 0 && map == Heap::null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
// Patch the offset in the write-barrier code. The offset is the
// last 4 bytes of a six byte lea instruction.
offset_address = map_check_address + delta_to_record_write + 2;
@@ -1754,7 +1751,6 @@
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == Heap::null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-#endif
return true;
}
=======================================
--- /branches/experimental/gc/src/mark-compact.cc Wed Jan 19 09:39:52 2011
+++ /branches/experimental/gc/src/mark-compact.cc Mon Jan 24 06:34:54 2011
@@ -1522,7 +1522,7 @@
int size,
bool to_old_space) {
if (to_old_space) {
- Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
+ Heap::CopyBlockToOldSpaceAndUpdateWriteBarrier(dst, src, size);
} else {
Heap::CopyBlock(dst, src, size);
}
=======================================
--- /branches/experimental/gc/src/objects-inl.h Fri Jan 7 06:11:14 2011
+++ /branches/experimental/gc/src/objects-inl.h Mon Jan 24 06:34:54 2011
@@ -795,7 +795,6 @@
#define WRITE_BARRIER(object, offset) \
Heap::RecordWrite(object->address(), offset);
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
// CONDITIONAL_WRITE_BARRIER must be issued after the actual
// write due to the assert validating the written value.
#define CONDITIONAL_WRITE_BARRIER(object, offset, mode) \
@@ -808,9 +807,6 @@
Page::FromAddress(object->address())-> \
IsRegionDirty(object->address() + offset)); \
}
-#else
-#define CONDITIONAL_WRITE_BARRIER(object, offset, mode)
-#endif
#define READ_DOUBLE_FIELD(p, offset) \
(*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
=======================================
--- /branches/experimental/gc/src/serialize.cc Thu Jan 6 06:05:23 2011
+++ /branches/experimental/gc/src/serialize.cc Mon Jan 24 06:34:54 2011
@@ -987,6 +987,11 @@
}
break;
}
+
+ case kSkip: {
+ current++;
+ break;
+ }
case kNativesStringResource: {
int index = source_->Get();
@@ -1103,7 +1108,9 @@
void Serializer::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
- if ((*current)->IsSmi()) {
+ if (reinterpret_cast<Address>(current) == StoreBuffer::TopAddress()) {
+ sink_->Put(kSkip, "Skip");
+ } else if ((*current)->IsSmi()) {
sink_->Put(kRawData, "RawData");
sink_->PutInt(kPointerSize, "length");
for (int i = 0; i < kPointerSize; i++) {
=======================================
--- /branches/experimental/gc/src/serialize.h Tue Dec 7 03:31:57 2010
+++ /branches/experimental/gc/src/serialize.h Mon Jan 24 06:34:54 2011
@@ -188,7 +188,8 @@
kRootArray = 0x9, // Object is found in root array.
kPartialSnapshotCache = 0xa, // Object is in the cache.
kExternalReference = 0xb, // Pointer to an external reference.
- // 0xc-0xf Free.
+ kSkip = 0xc, // Skip a pointer sized cell.
+ // 0xd-0xf Free.
kBackref = 0x10, // Object is described relative to end.
// 0x11-0x18 One per space.
// 0x19-0x1f Common backref offsets.
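Editorial sketch (not part of the patch): the effect of the new kSkip tag. The serializer refuses to write out the cell holding StoreBuffer::TopAddress() (it is process-specific), and the deserializer simply advances past that cell (the current++ case above). Toy tags and types, not the snapshot format.

  #include <cstdint>
  #include <vector>

  enum Tag { kRawWord = 0, kSkip = 0xc };

  struct Item { Tag tag; uintptr_t raw; };

  void DeserializeCells(const std::vector<Item>& stream, uintptr_t* current) {
    for (size_t i = 0; i < stream.size(); i++) {
      if (stream[i].tag == kSkip) { current++; continue; }  // leave the cell untouched
      *current++ = stream[i].raw;
    }
  }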
=======================================
--- /branches/experimental/gc/src/spaces.cc Wed Jan 19 09:55:55 2011
+++ /branches/experimental/gc/src/spaces.cc Mon Jan 24 06:34:54 2011
@@ -1590,6 +1590,43 @@
}
return false;
}
+
+
+void FreeListNode::Zap() {
+ if (IsByteArray()) {
+ ByteArray* ba = ByteArray::cast(this);
+ // Skip map, length and next pointer.
+ Address payload_start = ba->GetDataStartAddress() + kPointerSize;
+ Address payload_end = ba->address() + ba->Size();
+ for (Address cur = payload_start;
+ cur < payload_end;
+ cur += kPointerSize) {
+ *reinterpret_cast<uintptr_t*>(cur) = kFreeListZapValue;
+ }
+ }
+}
+
+
+void OldSpaceFreeList::Zap() {
+ for (int i = 0; i < kFreeListsLength; i++) {
+ Address cur_addr = free_[i].head_node_;
+ while (cur_addr != NULL) {
+ FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+ cur_node->Zap();
+ cur_addr = cur_node->next();
+ }
+ }
+}
+
+
+void FixedSizeFreeList::Zap() {
+ Address cur_addr = head_;
+ while (cur_addr != NULL && cur_addr != tail_) {
+ FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+ cur_node->Zap();
+ cur_addr = cur_node->next();
+ }
+}
#endif
@@ -2331,13 +2368,8 @@
// object space, and only fixed arrays can possibly contain pointers to
// the young generation.
if (object->IsFixedArray()) {
- Page* page = Page::FromAddress(object->address());
- uint32_t marks = page->GetRegionMarks();
-
// TODO(gc): we can no longer assume that LargePage is bigger than normal
// page.
- ASSERT(marks == Page::kAllRegionsDirtyMarks);
- USE(marks);
Address start = object->address();
Address object_end = start + object->Size();
=======================================
--- /branches/experimental/gc/src/spaces.h Wed Jan 19 09:39:52 2011
+++ /branches/experimental/gc/src/spaces.h Mon Jan 24 06:34:54 2011
@@ -1819,6 +1819,8 @@
inline Address next();
inline void set_next(Address next);
+ inline void Zap();
+
private:
static const int kNextOffset =
POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
@@ -1826,6 +1828,9 @@
};
+static const uintptr_t kFreeListZapValue = 0xfeed1eaf;
+
+
// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
public:
@@ -1853,6 +1858,10 @@
void MarkNodes();
+#ifdef DEBUG
+ void Zap();
+#endif
+
private:
// The size range of blocks, in bytes. (Smaller allocations are allowed, but
// will always result in waste.)
@@ -1953,6 +1962,10 @@
void MarkNodes();
+#ifdef DEBUG
+ void Zap();
+#endif
+
private:
// Available bytes on the free list.
intptr_t available_;
@@ -2025,6 +2038,8 @@
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
+
+ OldSpaceFreeList* free_list() { return &free_list_; }
#endif
protected:
@@ -2091,6 +2106,8 @@
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
+
+ FixedSizeFreeList* free_list() { return &free_list_; }
#endif
protected:
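Editorial sketch (not part of the patch): why free-list memory is zapped. Filling unused payloads with kFreeListZapValue lets the store-buffer checker in heap.cc tell garbage in a free block apart from a live heap pointer.

  #include <cstdint>

  typedef uintptr_t Address;
  const uintptr_t kFreeListZapValue = 0xfeed1eaf;   // value from spaces.h above

  void ZapRange(Address start, Address end) {
    for (Address cur = start; cur < end; cur += sizeof(uintptr_t)) {
      *reinterpret_cast<uintptr_t*>(cur) = kFreeListZapValue;
    }
  }

  bool IsFreeListGarbage(uintptr_t word) {
    return word == kFreeListZapValue;   // the checker skips such cells
  }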
=======================================
--- /branches/experimental/gc/src/store-buffer.cc Thu Jan 6 06:05:23 2011
+++ /branches/experimental/gc/src/store-buffer.cc Mon Jan 24 06:34:54 2011
@@ -34,9 +34,15 @@
Address* StoreBuffer::start_ = NULL;
Address* StoreBuffer::limit_ = NULL;
+Address* StoreBuffer::old_start_ = NULL;
+Address* StoreBuffer::old_limit_ = NULL;
+Address* StoreBuffer::old_top_ = NULL;
uintptr_t* StoreBuffer::hash_map_1_ = NULL;
uintptr_t* StoreBuffer::hash_map_2_ = NULL;
VirtualMemory* StoreBuffer::virtual_memory_ = NULL;
+bool StoreBuffer::must_scan_entire_memory_ = false;
+bool StoreBuffer::old_buffer_is_sorted_ = false;
+bool StoreBuffer::during_gc_ = false;
void StoreBuffer::Setup() {
virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
@@ -46,6 +52,9 @@
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
+ old_top_ = old_start_ = new Address[kOldStoreBufferLength];
+ old_limit_ = old_start_ + kOldStoreBufferLength;
+
ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
Address* vm_limit = reinterpret_cast<Address*>(
@@ -65,6 +74,11 @@
hash_map_1_ = new uintptr_t[kHashMapLength];
hash_map_2_ = new uintptr_t[kHashMapLength];
+
+ Heap::AddGCPrologueCallback(&GCPrologue, kGCTypeAll);
+ Heap::AddGCEpilogueCallback(&GCEpilogue, kGCTypeAll);
+
+ GCPrologue(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
}
@@ -72,27 +86,154 @@
delete virtual_memory_;
delete[] hash_map_1_;
delete[] hash_map_2_;
+ delete[] old_start_;
+ old_start_ = old_top_ = old_limit_ = NULL;
start_ = limit_ = NULL;
Heap::public_set_store_buffer_top(start_);
}
-void StoreBuffer::Compact() {
+
+#if V8_TARGET_ARCH_X64
+static int CompareAddresses(const void* void_a, const void* void_b) {
+ intptr_t a =
+ reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+ intptr_t b =
+ reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+ // Unfortunately if int is smaller than intptr_t there is no branch-free
+ // way to return a number with the same sign as the difference between the
+ // pointers.
+ if (a == b) return 0;
+ if (a < b) return -1;
+ ASSERT(a > b);
+ return 1;
+}
+#else
+static int CompareAddresses(const void* void_a, const void* void_b) {
+ intptr_t a =
+ reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+ intptr_t b =
+ reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+ ASSERT(sizeof(1) == sizeof(a));
+ // Shift down to avoid wraparound.
+ return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
+}
+#endif
+
+
+void StoreBuffer::Uniq() {
+ ASSERT(HashTablesAreZapped());
+ // Remove adjacent duplicates and cells that do not point at new space.
+ Address previous = NULL;
+ Address* write = old_start_;
+ for (Address* read = old_start_; read < old_top_; read++) {
+ Address current = *read;
+ if (current != previous) {
+ if (Heap::InNewSpace(*reinterpret_cast<Address*>(current))) {
+ *write++ = current;
+ }
+ }
+ previous = current;
+ }
+ old_top_ = write;
+}
+
+
+void StoreBuffer::SortUniq() {
+ Compact();
+ if (old_buffer_is_sorted_) return;
+ ZapHashTables();
+ qsort(reinterpret_cast<void*>(old_start_),
+ old_top_ - old_start_,
+ sizeof(*old_top_),
+ &CompareAddresses);
+ Uniq();
+
+ old_buffer_is_sorted_ = true;
+}
+
+
+#ifdef DEBUG
+void StoreBuffer::Clean() {
+ if (must_scan_entire_memory_) {
+ // We don't currently have a way to go back to using the store buffer.
+ // TODO(gc): We should rebuild the store buffer during GC.
+ old_top_ = old_start_; // Just clear the cache.
+ return;
+ }
+ ZapHashTables();
+ Uniq(); // Also removes things that no longer point to new space.
+ CheckForFullBuffer();
+}
+
+
+static bool Zapped(char* start, int size) {
+ for (int i = 0; i < size; i++) {
+ if (start[i] != 0) return false;
+ }
+ return true;
+}
+
+
+bool StoreBuffer::HashTablesAreZapped() {
+ return Zapped(reinterpret_cast<char*>(hash_map_1_),
+ sizeof(uintptr_t) * kHashMapLength) &&
+ Zapped(reinterpret_cast<char*>(hash_map_2_),
+ sizeof(uintptr_t) * kHashMapLength);
+}
+
+
+#endif
+
+
+void StoreBuffer::ZapHashTables() {
memset(reinterpret_cast<void*>(hash_map_1_),
0,
sizeof(uintptr_t) * kHashMapLength);
memset(reinterpret_cast<void*>(hash_map_2_),
0,
sizeof(uintptr_t) * kHashMapLength);
+}
+
+
+void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) {
+ ZapHashTables();
+ during_gc_ = true;
+ if (type != kGCTypeScavenge) {
+ old_top_ = old_start_;
+ Heap::public_set_store_buffer_top(start_);
+ }
+}
+
+
+void StoreBuffer::Verify() {
+#ifdef DEBUG
+ if (FLAG_verify_heap && !StoreBuffer::must_scan_entire_memory()) {
+ Heap::OldPointerSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
+ Heap::MapSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
+ Heap::LargeObjectSpaceCheckStoreBuffer();
+ }
+#endif
+}
+
+
+void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) {
+ during_gc_ = false;
+ Verify();
+}
+
+
+void StoreBuffer::Compact() {
Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top());
- Address* stop = top;
+ if (top == start_) return;
ASSERT(top <= limit_);
- top = start_;
+ Heap::public_set_store_buffer_top(start_);
+ if (must_scan_entire_memory_) return;
// Goes through the addresses in the store buffer attempting to remove
// duplicates. In the interest of speed this is a lossy operation. Some
// duplicates will remain. We have two hash tables with different hash
// functions to reduce the number of unnecessary clashes.
- for (Address* current = start_; current < stop; current++) {
+ for (Address* current = start_; current < top; current++) {
uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
// Shift out the last bits including any tags.
int_addr >>= kPointerSizeLog2;
@@ -113,24 +254,36 @@
hash_map_1_[hash1] = int_addr;
hash_map_2_[hash2] = 0;
}
- ASSERT(top <= current);
- ASSERT(top <= limit_);
- *top++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
+ old_buffer_is_sorted_ = false;
+ *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
+ ASSERT(old_top_ <= old_limit_);
}
Counters::store_buffer_compactions.Increment();
- if (limit_ - top < top - start_) {
- // Compression did not free up at least half.
+ CheckForFullBuffer();
+}
+
+
+void StoreBuffer::CheckForFullBuffer() {
+ if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
+ // After compression we don't have enough space that we can be sure that
+ // the next two compressions will have enough space in the buffer. We
+ // start by trying a more aggressive compression. If this frees up at least
+ // half the space then we can keep going, otherwise it is time to brake.
+ SortUniq();
+ if (old_limit_ - old_top_ > old_top_ - old_start_) {
+ return;
+ }
// TODO(gc): Set an interrupt to do a GC on the next back edge.
// TODO(gc): Allocate the rest of new space to force a GC on the next
// allocation.
- if (limit_ - top < (top - start_) >> 1) {
- // Compression did not free up at least one quarter.
+ if (old_limit_ - old_top_ < kStoreBufferSize) {
+ // After compression we don't even have enough space for the next
+ // compression to be guaranteed to succeed.
// TODO(gc): Set a flag to scan all of memory.
- top = start_;
Counters::store_buffer_overflows.Increment();
+ must_scan_entire_memory_ = true;
}
}
- Heap::public_set_store_buffer_top(top);
}
} } // namespace v8::internal
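Editorial sketch (not part of the patch): the lossy duplicate filter in Compact() above. Two small direct-mapped tables with different hash functions are consulted; an address found in either table is treated as a duplicate and dropped. Missed duplicates are harmless because SortUniq() removes them exactly. The hash functions here are simplified stand-ins, not the ones used in the patch.

  #include <cstdint>
  #include <vector>

  typedef uintptr_t Address;
  const int kTableLengthLog2 = 12;
  const int kTableLength = 1 << kTableLengthLog2;

  void LossyCompact(const std::vector<Address>& new_buffer,
                    std::vector<Address>* old_buffer) {
    std::vector<Address> table1(kTableLength, 0);
    std::vector<Address> table2(kTableLength, 0);
    for (size_t i = 0; i < new_buffer.size(); i++) {
      Address a = new_buffer[i];
      size_t h1 = (a >> 2) & (kTableLength - 1);                       // low address bits
      size_t h2 = (a >> (2 + kTableLengthLog2)) & (kTableLength - 1);  // higher bits
      if (table1[h1] == a || table2[h2] == a) continue;  // probably seen already
      table1[h1] = a;
      table2[h2] = a;
      old_buffer->push_back(a);                          // survives into the old buffer
    }
  }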
=======================================
--- /branches/experimental/gc/src/store-buffer.h Thu Jan 6 06:05:23 2011
+++ /branches/experimental/gc/src/store-buffer.h Mon Jan 24 06:34:54 2011
@@ -50,17 +50,53 @@
static const int kStoreBufferOverflowBit = 1 << 16;
static const int kStoreBufferSize = kStoreBufferOverflowBit;
+ static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+ static const int kOldStoreBufferLength = kStoreBufferLength * 16;
static const int kHashMapLengthLog2 = 12;
static const int kHashMapLength = 1 << kHashMapLengthLog2;
static void Compact();
+ static void GCPrologue(GCType type, GCCallbackFlags flags);
+ static void GCEpilogue(GCType type, GCCallbackFlags flags);
+
+ static Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
+ static Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
+
+ static bool must_scan_entire_memory() { return must_scan_entire_memory_; }
+ static bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
+
+ // Goes through the store buffer removing pointers to things that have
+ // been promoted. Rebuilds the store buffer completely if it overflowed.
+ static void SortUniq();
+ static void Verify();
+
+#ifdef DEBUG
+ static void Clean();
+#endif
private:
+ // The store buffer is divided up into a new buffer that is constantly being
+ // filled by mutator activity and an old buffer that is filled with the data
+ // from the new buffer after compression.
static Address* start_;
static Address* limit_;
+
+ static Address* old_start_;
+ static Address* old_limit_;
+ static Address* old_top_;
+
+ static bool old_buffer_is_sorted_;
+ static bool must_scan_entire_memory_;
+ static bool during_gc_;
+
static VirtualMemory* virtual_memory_;
static uintptr_t* hash_map_1_;
static uintptr_t* hash_map_2_;
+
+ static void CheckForFullBuffer();
+ static void Uniq();
+ static void ZapHashTables();
+ static bool HashTablesAreZapped();
};
} } // namespace v8::internal
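Editorial sketch (not part of the patch): SortUniq()/Uniq() as declared above ("removing pointers to things that have been promoted"), on a plain vector. Sort, drop adjacent duplicates, and drop entries whose slot no longer holds a new-space pointer; the predicate stands in for Heap::InNewSpace(*slot).

  #include <algorithm>
  #include <cstdint>
  #include <vector>

  typedef uintptr_t Address;

  template <typename StillPointsToNewSpace>
  void SortUniqSketch(std::vector<Address>* old_buffer,
                      StillPointsToNewSpace still_points_to_new_space) {
    std::sort(old_buffer->begin(), old_buffer->end());
    std::vector<Address> kept;
    Address previous = 0;
    for (size_t i = 0; i < old_buffer->size(); i++) {
      Address slot = (*old_buffer)[i];
      if (slot != previous && still_points_to_new_space(slot)) {
        kept.push_back(slot);               // keep one copy of each live entry
      }
      previous = slot;
    }
    old_buffer->swap(kept);
  }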
=======================================
--- /branches/experimental/gc/src/utils.h Wed Jan 5 05:52:13 2011
+++ /branches/experimental/gc/src/utils.h Mon Jan 24 06:34:54 2011
@@ -161,8 +161,8 @@
-template <typename T>
-static inline bool IsAligned(T value, T alignment) {
+template <typename T, typename U>
+static inline bool IsAligned(T value, U alignment) {
ASSERT(IsPowerOf2(alignment));
return (value & (alignment - 1)) == 0;
}
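Editorial sketch (not part of the patch): why IsAligned gains a second template parameter. The value and the alignment may now have different integral types, e.g. an intptr_t byte count checked against an int constant, without a cast at every call site. Names below are illustrative only.

  #include <cstdint>

  template <typename T, typename U>
  inline bool IsAligned(T value, U alignment) {
    return (value & (alignment - 1)) == 0;   // power-of-two assert omitted
  }

  const int kPointerSize = sizeof(void*);

  bool PointerAligned(intptr_t byte_offset) {
    return IsAligned(byte_offset, kPointerSize);   // T = intptr_t, U = int
  }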
=======================================
--- /branches/experimental/gc/src/x64/full-codegen-x64.cc Wed Jan 5 05:52:13 2011
+++ /branches/experimental/gc/src/x64/full-codegen-x64.cc Mon Jan 24 06:34:54 2011
@@ -116,13 +116,11 @@
// Store it in the context.
int context_offset = Context::SlotOffset(slot->index());
__ movq(Operand(rsi, context_offset), rax);
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
// Update the write barrier. This clobbers all involved
// registers, so we have to use a third register to avoid
// clobbering rsi.
__ movq(rcx, rsi);
- __ RecordWrite(rcx, context_offset, rax, rbx);
-#endif
+ __ RecordWrite(rcx, context_offset, rax, rbx, kDontSaveFPRegs);
}
}
}
@@ -545,13 +543,11 @@
ASSERT(!scratch1.is(src) && !scratch2.is(src));
MemOperand location = EmitSlotSearch(dst, scratch1);
__ movq(location, src);
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
- __ RecordWrite(scratch1, offset, src, scratch2);
- }
-#endif
+ __ RecordWrite(scratch1, offset, src, scratch2, kDontSaveFPRegs);
+ }
}
@@ -602,11 +598,9 @@
} else if (function != NULL) {
VisitForAccumulatorValue(function);
__ movq(ContextOperand(rsi, slot->index()), result_register());
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
int offset = Context::SlotOffset(slot->index());
__ movq(rbx, rsi);
- __ RecordWrite(rbx, offset, result_register(), rcx);
-#endif
+ __ RecordWrite(rbx, offset, result_register(), rcx, kDontSaveFPRegs);
}
break;
@@ -1315,10 +1309,8 @@
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ movq(FieldOperand(rbx, offset), result_register());
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
// Update the write barrier for the array store.
- __ RecordWrite(rbx, offset, result_register(), rcx);
-#endif
+ __ RecordWrite(rbx, offset, result_register(), rcx, kDontSaveFPRegs);
}
if (result_saved) {
@@ -1635,13 +1627,11 @@
// Perform the assignment and issue the write barrier.
__ movq(target, rax);
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
// The value of the assignment is in rax. RecordWrite clobbers its
// register arguments.
__ movq(rdx, rax);
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(rcx, offset, rdx, rbx);
-#endif
+ __ RecordWrite(rcx, offset, rdx, rbx, kDontSaveFPRegs);
break;
}
@@ -2486,12 +2476,10 @@
// Store the value.
__ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ movq(rdx, rax);
- __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
-#endif
+ __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(rax);
=======================================
--- /branches/experimental/gc/src/x64/stub-cache-x64.cc Thu Jan 6 05:18:03 2011
+++ /branches/experimental/gc/src/x64/stub-cache-x64.cc Mon Jan 24 06:34:54 2011
@@ -807,10 +807,8 @@
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
__ movq(name_reg, rax);
- __ RecordWrite(receiver_reg, offset, name_reg, scratch);
-#endif
+ __ RecordWrite(receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -820,10 +818,8 @@
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
__ movq(name_reg, rax);
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
-#endif
+ __ RecordWrite(scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
}
// Return the value (register rax).
@@ -2532,9 +2528,7 @@
__ SmiToInteger32(rcx, rcx);
__ movq(FieldOperand(rdi, rcx, times_pointer_size,
FixedArray::kHeaderSize),
rax);
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
- __ RecordWrite(rdi, 0, rdx, rcx);
-#endif
+ __ RecordWrite(rdi, 0, rdx, rcx, kDontSaveFPRegs);
// Done.
__ ret(0);