Revision: 6634
Author: [email protected]
Date: Fri Feb 4 04:05:25 2011
Log: Fix some correctness issues with the store buffer shown up by
the mozilla tests.
Review URL: http://codereview.chromium.org/6312147
http://code.google.com/p/v8/source/detail?r=6634
Modified:
/branches/experimental/gc/src/heap.cc
/branches/experimental/gc/src/store-buffer-inl.h
/branches/experimental/gc/src/store-buffer.cc
/branches/experimental/gc/src/store-buffer.h
=======================================
--- /branches/experimental/gc/src/heap.cc Thu Feb 3 05:20:16 2011
+++ /branches/experimental/gc/src/heap.cc Fri Feb 4 04:05:25 2011
@@ -4125,8 +4125,8 @@
if (Heap::InNewSpace(*slot)) {
ASSERT(Heap::InToSpace(*slot));
ASSERT((*slot)->IsHeapObject());
- StoreBuffer::EnterDirectlyIntoStoreBuffer(
- reinterpret_cast<Address>(slot));
+ ASSERT(StoreBuffer::CellIsInStoreBuffer(
+ reinterpret_cast<Address>(slot)));
}
}
slot_address += kPointerSize;
@@ -4230,7 +4230,6 @@
} else {
end = page->CachedAllocationWatermark();
}
-
Address map_aligned_current = page->ObjectAreaStart();
=======================================
--- /branches/experimental/gc/src/store-buffer-inl.h Thu Feb 3 05:20:16 2011
+++ /branches/experimental/gc/src/store-buffer-inl.h Fri Feb 4 04:05:25 2011
@@ -50,6 +50,22 @@
ASSERT(top < limit_);
}
}
+
+
+void StoreBuffer::set_store_buffer_mode(StoreBuffer::StoreBufferMode mode) {
+ if (FLAG_trace_gc) {
+ if (mode != store_buffer_mode_) {
+ if (mode == kStoreBufferDisabled) {
+ PrintF("Store buffer overflowed.\n");
+ } else if (mode == kStoreBufferBeingRebuilt) {
+ PrintF("Store buffer being rebuilt.\n");
+ } else if (mode == kStoreBufferFunctional) {
+ PrintF("Store buffer reenabled.\n");
+ }
+ }
+ }
+ store_buffer_mode_ = mode;
+}
void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
@@ -59,9 +75,10 @@
old_top_ = top;
if (top >= old_limit_) {
Counters::store_buffer_overflows.Increment();
- store_buffer_mode_ = kStoreBufferDisabled;
+ set_store_buffer_mode(kStoreBufferDisabled);
old_top_ = old_start_;
}
+ old_buffer_is_sorted_ = false;
}
}
=======================================
--- /branches/experimental/gc/src/store-buffer.cc Thu Feb 3 05:20:16 2011
+++ /branches/experimental/gc/src/store-buffer.cc Fri Feb 4 04:05:25 2011
@@ -81,7 +81,7 @@
Heap::AddGCPrologueCallback(&GCPrologue, kGCTypeAll);
Heap::AddGCEpilogueCallback(&GCEpilogue, kGCTypeAll);
- GCPrologue(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
+ ZapHashTables();
}
@@ -146,7 +146,7 @@
void StoreBuffer::SortUniq() {
Compact();
if (old_buffer_is_sorted_) return;
- if (store_buffer_mode_ == kStoreBufferDisabled) {
+ if (store_buffer_mode() == kStoreBufferDisabled) {
old_top_ = old_start_;
return;
}
@@ -163,7 +163,7 @@
#ifdef DEBUG
void StoreBuffer::Clean() {
- if (store_buffer_mode_ == kStoreBufferDisabled) {
+ if (store_buffer_mode() == kStoreBufferDisabled) {
old_top_ = old_start_; // Just clear the cache.
return;
}
@@ -194,7 +194,7 @@
bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
if (!FLAG_enable_slow_asserts) return true;
- if (store_buffer_mode_ != kStoreBufferFunctional) return true;
+ if (store_buffer_mode() != kStoreBufferFunctional) return true;
if (in_store_buffer_1_element_cache != NULL &&
*in_store_buffer_1_element_cache == cell_address) {
return true;
@@ -236,7 +236,7 @@
void StoreBuffer::Verify() {
#ifdef DEBUG
if (FLAG_verify_heap &&
- StoreBuffer::store_buffer_mode_ == kStoreBufferFunctional) {
+ StoreBuffer::store_buffer_mode() == kStoreBufferFunctional) {
Heap::OldPointerSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
Heap::MapSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
Heap::LargeObjectSpaceCheckStoreBuffer();
@@ -247,19 +247,22 @@
void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) {
during_gc_ = false;
- if (store_buffer_mode_ == kStoreBufferBeingRebuilt) {
- store_buffer_mode_ = kStoreBufferFunctional;
+ if (store_buffer_mode() == kStoreBufferBeingRebuilt) {
+ set_store_buffer_mode(kStoreBufferFunctional);
}
Verify();
}
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback callback) {
- if (store_buffer_mode_ != kStoreBufferFunctional) {
+ if (store_buffer_mode() == kStoreBufferFunctional) {
+ SortUniq();
+ }
+ if (store_buffer_mode() != kStoreBufferFunctional) {
old_top_ = old_start_;
ZapHashTables();
Heap::public_set_store_buffer_top(start_);
- store_buffer_mode_ = kStoreBufferBeingRebuilt;
+ set_store_buffer_mode(kStoreBufferBeingRebuilt);
Heap::IteratePointers(Heap::old_pointer_space(),
&Heap::IteratePointersToNewSpace,
callback,
@@ -272,7 +275,6 @@
Heap::lo_space()->IteratePointersToNewSpace(callback);
} else {
- SortUniq();
Address* limit = old_top_;
old_top_ = old_start_;
{
@@ -311,7 +313,7 @@
if (top - start_ > old_limit_ - old_top_) {
CheckForFullBuffer();
}
- if (store_buffer_mode_ == kStoreBufferDisabled) return;
+ if (store_buffer_mode() == kStoreBufferDisabled) return;
ASSERT(may_move_store_buffer_entries_);
// Goes through the addresses in the store buffer attempting to remove
// duplicates. In the interest of speed this is a lossy operation. Some
@@ -353,7 +355,9 @@
// the next two compressions will have enough space in the buffer. We
// start by trying a more agressive compression. If this frees up at
least
// half the space then we can keep going, otherwise it is time to
brake.
- SortUniq();
+ if (!during_gc_) {
+ SortUniq();
+ }
if (old_limit_ - old_top_ < old_limit_ - old_top_) {
return;
}
@@ -365,7 +369,7 @@
// compression to be guaranteed to succeed.
// TODO(gc): Set a flag to scan all of memory.
Counters::store_buffer_overflows.Increment();
- store_buffer_mode_ = kStoreBufferDisabled;
+ set_store_buffer_mode(kStoreBufferDisabled);
}
}
}
=======================================
--- /branches/experimental/gc/src/store-buffer.h Thu Feb 3 05:20:16 2011
+++ /branches/experimental/gc/src/store-buffer.h Fri Feb 4 04:05:25 2011
@@ -64,10 +64,6 @@
// queue and they can overflow this buffer, which we must check for.
static inline void EnterDirectlyIntoStoreBuffer(Address addr);
- enum RebuildStoreBufferMode {
- kRebuildStoreBufferWhileIterating,
- kPreserveStoreBufferWhileIterating};
-
// Iterates over all pointers that go from old space to new space. It will
// delete the store buffer as it starts so the callback should reenter
// surviving old-to-new pointers into the store buffer to rebuild it.
@@ -94,6 +90,7 @@
};
static StoreBufferMode store_buffer_mode() { return store_buffer_mode_; }
+ static inline void set_store_buffer_mode(StoreBufferMode mode);
static bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
// Goes through the store buffer removing pointers to things that have
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev