Revision: 10143
Author: [email protected]
Date: Fri Dec 2 06:08:12 2011
Log: Let store buffer start out small for a 1Mbyte saving in boot
memory use (2Mbyte on x64).
Review URL: http://codereview.chromium.org/8776032
http://code.google.com/p/v8/source/detail?r=10143
Modified:
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/platform-posix.cc
/branches/bleeding_edge/src/platform-win32.cc
/branches/bleeding_edge/src/platform.h
/branches/bleeding_edge/src/store-buffer.cc
/branches/bleeding_edge/src/store-buffer.h
/branches/bleeding_edge/test/cctest/test-mark-compact.cc
=======================================
--- /branches/bleeding_edge/src/heap.cc Thu Dec 1 04:17:19 2011
+++ /branches/bleeding_edge/src/heap.cc Fri Dec 2 06:08:12 2011
@@ -1012,7 +1012,7 @@
// Store Buffer overflowed while scanning promoted objects. These are not
// in any particular page, though they are likely to be clustered by the
// allocation routines.
- store_buffer_->HandleFullness();
+ store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
} else {
// Store Buffer overflowed while scanning a particular old space page for
// pointers to new space.
=======================================
--- /branches/bleeding_edge/src/platform-posix.cc Mon Nov 21 07:01:52 2011
+++ /branches/bleeding_edge/src/platform-posix.cc Fri Dec 2 06:08:12 2011
@@ -68,6 +68,11 @@
if (result != 0) return 0;
return limit.rlim_cur;
}
+
+
+intptr_t OS::CommitPageSize() {
+ return 4096;
+}
#ifndef __CYGWIN__
=======================================
--- /branches/bleeding_edge/src/platform-win32.cc Thu Sep 29 05:23:05 2011
+++ /branches/bleeding_edge/src/platform-win32.cc Fri Dec 2 06:08:12 2011
@@ -887,6 +887,11 @@
VirtualFree(address, 0, MEM_RELEASE);
USE(size);
}
+
+
+intptr_t OS::CommitPageSize() {
+ return 4096;
+}
void OS::ProtectCode(void* address, const size_t size) {
=======================================
--- /branches/bleeding_edge/src/platform.h Mon Oct 10 07:17:42 2011
+++ /branches/bleeding_edge/src/platform.h Fri Dec 2 06:08:12 2011
@@ -172,6 +172,10 @@
bool is_executable);
static void Free(void* address, const size_t size);
+ // This is the granularity at which the ProtectCode(...) call can set page
+ // permissions.
+ static intptr_t CommitPageSize();
+
// Mark code segments non-writable.
static void ProtectCode(void* address, const size_t size);
=======================================
--- /branches/bleeding_edge/src/store-buffer.cc Tue Oct 25 06:27:46 2011
+++ /branches/bleeding_edge/src/store-buffer.cc Fri Dec 2 06:08:12 2011
@@ -41,6 +41,7 @@
old_start_(NULL),
old_limit_(NULL),
old_top_(NULL),
+ old_reserved_limit_(NULL),
old_buffer_is_sorted_(false),
old_buffer_is_filtered_(false),
during_gc_(false),
@@ -59,10 +60,25 @@
reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_ =
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
- limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
-
- old_top_ = old_start_ = new Address[kOldStoreBufferLength];
- old_limit_ = old_start_ + kOldStoreBufferLength;
+ limit_ = start_ + (kStoreBufferSize / kPointerSize);
+
+ old_virtual_memory_ =
+ new VirtualMemory(kOldStoreBufferLength * kPointerSize);
+ old_top_ = old_start_ =
+ reinterpret_cast<Address*>(old_virtual_memory_->address());
+ // Don't know the alignment requirements of the OS, but it is certainly not
+ // less than 0xfff.
+ ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+ int initial_length = OS::CommitPageSize() / kPointerSize;
+ ASSERT(initial_length > 0);
+ ASSERT(initial_length <= kOldStoreBufferLength);
+ old_limit_ = old_start_ + initial_length;
+ old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+
+ CHECK(old_virtual_memory_->Commit(
+ reinterpret_cast<void*>(old_start_),
+ (old_limit_ - old_start_) * kPointerSize,
+ false));
ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
@@ -76,9 +92,9 @@
ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
0);
- virtual_memory_->Commit(reinterpret_cast<Address>(start_),
- kStoreBufferSize,
- false); // Not executable.
+ CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+ kStoreBufferSize,
+ false)); // Not executable.
heap_->public_set_store_buffer_top(start_);
hash_map_1_ = new uintptr_t[kHashMapLength];
@@ -90,10 +106,10 @@
void StoreBuffer::TearDown() {
delete virtual_memory_;
+ delete old_virtual_memory_;
delete[] hash_map_1_;
delete[] hash_map_2_;
- delete[] old_start_;
- old_start_ = old_top_ = old_limit_ = NULL;
+ old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
start_ = limit_ = NULL;
heap_->public_set_store_buffer_top(start_);
}
@@ -150,7 +166,18 @@
}
-void StoreBuffer::HandleFullness() {
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
+ while (old_limit_ - old_top_ < space_needed &&
+ old_limit_ < old_reserved_limit_) {
+ size_t grow = old_limit_ - old_start_; // Double size.
+ CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+ grow * kPointerSize,
+ false));
+ old_limit_ += grow;
+ }
+
+ if (old_limit_ - old_top_ >= space_needed) return;
+
if (old_buffer_is_filtered_) return;
ASSERT(may_move_store_buffer_entries_);
Compact();
@@ -645,9 +672,7 @@
// the worst case (compaction doesn't eliminate any pointers).
ASSERT(top <= limit_);
heap_->public_set_store_buffer_top(start_);
- if (top - start_ > old_limit_ - old_top_) {
- HandleFullness();
- }
+ EnsureSpace(top - start_);
ASSERT(may_move_store_buffer_entries_);
// Goes through the addresses in the store buffer attempting to remove
// duplicates. In the interest of speed this is a lossy operation. Some
@@ -688,9 +713,7 @@
void StoreBuffer::CheckForFullBuffer() {
- if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
- HandleFullness();
- }
+ EnsureSpace(kStoreBufferSize * 2);
}
} } // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/store-buffer.h Wed Nov 9 01:51:54 2011
+++ /branches/bleeding_edge/src/store-buffer.h Fri Dec 2 06:08:12 2011
@@ -109,7 +109,7 @@
// been promoted. Rebuilds the store buffer completely if it overflowed.
void SortUniq();
- void HandleFullness();
+ void EnsureSpace(intptr_t space_needed);
void Verify();
bool PrepareForIteration();
@@ -134,6 +134,8 @@
Address* old_start_;
Address* old_limit_;
Address* old_top_;
+ Address* old_reserved_limit_;
+ VirtualMemory* old_virtual_memory_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;
=======================================
--- /branches/bleeding_edge/test/cctest/test-mark-compact.cc Thu Dec 1 04:32:38 2011
+++ /branches/bleeding_edge/test/cctest/test-mark-compact.cc Fri Dec 2 06:08:12 2011
@@ -528,7 +528,7 @@
if (initial_memory >= 0) {
InitializeVM();
intptr_t booted_memory = MemoryInUse();
- CHECK_LE(booted_memory - initial_memory, 20 * 1024 * 1024);
+ CHECK_LE(booted_memory - initial_memory, 18 * 1024 * 1024);
}
}
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev