Reviewers: Michael Starzinger,

Description:
Let store buffer start out small for a 1Mbyte saving in boot
memory use (2Mbyte on x64).

Please review this at http://codereview.chromium.org/8776032/

SVN Base: http://v8.googlecode.com/svn/branches/bleeding_edge/

Affected files:
  M     src/heap.cc
  M     src/platform-posix.cc
  M     src/platform-win32.cc
  M     src/platform.h
  M     src/store-buffer.h
  M     src/store-buffer.cc
  M     test/cctest/test-mark-compact.cc


Index: src/heap.cc
===================================================================
--- src/heap.cc (revision 10131)
+++ src/heap.cc (working copy)
@@ -1012,7 +1012,7 @@
       // Store Buffer overflowed while scanning promoted objects.  These are
       // not in any particular page, though they are likely to be clustered
       // by the allocation routines.
-      store_buffer_->HandleFullness();
+      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
     } else {
// Store Buffer overflowed while scanning a particular old space page for
       // pointers to new space.
Index: src/platform-posix.cc
===================================================================
--- src/platform-posix.cc       (revision 10131)
+++ src/platform-posix.cc       (working copy)
@@ -70,6 +70,11 @@
 }


+intptr_t OS::CommitPageSize() {
+  return 4096;
+}
+
+
 #ifndef __CYGWIN__
 // Get rid of writable permission on code allocations.
 void OS::ProtectCode(void* address, const size_t size) {
Index: src/platform-win32.cc
===================================================================
--- src/platform-win32.cc       (revision 10131)
+++ src/platform-win32.cc       (working copy)
@@ -889,6 +889,11 @@
 }


+intptr_t OS::CommitPageSize() {
+  return 4096;
+}
+
+
 void OS::ProtectCode(void* address, const size_t size) {
   DWORD old_protect;
   VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
Index: src/platform.h
===================================================================
--- src/platform.h      (revision 10131)
+++ src/platform.h      (working copy)
@@ -172,6 +172,10 @@
                         bool is_executable);
   static void Free(void* address, const size_t size);

+  // This is the granularity at which the ProtectCode(...) call can set page
+  // permissions.
+  static intptr_t CommitPageSize();
+
   // Mark code segments non-writable.
   static void ProtectCode(void* address, const size_t size);

Index: src/store-buffer.cc
===================================================================
--- src/store-buffer.cc (revision 10131)
+++ src/store-buffer.cc (working copy)
@@ -41,6 +41,7 @@
       old_start_(NULL),
       old_limit_(NULL),
       old_top_(NULL),
+      old_reserved_limit_(NULL),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
       during_gc_(false),
@@ -59,11 +60,28 @@
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
   start_ =
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
-  limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
+  limit_ = start_ + (kStoreBufferSize / kPointerSize);

-  old_top_ = old_start_ = new Address[kOldStoreBufferLength];
-  old_limit_ = old_start_ + kOldStoreBufferLength;
+  old_virtual_memory_ =
+      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
+  old_top_ = old_start_ =
+      reinterpret_cast<Address*>(old_virtual_memory_->address());
+  // Don't know the alignment requirements of the OS, but it is certainly
+  // not less than 4k (0x1000), which is what the mask below checks.
+  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  int initial_length = kInitialOldStoreBufferLength;
+  if (initial_length == 0) initial_length = 1;
+  while (initial_length * kPointerSize < OS::CommitPageSize()) {
+    initial_length *= 2;
+  }
+  old_limit_ = old_start_ + initial_length;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

+  CHECK(old_virtual_memory_->Commit(
+            reinterpret_cast<void*>(old_start_),
+            (old_limit_ - old_start_) * kPointerSize,
+            false));
+
   ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
   Address* vm_limit = reinterpret_cast<Address*>(
@@ -76,9 +94,9 @@
ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
          0);

-  virtual_memory_->Commit(reinterpret_cast<Address>(start_),
-                          kStoreBufferSize,
-                          false);  // Not executable.
+  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                                kStoreBufferSize,
+                                false));  // Not executable.
   heap_->public_set_store_buffer_top(start_);

   hash_map_1_ = new uintptr_t[kHashMapLength];
@@ -90,10 +108,10 @@

 void StoreBuffer::TearDown() {
   delete virtual_memory_;
+  delete old_virtual_memory_;
   delete[] hash_map_1_;
   delete[] hash_map_2_;
-  delete[] old_start_;
-  old_start_ = old_top_ = old_limit_ = NULL;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
   start_ = limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }
@@ -150,7 +168,17 @@
 }


-void StoreBuffer::HandleFullness() {
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
+  while (old_limit_ - old_top_ < space_needed &&
+         old_limit_ < old_reserved_limit_) {
+    size_t grow = old_limit_ - old_start_;  // Double size.
+    CHECK(old_virtual_memory_->Commit(
+        reinterpret_cast<void*>(old_limit_), grow * kPointerSize, false));
+    old_limit_ += grow;
+  }
+
+  if (old_limit_ - old_top_ >= space_needed) return;
+
   if (old_buffer_is_filtered_) return;
   ASSERT(may_move_store_buffer_entries_);
   Compact();
@@ -645,9 +673,7 @@
   // the worst case (compaction doesn't eliminate any pointers).
   ASSERT(top <= limit_);
   heap_->public_set_store_buffer_top(start_);
-  if (top - start_ > old_limit_ - old_top_) {
-    HandleFullness();
-  }
+  EnsureSpace(top - start_);
   ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates.  In the interest of speed this is a lossy operation.  Some
@@ -688,9 +714,7 @@


 void StoreBuffer::CheckForFullBuffer() {
-  if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
-    HandleFullness();
-  }
+  EnsureSpace(kStoreBufferSize * 2);
 }

 } }  // namespace v8::internal
Index: src/store-buffer.h
===================================================================
--- src/store-buffer.h  (revision 10131)
+++ src/store-buffer.h  (working copy)
@@ -85,6 +85,7 @@
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
   static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+  static const int kInitialOldStoreBufferLength = kOldStoreBufferLength >> 9;
   static const int kHashMapLengthLog2 = 12;
   static const int kHashMapLength = 1 << kHashMapLengthLog2;

@@ -109,7 +110,7 @@
   // been promoted.  Rebuilds the store buffer completely if it overflowed.
   void SortUniq();

-  void HandleFullness();
+  void EnsureSpace(intptr_t space_needed);
   void Verify();

   bool PrepareForIteration();
@@ -134,6 +135,8 @@
   Address* old_start_;
   Address* old_limit_;
   Address* old_top_;
+  Address* old_reserved_limit_;
+  VirtualMemory* old_virtual_memory_;

   bool old_buffer_is_sorted_;
   bool old_buffer_is_filtered_;
Index: test/cctest/test-mark-compact.cc
===================================================================
--- test/cctest/test-mark-compact.cc    (revision 10131)
+++ test/cctest/test-mark-compact.cc    (working copy)
@@ -528,7 +528,7 @@
   if (initial_memory >= 0) {
     InitializeVM();
     intptr_t booted_memory = MemoryInUse();
-    CHECK_LE(booted_memory - initial_memory, 20 * 1024 * 1024);
+    CHECK_LE(booted_memory - initial_memory, 18 * 1024 * 1024);
   }
 }



--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to