Revision: 3606
Author: [email protected]
Date: Thu Jan 14 06:46:31 2010
Log: * Improve the interface to the memory-reservation functionality.
* Add a test case that generates a serialization of a single flat string.
Review URL: http://codereview.chromium.org/542073
http://code.google.com/p/v8/source/detail?r=3606

Modified:
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/serialize.cc
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/test/cctest/test-serialize.cc
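
For context, the new interface gathers all per-space reservation sizes into a
single Heap::ReserveSpace call.  A minimal sketch of the intended usage,
mirroring the updated LinearAllocation test below (the 512 KB new-space cap
and the clamp are taken from that test; the surrounding setup, inside v8's
internal namespace with an initialized heap, is assumed):

  int size = 1 * MB;  // Bytes wanted in each space; illustrative only.
  int new_space_max = 512 * KB;
  int new_space_size = (size < new_space_max) ? size : new_space_max;
  Heap::ReserveSpace(
      new_space_size,
      size,              // Old pointer space.
      size,              // Old data space.
      size,              // Code space.
      size,              // Map space.
      size,              // Cell space.
      size);             // Large object space.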

=======================================
--- /branches/bleeding_edge/src/heap.cc Thu Jan  7 05:17:18 2010
+++ /branches/bleeding_edge/src/heap.cc Thu Jan 14 06:46:31 2010
@@ -477,6 +477,65 @@
   Heap::symbol_table()->IterateElements(&verifier);
 #endif  // DEBUG
 }
+
+
+void Heap::ReserveSpace(
+    int new_space_size,
+    int pointer_space_size,
+    int data_space_size,
+    int code_space_size,
+    int map_space_size,
+    int cell_space_size,
+    int large_object_size) {
+  NewSpace* new_space = Heap::new_space();
+  PagedSpace* old_pointer_space = Heap::old_pointer_space();
+  PagedSpace* old_data_space = Heap::old_data_space();
+  PagedSpace* code_space = Heap::code_space();
+  PagedSpace* map_space = Heap::map_space();
+  PagedSpace* cell_space = Heap::cell_space();
+  LargeObjectSpace* lo_space = Heap::lo_space();
+  bool gc_performed = true;
+  while (gc_performed) {
+    gc_performed = false;
+    if (!new_space->ReserveSpace(new_space_size)) {
+      Heap::CollectGarbage(new_space_size, NEW_SPACE);
+      gc_performed = true;
+    }
+    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
+      Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
+      gc_performed = true;
+    }
+    if (!old_data_space->ReserveSpace(data_space_size)) {
+      Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
+      gc_performed = true;
+    }
+    if (!code_space->ReserveSpace(code_space_size)) {
+      Heap::CollectGarbage(code_space_size, CODE_SPACE);
+      gc_performed = true;
+    }
+    if (!map_space->ReserveSpace(map_space_size)) {
+      Heap::CollectGarbage(map_space_size, MAP_SPACE);
+      gc_performed = true;
+    }
+    if (!cell_space->ReserveSpace(cell_space_size)) {
+      Heap::CollectGarbage(cell_space_size, CELL_SPACE);
+      gc_performed = true;
+    }
+    // We add a slack-factor of 2 in order to have space for the remembered
+    // set and a series of large-object allocations that are only just larger
+    // than the page size.
+    large_object_size *= 2;
+    // The ReserveSpace method on the large object space checks how much
+    // we can expand the old generation.  This includes expansion caused by
+    // allocation in the other spaces.
+    large_object_size += cell_space_size + map_space_size + code_space_size +
+        data_space_size + pointer_space_size;
+    if (!lo_space->ReserveSpace(large_object_size)) {
+      Heap::CollectGarbage(large_object_size, LO_SPACE);
+      gc_performed = true;
+    }
+  }
+}


 void Heap::EnsureFromSpaceIsCommitted() {
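
To make the slack computation in ReserveSpace concrete, here is a worked
instance with illustrative numbers (1 MB requested per space; these values
are not from the patch):

  large_object_size = 2 * 1 MB                  // slack factor of 2
                    + 5 * 1 MB                  // the five paged-space sizes
                    = 7 MB

which must not exceed Heap::OldGenerationSpaceAvailable(), or a GC of the
large object space is triggered and the reservation loop runs again.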
=======================================
--- /branches/bleeding_edge/src/heap.h  Wed Jan 13 11:16:07 2010
+++ /branches/bleeding_edge/src/heap.h  Thu Jan 14 06:46:31 2010
@@ -810,6 +810,21 @@
   // Commits from space if it is uncommitted.
   static void EnsureFromSpaceIsCommitted();

+  // Support for partial snapshots.  After calling this we can allocate a
+  // certain number of bytes using only linear allocation (with a
+  // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
+  // or causing a GC.  It collects garbage until the requested space is
+  // available.  For paged spaces the space requested must include the space
+  // wasted at the end of each page when allocating linearly.
+  static void ReserveSpace(
+    int new_space_size,
+    int pointer_space_size,
+    int data_space_size,
+    int code_space_size,
+    int map_space_size,
+    int cell_space_size,
+    int large_object_size);
+
   //
   // Support for the API.
   //
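
The ReserveSpace comment above names the two scopes a caller combines with
it.  A hedged sketch of that contract (the counts are illustrative, passing
zero for the unused space arguments is an assumption, and the constants and
check macros come from the test below):

  const int kLength = 4;
  const int kSize = FixedArray::kHeaderSize + kLength * kPointerSize;
  Heap::ReserveSpace(0, 10 * kSize, 0, 0, 0, 0, 0);  // Old pointer space only.
  LinearAllocationScope linear_scope;  // Allocate at the top, not freelists.
  AlwaysAllocateScope always_scope;    // Do not GC on allocation.
  Object* last = NULL;
  for (int i = 0; i < 10; i++) {
    Object* obj = Heap::AllocateFixedArray(kLength, TENURED);
    CHECK(!obj->IsFailure());
    // With linear allocation, successive objects are contiguous.
    if (last != NULL) {
      CHECK_EQ(reinterpret_cast<char*>(obj),
               reinterpret_cast<char*>(last) + kSize);
    }
    last = obj;
  }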
=======================================
--- /branches/bleeding_edge/src/serialize.cc    Tue Jan 12 07:16:23 2010
+++ /branches/bleeding_edge/src/serialize.cc    Thu Jan 14 06:46:31 2010
@@ -993,7 +993,8 @@
       sink_->PutInt(root_index, "root_index");
       return;
     }
-    // TODO(erikcorry): Handle symbols here.
+    // All the symbols that the snapshot needs should be in the root table.
+    ASSERT(!heap_object->IsSymbol());
   }
   if (SerializationAddressMapper::IsMapped(heap_object)) {
     int space = SpaceOfAlreadySerializedObject(heap_object);
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Tue Jan 12 07:16:23 2010
+++ /branches/bleeding_edge/src/spaces.cc       Thu Jan 14 06:46:31 2010
@@ -1863,10 +1863,7 @@
 // You have to call this last, since the implementation from PagedSpace
 // doesn't know that memory was 'promised' to large object space.
 bool LargeObjectSpace::ReserveSpace(int bytes) {
-  // We add a slack-factor of 2 in order to have space for the remembered
-  // set and a series of large-object allocations that are only just larger
-  // than the page size.
-  return Heap::OldGenerationSpaceAvailable() >= bytes * 2;
+  return Heap::OldGenerationSpaceAvailable() >= bytes;
 }


=======================================
--- /branches/bleeding_edge/test/cctest/test-serialize.cc Tue Jan 12 07:36:43 2010
+++ /branches/bleeding_edge/test/cctest/test-serialize.cc Thu Jan 14 06:46:31 2010
@@ -277,55 +277,64 @@
   v8::Local<v8::Script> script = v8::Script::Compile(source);
   CHECK_EQ(4, script->Run()->Int32Value());
 }
+
+
+class FileByteSink : public SnapshotByteSink {
+ public:
+  explicit FileByteSink(const char* snapshot_file) {
+    fp_ = OS::FOpen(snapshot_file, "wb");
+    if (fp_ == NULL) {
+      PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+      exit(1);
+    }
+  }
+  virtual ~FileByteSink() {
+    if (fp_ != NULL) {
+      fclose(fp_);
+    }
+  }
+  virtual void Put(int byte, const char* description) {
+    if (fp_ != NULL) {
+      fputc(byte, fp_);
+    }
+  }
+
+ private:
+  FILE* fp_;
+};
+
+
+TEST(PartialSerialization) {
+  Serializer::Enable();
+  v8::V8::Initialize();
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  env->Enter();
+
+  v8::HandleScope handle_scope;
+  v8::Local<v8::String> foo = v8::String::New("foo");
+
+  FileByteSink file(FLAG_testing_serialization_file);
+  Serializer ser(&file);
+  i::Handle<i::String> internal_foo = v8::Utils::OpenHandle(*foo);
+  Object* raw_foo = *internal_foo;
+  ser.SerializePartial(&raw_foo);
+}


 TEST(LinearAllocation) {
   v8::V8::Initialize();
-  NewSpace* new_space = Heap::new_space();
-  PagedSpace* old_pointer_space = Heap::old_pointer_space();
-  PagedSpace* old_data_space = Heap::old_data_space();
-  PagedSpace* code_space = Heap::code_space();
-  PagedSpace* map_space = Heap::map_space();
-  PagedSpace* cell_space = Heap::cell_space();
   int new_space_max = 512 * KB;
   for (int size = 1000; size < 5 * MB; size += size >> 1) {
-    bool gc_performed = true;
-    while (gc_performed) {
-      gc_performed = false;
-      if (size < new_space_max) {
-        if (!new_space->ReserveSpace(size)) {
-          Heap::CollectGarbage(size, NEW_SPACE);
-          gc_performed = true;
-          CHECK(new_space->ReserveSpace(size));
-        }
-      }
-      if (!old_pointer_space->ReserveSpace(size)) {
-        Heap::CollectGarbage(size, OLD_POINTER_SPACE);
-        gc_performed = true;
-        CHECK(old_pointer_space->ReserveSpace(size));
-      }
-      if (!(old_data_space->ReserveSpace(size))) {
-        Heap::CollectGarbage(size, OLD_DATA_SPACE);
-        gc_performed = true;
-        CHECK(old_data_space->ReserveSpace(size));
-      }
-      if (!(code_space->ReserveSpace(size))) {
-        Heap::CollectGarbage(size, CODE_SPACE);
-        gc_performed = true;
-        CHECK(code_space->ReserveSpace(size));
-      }
-      if (!(map_space->ReserveSpace(size))) {
-        Heap::CollectGarbage(size, MAP_SPACE);
-        gc_performed = true;
-        CHECK(map_space->ReserveSpace(size));
-      }
-      if (!(cell_space->ReserveSpace(size))) {
-        Heap::CollectGarbage(size, CELL_SPACE);
-        gc_performed = true;
-        CHECK(cell_space->ReserveSpace(size));
-      }
-    }
-    LinearAllocationScope scope;
+    int new_space_size = (size < new_space_max) ? size : new_space_max;
+    Heap::ReserveSpace(
+        new_space_size,
+        size,              // Old pointer space.
+        size,              // Old data space.
+        size,              // Code space.
+        size,              // Map space.
+        size,              // Cell space.
+        size);             // Large object space.
+    LinearAllocationScope linear_allocation_scope;
     const int kSmallFixedArrayLength = 4;
     const int kSmallFixedArraySize =
         FixedArray::kHeaderSize + kSmallFixedArrayLength * kPointerSize;
@@ -334,70 +343,69 @@
         SeqAsciiString::kHeaderSize + kSmallStringLength;
     const int kMapSize = Map::kSize;

-    if (size < new_space_max) {
-      Object* new_last = NULL;
-      for (int i = 0;
-           i + kSmallFixedArraySize <= size; i += kSmallFixedArraySize) {
-        Object* o = Heap::AllocateFixedArray(kSmallFixedArrayLength);
-        if (new_last != NULL) {
-          CHECK_EQ(reinterpret_cast<char*>(o),
-                   reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
-        }
-        new_last = o;
-      }
+    Object* new_last = NULL;
+    for (int i = 0;
+         i + kSmallFixedArraySize <= new_space_size;
+         i += kSmallFixedArraySize) {
+      Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength);
+      if (new_last != NULL) {
+        CHECK_EQ(reinterpret_cast<char*>(obj),
+                 reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
+      }
+      new_last = obj;
     }

-    Object* new_pointer = NULL;
+    Object* pointer_last = NULL;
     for (int i = 0;
          i + kSmallFixedArraySize <= size;
          i += kSmallFixedArraySize) {
-      Object* o = Heap::AllocateFixedArray(kSmallFixedArrayLength, TENURED);
+      Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength, TENURED);
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
           page_fullness > Page::kObjectAreaSize) {
         i = RoundUp(i, Page::kPageSize);
-        new_pointer = NULL;
-      }
-      if (new_pointer != NULL) {
-        CHECK_EQ(reinterpret_cast<char*>(o),
-                 reinterpret_cast<char*>(new_pointer) + kSmallFixedArraySize);
-      }
-      new_pointer = o;
+        pointer_last = NULL;
+      }
+      if (pointer_last != NULL) {
+        CHECK_EQ(reinterpret_cast<char*>(obj),
+                 reinterpret_cast<char*>(pointer_last) + kSmallFixedArraySize);
+      }
+      pointer_last = obj;
     }

-    new_pointer = NULL;
+    Object* data_last = NULL;
     for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
-      Object* o = Heap::AllocateRawAsciiString(kSmallStringLength, TENURED);
+      Object* obj = Heap::AllocateRawAsciiString(kSmallStringLength, TENURED);
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
           page_fullness > Page::kObjectAreaSize) {
         i = RoundUp(i, Page::kPageSize);
-        new_pointer = NULL;
-      }
-      if (new_pointer != NULL) {
-        CHECK_EQ(reinterpret_cast<char*>(o),
-                 reinterpret_cast<char*>(new_pointer) + kSmallStringSize);
-      }
-      new_pointer = o;
+        data_last = NULL;
+      }
+      if (data_last != NULL) {
+        CHECK_EQ(reinterpret_cast<char*>(obj),
+                 reinterpret_cast<char*>(data_last) + kSmallStringSize);
+      }
+      data_last = obj;
     }

-    new_pointer = NULL;
+    Object* map_last = NULL;
     for (int i = 0; i + kMapSize <= size; i += kMapSize) {
-      Object* o = Heap::AllocateMap(JS_OBJECT_TYPE, 42 * kPointerSize);
+      Object* obj = Heap::AllocateMap(JS_OBJECT_TYPE, 42 * kPointerSize);
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kMapSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
           page_fullness > Page::kObjectAreaSize) {
         i = RoundUp(i, Page::kPageSize);
-        new_pointer = NULL;
-      }
-      if (new_pointer != NULL) {
-        CHECK_EQ(reinterpret_cast<char*>(o),
-                 reinterpret_cast<char*>(new_pointer) + kMapSize);
-      }
-      new_pointer = o;
+        map_last = NULL;
+      }
+      if (map_last != NULL) {
+        CHECK_EQ(reinterpret_cast<char*>(obj),
+                 reinterpret_cast<char*>(map_last) + kMapSize);
+      }
+      map_last = obj;
     }

     if (size > Page::kObjectAreaSize) {
@@ -406,9 +414,9 @@
       AlwaysAllocateScope always;
       int large_object_array_length =
           (size - FixedArray::kHeaderSize) / kPointerSize;
-      new_pointer = Heap::AllocateFixedArray(large_object_array_length,
+      Object* obj = Heap::AllocateFixedArray(large_object_array_length,
                                              TENURED);
-      ASSERT(!new_pointer->IsFailure());
+      CHECK(!obj->IsFailure());
     }
   }
 }
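
The page-fullness arithmetic in the loops above deserves a worked instance.
Assuming 8 KB pages and a 24-byte small fixed array (illustrative 32-bit
values; the real constants come from Page and FixedArray):

  i              = 8184   // old_page_fullness = 8184 % 8192 = 8184
  i + kArraySize = 8208   // page_fullness     = 8208 % 8192 = 16

Since 16 < 8184, the allocation would cross a page boundary; the test rounds
i up to the next multiple of Page::kPageSize and resets its contiguity
pointer, because linear allocation restarts at the top of a fresh page rather
than spanning pages.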