Reviewers: danno, Sven Panne,

Message:
This CL reserves a region of memory from the code range and returns it as a
VirtualMemory object. When this object is no longer used, we return it to the
free list of the code range and reset its address and size.

Compared with the existing solution, this CL wastes some virtual memory because
addresses handed out by the code range are 1MB aligned. But it ensures that all
executable memory for X64 is allocated from the code range, so we can optimize
the deoptimization jump table in optimized code (DeoptimizeIf from
lithium-codegen-x64.cc) by using direct jumps/branches.

Description:
Decouple allocation and creation of deopt tables from code range for X64

Please review this at https://codereview.chromium.org/11825023/

SVN Base: http://v8.googlecode.com/svn/branches/bleeding_edge/

Affected files:
  M     src/deoptimizer.cc
  M     src/platform.h
  M     src/spaces.h
  M     src/spaces.cc


Index: src/deoptimizer.cc
===================================================================
--- src/deoptimizer.cc  (revision 13335)
+++ src/deoptimizer.cc  (working copy)
@@ -44,8 +44,15 @@
   eager_deoptimization_entry_code_entries_ = -1;
   lazy_deoptimization_entry_code_entries_ = -1;
   size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
-  eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
-  lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+  if (Isolate::Current()->code_range()->exists()) {
+    eager_deoptimization_entry_code_ =
+ Isolate::Current()->code_range()->ReserveAlignedMemory(deopt_table_size);
+    lazy_deoptimization_entry_code_ =
+ Isolate::Current()->code_range()->ReserveAlignedMemory(deopt_table_size);
+  } else {
+    eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+    lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+  }
   current_ = NULL;
   deoptimizing_code_list_ = NULL;
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -55,6 +62,18 @@


 DeoptimizerData::~DeoptimizerData() {
+  if (Isolate::Current()->code_range()->exists()) {
+    // Return them to the freelist of code range
+    Isolate::Current()->code_range()->FreeRawMemory(
+ static_cast<Address>(eager_deoptimization_entry_code_->address()),
+            eager_deoptimization_entry_code_->size());
+    Isolate::Current()->code_range()->FreeRawMemory(
+ static_cast<Address>(lazy_deoptimization_entry_code_->address()),
+            lazy_deoptimization_entry_code_->size());
+    eager_deoptimization_entry_code_->Reset();
+    lazy_deoptimization_entry_code_->Reset();
+  }
+
   delete eager_deoptimization_entry_code_;
   eager_deoptimization_entry_code_ = NULL;
   delete lazy_deoptimization_entry_code_;
Index: src/platform.h
===================================================================
--- src/platform.h      (revision 13335)
+++ src/platform.h      (working copy)
@@ -384,6 +384,14 @@
   // Initialize or resets an embedded VirtualMemory object.
   void Reset();

+  // Adopts an already-reserved region (e.g. one handed out by
+  // CodeRange::ReserveAlignedMemory).  NOTE(review): ~VirtualMemory appears
+  // to release the region, so non-owning users must call Reset() first.
+  void Set(void* address, size_t size) {
+    address_ = address;
+    size_ = size;
+  }
+
   // Returns the start address of the reserved memory.
   // If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
Index: src/spaces.cc
===================================================================
--- src/spaces.cc       (revision 13335)
+++ src/spaces.cc       (working copy)
@@ -207,9 +207,9 @@
 }


-
 Address CodeRange::AllocateRawMemory(const size_t requested,
-                                     size_t* allocated) {
+                                     size_t* allocated,
+                                     bool commit) {
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
@@ -227,9 +227,9 @@
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitCodePage(code_range_,
-                                       current.start,
-                                       *allocated)) {
+  if (commit && !MemoryAllocator::CommitCodePage(code_range_,
+                                                 current.start,
+                                                 *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -249,6 +249,19 @@
 }


+// Reserves (without committing) an aligned block from the code range and
+// wraps it in a heap-allocated VirtualMemory for the deoptimizer tables.
+VirtualMemory* CodeRange::ReserveAlignedMemory(const size_t requested) {
+  size_t reserved_size = 0;  // Stays 0 if the reservation below fails.
+  VirtualMemory* reserved_virtual_memory = new VirtualMemory();
+  Address base = AllocateRawMemory(requested, &reserved_size, false);
+  // AllocateRawMemory returns NULL on failure; hand back an unreserved
+  // VirtualMemory rather than calling Set() with a NULL base.
+  if (base != NULL) {
+    reserved_virtual_memory->Set(static_cast<void*>(base), reserved_size);
+  }
+  return reserved_virtual_memory;
+}
+
+
 void CodeRange::TearDown() {
     delete code_range_;  // Frees all memory in the virtual memory range.
     code_range_ = NULL;
Index: src/spaces.h
===================================================================
--- src/spaces.h        (revision 13335)
+++ src/spaces.h        (working copy)
@@ -888,9 +888,15 @@
   // the code range.  On platforms with no separate code range, should
   // not be called.
   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
-                                            size_t* allocated);
+                                            size_t* allocated,
+                                            bool commit = true);
   void FreeRawMemory(Address buf, size_t length);

+  // This function is used by DeoptimizerData to decouple the reservation
+ // and commitment of deoptimization entry table. We do not need to release
+  // the reserved virtual memory as it is inside the code range.
+  VirtualMemory* ReserveAlignedMemory(const size_t requested);
+
  private:
   Isolate* isolate_;



--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to