Revision: 12877
Author:   [email protected]
Date:     Wed Nov  7 00:49:17 2012
Log:      Decouple allocation and creation of deopt tables

This makes it possible to calculate the future address of a deopt entry before the deopt table itself has been generated.

Review URL: https://codereview.chromium.org/11275145
http://code.google.com/p/v8/source/detail?r=12877

Modified:
 /branches/bleeding_edge/src/api.cc
 /branches/bleeding_edge/src/deoptimizer.cc
 /branches/bleeding_edge/src/deoptimizer.h

=======================================
--- /branches/bleeding_edge/src/api.cc  Tue Nov  6 09:32:15 2012
+++ /branches/bleeding_edge/src/api.cc  Wed Nov  7 00:49:17 2012
@@ -6501,6 +6501,7 @@


 void Testing::DeoptimizeAll() {
+  i::HandleScope scope;
   internal::Deoptimizer::DeoptimizeAll();
 }

=======================================
--- /branches/bleeding_edge/src/deoptimizer.cc  Thu Oct 25 02:35:55 2012
+++ /branches/bleeding_edge/src/deoptimizer.cc  Wed Nov  7 00:49:17 2012
@@ -41,8 +41,11 @@
 namespace internal {

 DeoptimizerData::DeoptimizerData() {
-  eager_deoptimization_entry_code_ = NULL;
-  lazy_deoptimization_entry_code_ = NULL;
+  eager_deoptimization_entry_code_entries_ = -1;
+  lazy_deoptimization_entry_code_entries_ = -1;
+  size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
+  eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+  lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
   current_ = NULL;
   deoptimizing_code_list_ = NULL;
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -52,16 +55,11 @@


 DeoptimizerData::~DeoptimizerData() {
-  if (eager_deoptimization_entry_code_ != NULL) {
-    Isolate::Current()->memory_allocator()->Free(
-        eager_deoptimization_entry_code_);
-    eager_deoptimization_entry_code_ = NULL;
-  }
-  if (lazy_deoptimization_entry_code_ != NULL) {
-    Isolate::Current()->memory_allocator()->Free(
-        lazy_deoptimization_entry_code_);
-    lazy_deoptimization_entry_code_ = NULL;
-  }
+  delete eager_deoptimization_entry_code_;
+  eager_deoptimization_entry_code_ = NULL;
+  delete lazy_deoptimization_entry_code_;
+  lazy_deoptimization_entry_code_ = NULL;
+
   DeoptimizingCodeListNode* current = deoptimizing_code_list_;
   while (current != NULL) {
     DeoptimizingCodeListNode* prev = current;
@@ -101,6 +99,19 @@
   isolate->deoptimizer_data()->current_ = deoptimizer;
   return deoptimizer;
 }
+
+
+// No larger than 2K on all platforms
+static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
+
+
+size_t Deoptimizer::GetMaxDeoptTableSize() {
+  size_t entries_size =
+      Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
+  int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
+                    OS::CommitPageSize()) + 1;
+  return OS::CommitPageSize() * page_count;
+}


 Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
@@ -461,44 +472,45 @@
 }


-Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
+Address Deoptimizer::GetDeoptimizationEntry(int id,
+                                            BailoutType type,
+                                            GetEntryMode mode) {
   ASSERT(id >= 0);
-  if (id >= kNumberOfEntries) return NULL;
-  MemoryChunk* base = NULL;
+  if (id >= kMaxNumberOfEntries) return NULL;
+  VirtualMemory* base = NULL;
+  if (mode == ENSURE_ENTRY_CODE) {
+    EnsureCodeForDeoptimizationEntry(type, id);
+  } else {
+    ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
+  }
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
-    if (data->eager_deoptimization_entry_code_ == NULL) {
-      data->eager_deoptimization_entry_code_ = CreateCode(type);
-    }
     base = data->eager_deoptimization_entry_code_;
   } else {
-    if (data->lazy_deoptimization_entry_code_ == NULL) {
-      data->lazy_deoptimization_entry_code_ = CreateCode(type);
-    }
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->area_start()) + (id * table_entry_size_);
+      static_cast<Address>(base->address()) + (id * table_entry_size_);
 }


 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  MemoryChunk* base = NULL;
+  VirtualMemory* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
+  Address base_casted = reinterpret_cast<Address>(base->address());
   if (base == NULL ||
-      addr < base->area_start() ||
-      addr >= base->area_start() +
-          (kNumberOfEntries * table_entry_size_)) {
+      addr < base->address() ||
+      addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->area_start()) % table_entry_size_);
-  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
+            static_cast<int>(addr - base_casted) % table_entry_size_);
+  return static_cast<int>(addr - base_casted) / table_entry_size_;
 }


@@ -1384,31 +1396,44 @@
 }


-MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
+void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
+                                                   int max_entry_id) {
   // We cannot run this if the serializer is enabled because this will
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
   // isn't meant to be serialized at all.
   ASSERT(!Serializer::enabled());

+  ASSERT(type == EAGER || type == LAZY);
+  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+  int entry_count = (type == EAGER)
+      ? data->eager_deoptimization_entry_code_entries_
+      : data->lazy_deoptimization_entry_code_entries_;
+  if (max_entry_id < entry_count) return;
+  entry_count = Min(Max(entry_count * 2, Deoptimizer::kMinNumberOfEntries),
+                    Deoptimizer::kMaxNumberOfEntries);
+
   MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
   masm.set_emit_debug_code(false);
-  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
+  GenerateDeoptimizationEntries(&masm, entry_count, type);
   CodeDesc desc;
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);

-  MemoryChunk* chunk =
-      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
-                                                            EXECUTABLE,
-                                                            NULL);
-  ASSERT(chunk->area_size() >= desc.instr_size);
-  if (chunk == NULL) {
-    V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
+  VirtualMemory* memory = type == EAGER
+      ? data->eager_deoptimization_entry_code_
+      : data->lazy_deoptimization_entry_code_;
+  size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
+  ASSERT(static_cast<int>(table_size) >= desc.instr_size);
+  memory->Commit(memory->address(), table_size, true);
+  memcpy(memory->address(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(memory->address(), desc.instr_size);
+
+  if (type == EAGER) {
+    data->eager_deoptimization_entry_code_entries_ = entry_count;
+  } else {
+    data->lazy_deoptimization_entry_code_entries_ = entry_count;
   }
-  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->area_start(), desc.instr_size);
-  return chunk;
 }


=======================================
--- /branches/bleeding_edge/src/deoptimizer.h   Mon Oct 22 02:48:56 2012
+++ /branches/bleeding_edge/src/deoptimizer.h   Wed Nov  7 00:49:17 2012
@@ -100,8 +100,10 @@
 #endif

  private:
-  MemoryChunk* eager_deoptimization_entry_code_;
-  MemoryChunk* lazy_deoptimization_entry_code_;
+  int eager_deoptimization_entry_code_entries_;
+  int lazy_deoptimization_entry_code_entries_;
+  VirtualMemory* eager_deoptimization_entry_code_;
+  VirtualMemory* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;

 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -226,7 +228,17 @@

   static void ComputeOutputFrames(Deoptimizer* deoptimizer);

-  static Address GetDeoptimizationEntry(int id, BailoutType type);
+
+  enum GetEntryMode {
+    CALCULATE_ENTRY_ADDRESS,
+    ENSURE_ENTRY_CODE
+  };
+
+
+  static Address GetDeoptimizationEntry(
+      int id,
+      BailoutType type,
+      GetEntryMode mode = ENSURE_ENTRY_CODE);
   static int GetDeoptimizationId(Address addr, BailoutType type);
   static int GetOutputInfo(DeoptimizationOutputData* data,
                            BailoutId node_id,
@@ -283,8 +295,11 @@

   int ConvertJSFrameIndexToFrameIndex(int jsframe_index);

+  static size_t GetMaxDeoptTableSize();
+
  private:
-  static const int kNumberOfEntries = 16384;
+  static const int kMinNumberOfEntries = 64;
+  static const int kMaxNumberOfEntries = 16384;

   Deoptimizer(Isolate* isolate,
               JSFunction* function,
@@ -327,7 +342,8 @@
   void AddArgumentsObjectValue(intptr_t value);
   void AddDoubleValue(intptr_t slot_address, double value);

-  static MemoryChunk* CreateCode(BailoutType type);
+  static void EnsureCodeForDeoptimizationEntry(BailoutType type,
+                                               int max_entry_id);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to