Revision: 14695
Author:   [email protected]
Date:     Wed May 15 09:09:25 2013
Log:      Preserve optimized code map during GCs weakly.

This change preserves the contents of optimized code maps during GCs but
treats the references in these caches weakly. It uses infrastructure from
code flushing to maintain a list of all caches.
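
In outline, every optimized code map gains a link slot at index 0 that the code
flusher uses to chain all map holders together once marking is done. A sketch of
the resulting layout, put together from the constants this change adds to
objects.h:

  // SharedFunctionInfo::optimized_code_map (Smi 0 when the function has none):
  //   [0] kNextMapIndex   -> next SharedFunctionInfo on the flusher's list,
  //                          or undefined while the holder is not enqueued
  //   [1] native context  \
  //   [2] optimized code   > first entry (kEntriesStart = 1, kEntryLength = 3)
  //   [3] literals        /
  //   [4]... further {context, code, literals} triples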

[email protected]

Review URL: https://codereview.chromium.org/14794007
http://code.google.com/p/v8/source/detail?r=14695

Modified:
 /branches/bleeding_edge/src/arm/code-stubs-arm.cc
 /branches/bleeding_edge/src/flag-definitions.h
 /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/mark-compact.h
 /branches/bleeding_edge/src/mips/code-stubs-mips.cc
 /branches/bleeding_edge/src/objects-visiting-inl.h
 /branches/bleeding_edge/src/objects.cc
 /branches/bleeding_edge/src/objects.h
 /branches/bleeding_edge/src/x64/code-stubs-x64.cc
 /branches/bleeding_edge/test/cctest/test-heap.cc

=======================================
--- /branches/bleeding_edge/src/arm/code-stubs-arm.cc Wed May 15 08:17:01 2013
+++ /branches/bleeding_edge/src/arm/code-stubs-arm.cc Wed May 15 09:09:25 2013
@@ -307,8 +307,8 @@
   // The optimized code map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into r4.
-  __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
-  __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+  __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
+  __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
   __ cmp(r2, r5);
   __ b(eq, &install_optimized);

@@ -317,11 +317,9 @@
   __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
   __ bind(&loop);
   // Do not double check first entry.
-
-  __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
   __ b(eq, &install_unoptimized);
-  __ sub(r4, r4, Operand(
-      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(r5, MemOperand(r5));
=======================================
--- /branches/bleeding_edge/src/flag-definitions.h      Wed May 15 08:17:01 2013
+++ /branches/bleeding_edge/src/flag-definitions.h      Wed May 15 09:09:25 2013
@@ -464,6 +464,7 @@
             "flush code that we expect not to use again (during full gc)")
 DEFINE_bool(flush_code_incrementally, true,
             "flush code that we expect not to use again (incrementally)")
+DEFINE_bool(trace_code_flushing, false, "trace code flushing progress")
 DEFINE_bool(age_code, true,
             "track un-executed functions to age code and flush only "
             "old code")
=======================================
--- /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc Tue May 14 08:30:55 2013
+++ /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc Wed May 15 09:09:25 2013
@@ -292,8 +292,8 @@
   // Map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into edx.
-  __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
+  __ mov(edx, FieldOperand(ebx, SharedFunctionInfo::kFirstCodeSlot));
+  __ cmp(ecx, FieldOperand(ebx, SharedFunctionInfo::kFirstContextSlot));
   __ j(equal, &install_optimized);

   // Iterate through the rest of map backwards. edx holds an index as a Smi.
@@ -302,10 +302,9 @@
   __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ bind(&loop);
   // Do not double check first entry.
-  __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
   __ j(equal, &restore);
-  __ sub(edx, Immediate(Smi::FromInt(
-      SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ sub(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
   __ j(not_equal, &loop, Label::kNear);
   // Hit: fetch the optimized code.
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Fri Apr 26 00:35:07 2013
+++ /branches/bleeding_edge/src/mark-compact.cc Wed May 15 09:09:25 2013
@@ -1052,6 +1052,70 @@

   shared_function_info_candidates_head_ = NULL;
 }
+
+
+void CodeFlusher::ProcessOptimizedCodeMaps() {
+  static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
+  static const int kEntryLength = SharedFunctionInfo::kEntryLength;
+  static const int kContextOffset = 0;
+  static const int kCodeOffset = 1;
+  static const int kLiteralsOffset = 2;
+  STATIC_ASSERT(kEntryLength == 3);
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    ClearNextCodeMap(holder);
+
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    int new_length = kEntriesStart;
+    int old_length = code_map->length();
+    for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
+      Code* code = Code::cast(code_map->get(i + kCodeOffset));
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      if (!code_mark.Get()) {
+        continue;
+      }
+
+      // Update and record the context slot in the optimized code map.
+      Object** context_slot = HeapObject::RawField(code_map,
+          FixedArray::OffsetOfElementAt(new_length));
+      code_map->set(new_length++, code_map->get(i + kContextOffset));
+      ASSERT(Marking::IsBlack(
+          Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
+      isolate_->heap()->mark_compact_collector()->
+          RecordSlot(context_slot, context_slot, *context_slot);
+
+      // Update and record the code slot in the optimized code map.
+      Object** code_slot = HeapObject::RawField(code_map,
+          FixedArray::OffsetOfElementAt(new_length));
+      code_map->set(new_length++, code_map->get(i + kCodeOffset));
+      ASSERT(Marking::IsBlack(
+          Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
+      isolate_->heap()->mark_compact_collector()->
+          RecordSlot(code_slot, code_slot, *code_slot);
+
+      // Update and record the literals slot in the optimized code map.
+      Object** literals_slot = HeapObject::RawField(code_map,
+          FixedArray::OffsetOfElementAt(new_length));
+      code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
+      ASSERT(Marking::IsBlack(
+          Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
+      isolate_->heap()->mark_compact_collector()->
+          RecordSlot(literals_slot, literals_slot, *literals_slot);
+    }
+
+    // Trim the optimized code map if entries have been removed.
+    if (new_length < old_length) {
+      holder->TrimOptimizedCodeMap(old_length - new_length);
+    }
+
+    holder = next_holder;
+  }
+
+  optimized_code_map_holder_head_ = NULL;
+}


 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
@@ -1110,6 +1174,36 @@
     }
   }
 }
+
+
+void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+  ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
+         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
+
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  if (holder == code_map_holder) {
+    next_holder = GetNextCodeMap(code_map_holder);
+    optimized_code_map_holder_head_ = next_holder;
+    ClearNextCodeMap(code_map_holder);
+  } else {
+    while (holder != NULL) {
+      next_holder = GetNextCodeMap(holder);
+
+      if (next_holder == code_map_holder) {
+        next_holder = GetNextCodeMap(code_map_holder);
+        SetNextCodeMap(holder, next_holder);
+        ClearNextCodeMap(code_map_holder);
+        break;
+      }
+
+      holder = next_holder;
+    }
+  }
+}


 void CodeFlusher::EvictJSFunctionCandidates() {
@@ -1134,6 +1228,18 @@
   }
   ASSERT(shared_function_info_candidates_head_ == NULL);
 }
+
+
+void CodeFlusher::EvictOptimizedCodeMaps() {
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    EvictOptimizedCodeMap(holder);
+    holder = next_holder;
+  }
+  ASSERT(optimized_code_map_holder_head_ == NULL);
+}


 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
@@ -3955,6 +4061,10 @@
     delete code_flusher_;
     code_flusher_ = NULL;
   }
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
+  }
 }
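
The pruning above is plain in-place compaction followed by a trim. A
self-contained toy version of the same technique, with stand-in types rather
than the real V8 heap classes:

  #include <vector>

  struct Entry { void* context; void* code; void* literals; bool code_is_marked; };

  // Keep only entries whose code object survived marking: slide survivors to
  // the front, then shrink, analogous to ProcessOptimizedCodeMaps() plus
  // TrimOptimizedCodeMap().
  void PruneCodeMap(std::vector<Entry>* code_map) {
    size_t new_length = 0;
    for (size_t i = 0; i < code_map->size(); ++i) {
      if (!(*code_map)[i].code_is_marked) continue;  // dead entry, drop it
      (*code_map)[new_length++] = (*code_map)[i];    // survivor, compact forward
    }
    code_map->resize(new_length);                    // trim the tail
  }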


=======================================
--- /branches/bleeding_edge/src/mark-compact.h  Mon Mar 18 06:57:49 2013
+++ /branches/bleeding_edge/src/mark-compact.h  Wed May 15 09:09:25 2013
@@ -406,9 +406,10 @@
 // CodeFlusher collects candidates for code flushing during marking and
 // processes those candidates after marking has completed in order to
 // reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in two ways:
+// be unreachable. Code objects can be referenced in three ways:
 //    - SharedFunctionInfo references unoptimized code.
 //    - JSFunction references either unoptimized or optimized code.
+//    - OptimizedCodeMap references optimized code.
 // We are not allowed to flush unoptimized code for functions that got
 // optimized or inlined into optimized code, because we might bailout
 // into the unoptimized code again during deoptimization.
@@ -417,7 +418,8 @@
   explicit CodeFlusher(Isolate* isolate)
       : isolate_(isolate),
         jsfunction_candidates_head_(NULL),
-        shared_function_info_candidates_head_(NULL) {}
+        shared_function_info_candidates_head_(NULL),
+        optimized_code_map_holder_head_(NULL) {}

   void AddCandidate(SharedFunctionInfo* shared_info) {
     if (GetNextCandidate(shared_info) == NULL) {
@@ -434,15 +436,25 @@
     }
   }

+  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+      optimized_code_map_holder_head_ = code_map_holder;
+    }
+  }
+
+  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
   void EvictCandidate(SharedFunctionInfo* shared_info);
   void EvictCandidate(JSFunction* function);

   void ProcessCandidates() {
+    ProcessOptimizedCodeMaps();
     ProcessSharedFunctionInfoCandidates();
     ProcessJSFunctionCandidates();
   }

   void EvictAllCandidates() {
+    EvictOptimizedCodeMaps();
     EvictJSFunctionCandidates();
     EvictSharedFunctionInfoCandidates();
   }
@@ -450,8 +462,10 @@
   void IteratePointersToFromSpace(ObjectVisitor* v);

  private:
+  void ProcessOptimizedCodeMaps();
   void ProcessJSFunctionCandidates();
   void ProcessSharedFunctionInfoCandidates();
+  void EvictOptimizedCodeMaps();
   void EvictJSFunctionCandidates();
   void EvictSharedFunctionInfoCandidates();

@@ -488,10 +502,28 @@
   static void ClearNextCandidate(SharedFunctionInfo* candidate) {
     candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
   }
+
+  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+    return reinterpret_cast<SharedFunctionInfo*>(next_map);
+  }
+
+  static void SetNextCodeMap(SharedFunctionInfo* holder,
+                             SharedFunctionInfo* next_holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+  }
+
+  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+  }

   Isolate* isolate_;
   JSFunction* jsfunction_candidates_head_;
   SharedFunctionInfo* shared_function_info_candidates_head_;
+  SharedFunctionInfo* optimized_code_map_holder_head_;

   DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
 };
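
For orientation, the sequence over one full GC cycle as wired up by this
change, summarized in comment form (the call sites are in mark-compact.cc
above and objects-visiting-inl.h below):

  // 1. Marking: the SharedFunctionInfo visitor calls
  //    code_flusher()->AddOptimizedCodeMap(shared) and marks the code map via
  //    MarkObjectWithoutPush(), so the map survives but its entries stay weak.
  // 2. After marking: ProcessCandidates() runs ProcessOptimizedCodeMaps(),
  //    which keeps only entries whose code object was marked through some
  //    other strong path, records the surviving slots, and trims the map.
  // 3. Bail-out: EvictOptimizedCodeMap() and EvictOptimizedCodeMaps() unlink
  //    holders again, e.g. when code flushing is disabled or a map is cleared.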
=======================================
--- /branches/bleeding_edge/src/mips/code-stubs-mips.cc Wed May 15 08:17:01 2013
+++ /branches/bleeding_edge/src/mips/code-stubs-mips.cc Wed May 15 09:09:25 2013
@@ -309,8 +309,8 @@
   // The optimized code map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into t0.
-  __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
-  __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
+  __ lw(t0, FieldMemOperand(a1, SharedFunctionInfo::kFirstCodeSlot));
+  __ lw(t1, FieldMemOperand(a1, SharedFunctionInfo::kFirstContextSlot));
   __ Branch(&install_optimized, eq, a2, Operand(t1));

   // Iterate through the rest of map backwards. t0 holds an index as a Smi.
@@ -318,11 +318,9 @@
   __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
   __ bind(&loop);
   // Do not double check first entry.
-
   __ Branch(&install_unoptimized, eq, t0,
-            Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
-  __ Subu(t0, t0, Operand(
-      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+            Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
+  __ Subu(t0, t0, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(t1, t1, Operand(at));
=======================================
--- /branches/bleeding_edge/src/objects-visiting-inl.h Mon May 13 10:36:26 2013
+++ /branches/bleeding_edge/src/objects-visiting-inl.h Wed May 15 09:09:25 2013
@@ -311,15 +311,17 @@
   if (shared->ic_age() != heap->global_ic_age()) {
     shared->ResetForNewContext(heap->global_ic_age());
   }
-  if (FLAG_cache_optimized_code) {
-    // Flush optimized code map on major GC.
-    // TODO(mstarzinger): We may experiment with rebuilding it or with
-    // retaining entries which should survive as we iterate through
-    // optimized functions anyway.
-    shared->ClearOptimizedCodeMap("during full gc");
-  }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
+    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+      // Add the shared function info holding an optimized code map to
+      // the code flusher for processing of code maps after marking.
+      collector->code_flusher()->AddOptimizedCodeMap(shared);
+      // Treat all references within the code map weakly by marking the
+      // code map itself but not pushing it onto the marking deque.
+      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+      StaticVisitor::MarkObjectWithoutPush(heap, code_map);
+    }
     if (IsFlushable(heap, shared)) {
       // This function's code looks flushable. But we have to postpone
       // the decision until we see all functions that point to the same
@@ -332,6 +334,12 @@
       VisitSharedFunctionInfoWeakCode(heap, object);
       return;
     }
+  } else {
+    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+      // Flush optimized code map on major GCs without code flushing,
+      // needed because cached code doesn't contain breakpoints.
+      shared->ClearOptimizedCodeMap();
+    }
   }
   VisitSharedFunctionInfoStrongCode(heap, object);
 }
=======================================
--- /branches/bleeding_edge/src/objects.cc      Tue May 14 04:45:33 2013
+++ /branches/bleeding_edge/src/objects.cc      Wed May 15 09:09:25 2013
@@ -2186,7 +2186,7 @@
   Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);

   if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
-      ZapEndOfFixedArray(new_end, to_trim);
+    ZapEndOfFixedArray(new_end, to_trim);
   }

   int size_delta = to_trim * kPointerSize;
@@ -8976,33 +8976,46 @@
     Handle<Context> native_context,
     Handle<Code> code,
     Handle<FixedArray> literals) {
+  CALL_HEAP_FUNCTION_VOID(
+      shared->GetIsolate(),
+      shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
+}
+
+
+MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
+                                                       Code* code,
+                                                       FixedArray* literals) {
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
   ASSERT(native_context->IsNativeContext());
   STATIC_ASSERT(kEntryLength == 3);
-  Object* value = shared->optimized_code_map();
-  Handle<FixedArray> new_code_map;
+  Heap* heap = GetHeap();
+  FixedArray* new_code_map;
+  Object* value = optimized_code_map();
   if (value->IsSmi()) {
     // No optimized code map.
     ASSERT_EQ(0, Smi::cast(value)->value());
    // Create 3 entries per context {context, code, literals}.
-    new_code_map = FACTORY->NewFixedArray(kEntryLength);
-    new_code_map->set(0, *native_context);
-    new_code_map->set(1, *code);
-    new_code_map->set(2, *literals);
+    MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
+    if (!maybe->To(&new_code_map)) return maybe;
+    new_code_map->set(kEntriesStart + 0, native_context);
+    new_code_map->set(kEntriesStart + 1, code);
+    new_code_map->set(kEntriesStart + 2, literals);
   } else {
     // Copy old map and append one new entry.
-    Handle<FixedArray> old_code_map(FixedArray::cast(value));
-    ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context));
+    FixedArray* old_code_map = FixedArray::cast(value);
+    ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
     int old_length = old_code_map->length();
     int new_length = old_length + kEntryLength;
-    new_code_map = FACTORY->NewFixedArray(new_length);
-    old_code_map->CopyTo(0, *new_code_map, 0, old_length);
-    new_code_map->set(old_length, *native_context);
-    new_code_map->set(old_length + 1, *code);
-    new_code_map->set(old_length + 2, *literals);
+    MaybeObject* maybe = old_code_map->CopySize(new_length);
+    if (!maybe->To(&new_code_map)) return maybe;
+    new_code_map->set(old_length + 0, native_context);
+    new_code_map->set(old_length + 1, code);
+    new_code_map->set(old_length + 2, literals);
+    // Zap the old map for the sake of the heap verifier.
+    if (Heap::ShouldZapGarbage()) ZapOptimizedCodeMap();
   }
 #ifdef DEBUG
-  for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+  for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
     ASSERT(new_code_map->get(i)->IsNativeContext());
     ASSERT(new_code_map->get(i + 1)->IsCode());
     ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
@@ -9010,14 +9023,14 @@
     ASSERT(new_code_map->get(i + 2)->IsFixedArray());
   }
 #endif
-  shared->set_optimized_code_map(*new_code_map);
+  set_optimized_code_map(new_code_map);
+  return new_code_map;
 }


 void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
                                                      int index) {
-  ASSERT(index > 0);
-  ASSERT(optimized_code_map()->IsFixedArray());
+  ASSERT(index > kEntriesStart);
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
   if (!bound()) {
     FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
@@ -9031,15 +9044,18 @@
 }


-void SharedFunctionInfo::ClearOptimizedCodeMap(const char* reason) {
-  if (!optimized_code_map()->IsSmi()) {
-    if (FLAG_trace_opt) {
-      PrintF("[clearing entire optimizing code map (%s) for ", reason);
-      ShortPrint();
-      PrintF("]\n");
-    }
-    set_optimized_code_map(Smi::FromInt(0));
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
+  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+
+  // If the next map link slot is already used then the function was
+  // enqueued with code flushing and we remove it now.
+  if (!code_map->get(kNextMapIndex)->IsUndefined()) {
+    CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+    flusher->EvictOptimizedCodeMap(this);
   }
+
+  ASSERT(code_map->get(kNextMapIndex)->IsUndefined());
+  set_optimized_code_map(Smi::FromInt(0));
 }


@@ -9050,11 +9066,11 @@
   int i;
   bool removed_entry = false;
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
-  for (i = 0; i < code_map->length(); i += kEntryLength) {
+  for (i = kEntriesStart; i < code_map->length(); i += kEntryLength) {
     ASSERT(code_map->get(i)->IsNativeContext());
     if (Code::cast(code_map->get(i + 1)) == optimized_code) {
       if (FLAG_trace_opt) {
-        PrintF("[clearing optimizing code map (%s) for ", reason);
+        PrintF("[evicting entry from optimizing code map (%s) for ", reason);
         ShortPrint();
         PrintF("]\n");
       }
@@ -9069,13 +9085,33 @@
     i += kEntryLength;
   }
   if (removed_entry) {
-    if (code_map->length() > kEntryLength) {
-      RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
-    } else {
-      ClearOptimizedCodeMap(reason);
+    // Always trim even when array is cleared because of heap verifier.
+    RightTrimFixedArray<FROM_MUTATOR>(GetHeap(), code_map, kEntryLength);
+    if (code_map->length() == kEntriesStart) {
+      ClearOptimizedCodeMap();
     }
   }
 }
+
+
+void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
+  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+  ASSERT(shrink_by % kEntryLength == 0);
+  ASSERT(shrink_by <= code_map->length() - kEntriesStart);
+  // Always trim even when array is cleared because of heap verifier.
+  RightTrimFixedArray<FROM_GC>(GetHeap(), code_map, shrink_by);
+  if (code_map->length() == kEntriesStart) {
+    ClearOptimizedCodeMap();
+  }
+}
+
+
+void SharedFunctionInfo::ZapOptimizedCodeMap() {
+  FixedArray* code_map = FixedArray::cast(optimized_code_map());
+  MemsetPointer(code_map->data_start(),
+                GetHeap()->the_hole_value(),
+                code_map->length());
+}


 bool JSFunction::CompileLazy(Handle<JSFunction> function,
@@ -9717,7 +9753,7 @@
   if (!value->IsSmi()) {
     FixedArray* optimized_code_map = FixedArray::cast(value);
     int length = optimized_code_map->length();
-    for (int i = 0; i < length; i += 3) {
+    for (int i = kEntriesStart; i < length; i += kEntryLength) {
       if (optimized_code_map->get(i) == native_context) {
         return i + 1;
       }
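
For reference, the lookup/install contract with the new layout, as a hedged
usage sketch (the compiler-side call site is not part of this diff; 'shared',
'function' and 'native_context' are hypothetical handles):

  // SearchOptimizedCodeMap() returns the index of the code slot of the
  // matching entry, or -1; the cached literals follow at index + 1.
  int index = shared->SearchOptimizedCodeMap(*native_context);
  if (index > 0) {
    shared->InstallFromOptimizedCodeMap(*function, index);
  }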
=======================================
--- /branches/bleeding_edge/src/objects.h       Tue May 14 04:02:20 2013
+++ /branches/bleeding_edge/src/objects.h       Wed May 15 09:09:25 2013
@@ -5807,7 +5807,7 @@
   inline void ReplaceCode(Code* code);

   // [optimized_code_map]: Map from native context to optimized code
-  // and a shared literals array or Smi 0 if none.
+  // and a shared literals array or Smi(0) if none.
   DECL_ACCESSORS(optimized_code_map, Object)

   // Returns index i of the entry with the specified context. At position
@@ -5820,17 +5820,34 @@
   void InstallFromOptimizedCodeMap(JSFunction* function, int index);

   // Clear optimized code map.
-  void ClearOptimizedCodeMap(const char* reason);
+  void ClearOptimizedCodeMap();

   // Removes a specific optimized code object from the optimized code map.
   void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);

+  // Trims the optimized code map after entries have been removed.
+  void TrimOptimizedCodeMap(int shrink_by);
+
+  // Zaps the contents of the backing optimized code map.
+  void ZapOptimizedCodeMap();
+
   // Add a new entry to the optimized code map.
+  MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
+                                                     Code* code,
+                                                     FixedArray* literals);
   static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                     Handle<Context> native_context,
                                     Handle<Code> code,
                                     Handle<FixedArray> literals);
+
+  // Layout description of the optimized code map.
+  static const int kNextMapIndex = 0;
+  static const int kEntriesStart = 1;
   static const int kEntryLength = 3;
+  static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
+  static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
+  static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
+  static const int kInitialLength = kEntriesStart + kEntryLength;

   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
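
The raw-offset constants are what the hand-written stubs in this change load
through FieldOperand/FieldMemOperand; spelled out:

  // Element i of a FixedArray sits at FixedArray::kHeaderSize + i * kPointerSize
  // (the Field* operands take care of the heap-object tag). With the link slot
  // at index 0:
  //   kFirstContextSlot = kHeaderSize + 1 * kPointerSize  // first entry, context
  //   kFirstCodeSlot    = kHeaderSize + 2 * kPointerSize  // first entry, code
  //   kSecondEntryIndex = kEntriesStart + kEntryLength = 4
  //   kInitialLength    = kEntriesStart + kEntryLength = 4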
=======================================
--- /branches/bleeding_edge/src/x64/code-stubs-x64.cc Tue May 14 08:30:55 2013
+++ /branches/bleeding_edge/src/x64/code-stubs-x64.cc Wed May 15 09:09:25 2013
@@ -287,8 +287,8 @@
   // The optimized code map must never be empty, so check the first elements.
   Label install_optimized;
   // Speculatively move code object into edx.
-  __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
-  __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
+  __ movq(rdx, FieldOperand(rbx, SharedFunctionInfo::kFirstCodeSlot));
+  __ cmpq(rcx, FieldOperand(rbx, SharedFunctionInfo::kFirstContextSlot));
   __ j(equal, &install_optimized);

   // Iterate through the rest of map backwards. rdx holds an index.
@@ -298,9 +298,9 @@
   __ SmiToInteger32(rdx, rdx);
   __ bind(&loop);
   // Do not double check first entry.
-  __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
+  __ cmpq(rdx, Immediate(SharedFunctionInfo::kSecondEntryIndex));
   __ j(equal, &restore);
-  __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));  // Skip an entry.
+  __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
   __ cmpq(rcx, FieldOperand(rbx,
                             rdx,
                             times_pointer_size,
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Mon May 13 07:18:43 2013
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Wed May 15 09:09:25 2013
@@ -2814,7 +2814,6 @@

   CcTest::InitializeVM();
   Isolate* isolate = Isolate::Current();
-  // Force experimental natives to compile to normalize heap layout.
   Heap* heap = isolate->heap();
   HandleScope scope(isolate);
