Revision: 9402
Author:   [email protected]
Date:     Thu Sep 22 09:01:35 2011
Log:      Notify collector about lazily deoptimized code objects.

All slots that were recorded on these objects during incremental marking should be ignored, as they are no longer valid.

To filter such invalidated slots out during slots buffer iteration, we set all mark-bits under the invalidated code object to 1 after the code space has been swept and before the slots buffers are processed.
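
For illustration only, here is a minimal standalone C++ sketch of that idea (this is not V8 code; the page size, bitmap layout and every name in it are simplified assumptions): the mark-bits covering an invalidated code object's address range are set to 1, and a recorded slot is then skipped whenever the bit for its address is set, roughly mirroring what SetMarkBitsUnderInvalidatedCode() and SlotsBuffer::UpdateSlotsWithFilter() do in the patch below.

// Standalone sketch of mark-bit based slot filtering (simplified model,
// not V8 internals; all names, sizes and layouts here are assumptions).
#include <cstdint>
#include <cstdio>
#include <vector>

static const uintptr_t kPageSize    = 1 << 20;        // assumed page size
static const uintptr_t kPointerSize = sizeof(void*);  // one mark-bit per word

struct PageBitmap {
  uintptr_t page_start;
  std::vector<uint8_t> bits;  // one bit per word-aligned address on the page

  explicit PageBitmap(uintptr_t start)
      : page_start(start), bits(kPageSize / kPointerSize / 8, 0) {}

  void Set(uintptr_t addr) {
    uintptr_t index = (addr - page_start) / kPointerSize;
    bits[index / 8] |= static_cast<uint8_t>(1u << (index % 8));
  }

  bool Get(uintptr_t addr) const {
    uintptr_t index = (addr - page_start) / kPointerSize;
    return (bits[index / 8] >> (index % 8)) & 1u;
  }
};

// "Invalidate" a code object: set every mark-bit under its [start, end) range.
static void MarkRangeInvalidated(PageBitmap* bitmap,
                                 uintptr_t start, uintptr_t end) {
  for (uintptr_t a = start; a < end; a += kPointerSize) bitmap->Set(a);
}

// Slots-buffer iteration with filtering: slots that lie inside an
// invalidated code object (mark-bit set) are ignored, the rest are updated.
static void UpdateSlotsWithFilterSketch(
    const std::vector<uintptr_t>& recorded_slots,
    const PageBitmap& code_page_bitmap) {
  for (size_t i = 0; i < recorded_slots.size(); i++) {
    uintptr_t slot = recorded_slots[i];
    if (code_page_bitmap.Get(slot)) {
      std::printf("skip   slot %#lx (inside invalidated code)\n",
                  static_cast<unsigned long>(slot));
      continue;  // stale slot recorded on lazily deoptimized code
    }
    std::printf("update slot %#lx\n", static_cast<unsigned long>(slot));
  }
}

int main() {
  const uintptr_t page = 0x100000;  // pretend code-space page start
  PageBitmap bitmap(page);

  // Pretend an invalidated code object occupies [page + 0x100, page + 0x200).
  MarkRangeInvalidated(&bitmap, page + 0x100, page + 0x200);

  // One recorded slot inside the invalidated object, one outside.
  std::vector<uintptr_t> slots;
  slots.push_back(page + 0x180);
  slots.push_back(page + 0x400);
  UpdateSlotsWithFilterSketch(slots, bitmap);
  return 0;
}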

[email protected]
BUG=v8:1713
TEST=test/mjsunit/regress/regress-1713.js

Review URL: http://codereview.chromium.org/7983045
http://code.google.com/p/v8/source/detail?r=9402

Added:
 /branches/bleeding_edge/test/mjsunit/regress/regress-1713.js
Modified:
 /branches/bleeding_edge/src/arm/deoptimizer-arm.cc
 /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
 /branches/bleeding_edge/src/incremental-marking.cc
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/mark-compact.h
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/spaces.h
 /branches/bleeding_edge/src/x64/deoptimizer-x64.cc

=======================================
--- /dev/null
+++ /branches/bleeding_edge/test/mjsunit/regress/regress-1713.js Thu Sep 22 09:01:35 2011
@@ -0,0 +1,127 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --always-compact --expose-gc
+
+var O = { get f() { return 0; } };
+
+var CODE = [];
+
+var R = [];
+
+function Allocate4Kb(N) {
+  var arr = [];
+  do {arr.push(new Array(1024));} while (--N > 0);
+  return arr;
+}
+
+function AllocateXMb(X) {
+  return Allocate4Kb((1024 * X) / 4);
+}
+
+function Node(v, next) { this.v = v; this.next = next; }
+
+Node.prototype.execute = function (O) {
+  var n = this;
+  while (n.next !== null) n = n.next;
+  n.v(O);
+};
+
+function LongList(N, x) {
+  if (N == 0) return new Node(x, null);
+  return new Node(new Array(1024), LongList(N - 1, x));
+}
+
+var L = LongList(1024, function (O) {
+  for (var i = 0; i < 5; i++) O.f;
+});
+
+
+
+function Incremental(O, x) {
+  if (!x) {
+    return;
+  }
+  function CreateCode(i) {
+    var f = new Function("return O.f_" + i);
+    CODE.push(f);
+    f(); // compile
+    f(); // compile
+    f(); // compile
+  }
+
+  for (var i = 0; i < 1e4; i++) CreateCode(i);
+  gc();
+  gc();
+  gc();
+
+  print(">>> 1 <<<");
+
+  L.execute(O);
+
+  try {} catch (e) {}
+
+  L = null;
+  print(">>> 2 <<<");
+  AllocateXMb(8);
+ //print("1");
+ //AllocateXMb(8);
+ //print("1");
+ //AllocateXMb(8);
+
+}
+
+function foo(O, x) {
+  Incremental(O, x);
+
+  print('f');
+
+  for (var i = 0; i < 5; i++) O.f;
+
+
+  print('g');
+
+  bar(x);
+}
+
+function bar(x) {
+  if (!x) return;
+  %DeoptimizeFunction(foo);
+  AllocateXMb(8);
+  AllocateXMb(8);
+}
+
+var O1 = {};
+var O2 = {};
+var O3 = {};
+var O4 = {f:0};
+
+foo(O1, false);
+foo(O2, false);
+foo(O3, false);
+%OptimizeFunctionOnNextCall(foo);
+foo(O4, true);
=======================================
--- /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Tue Sep 20 02:43:58 2011
+++ /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Thu Sep 22 09:01:35 2011
@@ -118,6 +118,11 @@
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;

+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());

=======================================
--- /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Tue Sep 20 02:43:58 2011
+++ /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Thu Sep 22 09:01:35 2011
@@ -206,6 +206,11 @@
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;

+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());

=======================================
--- /branches/bleeding_edge/src/incremental-marking.cc Tue Sep 20 04:20:00 2011 +++ /branches/bleeding_edge/src/incremental-marking.cc Thu Sep 22 09:01:35 2011
@@ -452,6 +452,19 @@
     // when we finish marking.
     MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
   }
+
+  if (is_compacting_) {
+    // It's difficult to filter out slots recorded for large objects.
+    LargeObjectIterator it(heap_->lo_space());
+    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+      if (obj->IsFixedArray() || obj->IsCode()) {
+        Page* p = Page::FromAddress(obj->address());
+        if (p->size() > static_cast<size_t>(Page::kPageSize)) {
+          p->SetFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }

   // Mark strong roots grey.
   IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
@@ -605,6 +618,16 @@
     PatchIncrementalMarkingRecordWriteStubs(heap_,
                                             RecordWriteStub::STORE_BUFFER_ONLY);
     DeactivateIncrementalWriteBarrier();
+
+    if (is_compacting_) {
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
   }
   heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
   state_ = STOPPED;
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Thu Sep 22 06:03:22 2011
+++ /branches/bleeding_edge/src/mark-compact.cc Thu Sep 22 09:01:35 2011
@@ -28,6 +28,7 @@
 #include "v8.h"

 #include "compilation-cache.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "gdb-jit.h"
 #include "global-handles.h"
@@ -59,6 +60,7 @@
 #endif
       sweep_precisely_(false),
       compacting_(false),
+      was_marked_incrementally_(false),
       collect_maps_(FLAG_collect_maps),
       tracer_(NULL),
       migration_slots_buffer_(NULL),
@@ -238,9 +240,7 @@


 bool MarkCompactCollector::StartCompaction() {
-  // Don't start compaction if we are in the middle of incremental
-  // marking cycle. We did not collect any slots.
-  if (!compacting_ && !heap_->incremental_marking()->IsMarking()) {
+  if (!compacting_) {
     ASSERT(evacuation_candidates_.length() == 0);

     CollectEvacuationCandidates(heap()->old_pointer_space());
@@ -256,22 +256,6 @@

   return compacting_;
 }
-
-
-void MarkCompactCollector::AbortCompaction() {
-  if (compacting_) {
-    int npages = evacuation_candidates_.length();
-    for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
-      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
-      p->ClearEvacuationCandidate();
-      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-    }
-    compacting_ = false;
-    evacuation_candidates_.Rewind(0);
-  }
-  ASSERT_EQ(0, evacuation_candidates_.length());
-}


 void MarkCompactCollector::CollectGarbage() {
@@ -460,16 +444,34 @@
            AllocationSpaceName(space->identity()));
   }
 }
+
+
+void MarkCompactCollector::AbortCompaction() {
+  if (compacting_) {
+    int npages = evacuation_candidates_.length();
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+      p->ClearEvacuationCandidate();
+      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+    compacting_ = false;
+    evacuation_candidates_.Rewind(0);
+    invalidated_code_.Rewind(0);
+  }
+  ASSERT_EQ(0, evacuation_candidates_.length());
+}


 void MarkCompactCollector::Prepare(GCTracer* tracer) {
   FLAG_flush_code = false;

+  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+
   // Disable collection of maps if incremental marking is enabled.
   // Map collection algorithm relies on a special map transition tree traversal
   // order which is not implemented for incremental marking.
-  collect_maps_ = FLAG_collect_maps &&
-      !heap()->incremental_marking()->IsMarking();
+  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;

   // Rather than passing the tracer around we stash it in a static member
   // variable.
@@ -490,13 +492,18 @@
 #endif

   // Clear marking bits for precise sweeping to collect all garbage.
-  if (heap()->incremental_marking()->IsMarking() && PreciseSweepingRequired()) {
+  if (was_marked_incrementally_ && PreciseSweepingRequired()) {
     heap()->incremental_marking()->Abort();
     ClearMarkbits(heap_);
     AbortCompaction();
+    was_marked_incrementally_ = false;
   }

-  if (!FLAG_never_compact) StartCompaction();
+  // Don't start compaction if we are in the middle of incremental
+  // marking cycle. We did not collect any slots.
+  if (!FLAG_never_compact && !was_marked_incrementally_) {
+    StartCompaction();
+  }

   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next();
@@ -506,7 +513,7 @@
   }

 #ifdef DEBUG
-  if (!heap()->incremental_marking()->IsMarking()) {
+  if (!was_marked_incrementally_) {
     VerifyMarkbitsAreClean();
   }
 #endif
@@ -1972,7 +1979,7 @@

   bool incremental_marking_overflowed = false;
   IncrementalMarking* incremental_marking = heap_->incremental_marking();
-  if (incremental_marking->IsMarking()) {
+  if (was_marked_incrementally_) {
     // Finalize the incremental marking and check whether we had an overflow.
     // Both markers use grey color to mark overflowed objects so
     // non-incremental marker can deal with them as if overflow
@@ -2841,9 +2848,127 @@
   }
   p->ResetLiveBytes();
 }
+
+
+static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
+  Page* p = Page::FromAddress(code->address());
+
+  if (p->IsEvacuationCandidate() ||
+      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+    return false;
+  }
+
+  Address code_start = code->address();
+  Address code_end = code_start + code->Size();
+
+  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
+  uint32_t end_index =
+      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
+
+  Bitmap* b = p->markbits();
+
+  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
+  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
+
+  MarkBit::CellType* start_cell = start_mark_bit.cell();
+  MarkBit::CellType* end_cell = end_mark_bit.cell();
+
+  if (value) {
+    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
+    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
+
+    if (start_cell == end_cell) {
+      *start_cell |= start_mask & end_mask;
+    } else {
+      *start_cell |= start_mask;
+      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
+        *cell = ~0;
+      }
+      *end_cell |= end_mask;
+    }
+  } else {
+    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
+      *cell = 0;
+    }
+  }
+
+  return true;
+}
+
+
+static bool IsOnInvalidatedCodeObject(Address addr) {
+  // We did not record any slots in large objects thus
+  // we can safely go to the page from the slot address.
+  Page* p = Page::FromAddress(addr);
+
+  // First check owner's identity because old pointer and old data spaces
+  // are swept lazily and might still have non-zero mark-bits on some
+  // pages.
+  if (p->owner()->identity() != CODE_SPACE) return false;
+
+  // In code space only bits on evacuation candidates (but we don't record
+  // any slots on them) and under invalidated code objects are non-zero.
+  MarkBit mark_bit =
+      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+
+  return mark_bit.Get();
+}
+
+
+void MarkCompactCollector::InvalidateCode(Code* code) {
+  if (heap_->incremental_marking()->IsCompacting() &&
+      !ShouldSkipEvacuationSlotRecording(code)) {
+    ASSERT(compacting_);
+
+    // If the object is white then no slots were recorded on it yet.
+    MarkBit mark_bit = Marking::MarkBitFrom(code);
+    if (Marking::IsWhite(mark_bit)) return;
+
+    invalidated_code_.Add(code);
+  }
+}
+
+
+bool MarkCompactCollector::MarkInvalidatedCode() {
+  bool code_marked = false;
+
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+
+    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
+      code_marked = true;
+    }
+  }
+
+  return code_marked;
+}
+
+
+void MarkCompactCollector::RemoveDeadInvalidatedCode() {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+  }
+}
+
+
+void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+    if (code != NULL) {
+      code->Iterate(visitor);
+      SetMarkBitsUnderInvalidatedCode(code, false);
+    }
+  }
+  invalidated_code_.Rewind(0);
+}


 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  bool code_slots_filtering_required = MarkInvalidatedCode();
+
   EvacuateNewSpace();
   EvacuatePages();

@@ -2873,11 +2998,25 @@
     heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
   }

-  SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_);
+  SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                     migration_slots_buffer_,
+                                     code_slots_filtering_required);
   if (FLAG_trace_fragmentation) {
     PrintF("  migration slots buffer: %d\n",
            SlotsBuffer::SizeOfChain(migration_slots_buffer_));
   }
+
+  if (compacting_ && was_marked_incrementally_) {
+    // It's difficult to filter out slots recorded for large objects.
+    LargeObjectIterator it(heap_->lo_space());
+    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+      Page* p = Page::FromAddress(obj->address());
+      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+        obj->Iterate(&updating_visitor);
+        p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+      }
+    }
+  }

   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
@@ -2886,7 +3025,9 @@
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));

     if (p->IsEvacuationCandidate()) {
-      SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer());
+      SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                         p->slots_buffer(),
+                                         code_slots_filtering_required);
       if (FLAG_trace_fragmentation) {
         PrintF("  page %p slots buffer: %d\n",
                reinterpret_cast<void*>(p),
@@ -2958,6 +3099,10 @@
   EvacuationWeakObjectRetainer evacuation_object_retainer;
   heap()->ProcessWeakReferences(&evacuation_object_retainer);

+  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
+  // under it.
+  ProcessInvalidatedCode(&updating_visitor);
+
 #ifdef DEBUG
   if (FLAG_verify_heap) {
     VerifyEvacuation(heap_);
@@ -3485,11 +3630,16 @@
   // non-live objects.
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);
+
+  RemoveDeadInvalidatedCode();
   SweepSpace(heap()->code_space(), PRECISE);
+
   SweepSpace(heap()->cell_space(), PRECISE);
+
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
     EvacuateNewSpaceAndCandidates();
   }
+
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
   // of the previous ones.
@@ -3629,6 +3779,29 @@
     }
   }
 }
+
+
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+        UpdateSlot(slot);
+      }
+    } else {
+      ++slot_idx;
+      ASSERT(slot_idx < idx_);
+      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+      if (!IsOnInvalidatedCodeObject(pc)) {
+        UpdateSlot(&v,
+                   DecodeSlotType(slot),
+                   reinterpret_cast<Address>(slots_[slot_idx]));
+      }
+    }
+  }
+}


SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
=======================================
--- /branches/bleeding_edge/src/mark-compact.h  Wed Sep 21 04:20:05 2011
+++ /branches/bleeding_edge/src/mark-compact.h  Thu Sep 22 09:01:35 2011
@@ -324,6 +324,8 @@

   void UpdateSlots(Heap* heap);

+  void UpdateSlotsWithFilter(Heap* heap);
+
   SlotsBuffer* next() { return next_; }

   static int SizeOfChain(SlotsBuffer* buffer) {
@@ -340,9 +342,15 @@
     return idx_ < kNumberOfElements - 1;
   }

-  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer) {
+  static void UpdateSlotsRecordedIn(Heap* heap,
+                                    SlotsBuffer* buffer,
+                                    bool code_slots_filtering_required) {
     while (buffer != NULL) {
-      buffer->UpdateSlots(heap);
+      if (code_slots_filtering_required) {
+        buffer->UpdateSlotsWithFilter(heap);
+      } else {
+        buffer->UpdateSlots(heap);
+      }
       buffer = buffer->next();
     }
   }
@@ -545,11 +553,18 @@
   inline void set_encountered_weak_maps(Object* weak_map) {
     encountered_weak_maps_ = weak_map;
   }
+
+  void InvalidateCode(Code* code);

  private:
   MarkCompactCollector();
   ~MarkCompactCollector();

+  bool MarkInvalidatedCode();
+  void RemoveDeadInvalidatedCode();
+  void ProcessInvalidatedCode(ObjectVisitor* visitor);
+
+
 #ifdef DEBUG
   enum CollectorState {
     IDLE,
@@ -573,6 +588,8 @@
   // candidates.
   bool compacting_;

+  bool was_marked_incrementally_;
+
   bool collect_maps_;

   // A pointer to the current stack-allocated GC tracer object during a full
@@ -762,6 +779,7 @@
   Object* encountered_weak_maps_;

   List<Page*> evacuation_candidates_;
+  List<Code*> invalidated_code_;

   friend class Heap;
 };
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Wed Sep 21 04:42:48 2011
+++ /branches/bleeding_edge/src/spaces.cc       Thu Sep 22 09:01:35 2011
@@ -1930,8 +1930,10 @@
     Page* last = last_unswept_page_->next_page();
     Page* p = first_unswept_page_;
     do {
-      if (ShouldBeSweptLazily(p)) {
-        ASSERT(!p->WasSwept());
+      // Do not use ShouldBeSweptLazily predicate here.
+      // New evacuation candidates were selected but they still have
+      // to be swept before collection starts.
+      if (!p->WasSwept()) {
         Bitmap::Clear(p);
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
=======================================
--- /branches/bleeding_edge/src/spaces.h        Tue Sep 20 08:35:36 2011
+++ /branches/bleeding_edge/src/spaces.h        Thu Sep 22 09:01:35 2011
@@ -187,7 +187,7 @@
   }

   static int SizeFor(int cells_count) {
-    return sizeof(MarkBit::CellType)*cells_count;
+    return sizeof(MarkBit::CellType) * cells_count;
   }

   INLINE(static uint32_t IndexToCell(uint32_t index)) {
@@ -1585,7 +1585,8 @@
              (ratio > ratio_threshold) ? "[fragmented]" : "");
     }

-    return (ratio > ratio_threshold) || FLAG_always_compact;
+    return (ratio > ratio_threshold) ||
+        (FLAG_always_compact && sizes[3] != Page::kObjectAreaSize);
   }

   void EvictEvacuationCandidatesFromFreeLists();
=======================================
--- /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Tue Sep 20 02:43:58 2011
+++ /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Thu Sep 22 09:01:35 2011
@@ -204,6 +204,11 @@
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;

+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
