Revision: 6406
Author: [email protected]
Date: Wed Jan 19 09:39:52 2011
Log: Introduce conservative sweeping.

Review URL: http://codereview.chromium.org/6321008
http://code.google.com/p/v8/source/detail?r=6406

Modified:
 /branches/experimental/gc/src/builtins.cc
 /branches/experimental/gc/src/mark-compact.cc
 /branches/experimental/gc/src/mark-compact.h
 /branches/experimental/gc/src/objects.h
 /branches/experimental/gc/src/spaces.cc
 /branches/experimental/gc/src/spaces.h

=======================================
--- /branches/experimental/gc/src/builtins.cc   Wed Jan  5 05:52:13 2011
+++ /branches/experimental/gc/src/builtins.cc   Wed Jan 19 09:39:52 2011
@@ -32,6 +32,7 @@
 #include "bootstrapper.h"
 #include "builtins.h"
 #include "ic-inl.h"
+#include "mark-compact.h"
 #include "vm-state-inl.h"

 namespace v8 {
@@ -350,6 +351,10 @@
   // we still do it.
   Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);

+  // Maintain marking consistency for HeapObjectIterator.
+  Marking::TransferMark(elms->address(),
+                        elms->address() + to_trim * kPointerSize);
+
   former_start[to_trim] = Heap::fixed_array_map();
   former_start[to_trim + 1] = Smi::FromInt(len - to_trim);

=======================================
--- /branches/experimental/gc/src/mark-compact.cc       Mon Jan 10 07:11:41 2011
+++ /branches/experimental/gc/src/mark-compact.cc       Wed Jan 19 09:39:52 2011
@@ -110,7 +110,8 @@

   // Check that swept all marked objects and
   // null out the GC tracer.
-  ASSERT(tracer_->marked_count() == 0);
+  // TODO(gc) does not work with conservative sweeping.
+  // ASSERT(tracer_->marked_count() == 0);
   tracer_ = NULL;
 }

@@ -135,6 +136,23 @@
 #endif


+static void ClearMarkbits(PagedSpace* space) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    p->markbits()->Clear();
+  }
+}
+
+static void ClearMarkbits() {
+  // We are sweeping code and map spaces precisely so clearing is not required.
+  ClearMarkbits(Heap::old_pointer_space());
+  ClearMarkbits(Heap::old_data_space());
+  ClearMarkbits(Heap::cell_space());
+}
+
+
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
   FLAG_flush_code = false;
   FLAG_always_compact = false;
@@ -171,6 +189,8 @@
   Marking::ClearRange(new_space_bottom,
                       static_cast<int>(new_space_top - new_space_bottom));

+  ClearMarkbits();
+
 #ifdef DEBUG
   VerifyMarkbitsAreClean();

@@ -1718,7 +1738,172 @@
 }


-void MarkCompactCollector::SweepSpace(PagedSpace* space) {
+INLINE(static uint32_t SweepFree(PagedSpace* space,
+                                 Page* p,
+                                 uint32_t free_start,
+                                 uint32_t region_end,
+                                 uint32_t* cells));
+
+
+static uint32_t SweepFree(PagedSpace* space,
+                          Page* p,
+                          uint32_t free_start,
+                          uint32_t region_end,
+                          uint32_t* cells) {
+  uint32_t free_cell_index = Page::MarkbitsBitmap::IndexToCell(free_start);
+  ASSERT(cells[free_cell_index] == 0);
+  while (free_cell_index < region_end && cells[free_cell_index] == 0) {
+    free_cell_index++;
+  }
+
+  if (free_cell_index >= region_end) {
+    return free_cell_index;
+  }
+
+  uint32_t free_end = Page::MarkbitsBitmap::CellToIndex(free_cell_index);
+  space->DeallocateBlock(p->MarkbitIndexToAddress(free_start),
+                         (free_end - free_start) << kPointerSizeLog2,
+                         true);
+
+  return free_cell_index;
+}
+
+
+INLINE(static uint32_t NextCandidate(uint32_t cell_index,
+                                     uint32_t last_cell_index,
+                                     uint32_t* cells));
+
+
+static uint32_t NextCandidate(uint32_t cell_index,
+                              uint32_t last_cell_index,
+                              uint32_t* cells) {
+  do {
+    cell_index++;
+  } while (cell_index < last_cell_index && cells[cell_index] != 0);
+  return cell_index;
+}
+
+
+INLINE(static int SizeOfPreviousObject(Page* p,
+                                       uint32_t cell_index,
+                                       uint32_t* cells));
+
+
+static int SizeOfPreviousObject(Page* p,
+                                uint32_t cell_index,
+                                uint32_t* cells) {
+  ASSERT(cells[cell_index] == 0);
+  if (cells[cell_index - 1] == 0) return 0;
+
+  int leading_zeroes =
+      CompilerIntrinsics::CountLeadingZeros(cells[cell_index - 1]) + 1;
+  Address addr =
+      p->MarkbitIndexToAddress(
+          Page::MarkbitsBitmap::CellToIndex(cell_index) - leading_zeroes);
+  HeapObject* obj = HeapObject::FromAddress(addr);
+  ASSERT(obj->map()->IsMap());
+  return (obj->Size() >> kPointerSizeLog2) - leading_zeroes;
+}
+
+
+static void SweepConservatively(PagedSpace* space,
+                                Page* p,
+                                Address* last_free_start) {
+  typedef Page::MarkbitsBitmap::CellType CellType;
+  Page::MarkbitsBitmap* markbits = p->markbits();
+  CellType* cells = markbits->cells();
+
+  uint32_t last_cell_index =
+      Page::MarkbitsBitmap::IndexToCell(
+          Page::MarkbitsBitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->AllocationTop())));
+
+  uint32_t cell_index = Page::kFirstUsedCell;
+  uint32_t polluted_cell_index = Page::kFirstUsedCell;
+  if (cells[cell_index] == 0) {
+    polluted_cell_index =
+        SweepFree(space,
+                  p,
+                  p->AddressToMarkbitIndex(p->ObjectAreaStart()),
+                  last_cell_index,
+                  cells);
+
+    if (polluted_cell_index >= last_cell_index) {
+      // All cells are free.
+      *last_free_start = p->ObjectAreaStart();
+      return;
+    }
+  }
+
+  p->ClearFlag(Page::IS_CONTINUOUS);
+
+  ASSERT(cells[polluted_cell_index] != 0);
+ for (cell_index = NextCandidate(polluted_cell_index, last_cell_index, cells);
+       cell_index < last_cell_index;
+       cell_index = NextCandidate(polluted_cell_index,
+                                  last_cell_index,
+                                  cells)) {
+    ASSERT(cells[cell_index] == 0);
+
+    int size = SizeOfPreviousObject(p, cell_index, cells);
+    if (size <= 0) {
+      polluted_cell_index =
+          SweepFree(space,
+                    p,
+                    Page::MarkbitsBitmap::CellToIndex(cell_index),
+                    last_cell_index,
+                    cells);
+      if (polluted_cell_index >= last_cell_index) {
+        // This free region is the last on the page.
+        *last_free_start = p->MarkbitIndexToAddress(
+            Page::MarkbitsBitmap::CellToIndex(cell_index));
+        return;
+      }
+    } else {
+      // Skip cells covered by this object.
+      polluted_cell_index = cell_index +
+          Page::MarkbitsBitmap::IndexToCell(size - 1);
+    }
+  }
+}
+
+
+static void SweepPrecisely(PagedSpace* space,
+                           Page* p,
+                           Address* last_free_start) {
+  bool is_previous_alive = true;
+  Address free_start = NULL;
+  HeapObject* object;
+
+  for (Address current = p->ObjectAreaStart();
+       current < p->AllocationTop();
+       current += object->Size()) {
+    object = HeapObject::FromAddress(current);
+    if (Marking::IsMarked(object)) {
+      Marking::ClearMark(object);
+      MarkCompactCollector::tracer()->decrement_marked_count();
+
+      if (!is_previous_alive) {  // Transition from free to live.
+        space->DeallocateBlock(free_start,
+                               static_cast<int>(current - free_start),
+                               true);
+        is_previous_alive = true;
+      }
+    } else {
+      MarkCompactCollector::ReportDeleteIfNeeded(object);
+      if (is_previous_alive) {  // Transition from live to free.
+        free_start = current;
+        is_previous_alive = false;
+      }
+    }
+  }
+
+  if (!is_previous_alive) *last_free_start = free_start;
+}
+
+
+void MarkCompactCollector::SweepSpace(PagedSpace* space,
+                                      SweeperType sweeper) {
   PageIterator it(space, PageIterator::PAGES_IN_USE);

   // During sweeping of paged space we are trying to find longest sequences
@@ -1746,37 +1931,20 @@
   while (it.has_next()) {
     Page* p = it.next();

-    bool is_previous_alive = true;
-    Address free_start = NULL;
-    HeapObject* object;
-
-    for (Address current = p->ObjectAreaStart();
-         current < p->AllocationTop();
-         current += object->Size()) {
-      object = HeapObject::FromAddress(current);
-      if (Marking::IsMarked(object)) {
-        Marking::ClearMark(object);
-        MarkCompactCollector::tracer()->decrement_marked_count();
-
-        if (!is_previous_alive) {  // Transition from free to live.
-          space->DeallocateBlock(free_start,
-                                 static_cast<int>(current - free_start),
-                                 true);
-          is_previous_alive = true;
-        }
-      } else {
-        MarkCompactCollector::ReportDeleteIfNeeded(object);
-        if (is_previous_alive) {  // Transition from live to free.
-          free_start = current;
-          is_previous_alive = false;
-        }
-      }
- // The object is now unmarked for the call to Size() at the top of the
-      // loop.
+    Address free_start = p->AllocationTop();
+
+    if (sweeper == CONSERVATIVE) {
+      SweepConservatively(space, p, &free_start);
+      p->set_linearity_boundary(free_start);
+    } else {
+      ASSERT(sweeper == PRECISE);
+      SweepPrecisely(space, p, &free_start);
     }

-    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
-        || (!is_previous_alive && free_start == p->ObjectAreaStart());
+    bool page_is_empty = (p->ObjectAreaStart() == free_start);
+    bool is_previous_alive = (free_start == p->AllocationTop());
+
+    ASSERT(free_start <= p->AllocationTop());

     if (page_is_empty) {
       // This page is empty. Check whether we are in the middle of
@@ -1830,7 +1998,7 @@
     // Last used pages in space are empty. We can move allocation top backwards
     // to the beginning of first empty page.
     ASSERT(prev == space->AllocationTopPage());
-
+    space->FreePages(prec_first_empty_page, prev);
     new_allocation_top = first_empty_page->ObjectAreaStart();
   }

@@ -1869,21 +2037,26 @@
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(Heap::old_pointer_space());
-  SweepSpace(Heap::old_data_space());
-  SweepSpace(Heap::code_space());
-  SweepSpace(Heap::cell_space());
+  SweepSpace(Heap::old_pointer_space(), CONSERVATIVE);
+  SweepSpace(Heap::old_data_space(), CONSERVATIVE);
+  SweepSpace(Heap::code_space(), PRECISE);
+  // TODO(gc): implement specialized sweeper for cell space.
+  SweepSpace(Heap::cell_space(), CONSERVATIVE);
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
     SweepNewSpace(Heap::new_space());
   }
-  SweepSpace(Heap::map_space());
+  // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of
+  // map space to detect whether unmarked map became dead in this
+  // collection or in one of the previous ones.
+  // TODO(gc): Implement specialized sweeper for map space.
+  SweepSpace(Heap::map_space(), PRECISE);

   Heap::IterateDirtyRegions(Heap::map_space(),
                             &Heap::IteratePointersInDirtyMapsRegion,
                             &UpdatePointerToNewGen,
                             Heap::WATERMARK_SHOULD_BE_VALID);

-  ASSERT(live_map_objects_size_ == Heap::map_space()->Size());
+  ASSERT(live_map_objects_size_ <= Heap::map_space()->Size());
 }


@@ -1941,6 +2114,10 @@
   if (obj->IsCode()) {
     PROFILE(CodeDeleteEvent(obj->address()));
   } else if (obj->IsJSFunction()) {
+    // TODO(gc): we are sweeping old pointer space conservatively thus
+    // we can't notify attached profiler about death of functions.
+    // Consider disabling conservative sweeping when profiler
+    // is enabled.
     PROFILE(FunctionDeleteEvent(obj->address()));
   }
 #endif
=======================================
--- /branches/experimental/gc/src/mark-compact.h        Mon Jan 10 07:00:48 2011
+++ /branches/experimental/gc/src/mark-compact.h        Wed Jan 19 09:39:52 2011
@@ -28,6 +28,8 @@
 #ifndef V8_MARK_COMPACT_H_
 #define V8_MARK_COMPACT_H_

+#include "compiler-intrinsics.h"
+
 namespace v8 {
 namespace internal {

@@ -57,54 +59,166 @@

   INLINE(static bool TestAndMark(Address addr)) {
     if (Heap::InNewSpace(addr)) {
-      uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+      uint32_t index = Heap::new_space()->AddressToMarkbitIndex(addr);
       return new_space_bitmap_->TestAndSet(index);
     } else {
       Page *p = Page::FromAddress(addr);
-      return p->markbits()->TestAndSet(p->Address2Markbit(addr));
+      return p->markbits()->TestAndSet(p->AddressToMarkbitIndex(addr));
     }
   }

   INLINE(static bool IsMarked(Address addr)) {
     if (Heap::InNewSpace(addr)) {
-      uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+      uint32_t index = Heap::new_space()->AddressToMarkbitIndex(addr);
       return new_space_bitmap_->Get(index);
     } else {
       Page *p = Page::FromAddress(addr);
-      return p->markbits()->Get(p->Address2Markbit(addr));
+      return p->markbits()->Get(p->AddressToMarkbitIndex(addr));
     }
   }

   INLINE(static void SetMark(Address addr)) {
     if (Heap::InNewSpace(addr)) {
-      uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+      uint32_t index = Heap::new_space()->AddressToMarkbitIndex(addr);
       new_space_bitmap_->Set(index);
     } else {
       Page *p = Page::FromAddress(addr);
-      p->markbits()->Set(p->FastAddress2Markbit(addr));
+      p->markbits()->Set(p->FastAddressToMarkbitIndex(addr));
     }
   }

   INLINE(static void ClearMark(Address addr)) {
     if (Heap::InNewSpace(addr)) {
-      uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+      uint32_t index = Heap::new_space()->AddressToMarkbitIndex(addr);
       new_space_bitmap_->Clear(index);
     } else {
       Page *p = Page::FromAddress(addr);
-      p->markbits()->Clear(p->FastAddress2Markbit(addr));
+      p->markbits()->Clear(p->FastAddressToMarkbitIndex(addr));
     }
   }

   INLINE(static void ClearRange(Address addr, int size)) {
     if (Heap::InNewSpace(addr)) {
-      uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+      uint32_t index = Heap::new_space()->AddressToMarkbitIndex(addr);
       new_space_bitmap_->ClearRange(index, size >> kPointerSizeLog2);
     } else {
       Page *p = Page::FromAddress(addr);
-      p->markbits()->ClearRange(p->FastAddress2Markbit(addr),
+      p->markbits()->ClearRange(p->FastAddressToMarkbitIndex(addr),
                                 size >> kPointerSizeLog2);
     }
   }
+
+  // Find first marked object in the given cell with index cell_index on
+  // the given page.
+  INLINE(static Address FirstMarkedObject(Page* page,
+                                          uint32_t cell_index,
+                                          uint32_t cell)) {
+    ASSERT(cell != 0);
+    uint32_t bit = CompilerIntrinsics::CountTrailingZeros(cell);
+    return page->MarkbitIndexToAddress(
+        Page::MarkbitsBitmap::CellToIndex(cell_index) + bit);
+  }
+
+  INLINE(static Address FirstLiveObject(Address start,
+                                        Address limit)) {
+    ASSERT(!Heap::InNewSpace(start));
+    if (start >= limit) return start;
+
+    Page* page = Page::FromAddress(start);
+
+    // If start is above the linearity boundary then it
+    // coincides with the start of a live object. Just
+    // return it.
+    if (start >= page->linearity_boundary()) return start;
+
+    Page::MarkbitsBitmap* bitmap = page->markbits();
+    uint32_t markbit = page->AddressToMarkbitIndex(start);
+
+    // If the start address is marked return it.
+    if (bitmap->Get(markbit)) return start;
+
+    uint32_t* cells = bitmap->cells();
+    uint32_t cell_index = Page::MarkbitsBitmap::IndexToCell(markbit);
+
+    // Round limit towards start of the next cell.
+    uint32_t last_cell_index =
+        Page::MarkbitsBitmap::IndexToCell(
+            Page::MarkbitsBitmap::CellAlignIndex(
+                page->AddressToMarkbitIndex(limit)));
+
+    ASSERT(cell_index < last_cell_index);
+
+ while (cell_index < last_cell_index && cells[cell_index] == 0) cell_index++;
+
+    if (cell_index == last_cell_index) return limit;
+
+    return FirstMarkedObject(page, cell_index, cells[cell_index]);
+  }
+
+  INLINE(static Address NextLiveObject(HeapObject* obj,
+                                       int size,
+                                       Address end)) {
+    ASSERT(!Heap::InNewSpace(obj));
+    Page* page = Page::FromAddress(obj->address());
+    Address watermark = page->linearity_boundary();
+    Address next_addr = obj->address() + size;
+
+    if (next_addr >= watermark) return next_addr;
+
+    Page::MarkbitsBitmap* bitmap = page->markbits();
+    uint32_t markbit = page->AddressToMarkbitIndex(next_addr);
+
+    if (bitmap->Get(markbit)) return next_addr;
+
+    uint32_t* cells = bitmap->cells();
+    uint32_t cell_index = Page::MarkbitsBitmap::IndexToCell(markbit);
+
+    ASSERT(IsMarked(obj));
+
+    uint32_t bit  = Page::MarkbitsBitmap::IndexToBit(markbit);
+    uint32_t mask = (~1) << bit;
+    if ((cells[cell_index] & mask) != 0) {
+      // There are more marked objects in this cell.
+      return FirstMarkedObject(page, cell_index, cells[cell_index] & mask);
+    }
+
+    Address limit = Min(watermark, end);
+
+    // Round limit towards start of the next cell.
+    uint32_t last_cell_index =
+      Page::MarkbitsBitmap::IndexToCell(
+          Page::MarkbitsBitmap::CellAlignIndex(
+              page->AddressToMarkbitIndex(limit)));
+
+    // We expect last_cell to be bigger than cell because
+    // we rounded limit towards start of the next cell
+    // and limit is bigger than the address of the current object.
+    ASSERT(cell_index < last_cell_index);
+
+    // We skip current cell because it contains no unvisited
+    // live objects.
+    do {
+      cell_index++;
+    } while (cell_index < last_cell_index && cells[cell_index] == 0);
+
+    // If we reached last_cell return limit
+    // not the start of the last_cell because
+    // limit can be in the middle of the previous cell.
+    if (cell_index == last_cell_index) return limit;
+
+    return FirstMarkedObject(page, cell_index, cells[cell_index]);
+  }
+
+  static inline void TransferMark(Address old_start,
+                                  Address new_start) {
+    if (Heap::InNewSpace(old_start) ||
+        Page::FromAddress(old_start)->IsFlagSet(Page::IS_CONTINUOUS) ||
+        !IsMarked(old_start)) {
+      return;
+    }
+
+    SetMark(new_start);
+  }

   static bool Setup();

@@ -398,7 +512,10 @@
   static void SweepSpaces();

   static void SweepNewSpace(NewSpace* space);
-  static void SweepSpace(PagedSpace* space);
+
+  enum SweeperType { CONSERVATIVE, PRECISE };
+
+  static void SweepSpace(PagedSpace* space, SweeperType sweeper);

 #ifdef DEBUG
// -----------------------------------------------------------------------
=======================================
--- /branches/experimental/gc/src/objects.h     Fri Jan  7 06:11:14 2011
+++ /branches/experimental/gc/src/objects.h     Wed Jan 19 09:39:52 2011
@@ -3586,7 +3586,7 @@
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);

   // Layout of pointer fields. Heap iteration code relies on them
-  // being continiously allocated.
+  // being continuously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
   static const int kPointerFieldsEndOffset =
       Map::kCodeCacheOffset + kPointerSize;
=======================================
--- /branches/experimental/gc/src/spaces.cc     Fri Jan  7 06:11:14 2011
+++ /branches/experimental/gc/src/spaces.cc     Wed Jan 19 09:39:52 2011
@@ -55,17 +55,6 @@
                                        HeapObjectCallback size_func) {
   Initialize(space->bottom(), space->top(), size_func);
 }
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
-  Initialize(start, space->top(), NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
-                                       HeapObjectCallback size_func) {
-  Initialize(start, space->top(), size_func);
-}


 HeapObjectIterator::HeapObjectIterator(Page* page,
@@ -83,6 +72,11 @@
   Page* p = Page::FromAllocationTop(cur_addr_);
   cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();

+  if (!p->IsFlagSet(Page::IS_CONTINUOUS)) {
+    cur_addr_ = Marking::FirstLiveObject(cur_addr_, cur_limit_);
+    if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_;
+  }
+
 #ifdef DEBUG
   Verify();
 #endif
@@ -99,6 +93,11 @@
   cur_addr_ = cur_page->ObjectAreaStart();
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();

+  if (!cur_page->IsFlagSet(Page::IS_CONTINUOUS)) {
+    cur_addr_ = Marking::FirstLiveObject(cur_addr_, cur_limit_);
+    if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_;
+  }
+
   if (cur_addr_ == end_addr_) return NULL;
   ASSERT(cur_addr_ < cur_limit_);
 #ifdef DEBUG
@@ -106,6 +105,17 @@
 #endif
   return FromCurrentPage();
 }
+
+
+void HeapObjectIterator::AdvanceUsingMarkbits() {
+  HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+  int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+  ASSERT_OBJECT_SIZE(obj_size);
+  cur_addr_ = Marking::NextLiveObject(obj,
+                                      obj_size,
+                                      cur_limit_);
+  if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_;
+}


 #ifdef DEBUG
@@ -787,10 +797,10 @@
         above_allocation_top = true;
       }

-      // It should be packed with objects from the bottom to the top.
-      Address current = current_page->ObjectAreaStart();
-      while (current < top) {
-        HeapObject* object = HeapObject::FromAddress(current);
+      HeapObjectIterator it(current_page, NULL);
+      Address end_of_previous_object = current_page->ObjectAreaStart();
+ for(HeapObject* object = it.next(); object != NULL; object = it.next()) {
+        ASSERT(end_of_previous_object <= object->address());

// The first word should be a map, and we expect all map pointers to
         // be in map space.
@@ -801,6 +811,10 @@
         // Perform space-specific object verification.
         VerifyObject(object);

+ if (object->IsCodeCache() && ((uint32_t*)object->address())[2] == 0x2) {
+          current_page->PrintMarkbits();
+        }
+
         // The object itself should look OK.
         object->Verify();

@@ -810,11 +824,9 @@
         int size = object->Size();
         object->IterateBody(map->instance_type(), size, visitor);

-        current += size;
-      }
-
-      // The allocation pointer should not be in the middle of an object.
-      ASSERT(current == top);
+        ASSERT(object->address() + size <= top);
+        end_of_previous_object = object->address() + size;
+      }
     }

     current_page = current_page->next_page();
@@ -1672,6 +1684,13 @@
 void PagedSpace::FreePages(Page* prev, Page* last) {
   if (last == AllocationTopPage()) {
     // Pages are already at the end of used pages.
+    // Just mark them as continuous.
+    Page* p = prev == NULL ? first_page_ : prev->next_page();
+    Page* end_page = last->next_page();
+    do {
+      p->SetFlag(Page::IS_CONTINUOUS);
+      p = p->next_page();
+    } while (p != end_page);
     return;
   }

@@ -1697,9 +1716,10 @@
     first->SetAllocationWatermark(first->ObjectAreaStart());
     first->SetCachedAllocationWatermark(first->ObjectAreaStart());
     first->SetRegionMarks(Page::kAllRegionsCleanMarks);
+    first->SetFlag(Page::IS_CONTINUOUS);
     first->markbits()->Clear();
     first = first->next_page();
-  } while (first != NULL);
+  } while (first->is_valid());
 }


@@ -1777,6 +1797,13 @@
         ASSERT(obj->address() == p->AllocationWatermark());
         p->SetAllocationWatermark(obj->address() + size_in_bytes);
       }
+
+      if (!p->IsFlagSet(Page::IS_CONTINUOUS)) {
+        // This page is not continuous so we have to mark objects
+        // that should be visited by HeapObjectIterator.
+        ASSERT(!Marking::IsMarked(obj));
+        Marking::SetMark(obj);
+      }

       return obj;
     }
=======================================
--- /branches/experimental/gc/src/spaces.h      Mon Jan 10 07:11:41 2011
+++ /branches/experimental/gc/src/spaces.h      Wed Jan 19 09:39:52 2011
@@ -134,6 +134,22 @@
   static int SizeFor(int cells_count) {
     return sizeof(CellType)*cells_count;
   }
+
+  INLINE(static uint32_t IndexToCell(uint32_t index)) {
+    return index >> kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t IndexToBit(uint32_t index)) {
+    return index & kBitIndexMask;
+  }
+
+  INLINE(static uint32_t CellToIndex(uint32_t index)) {
+    return index << kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
+    return (index + kBitIndexMask) & ~kBitIndexMask;
+  }

   INLINE(CellType* cells()) {
     return reinterpret_cast<CellType*>(this);
@@ -185,7 +201,8 @@
     const uint32_t end_mask   = (1 << (end & kBitIndexMask)) - 1;

     ASSERT(static_cast<int>(start_cell) < CellsCount());
-    ASSERT(static_cast<int>(end_cell) < CellsCount());
+    ASSERT(static_cast<int>(end_cell) < CellsCount() ||
+           (end_mask == 0 && static_cast<int>(end_cell) == CellsCount()));

     if (start_cell == end_cell) {
       cells()[start_cell] &= ~(start_mask & end_mask);
@@ -205,17 +222,61 @@
     for (int i = 0; i < CellsCount(); i++) cells()[i] = 0;
   }

-  static void PrintWord(const uint32_t& word, const char* sep = " ") {
+  static void PrintWord(uint32_t word, uint32_t himask = 0) {
     for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+      if ((mask & himask) != 0) PrintF("[");
       PrintF((mask & word) ? "1" : "0");
-    }
-    PrintF("%s", sep);
+      if ((mask & himask) != 0) PrintF("]");
+    }
   }

+  class CellPrinter {
+   public:
+    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
+
+    void Print(uint32_t pos, uint32_t cell) {
+      if (cell == seq_type) {
+        seq_length++;
+        return;
+      }
+
+      Flush();
+
+      if (IsSeq(cell)) {
+        seq_start = pos;
+        seq_length = 0;
+        seq_type = cell;
+        return;
+      }
+
+      PrintF("%d: ", pos);
+      PrintWord(cell);
+      PrintF("\n");
+    }
+
+    void Flush() {
+      if (seq_length > 0) {
+        PrintF("%d: %dx%d\n",
+               seq_start,
+               seq_type == 0 ? 0 : 1,
+               seq_length * kBitsPerCell);
+        seq_length = 0;
+      }
+    }
+
+ static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+   private:
+    uint32_t seq_start;
+    uint32_t seq_type;
+    uint32_t seq_length;
+  };
+
   void Print() {
+    CellPrinter printer;
     for (int i = 0; i < CellsCount(); i++) {
-      PrintWord(cells()[i]);
-    }
+      printer.Print(i, cells()[i]);
+    }
+    printer.Flush();
     PrintF("\n");
   }

@@ -257,15 +318,15 @@
     NUM_MEMORY_CHUNK_FLAGS
   };

-  void SetFlag(MemoryChunkFlags flag) {
+  void SetFlag(int flag) {
     flags_ |= 1 << flag;
   }

-  void ClearFlag(MemoryChunkFlags flag) {
+  void ClearFlag(int flag) {
     flags_ &= ~(1 << flag);
   }

-  bool IsFlagSet(MemoryChunkFlags flag) {
+  bool IsFlagSet(int flag) {
     return (flags_ & (1 << flag)) != 0;
   }

@@ -274,7 +335,7 @@
   static const intptr_t kAlignmentMask = kAlignment - 1;

static const size_t kHeaderSize = kPointerSize + kPointerSize + kPointerSize +
-    kPointerSize + kPointerSize;
+    kPointerSize + kPointerSize + kPointerSize;

   static const size_t kMarksBitmapLength =
     (1 << kPageSizeBits) >> (kPointerSizeLog2);
@@ -293,6 +354,7 @@

   // ---------------------------------------------------------------------
   // Markbits support
+
   class BitmapStorageDescriptor {
    public:
     INLINE(static int CellsCount(Address addr)) {
@@ -307,18 +369,20 @@
     return MarkbitsBitmap::FromAddress(address() + kHeaderSize);
   }

-  inline uint32_t Address2Markbit(Address addr) {
+  void PrintMarkbits() { markbits()->Print(); }
+
+  inline uint32_t AddressToMarkbitIndex(Address addr) {
return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
   }

-  inline static uint32_t FastAddress2Markbit(Address addr) {
+  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
     const intptr_t offset =
         reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

     return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
   }

-  inline Address Markbit2Address(uint32_t index) {
+  inline Address MarkbitIndexToAddress(uint32_t index) {
     return this->address() + (index << kPointerSizeLog2);
   }

@@ -469,6 +533,14 @@
   // Maximum object size that fits in a page.
   static const int kMaxHeapObjectSize = kObjectAreaSize;

+  static const int kFirstUsedCell =
+    (kBodyOffset/kPointerSize) >> MarkbitsBitmap::kBitsPerCellLog2;
+
+  static const int kLastUsedCell =
+    ((kPageSize - kPointerSize)/kPointerSize) >>
+      MarkbitsBitmap::kBitsPerCellLog2;
+
+
 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER
   static const int kDirtyFlagOffset = 2 * kPointerSize;
   static const int kRegionSizeLog2 = 8;
@@ -482,11 +554,22 @@
     // Page allocation watermark was bumped by preallocation during scavenge.
     // Correct watermark can be retrieved by CachedAllocationWatermark() method
     WATERMARK_INVALIDATED = NUM_MEMORY_CHUNK_FLAGS,
+
+    // We say that memory region [start_addr, end_addr[ is continuous if
+    // and only if:
+    //   a) start_addr coincides with the start of a valid heap object
+    //   b) for any valid heap object o in this region address
+    //      o->address() + o->Size() is either equal to end_addr or coincides
+    //      with the start of a valid heap object.
+    // We say that a page is continuous if and only if the region
+    // [page->ObjectAreaStart(), page->AllocationTop()[ is continuous.
+    // For non-continuous pages we say that address lb is a linearity boundary
+    // if and only if [lb, page->AllocationTop()[ is either empty or continuous.
+    IS_CONTINUOUS,
+
     NUM_PAGE_FLAGS  // Must be last
   };

-  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
-
   // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
   // scavenge we just invalidate the watermark on each old space page after
// processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
@@ -510,7 +593,7 @@

   inline void ClearGCFields();

- static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
+  static const int kAllocationWatermarkOffsetShift = NUM_PAGE_FLAGS;
   static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
   static const uint32_t kAllocationWatermarkOffsetMask =
       ((1 << kAllocationWatermarkOffsetBits) - 1) <<
@@ -526,17 +609,27 @@
   // Instead of clearing this flag from all pages we just flip
   // its meaning at the beginning of a scavenge.
   static intptr_t watermark_invalidated_mark_;
+
+  // See comments for IS_CONTINUOUS flag.
+  Address linearity_boundary() { return linearity_boundary_; }
+  void set_linearity_boundary(Address linearity_boundary) {
+    linearity_boundary_ = linearity_boundary;
+  }

  private:
   static Page* Initialize(MemoryChunk* chunk) {
     Page* page = static_cast<Page*>(chunk);
     page->allocation_watermark_ = page->body();
     page->InvalidateWatermark(true);
+    page->SetFlag(IS_CONTINUOUS);
     return page;
   }

   Address allocation_watermark_;

+  // See comments for IS_CONTINUOUS flag.
+  Address linearity_boundary_;
+
   friend class MemoryAllocator;
 };

@@ -845,9 +938,10 @@
// -----------------------------------------------------------------------------
 // Heap object iterator in new/old/map spaces.
 //
-// A HeapObjectIterator iterates objects from a given address to the
-// top of a space. The given address must be below the current
-// allocation pointer (space top). There are some caveats.
+// A HeapObjectIterator iterates objects from the bottom of the given space
+// to its top or from the bottom of the given page to its top.
+//
+// There are some caveats.
 //
 // (1) If the space top changes upward during iteration (because of
 //     allocating new objects), the iterator does not iterate objects
@@ -866,16 +960,11 @@

 class HeapObjectIterator: public ObjectIterator {
  public:
-  // Creates a new object iterator in a given space. If a start
-  // address is not given, the iterator starts from the space bottom.
+  // Creates a new object iterator in a given space.
   // If the size function is not given, the iterator calls the default
   // Object::Size().
   explicit HeapObjectIterator(PagedSpace* space);
   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
-  HeapObjectIterator(PagedSpace* space, Address start);
-  HeapObjectIterator(PagedSpace* space,
-                     Address start,
-                     HeapObjectCallback size_func);
   HeapObjectIterator(Page* page, HeapObjectCallback size_func);

   inline HeapObject* next() {
@@ -894,16 +983,23 @@

   HeapObject* FromCurrentPage() {
     ASSERT(cur_addr_ < cur_limit_);
-
     HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
-    ASSERT_OBJECT_SIZE(obj_size);
-
-    cur_addr_ += obj_size;
-    ASSERT(cur_addr_ <= cur_limit_);
+
+    Page* p = Page::FromAddress(cur_addr_);
+    if (p->IsFlagSet(Page::IS_CONTINUOUS)) {
+      int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+      ASSERT_OBJECT_SIZE(obj_size);
+
+      cur_addr_ += obj_size;
+      ASSERT(cur_addr_ <= cur_limit_);
+    } else {
+      AdvanceUsingMarkbits();
+    }

     return obj;
   }
+
+  void AdvanceUsingMarkbits();

   // Slow path of next, goes into the next page.
   HeapObject* FromNextPage();
@@ -1567,13 +1663,13 @@
   Address start() { return start_; }
   uintptr_t mask() { return address_mask_; }

-  INLINE(uint32_t Address2MarkbitIndex(Address addr)) {
+  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
     ASSERT(Contains(addr));
     ASSERT(IsAligned(OffsetFrom(addr), kPointerSize));
     return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
   }

-  INLINE(Address MarkbitIndex2Address(uint32_t index)) {
+  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
     return reinterpret_cast<Address>(index << kPointerSizeLog2);
   }

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to