Revision: 3600
Author: [email protected]
Date: Wed Jan 13 11:16:07 2010
Log: Compact the map space during mark-sweep if, after collection, the size
of the map space would drop below a threshold.

Review URL: http://codereview.chromium.org/509035
http://code.google.com/p/v8/source/detail?r=3600

Modified:
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/spaces.h
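
In outline, the new policy compacts the map space only when map pointers are
no longer compactly encodable and the surviving maps would leave at least one
whole page to spare.  A minimal sketch of the trigger, using the names
introduced in the spaces.h hunk below (MapPointersEncodable() is pre-existing
V8 code):

  bool NeedsCompaction(int live_maps) {
    // Compact only if pointer encoding broke down and the survivors fit
    // with a full page to spare under the encodable page limit.
    return !MapPointersEncodable() &&
           live_maps <= kMapsPerPage * (kMaxMapPageIndex - 1);
  }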

=======================================
--- /branches/bleeding_edge/src/heap.h  Tue Jan 12 07:16:23 2010
+++ /branches/bleeding_edge/src/heap.h  Wed Jan 13 11:16:07 2010
@@ -804,6 +804,9 @@
   // Rebuild remembered set in old and map spaces.
   static void RebuildRSets();

+  // Update an old object's remembered set.
+  static int UpdateRSet(HeapObject* obj);
+
   // Commits from space if it is uncommitted.
   static void EnsureFromSpaceIsCommitted();

@@ -1074,9 +1077,6 @@
   static void ReportStatisticsAfterGC();
 #endif

-  // Update an old object's remembered set
-  static int UpdateRSet(HeapObject* obj);
-
   // Rebuild remembered set in an old space.
   static void RebuildRSets(PagedSpace* space);

@@ -1236,7 +1236,7 @@


 // Space iterator for iterating over all the paged spaces of the heap:
-// Map space, old pointer space, old data space and code space.
+// Map space, old pointer space, old data space, code space and cell space.
 // Returns each space in turn, and null when it is done.
 class PagedSpaces BASE_EMBEDDED {
  public:
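
As a usage note, PagedSpaces follows V8's usual iterator convention of
returning NULL once exhausted; the new SweepSpaces code below drives it like
this (condensed from the mark-compact.cc hunk):

  PagedSpaces spaces;
  while (PagedSpace* space = spaces.next()) {
    if (space == Heap::map_space()) continue;  // map space handled separately
    map_compact.UpdateMapPointersInPagedSpace(space);
  }
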
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Fri Dec 18 05:38:09 2009
+++ /branches/bleeding_edge/src/mark-compact.cc Wed Jan 13 11:16:07 2010
@@ -791,7 +791,7 @@
   // back pointers, reversing them all at once.  This allows us to find
   // those maps with map transitions that need to be nulled, and only
   // scan the descriptor arrays of those maps, not all maps.
-  // All of these actions are carried out only on maps of JSObects
+  // All of these actions are carried out only on maps of JSObjects
   // and related subtypes.
   while (map_iterator.has_next()) {
     Map* map = reinterpret_cast<Map*>(map_iterator.next());
@@ -1168,7 +1168,7 @@

 void MarkCompactCollector::DeallocateMapBlock(Address start,
                                               int size_in_bytes) {
-  // Objects in map space are frequently assumed to have size Map::kSize and a
+  // Objects in map space are assumed to have size Map::kSize and a
   // valid map in their first word.  Thus, we break the free block up into
   // chunks and free them separately.
   ASSERT(size_in_bytes % Map::kSize == 0);
@@ -1242,6 +1242,225 @@
 }


+class MapIterator : public HeapObjectIterator {
+ public:
+  MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
+
+  explicit MapIterator(Address start)
+      : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
+
+ private:
+  static int SizeCallback(HeapObject* unused) {
+    USE(unused);
+    return Map::kSize;
+  }
+};
+
+
+class MapCompact {
+ public:
+  explicit MapCompact(int live_maps)
+    : live_maps_(live_maps),
+      to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
+      map_to_evacuate_it_(to_evacuate_start_),
+      first_map_to_evacuate_(
+          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
+  }
+
+  void CompactMaps() {
+    // Since the number of maps to evacuate is known beforehand,
+    // we stop when there are no more vacant maps.
+    for (Map* next_vacant_map = NextVacantMap();
+         next_vacant_map;
+         next_vacant_map = NextVacantMap()) {
+      EvacuateMap(next_vacant_map, NextMapToEvacuate());
+    }
+
+#ifdef DEBUG
+    CheckNoMapsToEvacuate();
+#endif
+  }
+
+  void UpdateMapPointersInRoots() {
+    Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
+    GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
+  }
+
+  void FinishMapSpace() {
+    // Iterate through to space and finish move.
+    MapIterator it;
+    HeapObject* o = it.next();
+    for (; o != first_map_to_evacuate_; o = it.next()) {
+      Map* map = reinterpret_cast<Map*>(o);
+      ASSERT(!map->IsMarked());
+      ASSERT(!map->IsOverflowed());
+      ASSERT(map->IsMap());
+      Heap::UpdateRSet(map);
+    }
+  }
+
+  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
+    ASSERT(space != Heap::map_space());
+
+    PageIterator it(space, PageIterator::PAGES_IN_USE);
+    while (it.has_next()) {
+      Page* p = it.next();
+      UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
+    }
+  }
+
+  void UpdateMapPointersInNewSpace() {
+    NewSpace* space = Heap::new_space();
+    UpdateMapPointersInRange(space->bottom(), space->top());
+  }
+
+  void UpdateMapPointersInLargeObjectSpace() {
+    LargeObjectIterator it(Heap::lo_space());
+    while (it.has_next()) {
+      UpdateMapPointersInObject(it.next());
+    }
+  }
+
+  void Finish() {
+    Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+  }
+
+ private:
+  int live_maps_;
+  Address to_evacuate_start_;
+  MapIterator vacant_map_it_;
+  MapIterator map_to_evacuate_it_;
+  Map* first_map_to_evacuate_;
+
+  // Helper class for updating map pointers in HeapObjects.
+  class MapUpdatingVisitor : public ObjectVisitor {
+   public:
+    void VisitPointer(Object** p) {
+      UpdateMapPointer(p);
+    }
+
+    void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
+    }
+
+   private:
+    void UpdateMapPointer(Object** p) {
+      if (!(*p)->IsHeapObject()) return;
+      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
+
+      // Moved maps are tagged with an overflowed map word.  They are the
+      // only objects whose map word is overflowed, as marking is already
+      // complete.
+      MapWord map_word = old_map->map_word();
+      if (!map_word.IsOverflowed()) return;
+
+      *p = GetForwardedMap(map_word);
+    }
+  };
+
+  static MapUpdatingVisitor map_updating_visitor_;
+
+  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
+    while (true) {
+      ASSERT(it->has_next());
+      HeapObject* next = it->next();
+      if (next == last)
+        return NULL;
+      ASSERT(!next->IsOverflowed());
+      ASSERT(!next->IsMarked());
+      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
+      if (next->IsMap() == live)
+        return reinterpret_cast<Map*>(next);
+    }
+  }
+
+  Map* NextVacantMap() {
+    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
+    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
+    return map;
+  }
+
+  Map* NextMapToEvacuate() {
+    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
+    ASSERT(map != NULL);
+    ASSERT(map->IsMap());
+    return map;
+  }
+
+  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
+    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
+    ASSERT(map_to_evacuate->IsMap());
+
+    memcpy(
+        reinterpret_cast<void*>(vacant_map->address()),
+        reinterpret_cast<void*>(map_to_evacuate->address()),
+        Map::kSize);
+    ASSERT(vacant_map->IsMap());  // Due to memcpy above.
+
+    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
+    forwarding_map_word.SetOverflow();
+    map_to_evacuate->set_map_word(forwarding_map_word);
+
+    ASSERT(map_to_evacuate->map_word().IsOverflowed());
+    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
+  }
+
+  static Map* GetForwardedMap(MapWord map_word) {
+    ASSERT(map_word.IsOverflowed());
+    map_word.ClearOverflow();
+    Map* new_map = map_word.ToMap();
+    ASSERT_MAP_ALIGNED(new_map->address());
+    return new_map;
+  }
+
+  static int UpdateMapPointersInObject(HeapObject* obj) {
+    ASSERT(!obj->IsMarked());
+    Map* map = obj->map();
+    ASSERT(Heap::map_space()->Contains(map));
+    MapWord map_word = map->map_word();
+    ASSERT(!map_word.IsMarked());
+    if (map_word.IsOverflowed()) {
+      Map* new_map = GetForwardedMap(map_word);
+      ASSERT(Heap::map_space()->Contains(new_map));
+      obj->set_map(new_map);
+
+#ifdef DEBUG
+      if (FLAG_gc_verbose) {
+        PrintF("update %p : %p -> %p\n", obj->address(),
+              map, new_map);
+      }
+#endif
+    }
+
+    int size = obj->SizeFromMap(map);
+    obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
+    return size;
+  }
+
+  static void UpdateMapPointersInRange(Address start, Address end) {
+    HeapObject* object;
+    int size;
+    for (Address current = start; current < end; current += size) {
+      object = HeapObject::FromAddress(current);
+      size = UpdateMapPointersInObject(object);
+      ASSERT(size > 0);
+    }
+  }
+
+#ifdef DEBUG
+  void CheckNoMapsToEvacuate() {
+    if (!FLAG_enable_slow_asserts)
+      return;
+
+    while (map_to_evacuate_it_.has_next())
+      ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
+  }
+#endif
+};
+
+MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
+
+
 void MarkCompactCollector::SweepSpaces() {
   ASSERT(state_ == SWEEP_SPACES);
   ASSERT(!IsCompacting());
@@ -1256,6 +1475,26 @@
   SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
   SweepSpace(Heap::new_space());
   SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+  int live_maps = Heap::map_space()->Size() / Map::kSize;
+  ASSERT(live_map_objects_ == live_maps);
+
+  if (Heap::map_space()->NeedsCompaction(live_maps)) {
+    MapCompact map_compact(live_maps);
+
+    map_compact.CompactMaps();
+    map_compact.UpdateMapPointersInRoots();
+
+    map_compact.FinishMapSpace();
+    PagedSpaces spaces;
+    while (PagedSpace* space = spaces.next()) {
+      if (space == Heap::map_space()) continue;
+      map_compact.UpdateMapPointersInPagedSpace(space);
+    }
+    map_compact.UpdateMapPointersInNewSpace();
+    map_compact.UpdateMapPointersInLargeObjectSpace();
+
+    map_compact.Finish();
+  }
 }
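
The evacuation above hinges on the map word's overflow bit, which is unused
once marking has completed: EvacuateMap leaves a tagged forwarding pointer
behind, and the updating visitors decode it.  A condensed round-trip sketch
(MapWord calls as in the patch; old_map and slot are illustrative names):

  // Encode: after memcpy'ing map_to_evacuate into vacant_map, tag the
  // old location with a forwarding pointer marked by the overflow bit.
  MapWord forwarding = MapWord::FromMap(vacant_map);
  forwarding.SetOverflow();
  map_to_evacuate->set_map_word(forwarding);

  // Decode: a visitor redirects any slot still pointing at the old
  // location; only moved maps have an overflowed map word at this point.
  MapWord word = old_map->map_word();
  if (word.IsOverflowed()) {
    word.ClearOverflow();
    *slot = word.ToMap();  // the map's new home
  }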


=======================================
--- /branches/bleeding_edge/src/spaces.h        Tue Jan 12 07:16:23 2010
+++ /branches/bleeding_edge/src/spaces.h        Wed Jan 13 11:16:07 2010
@@ -1731,6 +1731,10 @@
   // the page after current_page (there is assumed to be one).
   HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

+  void ResetFreeList() {
+    free_list_.Reset();
+  }
+
  private:
   // The size of objects in this space.
   int object_size_in_bytes_;
@@ -1771,6 +1775,59 @@
     ASSERT(n_of_pages == CountTotalPages());
     return n_of_pages <= kMaxMapPageIndex;
   }
+
+  // Should be called after a forced sweep to determine whether the map
+  // space needs compaction.
+  bool NeedsCompaction(int live_maps) {
+    return !MapPointersEncodable() && live_maps <= kCompactionThreshold;
+  }
+
+  Address TopAfterCompaction(int live_maps) {
+    ASSERT(NeedsCompaction(live_maps));
+
+    int pages_left = live_maps / kMapsPerPage;
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    while (pages_left-- > 0) {
+      ASSERT(it.has_next());
+      it.next()->ClearRSet();
+    }
+    ASSERT(it.has_next());
+    Page* top_page = it.next();
+    top_page->ClearRSet();
+    ASSERT(top_page->is_valid());
+
+    int offset = (live_maps % kMapsPerPage) * Map::kSize;
+    Address top = top_page->ObjectAreaStart() + offset;
+    ASSERT(top < top_page->ObjectAreaEnd());
+    ASSERT(Contains(top));
+
+    return top;
+  }
+
+  void FinishCompaction(Address new_top, int live_maps) {
+    Page* top_page = Page::FromAddress(new_top);
+    ASSERT(top_page->is_valid());
+
+    SetAllocationInfo(&allocation_info_, top_page);
+    allocation_info_.top = new_top;
+
+    int new_size = live_maps * Map::kSize;
+    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
+    accounting_stats_.AllocateBytes(new_size);
+
+#ifdef DEBUG
+    if (FLAG_enable_slow_asserts) {
+      int actual_size = 0;
+      for (Page* p = first_page_; p != top_page; p = p->next_page())
+        actual_size += kMapsPerPage * Map::kSize;
+      actual_size += (new_top - top_page->ObjectAreaStart());
+      ASSERT(accounting_stats_.Size() == actual_size);
+    }
+#endif
+
+    Shrink();
+    ResetFreeList();
+  }

  protected:
 #ifdef DEBUG
@@ -1778,6 +1835,11 @@
 #endif

  private:
+  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+
+  // Do map space compaction if there would be a gap of at least one page
+  // after compaction.
+  static const int kCompactionThreshold =
+      kMapsPerPage * (kMaxMapPageIndex - 1);
+
   // An array of page start address in a map space.
   Address page_addresses_[kMaxMapPageIndex + 1];
