Revision: 9481
Author:   [email protected]
Date:     Thu Sep 29 05:27:31 2011
Log:      Implement shrinking of paged spaces during sweeping.

For each paged space we release all but one of the unused pages after
marking (when we know the number of live bytes on each page) but before
actually sweeping them. This is not yet done for lazily swept pages.
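
To illustrate the policy, here is a self-contained toy sketch (plain C++;
the Page struct, SweepAndShrink, and the std::list stand-in are invented
for this example and are not V8 code):

  #include <cstdio>
  #include <list>

  struct Page { int live_bytes; };

  // Release every unused page except the first one encountered,
  // mirroring the sweep loop this patch adds to mark-compact.cc.
  static void SweepAndShrink(std::list<Page>& pages) {
    bool unused_page_present = false;
    for (auto it = pages.begin(); it != pages.end();) {
      if (it->live_bytes == 0) {
        if (unused_page_present) {
          it = pages.erase(it);  // plays the role of ReleasePage()
          continue;              // released pages are not swept
        }
        unused_page_present = true;  // the first empty page is kept
      }
      // ... a real sweeper would process *it here ...
      ++it;
    }
  }

  int main() {
    std::list<Page> space = {{0}, {128}, {0}, {0}};
    SweepAndShrink(space);
    std::printf("%zu pages left\n", space.size());  // prints: 2 pages left
    return 0;
  }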

[email protected]
BUG=v8:1614

Review URL: http://codereview.chromium.org/7891010
http://code.google.com/p/v8/source/detail?r=9481

Modified:
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/spaces.h

=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Sep 28 05:55:34 2011
+++ /branches/bleeding_edge/src/heap.cc Thu Sep 29 05:27:31 2011
@@ -144,7 +144,6 @@
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
       configured_(false),
-      last_empty_page_was_given_back_to_the_os_(false),
       chunks_queued_for_free_(NULL) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
@@ -813,8 +812,6 @@

   gc_state_ = NOT_IN_GC;

-  Shrink();
-
   isolate_->counters()->objs_since_last_full()->Set(0);

   contexts_disposed_ = 0;
@@ -5627,7 +5624,7 @@
   // Try to shrink all paged spaces.
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
-    space->Shrink();
+    space->ReleaseAllUnusedPages();
 }


@@ -6470,6 +6467,7 @@
       }
     }
   }
+  isolate_->heap()->store_buffer()->Compact();
   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
=======================================
--- /branches/bleeding_edge/src/heap.h  Mon Sep 26 04:14:41 2011
+++ /branches/bleeding_edge/src/heap.h  Thu Sep 29 05:27:31 2011
@@ -1045,21 +1045,6 @@
                                          Address end,
                                          ObjectSlotCallback callback);

-  // Iterate pointers to new space found in memory interval from start to end.
-  static void IteratePointersToNewSpace(Heap* heap,
-                                        Address start,
-                                        Address end,
-                                        ObjectSlotCallback callback);
-
-
-  // Iterate pointers to new space found in memory interval from start to end.
-  // This interval is considered to belong to the map space.
-  static void IteratePointersFromMapsToNewSpace(Heap* heap,
-                                                Address start,
-                                                Address end,
-                                                ObjectSlotCallback callback);
-
-
   // Returns whether the object resides in new space.
   inline bool InNewSpace(Object* object);
   inline bool InNewSpace(Address addr);
@@ -1441,12 +1426,6 @@
   inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
     scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
   }
-
-  bool ShouldWeGiveBackAPageToTheOS() {
-    last_empty_page_was_given_back_to_the_os_ =
-        !last_empty_page_was_given_back_to_the_os_;
-    return last_empty_page_was_given_back_to_the_os_;
-  }

   void QueueMemoryChunkForFree(MemoryChunk* chunk);
   void FreeQueuedChunks();
@@ -1818,7 +1797,6 @@

   VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;

-  bool last_empty_page_was_given_back_to_the_os_;
   MemoryChunk* chunks_queued_for_free_;

   friend class Factory;
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Wed Sep 28 10:45:58 2011
+++ /branches/bleeding_edge/src/mark-compact.cc Thu Sep 29 05:27:31 2011
@@ -2775,23 +2775,6 @@
       break;
   }
 }
-
-
-static inline void UpdateSlotsInRange(Object** start, Object** end) {
-  for (Object** slot = start;
-       slot < end;
-       slot++) {
-    Object* obj = *slot;
-    if (obj->IsHeapObject() &&
-        MarkCompactCollector::IsOnEvacuationCandidate(obj)) {
-      MapWord map_word = HeapObject::cast(obj)->map_word();
-      if (map_word.IsForwardingAddress()) {
-        *slot = map_word.ToForwardingAddress();
-        ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot));
-      }
-    }
-  }
-}


 enum SweepingMode {
@@ -3158,52 +3141,6 @@
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
 }
-
-
-INLINE(static uint32_t SweepFree(PagedSpace* space,
-                                 Page* p,
-                                 uint32_t free_start,
-                                 uint32_t region_end,
-                                 uint32_t* cells));
-
-
-static uint32_t SweepFree(PagedSpace* space,
-                          Page* p,
-                          uint32_t free_start,
-                          uint32_t region_end,
-                          uint32_t* cells) {
-  uint32_t free_cell_index = Bitmap::IndexToCell(free_start);
-  ASSERT(cells[free_cell_index] == 0);
-  while (free_cell_index < region_end && cells[free_cell_index] == 0) {
-    free_cell_index++;
-  }
-
-  if (free_cell_index >= region_end) {
-    return free_cell_index;
-  }
-
-  uint32_t free_end = Bitmap::CellToIndex(free_cell_index);
-  space->FreeOrUnmapPage(p,
-                         p->MarkbitIndexToAddress(free_start),
-                         (free_end - free_start) << kPointerSizeLog2);
-
-  return free_cell_index;
-}
-
-
-INLINE(static uint32_t NextCandidate(uint32_t cell_index,
-                                     uint32_t last_cell_index,
-                                     uint32_t* cells));
-
-
-static uint32_t NextCandidate(uint32_t cell_index,
-                              uint32_t last_cell_index,
-                              uint32_t* cells) {
-  do {
-    cell_index++;
-  } while (cell_index < last_cell_index && cells[cell_index] != 0);
-  return cell_index;
-}


 static const int kStartTableEntriesPerLine = 5;
@@ -3589,6 +3526,7 @@
   intptr_t freed_bytes = 0;
   intptr_t newspace_size = space->heap()->new_space()->Size();
   bool lazy_sweeping_active = false;
+  bool unused_page_present = false;

   while (it.has_next()) {
     Page* p = it.next();
@@ -3614,6 +3552,19 @@
       }
       continue;
     }
+
+    // One unused page is kept, all further are released before sweeping them.
+    if (p->LiveBytes() == 0) {
+      if (unused_page_present) {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        space->ReleasePage(p);
+        continue;
+      }
+      unused_page_present = true;
+    }

     if (FLAG_gc_verbose) {
       PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
@@ -3629,7 +3580,7 @@
       case LAZY_CONSERVATIVE: {
         freed_bytes += SweepConservatively(space, p);
         if (freed_bytes >= newspace_size && p != space->LastPage()) {
-          space->SetPagesToSweep(p->next_page(), space->LastPage());
+          space->SetPagesToSweep(p->next_page(), space->anchor());
           lazy_sweeping_active = true;
         }
         break;
@@ -3647,6 +3598,9 @@
       }
     }
   }
+
+  // Give pages that are queued to be freed back to the OS.
+  heap()->FreeQueuedChunks();
 }


=======================================
--- /branches/bleeding_edge/src/spaces.cc       Thu Sep 29 05:23:05 2011
+++ /branches/bleeding_edge/src/spaces.cc       Thu Sep 29 05:27:31 2011
@@ -763,8 +763,30 @@
 #endif


-void PagedSpace::Shrink() {
-  // TODO(1614) Not implemented.
+void PagedSpace::ReleasePage(Page* page) {
+  ASSERT(page->LiveBytes() == 0);
+  page->Unlink();
+  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
+    heap()->isolate()->memory_allocator()->Free(page);
+  } else {
+    heap()->QueueMemoryChunkForFree(page);
+  }
+
+  ASSERT(Capacity() > 0);
+  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+}
+
+
+void PagedSpace::ReleaseAllUnusedPages() {
+  PageIterator it(this);
+  while (it.has_next()) {
+    Page* page = it.next();
+    if (page->LiveBytes() == 0) {
+      ReleasePage(page);
+    }
+  }
+  heap()->FreeQueuedChunks();
 }


@@ -1647,25 +1669,6 @@
   large_list_ = NULL;
   huge_list_ = NULL;
 }
-
-
-int PagedSpace::FreeOrUnmapPage(Page* page, Address start, int size_in_bytes) {
-  Heap* heap = page->heap();
-  // TODO(gc): When we count the live bytes per page we can free empty pages
-  // instead of sweeping.  At that point this if should be turned into an
-  // ASSERT that the area to be freed cannot be the entire page.
-  if (size_in_bytes == Page::kObjectAreaSize &&
-      heap->ShouldWeGiveBackAPageToTheOS()) {
-    page->Unlink();
-    if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
-      heap->isolate()->memory_allocator()->Free(page);
-    } else {
-      heap->QueueMemoryChunkForFree(page);
-    }
-    return 0;
-  }
-  return Free(start, size_in_bytes);
-}


 int FreeList::Free(Address start, int size_in_bytes) {
@@ -1920,7 +1923,7 @@

   // Stop lazy sweeping and clear marking bits for unswept pages.
   if (first_unswept_page_ != NULL) {
-    Page* last = last_unswept_page_->next_page();
+    Page* last = last_unswept_page_;
     Page* p = first_unswept_page_;
     do {
       // Do not use ShouldBeSweptLazily predicate here.
@@ -1977,7 +1980,7 @@
   if (IsSweepingComplete()) return true;

   intptr_t freed_bytes = 0;
-  Page* last = last_unswept_page_->next_page();
+  Page* last = last_unswept_page_;
   Page* p = first_unswept_page_;
   do {
     Page* next_page = p->next_page();
=======================================
--- /branches/bleeding_edge/src/spaces.h        Thu Sep 22 10:10:40 2011
+++ /branches/bleeding_edge/src/spaces.h        Thu Sep 29 05:27:31 2011
@@ -1234,6 +1234,15 @@
     size_ += size_in_bytes;
     ASSERT(size_ >= 0);
   }
+
+  // Shrink the space by removing available bytes.  Since shrinking is done
+  // during sweeping, bytes have been marked as being in use (part of the size)
+  // and are hereby freed.
+  void ShrinkSpace(int size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    size_ -= size_in_bytes;
+    ASSERT(size_ >= 0);
+  }

   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
@@ -1483,8 +1492,6 @@
     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
     return size_in_bytes - wasted;
   }
-
-  int FreeOrUnmapPage(Page* page, Address start, int size_in_bytes);

   // Set space allocation info.
   void SetTop(Address top, Address limit) {
@@ -1502,8 +1509,11 @@
     accounting_stats_.ExpandSpace(size);
   }

-  // Releases half of unused pages.
-  void Shrink();
+  // Releases an unused page and shrinks the space.
+  void ReleasePage(Page* page);
+
+  // Releases all of the unused pages.
+  void ReleaseAllUnusedPages();

   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
