Revision: 20966
Author:   [email protected]
Date:     Fri Apr 25 09:50:42 2014 UTC
Log:      Remove lazy sweeping.

BUG=
[email protected]

Review URL: https://codereview.chromium.org/254603002
http://code.google.com/p/v8/source/detail?r=20966

Modified:
 /branches/bleeding_edge/src/flag-definitions.h
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/incremental-marking.cc
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/mark-compact.h
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/spaces.h
 /branches/bleeding_edge/src/store-buffer.cc
 /branches/bleeding_edge/test/cctest/test-heap.cc
 /branches/bleeding_edge/test/mjsunit/regress/regress-1708.js

=======================================
--- /branches/bleeding_edge/src/flag-definitions.h Thu Apr 24 12:31:10 2014 UTC
+++ /branches/bleeding_edge/src/flag-definitions.h Fri Apr 25 09:50:42 2014 UTC
@@ -565,8 +565,6 @@

 // mark-compact.cc
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(lazy_sweeping, true,
-            "Use lazy sweeping for old pointer and data spaces")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
 DEFINE_bool(compact_code_space, true,
=======================================
--- /branches/bleeding_edge/src/heap.cc Fri Apr 25 08:10:44 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Fri Apr 25 09:50:42 2014 UTC
@@ -4600,17 +4600,8 @@
   // An incremental GC progresses as follows:
   // 1. many incremental marking steps,
   // 2. one old space mark-sweep-compact,
-  // 3. many lazy sweep steps.
   // Use mark-sweep-compact events to count incremental GCs in a round.

-  if (incremental_marking()->IsStopped()) {
-    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
-        !IsSweepingComplete() &&
-        !AdvanceSweepers(static_cast<int>(step_size))) {
-      return false;
-    }
-  }
-
   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
     if (EnoughGarbageSinceLastIdleRound()) {
       StartIdleRound();
@@ -5332,14 +5323,6 @@
       + property_cell_space_->SizeOfObjects()
       + lo_space_->SizeOfObjects();
 }
-
-
-bool Heap::AdvanceSweepers(int step_size) {
-  ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
-  bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
-  sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
-  return sweeping_complete;
-}


 int64_t Heap::PromotedExternalMemorySize() {
=======================================
--- /branches/bleeding_edge/src/heap.h  Fri Apr 25 08:10:44 2014 UTC
+++ /branches/bleeding_edge/src/heap.h  Fri Apr 25 09:50:42 2014 UTC
@@ -1553,14 +1553,6 @@
     return &incremental_marking_;
   }

-  bool IsSweepingComplete() {
-    return !mark_compact_collector()->IsConcurrentSweepingInProgress() &&
-           old_data_space()->IsLazySweepingComplete() &&
-           old_pointer_space()->IsLazySweepingComplete();
-  }
-
-  bool AdvanceSweepers(int step_size);
-
   bool EnsureSweepersProgressed(int step_size) {
     bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
     sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size);
=======================================
--- /branches/bleeding_edge/src/incremental-marking.cc Mon Mar 3 13:27:59 2014 UTC
+++ /branches/bleeding_edge/src/incremental-marking.cc Fri Apr 25 09:50:42 2014 UTC
@@ -565,7 +565,7 @@

   ResetStepCounters();

-  if (heap_->IsSweepingComplete()) {
+  if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
     StartMarking(flag);
   } else {
     if (FLAG_trace_incremental_marking) {
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Wed Apr 23 15:43:39 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Fri Apr 25 09:50:42 2014 UTC
@@ -4152,7 +4152,6 @@

void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
-                                      sweeper == LAZY_CONSERVATIVE ||
                                       sweeper == PARALLEL_CONSERVATIVE ||
                                       sweeper == CONCURRENT_CONSERVATIVE);
   space->ClearStats();
@@ -4160,7 +4159,6 @@
   PageIterator it(space);

   int pages_swept = 0;
-  bool lazy_sweeping_active = false;
   bool unused_page_present = false;
   bool parallel_sweeping_active = false;

@@ -4206,25 +4204,6 @@
         pages_swept++;
         break;
       }
-      case LAZY_CONSERVATIVE: {
-        if (lazy_sweeping_active) {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          space->IncreaseUnsweptFreeBytes(p);
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
-          pages_swept++;
-          space->SetPagesToSweep(p->next_page());
-          lazy_sweeping_active = true;
-        }
-        break;
-      }
       case CONCURRENT_CONSERVATIVE:
       case PARALLEL_CONSERVATIVE: {
         if (!parallel_sweeping_active) {
@@ -4285,8 +4264,7 @@
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
-  SweeperType how_to_sweep =
-      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+  SweeperType how_to_sweep = CONSERVATIVE;
   if (AreSweeperThreadsActivated()) {
     if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
     if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
=======================================
--- /branches/bleeding_edge/src/mark-compact.h  Fri Apr 11 10:36:09 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.h  Fri Apr 25 09:50:42 2014 UTC
@@ -594,7 +594,6 @@

   enum SweeperType {
     CONSERVATIVE,
-    LAZY_CONSERVATIVE,
     PARALLEL_CONSERVATIVE,
     CONCURRENT_CONSERVATIVE,
     PRECISE
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Wed Apr 23 15:08:03 2014 UTC
+++ /branches/bleeding_edge/src/spaces.cc       Fri Apr 25 09:50:42 2014 UTC
@@ -953,7 +953,6 @@
     : Space(heap, id, executable),
       free_list_(this),
       was_swept_conservatively_(false),
-      first_unswept_page_(Page::FromAddress(NULL)),
       unswept_free_bytes_(0) {
   if (id == CODE_SPACE) {
     area_size_ = heap->isolate()->memory_allocator()->
@@ -1130,14 +1129,6 @@
 void PagedSpace::ReleasePage(Page* page, bool unlink) {
   ASSERT(page->LiveBytes() == 0);
   ASSERT(AreaSize() == page->area_size());
-
-  // Adjust list of unswept pages if the page is the head of the list.
-  if (first_unswept_page_ == page) {
-    first_unswept_page_ = page->next_page();
-    if (first_unswept_page_ == anchor()) {
-      first_unswept_page_ = Page::FromAddress(NULL);
-    }
-  }

   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
@@ -2555,24 +2546,8 @@
   // on the first allocation after the sweep.
   EmptyAllocationInfo();

-  // Stop lazy sweeping and clear marking bits for unswept pages.
-  if (first_unswept_page_ != NULL) {
-    Page* p = first_unswept_page_;
-    do {
-      // Do not use ShouldBeSweptLazily predicate here.
-      // New evacuation candidates were selected but they still have
-      // to be swept before collection starts.
-      if (!p->WasSwept()) {
-        Bitmap::Clear(p);
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-      }
-      p = p->next_page();
-    } while (p != anchor());
-  }
-  first_unswept_page_ = Page::FromAddress(NULL);
+  // This counter will be increased for pages which will be swept by the
+  // sweeper threads.
   unswept_free_bytes_ = 0;

   // Clear the free list before a full GC---it will be rebuilt afterward.
@@ -2581,7 +2556,8 @@


 intptr_t PagedSpace::SizeOfObjects() {
-  ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
+ ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
+         (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
 }

@@ -2594,39 +2570,6 @@
   free_list_.RepairLists(heap());
 }

-
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
-  if (IsLazySweepingComplete()) return true;
-
-  intptr_t freed_bytes = 0;
-  Page* p = first_unswept_page_;
-  do {
-    Page* next_page = p->next_page();
-    if (ShouldBeSweptLazily(p)) {
-      if (FLAG_gc_verbose) {
-        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
-               reinterpret_cast<intptr_t>(p));
-      }
-      DecreaseUnsweptFreeBytes(p);
-      freed_bytes +=
-          MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
-                  this, NULL, p);
-    }
-    p = next_page;
-  } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
-  if (p == anchor()) {
-    first_unswept_page_ = Page::FromAddress(NULL);
-  } else {
-    first_unswept_page_ = p;
-  }
-
-  heap()->FreeQueuedChunks();
-
-  return IsLazySweepingComplete();
-}
-

 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
@@ -2656,17 +2599,15 @@
       }
       return false;
     }
-    return true;
-  } else {
-    return AdvanceSweeper(size_in_bytes);
   }
+  return true;
 }


 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.

- // If there are unswept pages advance lazy sweeper a bounded number of times
+  // If there are unswept pages advance sweeping a bounded number of times
   // until we find a size_in_bytes contiguous piece of memory
   const int kMaxSweepingTries = 5;
   bool sweeping_complete = false;
@@ -2693,10 +2634,9 @@
     return free_list_.Allocate(size_in_bytes);
   }

- // Last ditch, sweep all the remaining pages to try to find space. This may
-  // cause a pause.
-  if (!IsLazySweepingComplete()) {
-    EnsureSweeperProgress(kMaxInt);
+  // Last ditch, sweep all the remaining pages to try to find space.
+  if (heap()->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+    heap()->mark_compact_collector()->WaitUntilSweepingCompleted();

     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
=======================================
--- /branches/bleeding_edge/src/spaces.h        Wed Apr 23 15:08:03 2014 UTC
+++ /branches/bleeding_edge/src/spaces.h        Fri Apr 25 09:50:42 2014 UTC
@@ -1783,8 +1783,9 @@
   intptr_t Available() { return free_list_.available(); }

   // Allocated bytes in this space.  Garbage bytes that were not found due to
-  // lazy sweeping are counted as being allocated!  The bytes in the current
-  // linear allocation area (between top and limit) are also counted here.
+  // concurrent sweeping are counted as being allocated!  The bytes in the
+ // current linear allocation area (between top and limit) are also counted
+  // here.
   virtual intptr_t Size() { return accounting_stats_.Size(); }

// As size, but the bytes in lazily swept pages are estimated and the bytes
@@ -1885,24 +1886,18 @@

   // Evacuation candidates are swept by evacuator.  Needs to return a valid
   // result before _and_ after evacuation has finished.
-  static bool ShouldBeSweptLazily(Page* p) {
+  static bool ShouldBeSweptBySweeperThreads(Page* p) {
     return !p->IsEvacuationCandidate() &&
            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
            !p->WasSweptPrecisely();
   }
-
-  void SetPagesToSweep(Page* first) {
-    ASSERT(unswept_free_bytes_ == 0);
-    if (first == &anchor_) first = NULL;
-    first_unswept_page_ = first;
-  }

   void IncrementUnsweptFreeBytes(intptr_t by) {
     unswept_free_bytes_ += by;
   }

   void IncreaseUnsweptFreeBytes(Page* p) {
-    ASSERT(ShouldBeSweptLazily(p));
+    ASSERT(ShouldBeSweptBySweeperThreads(p));
     unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
   }

@@ -1911,7 +1906,7 @@
   }

   void DecreaseUnsweptFreeBytes(Page* p) {
-    ASSERT(ShouldBeSweptLazily(p));
+    ASSERT(ShouldBeSweptBySweeperThreads(p));
     unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
   }

@@ -1919,17 +1914,12 @@
     unswept_free_bytes_ = 0;
   }

-  bool AdvanceSweeper(intptr_t bytes_to_sweep);
-
-  // When parallel sweeper threads are active and the main thread finished
- // its sweeping phase, this function waits for them to complete, otherwise
-  // AdvanceSweeper with size_in_bytes is called.
+  // This function tries to steal size_in_bytes memory from the sweeper threads
+  // free-lists. If it does not succeed stealing enough memory, it will wait
+  // for the sweeper threads to finish sweeping.
+  // It returns true when sweeping is completed and false otherwise.
   bool EnsureSweeperProgress(intptr_t size_in_bytes);

-  bool IsLazySweepingComplete() {
-    return !first_unswept_page_->is_valid();
-  }
-
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }

@@ -1969,13 +1959,9 @@

   bool was_swept_conservatively_;

-  // The first page to be swept when the lazy sweeper advances. Is set
-  // to NULL when all pages have been swept.
-  Page* first_unswept_page_;
-
   // The number of free bytes which could be reclaimed by advancing the
-  // lazy sweeper.  This is only an estimation because lazy sweeping is
-  // done conservatively.
+ // concurrent sweeper threads. This is only an estimation because concurrent
+  // sweeping is done conservatively.
   intptr_t unswept_free_bytes_;

// Expands the space by allocating a fixed number of pages. Returns false if
=======================================
--- /branches/bleeding_edge/src/store-buffer.cc Tue Apr  8 16:31:57 2014 UTC
+++ /branches/bleeding_edge/src/store-buffer.cc Fri Apr 25 09:50:42 2014 UTC
@@ -508,9 +508,9 @@
// This function iterates over all the pointers in a paged space in the heap,
 // looking for pointers into new space.  Within the pages there may be dead
 // objects that have not been overwritten by free spaces or fillers because of
-// lazy sweeping.  These dead objects may not contain pointers to new space.
-// The garbage areas that have been swept properly (these will normally be the
-// large ones) will be marked with free space and filler map words.  In
+// concurrent sweeping.  These dead objects may not contain pointers to new
+// space. The garbage areas that have been swept properly (these will normally
+// be the large ones) will be marked with free space and filler map words.  In
 // addition any area that has never been used at all for object allocation must
 // be marked with a free space or filler.  Because the free space and filler
 // maps do not move we can always recognize these even after a compaction.
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Thu Apr 24 09:04:12 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Fri Apr 25 09:50:42 2014 UTC
@@ -1606,12 +1606,16 @@
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
-  CHECK(CcTest::heap()->old_pointer_space()->IsLazySweepingComplete());
+ MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
+  }
   int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());

   {
     // Allocate objects on several different old-space pages so that
-    // lazy sweeping kicks in for subsequent GC runs.
+    // concurrent sweeper threads will be busy sweeping the old space on
+    // subsequent GC runs.
     AlwaysAllocateScope always_allocate(CcTest::i_isolate());
     int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
     for (int i = 1; i <= 100; i++) {
@@ -1629,11 +1633,11 @@

CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));

-  // Advancing the sweeper step-wise should not change the heap size.
-  while (!CcTest::heap()->old_pointer_space()->IsLazySweepingComplete()) {
-    CcTest::heap()->old_pointer_space()->AdvanceSweeper(KB);
- CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
+  // Waiting for sweeper threads should not change heap size.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
   }
+ CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
 }


@@ -4199,11 +4203,8 @@
 }


-TEST(ArrayShiftLazySweeping) {
+TEST(ArrayShiftSweeping) {
   i::FLAG_expose_gc = true;
-  i::FLAG_parallel_sweeping = false;
-  i::FLAG_concurrent_sweeping = false;
-  i::FLAG_lazy_sweeping = true;
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
   Isolate* isolate = CcTest::i_isolate();
=======================================
--- /branches/bleeding_edge/test/mjsunit/regress/regress-1708.js Thu Sep 22 13:03:22 2011 UTC
+++ /branches/bleeding_edge/test/mjsunit/regress/regress-1708.js Fri Apr 25 09:50:42 2014 UTC
@@ -27,6 +27,10 @@

 // Regression test of a very rare corner case where left-trimming an
 // array caused invalid marking bit patterns on lazily swept pages.
+//
+// Lazy sweeping was deprecated. We are keeping the test case to make
+// sure that concurrent sweeping, which relies on similar assumptions
+// as lazy sweeping, works correctly.

 // Flags: --expose-gc --noincremental-marking --max-new-space-size 1000

@@ -34,7 +38,7 @@
   var head = new Array(1);
   var tail = head;

-  // Fill heap to increase old-space size and trigger lazy sweeping on
+ // Fill heap to increase old-space size and trigger concurrent sweeping on
   // some of the old-space pages.
   for (var i = 0; i < 200; i++) {
     tail[1] = new Array(1000);
@@ -44,7 +48,7 @@
   gc(); gc();

   // At this point "array" should have been promoted to old-space and be
-  // located in a lazy swept page with intact marking bits. Now shift
+ // located in a concurrently swept page with intact marking bits. Now shift
   // the array to trigger left-trimming operations.
   assertEquals(100, array.length);
   for (var i = 0; i < 50; i++) {
@@ -54,7 +58,7 @@

   // At this point "array" should have been trimmed from the left with
   // marking bits being correctly transfered to the new object start.
-  // Scavenging operations cause lazy sweeping to advance and verify
+  // Scavenging operations cause concurrent sweeping to advance and verify
   // that marking bit patterns are still sane.
   for (var i = 0; i < 200; i++) {
     tail[1] = new Array(1000);

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to