Revision: 23283
Author:   [email protected]
Date:     Thu Aug 21 14:50:18 2014 UTC
Log:      Remove conservative sweeping.

BUG=
[email protected]

Review URL: https://codereview.chromium.org/479113004
http://code.google.com/p/v8/source/detail?r=23283

Modified:
 /branches/bleeding_edge/src/flag-definitions.h
 /branches/bleeding_edge/src/heap/heap.cc
 /branches/bleeding_edge/src/heap/heap.h
 /branches/bleeding_edge/src/heap/mark-compact-inl.h
 /branches/bleeding_edge/src/heap/mark-compact.cc
 /branches/bleeding_edge/src/heap/mark-compact.h
 /branches/bleeding_edge/src/heap/spaces.cc
 /branches/bleeding_edge/src/heap/spaces.h
 /branches/bleeding_edge/src/heap/store-buffer.cc
 /branches/bleeding_edge/src/heap-snapshot-generator.cc

=======================================
--- /branches/bleeding_edge/src/flag-definitions.h Thu Aug 21 12:06:25 2014 UTC
+++ /branches/bleeding_edge/src/flag-definitions.h Thu Aug 21 14:50:18 2014 UTC
@@ -526,7 +526,6 @@
             "trace progress of the incremental marking")
 DEFINE_BOOL(track_gc_object_stats, false,
             "track object counts and memory usage")
-DEFINE_BOOL(always_precise_sweeping, true, "always sweep precisely")
 DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
 DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
 DEFINE_INT(sweeper_threads, 0,
=======================================
--- /branches/bleeding_edge/src/heap/heap.cc    Thu Aug 21 14:42:22 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap.cc    Thu Aug 21 14:50:18 2014 UTC
@@ -1273,14 +1273,10 @@
        object = code_it.Next())
     object->Iterate(&v);

- // The old data space was normally swept conservatively so that the iterator
-  // doesn't work, so we normally skip the next bit.
-  if (heap->old_data_space()->swept_precisely()) {
     HeapObjectIterator data_it(heap->old_data_space());
     for (HeapObject* object = data_it.Next(); object != NULL;
          object = data_it.Next())
       object->Iterate(&v);
-  }
 }
 #endif  // VERIFY_HEAP

@@ -4242,9 +4238,7 @@
 bool Heap::IsHeapIterable() {
   // TODO(hpayer): This function is not correct. Allocation folding in old
   // space breaks the iterability.
-  return (old_pointer_space()->swept_precisely() &&
-          old_data_space()->swept_precisely() &&
-          new_space_top_after_last_gc_ == new_space()->top());
+  return new_space_top_after_last_gc_ == new_space()->top();
 }


=======================================
--- /branches/bleeding_edge/src/heap/heap.h     Thu Aug 21 14:42:22 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap.h     Thu Aug 21 14:50:18 2014 UTC
@@ -715,14 +715,11 @@
       const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);

   static const int kNoGCFlags = 0;
-  static const int kSweepPreciselyMask = 1;
-  static const int kReduceMemoryFootprintMask = 2;
-  static const int kAbortIncrementalMarkingMask = 4;
+  static const int kReduceMemoryFootprintMask = 1;
+  static const int kAbortIncrementalMarkingMask = 2;

-  // Making the heap iterable requires us to sweep precisely and abort any
-  // incremental marking as well.
-  static const int kMakeHeapIterableMask =
-      kSweepPreciselyMask | kAbortIncrementalMarkingMask;
+  // Making the heap iterable requires us to abort incremental marking.
+  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

  // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
  // non-zero, then the slower precise sweeper is used, which leaves the heap
=======================================
--- /branches/bleeding_edge/src/heap/mark-compact-inl.h Tue Aug 5 08:18:22 2014 UTC
+++ /branches/bleeding_edge/src/heap/mark-compact-inl.h Thu Aug 21 14:50:18 2014 UTC
@@ -23,7 +23,6 @@


 void MarkCompactCollector::SetFlags(int flags) {
-  sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
   abort_incremental_marking_ =
       ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
=======================================
--- /branches/bleeding_edge/src/heap/mark-compact.cc Wed Aug 20 12:10:41 2014 UTC
+++ /branches/bleeding_edge/src/heap/mark-compact.cc Thu Aug 21 14:50:18 2014 UTC
@@ -41,7 +41,6 @@
 #ifdef DEBUG
       state_(IDLE),
 #endif
-      sweep_precisely_(false),
       reduce_memory_footprint_(false),
       abort_incremental_marking_(false),
       marking_parity_(ODD_MARKING_PARITY),
@@ -200,7 +199,6 @@


 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
-  if (!space->swept_precisely()) return;
   if (FLAG_use_allocation_folding &&
(space == heap->old_pointer_space() || space == heap->old_data_space())) {
     return;
@@ -3126,7 +3124,7 @@
   AlwaysAllocateScope always_allocate(isolate());
   PagedSpace* space = static_cast<PagedSpace*>(p->owner());
   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-  p->MarkSweptPrecisely();
+  p->SetWasSwept();

   int offsets[16];

@@ -3290,10 +3288,7 @@
 }


-// Sweep a space precisely.  After this has been done the space can
-// be iterated precisely, hitting only the live objects.  Code space
-// is always swept precisely because we want to be able to iterate
-// over it.  Map space is swept precisely, because it is not compacted.
+// Sweeps a page. After sweeping the page can be iterated.
 // Slots in live objects pointing into evacuation candidates are updated
 // if requested.
 // Returns the size of the biggest continuous freed memory chunk in bytes.
@@ -3301,8 +3296,8 @@
           MarkCompactCollector::SweepingParallelism parallelism,
           SkipListRebuildingMode skip_list_mode,
           FreeSpaceTreatmentMode free_space_mode>
-static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
-                          ObjectVisitor* v) {
+static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
+                 ObjectVisitor* v) {
   DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
@@ -3384,7 +3379,7 @@
     // sweeping by the main thread.
     p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
   } else {
-    p->MarkSweptPrecisely();
+    p->SetWasSwept();
   }
return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
@@ -3621,22 +3616,24 @@

         switch (space->identity()) {
           case OLD_DATA_SPACE:
-            SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
+            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
             break;
           case OLD_POINTER_SPACE:
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                           IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-                space, NULL, p, &updating_visitor);
+            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
             break;
           case CODE_SPACE:
             if (FLAG_zap_code_space) {
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                             REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
-                  space, NULL, p, &updating_visitor);
+              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                    REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
             } else {
- SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                             REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
-                  space, NULL, p, &updating_visitor);
+              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                    REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+ &updating_visitor);
             }
             break;
           default:
@@ -4117,182 +4114,6 @@
   }
   return objects;
 }
-
-
-static inline Address DigestFreeStart(Address approximate_free_start,
-                                      uint32_t free_start_cell) {
-  DCHECK(free_start_cell != 0);
-
-  // No consecutive 1 bits.
-  DCHECK((free_start_cell & (free_start_cell << 1)) == 0);
-
-  int offsets[16];
-  uint32_t cell = free_start_cell;
-  int offset_of_last_live;
-  if ((cell & 0x80000000u) != 0) {
-    // This case would overflow below.
-    offset_of_last_live = 31;
-  } else {
- // Remove all but one bit, the most significant. This is an optimization
-    // that may or may not be worthwhile.
-    cell |= cell >> 16;
-    cell |= cell >> 8;
-    cell |= cell >> 4;
-    cell |= cell >> 2;
-    cell |= cell >> 1;
-    cell = (cell + 1) >> 1;
-    int live_objects = MarkWordToObjectStarts(cell, offsets);
-    DCHECK(live_objects == 1);
-    offset_of_last_live = offsets[live_objects - 1];
-  }
-  Address last_live_start =
-      approximate_free_start + offset_of_last_live * kPointerSize;
-  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
-  Address free_start = last_live_start + last_live->Size();
-  return free_start;
-}
-
-
-static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
-  DCHECK(cell != 0);
-
-  // No consecutive 1 bits.
-  DCHECK((cell & (cell << 1)) == 0);
-
-  int offsets[16];
-  if (cell == 0x80000000u) {  // Avoid overflow below.
-    return block_address + 31 * kPointerSize;
-  }
-  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
-  DCHECK((first_set_bit & cell) == first_set_bit);
-  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
-  DCHECK(live_objects == 1);
-  USE(live_objects);
-  return block_address + offsets[0] * kPointerSize;
-}
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_ON_MAIN_THREAD mode.
-template int MarkCompactCollector::SweepConservatively<
- MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*);
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_IN_PARALLEL mode.
-template int MarkCompactCollector::SweepConservatively<
- MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*);
-
-
-// Sweeps a space conservatively.  After this has been done the larger free
-// spaces have been put on the free list and the smaller ones have been
-// ignored and left untouched. A free space is always either ignored or put
-// on the free list, never split up into two parts.  This is important
-// because it means that any FreeSpace maps left actually describe a region of
-// memory that can be ignored when scanning.  Dead objects other than free
-// spaces will not contain the free space map.
-template <MarkCompactCollector::SweepingParallelism mode>
-int MarkCompactCollector::SweepConservatively(PagedSpace* space,
- FreeList* free_list, Page* p) {
-  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
-  DCHECK(
- (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) ||
-      (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
-       free_list == NULL));
-
-  intptr_t freed_bytes = 0;
-  intptr_t max_freed_bytes = 0;
-  size_t size = 0;
-
- // Skip over all the dead objects at the start of the page and mark them free.
-  Address cell_base = 0;
-  MarkBit::CellType* cell = NULL;
-  MarkBitCellIterator it(p);
-  for (; !it.Done(); it.Advance()) {
-    cell_base = it.CurrentCellBase();
-    cell = it.CurrentCell();
-    if (*cell != 0) break;
-  }
-
-  if (it.Done()) {
-    size = p->area_end() - p->area_start();
-    freed_bytes =
- Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
-    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-    DCHECK_EQ(0, p->LiveBytes());
-    if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-      // When concurrent sweeping is active, the page will be marked after
-      // sweeping by the main thread.
-      p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
-    } else {
-      p->MarkSweptConservatively();
-    }
- return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
-  }
-
- // Grow the size of the start-of-page free space a little to get up to the
-  // first live object.
-  Address free_end = StartOfLiveObject(cell_base, *cell);
-  // Free the first free space.
-  size = free_end - p->area_start();
-  freed_bytes =
- Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
-  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-
-  // The start of the current free area is represented in undigested form by
-  // the address of the last 32-word section that contained a live object and
-  // the marking bitmap for that cell, which describes where the live object
-  // started.  Unless we find a large free space in the bitmap we will not
- // digest this pair into a real address. We start the iteration here at the
-  // first word in the marking bit map that indicates a live object.
-  Address free_start = cell_base;
-  MarkBit::CellType free_start_cell = *cell;
-
-  for (; !it.Done(); it.Advance()) {
-    cell_base = it.CurrentCellBase();
-    cell = it.CurrentCell();
-    if (*cell != 0) {
- // We have a live object. Check approximately whether it is more than 32
-      // words since the last live object.
-      if (cell_base - free_start > 32 * kPointerSize) {
-        free_start = DigestFreeStart(free_start, free_start_cell);
-        if (cell_base - free_start > 32 * kPointerSize) {
-          // Now that we know the exact start of the free space it still looks
-          // like we have a large enough free space to be worth bothering with,
-          // so now we need to find the start of the first live object at the
-          // end of the free space.
-          free_end = StartOfLiveObject(cell_base, *cell);
-          freed_bytes = Free<mode>(space, free_list, free_start,
- static_cast<int>(free_end - free_start));
-          max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-        }
-      }
- // Update our undigested record of where the current free area started.
-      free_start = cell_base;
-      free_start_cell = *cell;
-      // Clear marking bits for current cell.
-      *cell = 0;
-    }
-  }
-
-  // Handle the free space at the end of the page.
-  if (cell_base - free_start > 32 * kPointerSize) {
-    free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes = Free<mode>(space, free_list, free_start,
-                             static_cast<int>(p->area_end() - free_start));
-    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-  }
-
-  p->ResetLiveBytes();
-  if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-    // When concurrent sweeping is active, the page will be marked after
-    // sweeping by the main thread.
-    p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
-  } else {
-    p->MarkSweptConservatively();
-  }
- return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
-}


 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
@@ -4321,14 +4142,8 @@
                               ? free_list_old_pointer_space_.get()
                               : free_list_old_data_space_.get();
     FreeList private_free_list(space);
-    if (space->swept_precisely()) {
-      max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL,
-                                 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-          space, &private_free_list, page, NULL);
-    } else {
-      max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
-          space, &private_free_list, page);
-    }
+    max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
     free_list->Concatenate(&private_free_list);
   }
   return max_freed;
@@ -4336,9 +4151,6 @@


void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
-  space->set_swept_precisely(sweeper == PRECISE ||
-                             sweeper == CONCURRENT_PRECISE ||
-                             sweeper == PARALLEL_PRECISE);
   space->ClearStats();

// We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4356,8 +4168,7 @@
     DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);

     // Clear sweeping flags indicating that marking bits are still intact.
-    p->ClearSweptPrecisely();
-    p->ClearSweptConservatively();
+    p->ClearWasSwept();

     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
         p->IsEvacuationCandidate()) {
@@ -4383,41 +4194,20 @@
     }

     switch (sweeper) {
-      case CONCURRENT_CONSERVATIVE:
-      case PARALLEL_CONSERVATIVE: {
+      case CONCURRENT_SWEEPING:
+      case PARALLEL_SWEEPING:
         if (!parallel_sweeping_active) {
           if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+            PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
                    reinterpret_cast<intptr_t>(p));
           }
-          SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
-          pages_swept++;
-          parallel_sweeping_active = true;
-        } else {
-          if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
-          space->IncreaseUnsweptFreeBytes(p);
-        }
-        space->set_end_of_unswept_pages(p);
-        break;
-      }
-      case CONCURRENT_PRECISE:
-      case PARALLEL_PRECISE:
-        if (!parallel_sweeping_active) {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
- SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-                         IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
           pages_swept++;
           parallel_sweeping_active = true;
         } else {
           if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+            PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
                    reinterpret_cast<intptr_t>(p));
           }
           p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
@@ -4425,20 +4215,19 @@
         }
         space->set_end_of_unswept_pages(p);
         break;
-      case PRECISE: {
+      case SEQUENTIAL_SWEEPING: {
         if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
-                 reinterpret_cast<intptr_t>(p));
+ PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
         }
         if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
- SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                         ZAP_FREE_SPACE>(space, NULL, p, NULL);
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                ZAP_FREE_SPACE>(space, NULL, p, NULL);
         } else if (space->identity() == CODE_SPACE) {
- SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                         IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         } else {
- SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-                         IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         }
         pages_swept++;
         break;
@@ -4458,17 +4247,14 @@


static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
-  return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
-         type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
-         type == MarkCompactCollector::PARALLEL_PRECISE ||
-         type == MarkCompactCollector::CONCURRENT_PRECISE;
+  return type == MarkCompactCollector::PARALLEL_SWEEPING ||
+         type == MarkCompactCollector::CONCURRENT_SWEEPING;
 }


 static bool ShouldWaitForSweeperThreads(
     MarkCompactCollector::SweeperType type) {
-  return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
-         type == MarkCompactCollector::PARALLEL_PRECISE;
+  return type == MarkCompactCollector::PARALLEL_SWEEPING;
 }


@@ -4482,16 +4268,9 @@
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
-  SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
-  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
-  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
-  if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
-    how_to_sweep = PARALLEL_PRECISE;
-  }
-  if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
-    how_to_sweep = CONCURRENT_PRECISE;
-  }
-  if (sweep_precisely_) how_to_sweep = PRECISE;
+  SweeperType how_to_sweep = CONCURRENT_SWEEPING;
+  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
+  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;

   MoveEvacuationCandidatesToEndOfPagesList();

@@ -4522,14 +4301,14 @@
   {
     GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_CODE);
-    SweepSpace(heap()->code_space(), PRECISE);
+    SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
   }

   {
     GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_CELL);
-    SweepSpace(heap()->cell_space(), PRECISE);
-    SweepSpace(heap()->property_cell_space(), PRECISE);
+    SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
+    SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
   }

   EvacuateNewSpaceAndCandidates();
@@ -4540,7 +4319,7 @@
   {
     GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_MAP);
-    SweepSpace(heap()->map_space(), PRECISE);
+    SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
   }

   // Deallocate unmarked objects and clear marked bits for marked objects.
@@ -4562,11 +4341,7 @@
     Page* p = it.next();
     if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
       p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
-      if (space->swept_precisely()) {
-        p->MarkSweptPrecisely();
-      } else {
-        p->MarkSweptConservatively();
-      }
+      p->SetWasSwept();
     }
     DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
   }
=======================================
--- /branches/bleeding_edge/src/heap/mark-compact.h Wed Aug 20 12:10:41 2014 UTC
+++ /branches/bleeding_edge/src/heap/mark-compact.h Thu Aug 21 14:50:18 2014 UTC
@@ -544,11 +544,9 @@
   void EnableCodeFlushing(bool enable);

   enum SweeperType {
-    PARALLEL_CONSERVATIVE,
-    CONCURRENT_CONSERVATIVE,
-    PARALLEL_PRECISE,
-    CONCURRENT_PRECISE,
-    PRECISE
+    PARALLEL_SWEEPING,
+    CONCURRENT_SWEEPING,
+    SEQUENTIAL_SWEEPING
   };

   enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -561,12 +559,6 @@
   void VerifyOmittedMapChecks();
 #endif

-  // Sweep a single page from the given space conservatively.
- // Returns the size of the biggest continuous freed memory chunk in bytes.
-  template <SweepingParallelism type>
-  static int SweepConservatively(PagedSpace* space, FreeList* free_list,
-                                 Page* p);
-
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
     return Page::FromAddress(reinterpret_cast<Address>(anchor))
         ->ShouldSkipEvacuationSlotRecording();
@@ -693,10 +685,6 @@
   CollectorState state_;
 #endif

-  // Global flag that forces sweeping to be precise, so we can traverse the
-  // heap.
-  bool sweep_precisely_;
-
   bool reduce_memory_footprint_;

   bool abort_incremental_marking_;
=======================================
--- /branches/bleeding_edge/src/heap/spaces.cc  Thu Aug 21 09:35:59 2014 UTC
+++ /branches/bleeding_edge/src/heap/spaces.cc  Thu Aug 21 14:50:18 2014 UTC
@@ -47,18 +47,13 @@
          owner == page->heap()->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
              page->area_end(), kOnePageOnly, size_func);
-  DCHECK(page->WasSweptPrecisely() ||
-         (static_cast<PagedSpace*>(owner)->swept_precisely() &&
-          page->SweepingCompleted()));
+  DCHECK(page->WasSwept() || page->SweepingCompleted());
 }


void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                     HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
-  // Check that we actually can iterate this space.
-  DCHECK(space->swept_precisely());
-
   space_ = space;
   cur_addr_ = cur;
   cur_end_ = end;
@@ -83,9 +78,7 @@
   if (cur_page == space_->anchor()) return false;
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
-  DCHECK(cur_page->WasSweptPrecisely() ||
-         (static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() &&
-          cur_page->SweepingCompleted()));
+  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
   return true;
 }

@@ -459,7 +452,7 @@
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT_PRECISELY);
+  chunk->SetFlag(WAS_SWEPT);

   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -886,7 +879,6 @@
                        Executability executable)
     : Space(heap, id, executable),
       free_list_(this),
-      swept_precisely_(true),
       unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL),
       emergency_memory_(NULL) {
@@ -936,7 +928,7 @@


 Object* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called on precisely swept spaces.
+  // Note: this function can only be called on iterable spaces.
   DCHECK(!heap()->mark_compact_collector()->in_use());

   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
@@ -1129,9 +1121,6 @@

 #ifdef VERIFY_HEAP
 void PagedSpace::Verify(ObjectVisitor* visitor) {
-  // We can only iterate over the pages if they were swept precisely.
-  if (!swept_precisely_) return;
-
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
   PageIterator page_iterator(this);
@@ -1141,7 +1130,7 @@
     if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
-    CHECK(page->WasSweptPrecisely());
+    CHECK(page->WasSwept());
     HeapObjectIterator it(page, NULL);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
@@ -2737,7 +2726,6 @@
          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);

-  if (!swept_precisely_) return;
   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
     heap()->mark_compact_collector()->EnsureSweepingCompleted();
   }
=======================================
--- /branches/bleeding_edge/src/heap/spaces.h   Mon Aug 11 14:22:24 2014 UTC
+++ /branches/bleeding_edge/src/heap/spaces.h   Thu Aug 21 14:50:18 2014 UTC
@@ -373,12 +373,9 @@
     EVACUATION_CANDIDATE,
     RESCAN_ON_EVACUATION,

-    // Pages swept precisely can be iterated, hitting only the live objects.
-    // Whereas those swept conservatively cannot be iterated over. Both flags
-    // indicate that marking bits have been cleared by the sweeper, otherwise
-    // marking bits are still intact.
-    WAS_SWEPT_PRECISELY,
-    WAS_SWEPT_CONSERVATIVELY,
+ // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+    // otherwise marking bits are still intact.
+    WAS_SWEPT,

    // Large objects can have a progress bar in their page header. These object
    // are scanned in increments and will be kept black while being scanned.
@@ -765,15 +762,9 @@

   void InitializeAsAnchor(PagedSpace* owner);

-  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
-  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
-  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
-
-  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
-  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
-
-  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
-  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
+  void SetWasSwept() { SetFlag(WAS_SWEPT); }
+  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }

   void ResetFreeListStatistics();

@@ -1829,15 +1820,12 @@
   static void ReportCodeStatistics(Isolate* isolate);
   static void ResetCodeStatistics(Isolate* isolate);
 #endif
-
-  bool swept_precisely() { return swept_precisely_; }
-  void set_swept_precisely(bool b) { swept_precisely_ = b; }

   // Evacuation candidates are swept by evacuator.  Needs to return a valid
   // result before _and_ after evacuation has finished.
   static bool ShouldBeSweptBySweeperThreads(Page* p) {
     return !p->IsEvacuationCandidate() &&
- !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSweptPrecisely();
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
   }

void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
@@ -1907,12 +1895,8 @@
   // Normal allocation information.
   AllocationInfo allocation_info_;

-  // This space was swept precisely, hence it is iterable.
-  bool swept_precisely_;
-
   // The number of free bytes which could be reclaimed by advancing the
- // concurrent sweeper threads. This is only an estimation because concurrent
-  // sweeping is done conservatively.
+  // concurrent sweeper threads.
   intptr_t unswept_free_bytes_;

// The sweeper threads iterate over the list of pointer and data space pages
=======================================
--- /branches/bleeding_edge/src/heap/store-buffer.cc Thu Aug 14 07:41:33 2014 UTC
+++ /branches/bleeding_edge/src/heap/store-buffer.cc Thu Aug 21 14:50:18 2014 UTC
@@ -477,10 +477,8 @@
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
           PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-          Address start = page->area_start();
-          Address end = page->area_end();
           if (owner == heap_->map_space()) {
-            DCHECK(page->WasSweptPrecisely());
+            DCHECK(page->WasSwept());
             HeapObjectIterator iterator(page, NULL);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                  heap_object = iterator.Next()) {
@@ -504,24 +502,17 @@
                 heap_->mark_compact_collector()->EnsureSweepingCompleted();
               }
             }
- // TODO(hpayer): remove the special casing and merge map and pointer
-            // space handling as soon as we removed conservative sweeping.
             CHECK(page->owner() == heap_->old_pointer_space());
-            if (heap_->old_pointer_space()->swept_precisely()) {
-              HeapObjectIterator iterator(page, NULL);
-              for (HeapObject* heap_object = iterator.Next();
-                   heap_object != NULL; heap_object = iterator.Next()) {
- // We iterate over objects that contain new space pointers only.
-                if (heap_object->MayContainNewSpacePointers()) {
-                  FindPointersToNewSpaceInRegion(
-                      heap_object->address() + HeapObject::kHeaderSize,
-                      heap_object->address() + heap_object->Size(),
-                      slot_callback, clear_maps);
-                }
+            HeapObjectIterator iterator(page, NULL);
+ for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+                 heap_object = iterator.Next()) {
+ // We iterate over objects that contain new space pointers only.
+              if (heap_object->MayContainNewSpacePointers()) {
+                FindPointersToNewSpaceInRegion(
+                    heap_object->address() + HeapObject::kHeaderSize,
+ heap_object->address() + heap_object->Size(), slot_callback,
+                    clear_maps);
               }
-            } else {
-              FindPointersToNewSpaceInRegion(start, end, slot_callback,
-                                             clear_maps);
             }
           }
         }
=======================================
--- /branches/bleeding_edge/src/heap-snapshot-generator.cc Thu Aug 21 08:16:06 2014 UTC
+++ /branches/bleeding_edge/src/heap-snapshot-generator.cc Thu Aug 21 14:50:18 2014 UTC
@@ -2580,15 +2580,6 @@

 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
-  CHECK(debug_heap->old_data_space()->swept_precisely());
-  CHECK(debug_heap->old_pointer_space()->swept_precisely());
-  CHECK(debug_heap->code_space()->swept_precisely());
-  CHECK(debug_heap->cell_space()->swept_precisely());
-  CHECK(debug_heap->property_cell_space()->swept_precisely());
-  CHECK(debug_heap->map_space()->swept_precisely());
-#endif
-
-#ifdef VERIFY_HEAP
   debug_heap->Verify();
 #endif

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to