Revision: 13552
Author:   [email protected]
Date:     Wed Jan 30 04:19:32 2013
Log:      Parallel and concurrent sweeping.

Sweep the old pointer space and old data space in parallel and concurrently with the main mutator thread.

BUG=

Review URL: https://codereview.chromium.org/11782028
http://code.google.com/p/v8/source/detail?r=13552

Added:
 /branches/bleeding_edge/src/sweeper-thread.cc
 /branches/bleeding_edge/src/sweeper-thread.h
Modified:
 /branches/bleeding_edge/src/flag-definitions.h
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/isolate.cc
 /branches/bleeding_edge/src/isolate.h
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/mark-compact.h
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/spaces.h
 /branches/bleeding_edge/tools/gyp/v8.gyp
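
For orientation (not part of the patch itself): both modes are off by default and are enabled with the new --parallel_sweeping and --concurrent_sweeping flags, using --sweeper_threads helper threads (default 1). The difference is only in when the main thread waits. With parallel sweeping the helpers sweep the old spaces while the main thread blocks for them inside the GC pause; with concurrent sweeping the mutator keeps running and the main thread only synchronizes later, e.g. on allocation pressure or at the start of the next collection. A minimal standalone C++ sketch of that distinction (std::async stands in for V8's own thread machinery, and SweepPages is a hypothetical work function):

#include <future>
#include <vector>

static void SweepPages(int thread_id) {
  // Stand-in for sweeping the pages assigned to one helper thread.
}

int main() {
  const int kSweeperThreads = 2;           // mirrors --sweeper_threads
  const bool concurrent_sweeping = true;   // mirrors --concurrent_sweeping

  std::vector<std::future<void>> sweepers;
  for (int i = 0; i < kSweeperThreads; ++i) {
    sweepers.push_back(std::async(std::launch::async, SweepPages, i));
  }

  if (!concurrent_sweeping) {
    // Parallel-only mode: block inside the pause until sweeping is done.
    for (auto& f : sweepers) f.wait();
  }

  // The mutator runs here.  In concurrent mode the helpers are still
  // sweeping and are only awaited later (next GC or allocation pressure).
  for (auto& f : sweepers) f.wait();
  return 0;
}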

=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/sweeper-thread.cc       Wed Jan 30 04:19:32 2013
@@ -0,0 +1,105 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "sweeper-thread.h"
+
+#include "v8.h"
+
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+SweeperThread::SweeperThread(Isolate* isolate)
+     : Thread("SweeperThread"),
+       isolate_(isolate),
+       heap_(isolate->heap()),
+       collector_(heap_->mark_compact_collector()),
+       start_sweeping_semaphore_(OS::CreateSemaphore(0)),
+       end_sweeping_semaphore_(OS::CreateSemaphore(0)),
+       stop_semaphore_(OS::CreateSemaphore(0)),
+       free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+       free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
+       private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+       private_free_list_old_pointer_space_(
+           heap_->paged_space(OLD_POINTER_SPACE)) {
+  NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+}
+
+
+bool SweeperThread::sweeping_pending_ = false;
+
+
+void SweeperThread::Run() {
+  Isolate::SetIsolateThreadLocals(isolate_, NULL);
+  while (true) {
+    start_sweeping_semaphore_->Wait();
+
+    if (Acquire_Load(&stop_thread_)) {
+      stop_semaphore_->Signal();
+      return;
+    }
+
+    collector_->SweepInParallel(heap_->old_data_space(),
+                                &private_free_list_old_data_space_,
+                                &free_list_old_data_space_);
+    collector_->SweepInParallel(heap_->old_pointer_space(),
+                                &private_free_list_old_pointer_space_,
+                                &free_list_old_pointer_space_);
+    end_sweeping_semaphore_->Signal();
+  }
+}
+
+intptr_t SweeperThread::StealMemory(PagedSpace* space) {
+  intptr_t free_bytes = 0;
+  if (space->identity() == OLD_POINTER_SPACE) {
+    free_bytes = space->free_list()->Concatenate(&free_list_old_pointer_space_);
+    space->AddToAccountingStats(free_bytes);
+  } else if (space->identity() == OLD_DATA_SPACE) {
+    free_bytes = space->free_list()->Concatenate(&free_list_old_data_space_);
+    space->AddToAccountingStats(free_bytes);
+  }
+  return free_bytes;
+}
+
+void SweeperThread::Stop() {
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+  start_sweeping_semaphore_->Signal();
+  stop_semaphore_->Wait();
+}
+
+
+void SweeperThread::StartSweeping() {
+  start_sweeping_semaphore_->Signal();
+}
+
+
+void SweeperThread::WaitForSweeperThread() {
+  end_sweeping_semaphore_->Wait();
+}
+} }  // namespace v8::internal
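
For readers less familiar with the threading primitives involved: SweeperThread::Run above is a classic semaphore handshake. The main thread signals start_sweeping_semaphore_ (StartSweeping), the worker sweeps and signals end_sweeping_semaphore_ (which WaitForSweeperThread blocks on), and Stop sets a flag, wakes the worker once more and waits on stop_semaphore_. A self-contained sketch of the same protocol using C++20 primitives instead of V8's OS::CreateSemaphore (all names here are illustrative only):

#include <atomic>
#include <semaphore>
#include <thread>

std::binary_semaphore start_sweeping(0), end_sweeping(0), stopped(0);
std::atomic<bool> stop_thread{false};

void SweeperLoop() {
  while (true) {
    start_sweeping.acquire();        // wait for StartSweeping()
    if (stop_thread.load()) {        // Stop() was called
      stopped.release();
      return;
    }
    // ... sweep old data space and old pointer space here ...
    end_sweeping.release();          // lets WaitForSweeperThread() return
  }
}

int main() {
  std::thread sweeper(SweeperLoop);

  start_sweeping.release();          // StartSweeping()
  end_sweeping.acquire();            // WaitForSweeperThread()

  stop_thread.store(true);           // Stop(): set flag, wake worker, wait
  start_sweeping.release();
  stopped.acquire();
  sweeper.join();
  return 0;
}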
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/sweeper-thread.h        Wed Jan 30 04:19:32 2013
@@ -0,0 +1,81 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SWEEPER_THREAD_H_
+#define V8_SWEEPER_THREAD_H_
+
+#include "atomicops.h"
+#include "flags.h"
+#include "platform.h"
+#include "v8utils.h"
+
+#include "spaces.h"
+
+#include "heap.h"
+
+namespace v8 {
+namespace internal {
+
+class SweeperThread : public Thread {
+ public:
+  explicit SweeperThread(Isolate* isolate);
+
+  void Run();
+  void Stop();
+  void StartSweeping();
+  void WaitForSweeperThread();
+  intptr_t StealMemory(PagedSpace* space);
+
+  static bool sweeping_pending() { return sweeping_pending_; }
+  static void set_sweeping_pending(bool sweeping_pending) {
+    sweeping_pending_ = sweeping_pending;
+  }
+
+  ~SweeperThread() {
+    delete start_sweeping_semaphore_;
+    delete end_sweeping_semaphore_;
+    delete stop_semaphore_;
+  }
+
+ private:
+  Isolate* isolate_;
+  Heap* heap_;
+  MarkCompactCollector* collector_;
+  Semaphore* start_sweeping_semaphore_;
+  Semaphore* end_sweeping_semaphore_;
+  Semaphore* stop_semaphore_;
+  FreeList free_list_old_data_space_;
+  FreeList free_list_old_pointer_space_;
+  FreeList private_free_list_old_data_space_;
+  FreeList private_free_list_old_pointer_space_;
+  volatile AtomicWord stop_thread_;
+  static bool sweeping_pending_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SWEEPER_THREAD_H_
=======================================
--- /branches/bleeding_edge/src/flag-definitions.h      Mon Jan 28 02:25:38 2013
+++ /branches/bleeding_edge/src/flag-definitions.h      Wed Jan 30 04:19:32 2013
@@ -417,6 +417,10 @@
             "trace progress of the incremental marking")
 DEFINE_bool(track_gc_object_stats, false,
             "track object counts and memory usage")
+DEFINE_bool(parallel_sweeping, false, "enable parallel sweeping")
+DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
+DEFINE_int(sweeper_threads, 1,
+           "number of parallel and concurrent sweeping threads")
 #ifdef VERIFY_HEAP
 DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
 #endif
=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Jan 30 02:51:13 2013
+++ /branches/bleeding_edge/src/heap.cc Wed Jan 30 04:19:32 2013
@@ -1308,7 +1308,8 @@

   incremental_marking()->PrepareForScavenge();

-  AdvanceSweepers(static_cast<int>(new_space_.Size()));
+  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
+  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());

   // Flip the semispaces. After flipping, to space is empty, from space has
   // live objects.
@@ -5420,9 +5421,9 @@
   // 3. many lazy sweep steps.
   // Use mark-sweep-compact events to count incremental GCs in a round.

-
   if (incremental_marking()->IsStopped()) {
-    if (!IsSweepingComplete() &&
+    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
+        !IsSweepingComplete() &&
         !AdvanceSweepers(static_cast<int>(step_size))) {
       return false;
     }
=======================================
--- /branches/bleeding_edge/src/heap.h  Wed Jan 30 02:51:13 2013
+++ /branches/bleeding_edge/src/heap.h  Wed Jan 30 04:19:32 2013
@@ -1636,6 +1636,7 @@
   }

   bool AdvanceSweepers(int step_size) {
+    ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
     bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
     sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
     return sweeping_complete;
=======================================
--- /branches/bleeding_edge/src/isolate.cc      Tue Jan 29 07:28:05 2013
+++ /branches/bleeding_edge/src/isolate.cc      Wed Jan 30 04:19:32 2013
@@ -49,6 +49,7 @@
 #include "simulator.h"
 #include "spaces.h"
 #include "stub-cache.h"
+#include "sweeper-thread.h"
 #include "version.h"
 #include "vm-state-inl.h"

@@ -1698,6 +1699,7 @@
   ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
 #undef ISOLATE_INIT_ARRAY_EXECUTE
 }
+

 void Isolate::TearDown() {
   TRACE_ISOLATE(tear_down);
@@ -1734,6 +1736,14 @@
   if (state_ == INITIALIZED) {
     TRACE_ISOLATE(deinit);

+    if (FLAG_concurrent_sweeping || FLAG_parallel_sweeping) {
+      for (int i = 0; i < FLAG_sweeper_threads; i++) {
+        sweeper_thread_[i]->Stop();
+        delete sweeper_thread_[i];
+      }
+      delete[] sweeper_thread_;
+    }
+
     if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();

     if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
@@ -2103,6 +2113,17 @@
   }

   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
+
+  if (FLAG_parallel_sweeping || FLAG_concurrent_sweeping) {
+    if (FLAG_sweeper_threads < 1) {
+      FLAG_sweeper_threads = 1;
+    }
+    sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
+    for (int i = 0; i < FLAG_sweeper_threads; i++) {
+      sweeper_thread_[i] = new SweeperThread(this);
+      sweeper_thread_[i]->Start();
+    }
+  }
   return true;
 }

=======================================
--- /branches/bleeding_edge/src/isolate.h       Tue Jan 29 01:09:55 2013
+++ /branches/bleeding_edge/src/isolate.h       Wed Jan 30 04:19:32 2013
@@ -78,6 +78,7 @@
 class ConsStringIteratorOp;
 class StringTracker;
 class StubCache;
+class SweeperThread;
 class ThreadManager;
 class ThreadState;
 class ThreadVisitor;  // Defined in v8threads.h
@@ -1078,6 +1079,10 @@
   // TODO(svenpanne) This method is on death row...
   static v8::Isolate* GetDefaultIsolateForLocking();

+  SweeperThread** sweeper_threads() {
+    return sweeper_thread_;
+  }
+
  private:
   Isolate();

@@ -1301,11 +1306,13 @@

   DeferredHandles* deferred_handles_head_;
   OptimizingCompilerThread optimizing_compiler_thread_;
+  SweeperThread** sweeper_thread_;

   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
   friend class IsolateInitializer;
   friend class OptimizingCompilerThread;
+  friend class SweeperThread;
   friend class ThreadManager;
   friend class Simulator;
   friend class StackGuard;
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Wed Jan 30 02:51:13 2013
+++ /branches/bleeding_edge/src/mark-compact.cc Wed Jan 30 04:19:32 2013
@@ -40,6 +40,7 @@
 #include "objects-visiting.h"
 #include "objects-visiting-inl.h"
 #include "stub-cache.h"
+#include "sweeper-thread.h"

 namespace v8 {
 namespace internal {
@@ -501,6 +502,42 @@
     Page::FromAddress(obj->address())->ResetLiveBytes();
   }
 }
+
+
+void MarkCompactCollector::StartSweeperThreads() {
+  SweeperThread::set_sweeping_pending(true);
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    heap()->isolate()->sweeper_threads()[i]->StartSweeping();
+  }
+}
+
+
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+  if (SweeperThread::sweeping_pending()) {
+    for (int i = 0; i < FLAG_sweeper_threads; i++) {
+      heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+    }
+    SweeperThread::set_sweeping_pending(false);
+    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
+    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+    heap()->FreeQueuedChunks();
+  }
+}
+
+
+intptr_t MarkCompactCollector::
+             StealMemoryFromSweeperThreads(PagedSpace* space) {
+  intptr_t freed_bytes = 0;
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
+  }
+  return freed_bytes;
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+  return heap()->isolate()->sweeper_threads() != NULL;
+}


 bool Marking::TransferMark(Address old_start, Address new_start) {
@@ -805,6 +842,11 @@

   ASSERT(!FLAG_never_compact || !FLAG_always_compact);

+  if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
+    // Instead of waiting we could also abort the sweeper threads here.
+    WaitUntilSweepingCompleted();
+  }
+
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && abort_incremental_marking_) {
     heap()->incremental_marking()->Abort();
@@ -3131,7 +3173,7 @@

         switch (space->identity()) {
           case OLD_DATA_SPACE:
-            SweepConservatively(space, p);
+            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
             break;
           case OLD_POINTER_SPACE:
             SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
@@ -3488,6 +3530,19 @@
   USE(live_objects);
   return block_address + offsets[0] * kPointerSize;
 }
+
+
+template<MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space,
+                     FreeList* free_list,
+                     Address start,
+                     int size) {
+  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+    return space->Free(start, size);
+  } else {
+    return size - free_list->Free(start, size);
+  }
+}


 // Sweeps a space conservatively.  After this has been done the larger free
@@ -3497,12 +3552,15 @@
// because it means that any FreeSpace maps left actually describe a region of
 // memory that can be ignored when scanning.  Dead objects other than free
 // spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+template<MarkCompactCollector::SweepingParallelism mode>
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
+                                                   FreeList* free_list,
+                                                   Page* p) {
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
-  double start_time = 0.0;
-  if (FLAG_print_cumulative_gc_stat) {
-    start_time = OS::TimeCurrentMillis();
-  }
+  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
+         free_list != NULL) ||
+         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+         free_list == NULL));

   MarkBit::CellType* cells = p->markbits()->cells();
   p->MarkSweptConservatively();
@@ -3530,8 +3588,7 @@
   }
   size_t size = block_address - p->area_start();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->area_start(),
-                                                static_cast<int>(size)));
+    freed_bytes += Free<mode>(space, free_list, p->area_start(), size);
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -3540,8 +3597,8 @@
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->area_start();
-  freed_bytes += space->Free(p->area_start(),
-                             static_cast<int>(size));
+  freed_bytes += Free<mode>(space, free_list, p->area_start(), size);
+
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3554,10 +3611,10 @@
   for ( ;
        cell_index < last_cell_index;
        cell_index++, block_address += 32 * kPointerSize) {
-    ASSERT(static_cast<unsigned>(cell_index) ==
-           Bitmap::IndexToCell(
-               Bitmap::CellAlignIndex(
-                   p->AddressToMarkbitIndex(block_address))));
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(block_address))));
     uint32_t cell = cells[cell_index];
     if (cell != 0) {
       // We have a live object. Check approximately whether it is more than 32
@@ -3570,8 +3627,8 @@
           // so now we need to find the start of the first live object at the
           // end of the free space.
           free_end = StartOfLiveObject(block_address, cell);
-          freed_bytes += space->Free(free_start,
-                                     static_cast<int>(free_end - free_start));
+          freed_bytes += Free<mode>(space, free_list, free_start,
+                                    static_cast<int>(free_end - free_start));
         }
       }
       // Update our undigested record of where the current free area started.
@@ -3585,23 +3642,33 @@
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start,
-                               static_cast<int>(block_address - free_start));
+    freed_bytes += Free<mode>(space, free_list, free_start,
+                              static_cast<int>(block_address - free_start));
   }

   p->ResetLiveBytes();
+  return freed_bytes;
+}

-  if (FLAG_print_cumulative_gc_stat) {
-    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
+
+void MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                           FreeList* private_free_list,
+                                           FreeList* free_list) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+
+    if (p->TryParallelSweeping()) {
+      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
+      free_list->Concatenate(private_free_list);
+    }
   }
-  return freed_bytes;
 }


 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                       sweeper == LAZY_CONSERVATIVE);
-
   space->ClearStats();

   PageIterator it(space);
@@ -3614,6 +3681,7 @@
   while (it.has_next()) {
     Page* p = it.next();

+    ASSERT(p->parallel_sweeping() == 0);
     // Clear sweeping flags indicating that marking bits are still intact.
     p->ClearSweptPrecisely();
     p->ClearSweptConservatively();
@@ -3659,7 +3727,7 @@
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        SweepConservatively(space, p);
+        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
         pages_swept++;
         break;
       }
@@ -3668,12 +3736,20 @@
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        freed_bytes += SweepConservatively(space, p);
+        freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
         pages_swept++;
         space->SetPagesToSweep(p->next_page());
         lazy_sweeping_active = true;
         break;
       }
+      case PARALLEL_CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        p->set_parallel_sweeping(1);
+        break;
+      }
       case PRECISE: {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
@@ -3713,11 +3789,13 @@
       FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
   if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
   if (sweep_precisely_) how_to_sweep = PRECISE;
+  if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE;
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
+
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);

@@ -3728,6 +3806,15 @@

   EvacuateNewSpaceAndCandidates();

+  if (AreSweeperThreadsActivated()) {
+    // TODO(hpayer): The starting of the sweeper threads should be after
+    // SweepSpace old data space.
+    StartSweeperThreads();
+    if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
+      WaitUntilSweepingCompleted();
+    }
+  }
+
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
   // of the previous ones.
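
A note on how the pages get distributed (paraphrasing the hunks above, not new behaviour): SweepSpace tags each page of a PARALLEL_CONSERVATIVE space with parallel_sweeping = 1, and every sweeper thread then walks the whole space, claiming individual pages through Page::TryParallelSweeping, a compare-and-swap from 1 back to 0, so each page is swept exactly once no matter how many threads race over it. A standalone sketch of that claiming pattern with std::atomic (the Page struct here is a toy stand-in, not V8's Page):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

struct Page {
  std::atomic<intptr_t> parallel_sweeping{1};  // 1 = still needs sweeping
};

bool TryParallelSweeping(Page* p) {
  intptr_t expected = 1;
  // Only the thread that swaps 1 -> 0 gets to sweep this page.
  return p->parallel_sweeping.compare_exchange_strong(expected, 0);
}

int main() {
  std::vector<Page> pages(64);
  std::atomic<int> swept{0};

  auto worker = [&] {
    for (Page& p : pages) {
      if (TryParallelSweeping(&p)) swept.fetch_add(1);  // "sweep" the page
    }
  };

  std::thread t1(worker), t2(worker);
  t1.join();
  t2.join();
  std::printf("pages swept: %d\n", swept.load());  // always 64, never 128
  return 0;
}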
=======================================
--- /branches/bleeding_edge/src/mark-compact.h  Thu Jan 24 03:55:05 2013
+++ /branches/bleeding_edge/src/mark-compact.h  Wed Jan 30 04:19:32 2013
@@ -594,9 +594,15 @@
   enum SweeperType {
     CONSERVATIVE,
     LAZY_CONSERVATIVE,
+    PARALLEL_CONSERVATIVE,
     PRECISE
   };

+  enum SweepingParallelism {
+    SWEEP_SEQUENTIALLY,
+    SWEEP_IN_PARALLEL
+  };
+
 #ifdef VERIFY_HEAP
   void VerifyMarkbitsAreClean();
   static void VerifyMarkbitsAreClean(PagedSpace* space);
@@ -605,7 +611,10 @@

   // Sweep a single page from the given space conservatively.
   // Return a number of reclaimed bytes.
-  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+  template<SweepingParallelism type>
+  static intptr_t SweepConservatively(PagedSpace* space,
+                                      FreeList* free_list,
+                                      Page* p);

   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
     return Page::FromAddress(reinterpret_cast<Address>(anchor))->
@@ -670,6 +679,16 @@
   bool is_compacting() const { return compacting_; }

   MarkingParity marking_parity() { return marking_parity_; }
+
+  void SweepInParallel(PagedSpace* space,
+                       FreeList* private_free_list,
+                       FreeList* free_list);
+
+  void WaitUntilSweepingCompleted();
+
+  intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
+
+  bool AreSweeperThreadsActivated();

  private:
   MarkCompactCollector();
@@ -679,6 +698,7 @@
   void RemoveDeadInvalidatedCode();
   void ProcessInvalidatedCode(ObjectVisitor* visitor);

+  void StartSweeperThreads();

 #ifdef DEBUG
   enum CollectorState {
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Tue Jan 29 01:09:55 2013
+++ /branches/bleeding_edge/src/spaces.cc       Wed Jan 30 04:19:32 2013
@@ -466,6 +466,7 @@
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_ = static_cast<int>(area_start - base);
+  chunk->parallel_sweeping_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
@@ -2039,6 +2040,29 @@
         reinterpret_cast<Address>(next);
   }
 }
+
+
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+  intptr_t free_bytes = 0;
+  if (category->top_ != NULL) {
+    ASSERT(category->end_ != NULL);
+    // This is safe (not going to deadlock) since Concatenate operations
+    // are never performed on the same free lists at the same time in
+    // reverse order.
+    ScopedLock lock_target(mutex_);
+    ScopedLock lock_source(category->mutex());
+    free_bytes = category->available();
+    if (end_ == NULL) {
+      end_ = category->end();
+    } else {
+      category->end()->set_next(top_);
+    }
+    top_ = category->top();
+    available_ += category->available();
+    category->Reset();
+  }
+  return free_bytes;
+}


 void FreeListCategory::Reset() {
@@ -2137,6 +2161,16 @@
     : owner_(owner), heap_(owner->heap()) {
   Reset();
 }
+
+
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+  intptr_t free_bytes = 0;
+  free_bytes += small_list_.Concatenate(free_list->small_list());
+  free_bytes += medium_list_.Concatenate(free_list->medium_list());
+  free_bytes += large_list_.Concatenate(free_list->large_list());
+  free_bytes += huge_list_.Concatenate(free_list->huge_list());
+  return free_bytes;
+}


 void FreeList::Reset() {
@@ -2503,7 +2537,10 @@
                reinterpret_cast<intptr_t>(p));
       }
       DecreaseUnsweptFreeBytes(p);
-      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+      freed_bytes +=
+          MarkCompactCollector::
+              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+                  this, NULL, p);
     }
     p = next_page;
   } while (p != anchor() && freed_bytes < bytes_to_sweep);
@@ -2533,6 +2570,21 @@
     allocation_info_.limit = NULL;
   }
 }
+
+
+bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->AreSweeperThreadsActivated()) {
+    if (FLAG_concurrent_sweeping &&
+        collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
+      collector->WaitUntilSweepingCompleted();
+      return true;
+    }
+    return false;
+  } else {
+    return AdvanceSweeper(size_in_bytes);
+  }
+}


 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
@@ -2544,7 +2596,7 @@
   bool sweeping_complete = false;

   for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
-    sweeping_complete = AdvanceSweeper(size_in_bytes);
+    sweeping_complete = EnsureSweeperProgress(size_in_bytes);

     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2567,7 +2619,7 @@
   // Last ditch, sweep all the remaining pages to try to find space. This may
   // cause a pause.
   if (!IsSweepingComplete()) {
-    AdvanceSweeper(kMaxInt);
+    EnsureSweeperProgress(kMaxInt);

     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
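
The free-list plumbing above is the other half of the story (again just restating the hunks): each sweeper thread accumulates reclaimed blocks in its private free lists, concatenates them into its shared per-thread lists, and the main thread later splices those into the space's real free list via FreeList::Concatenate / StealMemory, crediting the bytes back through AddToAccountingStats. A self-contained sketch of the per-category splice, assuming a simplified singly linked node type rather than V8's FreeListNode:

#include <mutex>

struct Node { Node* next = nullptr; };

struct Category {
  Node* top = nullptr;
  Node* end = nullptr;
  int available = 0;
  std::mutex mutex;

  // Splice all of source's blocks onto the front of this list.  As in the
  // patch, this is deadlock-free only because concatenations are never run
  // on the same pair of lists in opposite orders at the same time.
  int Concatenate(Category* source) {
    if (source->top == nullptr) return 0;
    std::lock_guard<std::mutex> target_lock(mutex);
    std::lock_guard<std::mutex> source_lock(source->mutex);
    int moved = source->available;
    if (end == nullptr) {
      end = source->end;         // this list was empty: adopt source's tail
    } else {
      source->end->next = top;   // link source's tail to this list's old head
    }
    top = source->top;           // source's blocks now head this list
    available += moved;
    source->top = source->end = nullptr;
    source->available = 0;
    return moved;
  }
};

int main() {
  Node a, b;
  a.next = &b;
  Category source, target;
  source.top = &a;
  source.end = &b;
  source.available = 2;
  return target.Concatenate(&source) == 2 ? 0 : 1;
}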
=======================================
--- /branches/bleeding_edge/src/spaces.h        Tue Jan 29 01:09:55 2013
+++ /branches/bleeding_edge/src/spaces.h        Wed Jan 30 04:19:32 2013
@@ -453,6 +453,18 @@

   // Return all current flags.
   intptr_t GetFlags() { return flags_; }
+
+  intptr_t parallel_sweeping() const {
+    return parallel_sweeping_;
+  }
+
+  void set_parallel_sweeping(intptr_t state) {
+    parallel_sweeping_ = state;
+  }
+
+  bool TryParallelSweeping() {
+    return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+  }

   // Manage live byte count (count of bytes known to be live,
   // because they are marked black).
@@ -533,8 +545,8 @@
   static const size_t kWriteBarrierCounterOffset =
       kSlotsBufferOffset + kPointerSize + kPointerSize;

-  static const size_t kHeaderSize =
-      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize;
+  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
+                                    kIntSize + kIntSize + kPointerSize;

   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -686,6 +698,8 @@
   // count highest number of bytes ever allocated on the page.
   int high_water_mark_;

+  intptr_t parallel_sweeping_;
+
   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
                                  size_t size,
@@ -1395,7 +1409,17 @@
 // the end element of the linked list of free memory blocks.
 class FreeListCategory {
  public:
-  FreeListCategory() : top_(NULL), end_(NULL), available_(0) {}
+  FreeListCategory() :
+      top_(NULL),
+      end_(NULL),
+      mutex_(OS::CreateMutex()),
+      available_(0) {}
+
+  ~FreeListCategory() {
+    delete mutex_;
+  }
+
+  intptr_t Concatenate(FreeListCategory* category);

   void Reset();

@@ -1420,6 +1444,8 @@
   int* GetAvailableAddress() { return &available_; }
   int available() const { return available_; }
   void set_available(int available) { available_ = available; }
+
+  Mutex* mutex() { return mutex_; }

 #ifdef DEBUG
   intptr_t SumFreeList();
@@ -1429,6 +1455,7 @@
  private:
   FreeListNode* top_;
   FreeListNode* end_;
+  Mutex* mutex_;

   // Total available bytes in all blocks of this free list category.
   int available_;
@@ -1462,6 +1489,8 @@
  public:
   explicit FreeList(PagedSpace* owner);

+  intptr_t Concatenate(FreeList* free_list);
+
   // Clear the free list.
   void Reset();

@@ -1508,6 +1537,11 @@
   void CountFreeListItems(Page* p, SizeStats* sizes);

   intptr_t EvictFreeListItems(Page* p);
+
+  FreeListCategory* small_list() { return &small_list_; }
+  FreeListCategory* medium_list() { return &medium_list_; }
+  FreeListCategory* large_list() { return &large_list_; }
+  FreeListCategory* huge_list() { return &huge_list_; }

  private:
   // The size range of blocks, in bytes.
@@ -1723,6 +1757,11 @@

   bool AdvanceSweeper(intptr_t bytes_to_sweep);

+  // When parallel sweeper threads are active this function waits
+  // for them to complete, otherwise AdvanceSweeper with size_in_bytes
+  // is called.
+  bool EnsureSweeperProgress(intptr_t size_in_bytes);
+
   bool IsSweepingComplete() {
     return !first_unswept_page_->is_valid();
   }
@@ -1747,6 +1786,12 @@
   }

  protected:
+  FreeList* free_list() { return &free_list_; }
+
+  void AddToAccountingStats(intptr_t bytes) {
+    accounting_stats_.DeallocateBytes(bytes);
+  }
+
   int area_size_;

   // Maximum capacity of this space.
@@ -1796,6 +1841,7 @@
   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);

   friend class PageIterator;
+  friend class SweeperThread;
 };


=======================================
--- /branches/bleeding_edge/tools/gyp/v8.gyp    Wed Jan 23 08:29:48 2013
+++ /branches/bleeding_edge/tools/gyp/v8.gyp    Wed Jan 30 04:19:32 2013
@@ -461,6 +461,8 @@
             '../../src/strtod.h',
             '../../src/stub-cache.cc',
             '../../src/stub-cache.h',
+            '../../src/sweeper-thread.h',
+            '../../src/sweeper-thread.cc',
             '../../src/token.cc',
             '../../src/token.h',
             '../../src/transitions-inl.h',
