Reviewers: jarin,

Description:
Wait for sweeper threads when a scan on scavenge page is not swept.

BUG=

Please review this at https://codereview.chromium.org/404083002/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files (+43, -23 lines):
  M src/mark-compact.h
  M src/mark-compact.cc
  M src/store-buffer.cc


Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 595d6ee3ce09ddf35ae26cd6a7f7444e78c48802..81361f02aae4023a3aa5d7b202ed89fd73828e7f 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -4098,39 +4098,44 @@ int MarkCompactCollector::SweepConservatively(PagedSpace* space,

 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           int required_freed_bytes) {
-  PageIterator it(space);
-  FreeList* free_list = space == heap()->old_pointer_space()
-                            ? free_list_old_pointer_space_.get()
-                            : free_list_old_data_space_.get();
-  FreeList private_free_list(space);
   int max_freed = 0;
   int max_freed_overall = 0;
+  PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
-    if (p->TryParallelSweeping()) {
-      if (space->swept_precisely()) {
-        max_freed = SweepPrecisely<SWEEP_ONLY,
-                                   SWEEP_IN_PARALLEL,
-                                   IGNORE_SKIP_LIST,
-                                   IGNORE_FREE_SPACE>(
-                                       space, &private_free_list, p, NULL);
-      } else {
-        max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
-            space, &private_free_list, p);
-      }
-      ASSERT(max_freed >= 0);
-      free_list->Concatenate(&private_free_list);
-      if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
-        return max_freed;
-      }
-      max_freed_overall = Max(max_freed, max_freed_overall);
+    max_freed = SweepInParallel(p, space);
+    ASSERT(max_freed >= 0);
+    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+      return max_freed;
     }
+    max_freed_overall = Max(max_freed, max_freed_overall);
     if (p == space->end_of_unswept_pages()) break;
   }
   return max_freed_overall;
 }


+int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
+  int max_freed = 0;
+  if (page->TryParallelSweeping()) {
+    FreeList* free_list = space == heap()->old_pointer_space()
+                              ? free_list_old_pointer_space_.get()
+                              : free_list_old_data_space_.get();
+    FreeList private_free_list(space);
+    if (space->swept_precisely()) {
+      max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL,
+                                 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
+          space, &private_free_list, page, NULL);
+    } else {
+      max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
+          space, &private_free_list, page);
+    }
+    free_list->Concatenate(&private_free_list);
+  }
+  return max_freed;
+}
+
+
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_swept_precisely(sweeper == PRECISE ||
                              sweeper == CONCURRENT_PRECISE ||
Index: src/mark-compact.h
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 1145badba6b9429f5d47bf8cc217c53ce4285ea6..638076f0b07ac6f63f0e8f5f0390c2e8daa4561a 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -658,9 +658,14 @@ class MarkCompactCollector {
   // Concurrent and parallel sweeping support. If required_freed_bytes was set
   // to a value larger than 0, then sweeping returns after a block of at least
   // required_freed_bytes was freed. If required_freed_bytes was set to zero
-  // then the whole given space is swept.
+  // then the whole given space is swept. It returns the size of the maximum
+  // continuous freed memory chunk.
   int SweepInParallel(PagedSpace* space, int required_freed_bytes);

+  // Sweeps a given page concurrently to the sweeper threads. It returns the
+  // size of the maximum continuous freed memory chunk.
+  int SweepInParallel(Page* page, PagedSpace* space);
+
   void EnsureSweepingCompleted();

   // If sweeper threads are not active this method will return true. If
Index: src/store-buffer.cc
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 3745d91a8a417071b8f40262069ebee34deed332..d1a04d292fb2cb0f79282972caa0b26c8d3b8c2f 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -505,6 +505,16 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
               }
             }
           } else {
+            if (page->parallel_sweeping() > MemoryChunk::SWEEPING_FINALIZE) {
+              heap_->mark_compact_collector()->SweepInParallel(page, owner);
+              if (page->parallel_sweeping() > MemoryChunk::SWEEPING_FINALIZE) {
+                // We were not able to sweep that page, i.e., a concurrent
+                // sweeper thread currently owns this page.
+                // TODO(hpayer): This may introduce a huge pause here. We
+                // just care about finish sweeping of the scan on scavenge page.
+                heap_->mark_compact_collector()->EnsureSweepingCompleted();
+              }
+            }
             FindPointersToNewSpaceInRegion(
                 start, end, slot_callback, clear_maps);
           }


--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to