Revision: 22318
Author:   [email protected]
Date:     Thu Jul 10 12:07:28 2014 UTC
Log:      Allow main thread to contribute to the sweeping phase.

BUG=
[email protected]

Review URL: https://codereview.chromium.org/380653003
http://code.google.com/p/v8/source/detail?r=22318

Modified:
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/mark-compact.h
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/spaces.h
 /branches/bleeding_edge/src/sweeper-thread.cc
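
In short: MarkCompactCollector::SweepInParallel() gains a required_freed_bytes
parameter and now returns the size of the largest contiguous block it has
freed, so the allocation slow path can sweep pages on the main thread just
until the pending allocation fits, instead of blocking on the sweeper threads
right away. Below is a self-contained toy model of that contract; it is only a
sketch with made-up types and numbers, and only the names taken from the diff
(SweepInParallel, required_freed_bytes) correspond to real V8 code.

#include <algorithm>
#include <cstdio>
#include <vector>

// Toy stand-in (not V8 code): a "page" is just the list of dead-block sizes
// it contains; sweeping a page frees them all and reports the largest one.
typedef std::vector<int> ToyPage;

// Mirrors the new SweepInParallel() contract: sweep unswept pages until one
// of them yields a contiguous block of at least |required_freed_bytes|;
// passing 0 means "sweep everything".  Returns the largest block freed.
static int SweepInParallelModel(const std::vector<ToyPage>& unswept_pages,
                                int required_freed_bytes) {
  int max_freed_overall = 0;
  for (size_t i = 0; i < unswept_pages.size(); ++i) {
    int max_freed = 0;  // largest block freed on this page
    for (size_t j = 0; j < unswept_pages[i].size(); ++j)
      max_freed = std::max(max_freed, unswept_pages[i][j]);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
      return max_freed;  // enough for the waiting allocation; stop early
    max_freed_overall = std::max(max_freed_overall, max_freed);
  }
  return max_freed_overall;
}

int main() {
  std::vector<ToyPage> pages;
  pages.push_back(ToyPage(1, 200));  // a page with one 200-byte hole
  pages.push_back(ToyPage(1, 256));  // a page with one 256-byte hole

  // Sweeper threads keep passing 0: sweep every page, report the best block.
  std::printf("sweep all: %d\n", SweepInParallelModel(pages, 0));        // 256
  // The main thread passes the pending allocation size and can stop early.
  std::printf("sweep until 128: %d\n", SweepInParallelModel(pages, 128));  // 200
  return 0;
}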

=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Tue Jul  8 08:44:45 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Thu Jul 10 12:07:28 2014 UTC
@@ -559,7 +559,7 @@
  private:
   // v8::Task overrides.
   virtual void Run() V8_OVERRIDE {
-    heap_->mark_compact_collector()->SweepInParallel(space_);
+    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
     heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
   }

@@ -3544,7 +3544,7 @@

         switch (space->identity()) {
           case OLD_DATA_SPACE:
-            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+            SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
             break;
           case OLD_POINTER_SPACE:
             SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
@@ -3939,7 +3939,7 @@
                      FreeList* free_list,
                      Address start,
                      int size) {
-  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
     return space->Free(start, size);
   } else {
     return size - free_list->Free(start, size);
@@ -3948,9 +3948,9 @@


 // Force instantiation of templatized SweepConservatively method for
-// SWEEP_SEQUENTIALLY mode.
+// SWEEP_ON_MAIN_THREAD mode.
 template intptr_t MarkCompactCollector::
-    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+    SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
         PagedSpace*, FreeList*, Page*);


@@ -3975,16 +3975,19 @@
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
   ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
          free_list != NULL) ||
-         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+         (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
          free_list == NULL));

   // When parallel sweeping is active, the page will be marked after
   // sweeping by the main thread.
-  if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+  if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
+    p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+  } else {
     p->MarkSweptConservatively();
   }

   intptr_t freed_bytes = 0;
+  intptr_t max_freed_bytes = 0;
   size_t size = 0;

   // Skip over all the dead objects at the start of the page and mark them free.
@@ -3999,8 +4002,9 @@

   if (it.Done()) {
     size = p->area_end() - p->area_start();
-    freed_bytes += Free<mode>(space, free_list, p->area_start(),
-                              static_cast<int>(size));
+    freed_bytes = Free<mode>(space, free_list, p->area_start(),
+                             static_cast<int>(size));
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -4010,8 +4014,9 @@
   Address free_end = StartOfLiveObject(cell_base, *cell);
   // Free the first free space.
   size = free_end - p->area_start();
-  freed_bytes += Free<mode>(space, free_list, p->area_start(),
-                            static_cast<int>(size));
+  freed_bytes = Free<mode>(space, free_list, p->area_start(),
+                           static_cast<int>(size));
+  max_freed_bytes = Max(freed_bytes, max_freed_bytes);

   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
@@ -4036,8 +4041,9 @@
           // so now we need to find the start of the first live object at the
           // end of the free space.
           free_end = StartOfLiveObject(cell_base, *cell);
-          freed_bytes += Free<mode>(space, free_list, free_start,
-                                    static_cast<int>(free_end - free_start));
+          freed_bytes = Free<mode>(space, free_list, free_start,
+                                   static_cast<int>(free_end - free_start));
+          max_freed_bytes = Max(freed_bytes, max_freed_bytes);
         }
       }
// Update our undigested record of where the current free area started.
@@ -4051,31 +4057,40 @@
   // Handle the free space at the end of the page.
   if (cell_base - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += Free<mode>(space, free_list, free_start,
-                              static_cast<int>(p->area_end() - free_start));
+    freed_bytes = Free<mode>(space, free_list, free_start,
+                             static_cast<int>(p->area_end() - free_start));
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   }

   p->ResetLiveBytes();
-  return freed_bytes;
+  return max_freed_bytes;
 }


-void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
+int MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                          int required_freed_bytes) {
   PageIterator it(space);
   FreeList* free_list = space == heap()->old_pointer_space()
                             ? free_list_old_pointer_space_.get()
                             : free_list_old_data_space_.get();
   FreeList private_free_list(space);
+  int max_freed = 0;
+  int max_freed_overall = 0;
   while (it.has_next()) {
     Page* p = it.next();

     if (p->TryParallelSweeping()) {
-      SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
+      max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
+          space, &private_free_list, p);
       free_list->Concatenate(&private_free_list);
-      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+      if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+        return max_freed;
+      }
+      max_freed_overall = Max(max_freed, max_freed_overall);
     }
     if (p == space->end_of_unswept_pages()) break;
   }
+  return max_freed_overall;
 }


@@ -4131,7 +4146,7 @@
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+        SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
         pages_swept++;
         break;
       }
@@ -4142,7 +4157,7 @@
             PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                    reinterpret_cast<intptr_t>(p));
           }
-          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+          SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
           pages_swept++;
           parallel_sweeping_active = true;
         } else {
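
The switch above from accumulating freed_bytes to tracking max_freed_bytes is
the core of the change: a free-list allocation of N bytes needs one contiguous
block of at least N bytes, so reporting the total freed on a page could
over-promise. A minimal standalone illustration (not V8 code, arbitrary
numbers):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Two 40-byte holes were swept on a page: 80 bytes freed in total, yet a
  // 64-byte allocation cannot be carved out of either block.  The sweeper
  // therefore reports 40 (the largest block), not 80 (the sum).
  std::vector<int> freed_blocks;
  freed_blocks.push_back(40);
  freed_blocks.push_back(40);

  int total = 0, largest = 0;
  for (size_t i = 0; i < freed_blocks.size(); ++i) {
    total += freed_blocks[i];
    largest = std::max(largest, freed_blocks[i]);
  }
  std::printf("total %d, largest %d\n", total, largest);  // total 80, largest 40
  return 0;
}
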
=======================================
--- /branches/bleeding_edge/src/mark-compact.h  Wed Jul  2 12:33:12 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.h  Thu Jul 10 12:07:28 2014 UTC
@@ -577,7 +577,7 @@
   };

   enum SweepingParallelism {
-    SWEEP_SEQUENTIALLY,
+    SWEEP_ON_MAIN_THREAD,
     SWEEP_IN_PARALLEL
   };

@@ -590,7 +590,7 @@
 #endif

   // Sweep a single page from the given space conservatively.
-  // Return a number of reclaimed bytes.
+  // Returns the size of the biggest continuous freed memory chunk in bytes.
   template<SweepingParallelism type>
   static intptr_t SweepConservatively(PagedSpace* space,
                                       FreeList* free_list,
@@ -659,8 +659,11 @@

   MarkingParity marking_parity() { return marking_parity_; }

-  // Concurrent and parallel sweeping support.
-  void SweepInParallel(PagedSpace* space);
+  // Concurrent and parallel sweeping support. If required_freed_bytes was set
+  // to a value larger than 0, then sweeping returns after a block of at least
+  // required_freed_bytes was freed. If required_freed_bytes was set to zero
+  // then the whole given space is swept.
+  int SweepInParallel(PagedSpace* space, int required_freed_bytes);

   void WaitUntilSweepingCompleted();

=======================================
--- /branches/bleeding_edge/src/spaces.cc       Tue Jul  8 08:44:45 2014 UTC
+++ /branches/bleeding_edge/src/spaces.cc       Thu Jul 10 12:07:28 2014 UTC
@@ -2581,12 +2581,23 @@
 }


-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+HeapObject* PagedSpace::EnsureSweepingProgress(
     int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();

-  // If sweeper threads are still running, wait for them.
   if (collector->IsConcurrentSweepingInProgress(this)) {
+    // If sweeping is still in progress try to sweep pages on the main thread.
+    int free_chunk =
+        collector->SweepInParallel(this, size_in_bytes);
+    if (free_chunk >= size_in_bytes) {
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      // We should be able to allocate an object here since we just freed that
+      // much memory.
+      ASSERT(object != NULL);
+      if (object != NULL) return object;
+    }
+
+    // Wait for the sweeper threads here and complete the sweeping phase.
     collector->WaitUntilSweepingCompleted();

     // After waiting for the sweeper threads, there may be new free-list
@@ -2617,7 +2628,7 @@
       && heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements form their free-lists.
-    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    HeapObject* object = EnsureSweepingProgress(size_in_bytes);
     if (object != NULL) return object;
   }

@@ -2630,7 +2641,7 @@
   // If sweeper threads are active, wait for them at that point and steal
   // elements form their free-lists. Allocation may still fail their which
   // would indicate that there is not enough memory for the given allocation.
-  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+  return EnsureSweepingProgress(size_in_bytes);
 }
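
The EnsureSweepingProgress() change above prefers main-thread sweeping over
blocking: only if sweeping cannot produce a block of size_in_bytes does the
allocator wait for the sweeper threads. The toy below mirrors that decision
flow; it is a sketch, not V8 code, and TrySweepOnMainThread() and
AllocateFromFreeList() are hypothetical stand-ins for SweepInParallel() and
FreeList::Allocate().

#include <cstdio>

// Hypothetical stand-ins for the calls made by EnsureSweepingProgress().
static int TrySweepOnMainThread(int size_in_bytes) {
  (void)size_in_bytes;
  return 96;  // pretend the best block main-thread sweeping freed is 96 bytes
}
static bool AllocateFromFreeList(int size_in_bytes) {
  return size_in_bytes <= 96;  // toy free list after the sweep above
}
static void WaitUntilSweepingCompleted() {
  std::printf("  ...blocking on sweeper threads...\n");
}

// Mirrors the control flow of PagedSpace::EnsureSweepingProgress(): sweep on
// the main thread first, allocate if that freed enough, and only otherwise
// block until concurrent sweeping has finished and retry.
static void EnsureSweepingProgressModel(int size_in_bytes) {
  int free_chunk = TrySweepOnMainThread(size_in_bytes);
  if (free_chunk >= size_in_bytes && AllocateFromFreeList(size_in_bytes)) {
    std::printf("allocated %d bytes without waiting\n", size_in_bytes);
    return;
  }
  WaitUntilSweepingCompleted();
  std::printf("retry %d-byte allocation after the full sweep\n", size_in_bytes);
}

int main() {
  EnsureSweepingProgressModel(64);   // satisfied by main-thread sweeping
  EnsureSweepingProgressModel(512);  // too large: falls back to waiting
  return 0;
}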


=======================================
--- /branches/bleeding_edge/src/spaces.h        Wed Jul  2 12:33:12 2014 UTC
+++ /branches/bleeding_edge/src/spaces.h        Thu Jul 10 12:07:28 2014 UTC
@@ -2014,8 +2014,10 @@
   // address denoted by top in allocation_info_.
   inline HeapObject* AllocateLinearly(int size_in_bytes);

-  MUST_USE_RESULT HeapObject*
-      WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
+  // If sweeping is still in progress try to sweep unswept pages. If that is
+  // not successful, wait for the sweeper threads and re-try free-list
+  // allocation.
+  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);

   // Slow path of AllocateRaw.  This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
=======================================
--- /branches/bleeding_edge/src/sweeper-thread.cc Mon Jun 30 13:25:46 2014 UTC
+++ /branches/bleeding_edge/src/sweeper-thread.cc Thu Jul 10 12:07:28 2014 UTC
@@ -41,8 +41,8 @@
       return;
     }

-    collector_->SweepInParallel(heap_->old_data_space());
-    collector_->SweepInParallel(heap_->old_pointer_space());
+    collector_->SweepInParallel(heap_->old_data_space(), 0);
+    collector_->SweepInParallel(heap_->old_pointer_space(), 0);
     end_sweeping_semaphore_.Signal();
   }
 }
