Revision: 22319
Author:   [email protected]
Date:     Thu Jul 10 12:22:01 2014 UTC
Log:      Revert "Precisely sweeping of scan-on-scavenge pages."

BUG=
[email protected]

Review URL: https://codereview.chromium.org/387483002
http://code.google.com/p/v8/source/detail?r=22319

Modified:
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/objects-inl.h
 /branches/bleeding_edge/src/objects.h
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/store-buffer.cc
 /branches/bleeding_edge/src/store-buffer.h

=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Jul  9 11:08:26 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Thu Jul 10 12:22:01 2014 UTC
@@ -408,7 +408,7 @@
 }


-void Heap::GarbageCollectionPrologue(GarbageCollector collector) {
+void Heap::GarbageCollectionPrologue() {
   {  AllowHeapAllocation for_the_first_part_of_prologue;
     ClearJSFunctionResultCaches();
     gc_count_++;
@@ -439,7 +439,7 @@
   ReportStatisticsBeforeGC();
 #endif  // DEBUG

-  store_buffer()->GCPrologue(collector == MARK_COMPACTOR);
+  store_buffer()->GCPrologue();

   if (isolate()->concurrent_osr_enabled()) {
     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
@@ -837,7 +837,7 @@
   { GCTracer tracer(this, gc_reason, collector_reason);
     ASSERT(AllowHeapAllocation::IsAllowed());
     DisallowHeapAllocation no_allocation_during_gc;
-    GarbageCollectionPrologue(collector);
+    GarbageCollectionPrologue();
     // The GC count was incremented in the prologue.  Tell the tracer about
     // it.
     tracer.set_gc_count(gc_count_);
=======================================
--- /branches/bleeding_edge/src/heap.h  Tue Jul  8 08:20:22 2014 UTC
+++ /branches/bleeding_edge/src/heap.h  Thu Jul 10 12:22:01 2014 UTC
@@ -1698,7 +1698,7 @@

   // Code that should be run before and after each GC.  Includes some
   // reporting/verification activities when compiled with DEBUG set.
-  void GarbageCollectionPrologue(GarbageCollector collector);
+  void GarbageCollectionPrologue();
   void GarbageCollectionEpilogue();

   // Pretenuring decisions are made based on feedback collected during new
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Thu Jul 10 12:07:28 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Thu Jul 10 12:22:01 2014 UTC
@@ -4161,23 +4161,12 @@
           pages_swept++;
           parallel_sweeping_active = true;
         } else {
-          if (p->scan_on_scavenge()) {
-            SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-                space, p, NULL);
-            pages_swept++;
-            if (FLAG_gc_verbose) {
-              PrintF("Sweeping 0x%" V8PRIxPTR
-                  " scan on scavenge page precisely.\n",
-                  reinterpret_cast<intptr_t>(p));
-            }
-          } else {
-            if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
-                  reinterpret_cast<intptr_t>(p));
-            }
-            p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
-            space->IncreaseUnsweptFreeBytes(p);
+          if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+                   reinterpret_cast<intptr_t>(p));
           }
+          p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
+          space->IncreaseUnsweptFreeBytes(p);
         }
         space->set_end_of_unswept_pages(p);
         break;
=======================================
--- /branches/bleeding_edge/src/objects-inl.h   Thu Jul 10 10:54:47 2014 UTC
+++ /branches/bleeding_edge/src/objects-inl.h   Thu Jul 10 12:22:01 2014 UTC
@@ -1478,22 +1478,6 @@
 int HeapObject::Size() {
   return SizeFromMap(map());
 }
-
-
-bool HeapObject::ContainsPointers() {
-  InstanceType type = map()->instance_type();
-  if (type <= LAST_NAME_TYPE) {
-    if (type == SYMBOL_TYPE) {
-      return true;
-    }
-    ASSERT(type < FIRST_NONSTRING_TYPE);
-    // There are four string representations: sequential strings, external
-    // strings, cons strings, and sliced strings.
-    // Only the latter two contain non-map-word pointers to heap objects.
-    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag);
-  }
-  return (type > LAST_DATA_TYPE);
-}


 void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
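
The ContainsPointers() predicate removed above relies on the string
representation tag: of the four string shapes (sequential, external, cons,
sliced), only cons and sliced strings hold pointers to other heap strings,
and an "indirect" bit in the instance type picks them out. A minimal
standalone sketch of that check follows; the tag values are chosen for the
example and are not taken from V8's actual encoding.

  #include <cstdio>

  // Hypothetical representation tags; only the layout of the low bit
  // matters for the mask test below.
  const unsigned kSeqStringTag = 0x0;
  const unsigned kConsStringTag = 0x1;
  const unsigned kExternalStringTag = 0x2;
  const unsigned kSlicedStringTag = 0x3;
  const unsigned kIsIndirectStringMask = 0x1;
  const unsigned kIsIndirectStringTag = 0x1;

  static bool StringContainsPointers(unsigned representation_tag) {
    // Cons and sliced strings point at their parts; sequential and external
    // strings carry only raw characters or an external resource.
    return (representation_tag & kIsIndirectStringMask) ==
           kIsIndirectStringTag;
  }

  int main() {
    printf("seq:%d cons:%d ext:%d sliced:%d\n",
           StringContainsPointers(kSeqStringTag),       // 0
           StringContainsPointers(kConsStringTag),      // 1
           StringContainsPointers(kExternalStringTag),  // 0
           StringContainsPointers(kSlicedStringTag));   // 1
    return 0;
  }
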
=======================================
--- /branches/bleeding_edge/src/objects.h       Thu Jul 10 10:54:47 2014 UTC
+++ /branches/bleeding_edge/src/objects.h       Thu Jul 10 12:22:01 2014 UTC
@@ -717,7 +717,6 @@
   FIXED_UINT8_CLAMPED_ARRAY_TYPE,  // LAST_FIXED_TYPED_ARRAY_TYPE

   FIXED_DOUBLE_ARRAY_TYPE,
-  CONSTANT_POOL_ARRAY_TYPE,
   FILLER_TYPE,  // LAST_DATA_TYPE

   // Structs.
@@ -744,6 +743,7 @@
   BREAK_POINT_INFO_TYPE,

   FIXED_ARRAY_TYPE,
+  CONSTANT_POOL_ARRAY_TYPE,
   SHARED_FUNCTION_INFO_TYPE,

   // All the following types are subtypes of JSReceiver, which corresponds to
@@ -1719,10 +1719,6 @@
   // Returns the heap object's size in bytes
   inline int Size();

-  // Returns true if this heap object contains only references to other
-  // heap objects.
-  inline bool ContainsPointers();
-
   // Given a heap object's map pointer, returns the heap size in bytes
   // Useful when the map pointer field is used for other purposes.
   // GC internal.
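
The instance-type reordering above matters because the removed
HeapObject::ContainsPointers() classified most objects with a single range
check, type > LAST_DATA_TYPE, so whether CONSTANT_POOL_ARRAY_TYPE sits before
or after FILLER_TYPE (LAST_DATA_TYPE) decides how constant pool arrays are
treated. A small self-contained sketch of that mechanism, with an invented
enum rather than V8's real InstanceType values:

  #include <cstdio>

  // Invented layout: pointer-free ("data") types are grouped at the low end,
  // so one comparison against LAST_DATA_TYPE classifies an object.
  enum InstanceType {
    FIXED_DOUBLE_ARRAY_TYPE,   // raw doubles, no heap pointers
    FILLER_TYPE,               // LAST_DATA_TYPE
    LAST_DATA_TYPE = FILLER_TYPE,
    FIXED_ARRAY_TYPE,          // everything past here may hold pointers
    CONSTANT_POOL_ARRAY_TYPE,  // placed here, it counts as pointer-bearing
    SHARED_FUNCTION_INFO_TYPE
  };

  static bool ContainsPointers(InstanceType type) {
    return type > LAST_DATA_TYPE;
  }

  int main() {
    printf("fixed double array:  %d\n",
           ContainsPointers(FIXED_DOUBLE_ARRAY_TYPE));   // 0
    printf("constant pool array: %d\n",
           ContainsPointers(CONSTANT_POOL_ARRAY_TYPE));  // 1
    return 0;
  }
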
=======================================
--- /branches/bleeding_edge/src/spaces.cc       Thu Jul 10 12:07:28 2014 UTC
+++ /branches/bleeding_edge/src/spaces.cc       Thu Jul 10 12:22:01 2014 UTC
@@ -18,9 +18,6 @@
 // HeapObjectIterator

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  // Check that we actually can iterate this space.
-  ASSERT(space->is_iterable());
-
   // You can't actually iterate over the anchor page. It is not a real page,
   // just an anchor for the double linked page list. Initialize as if we have
   // reached the end of the anchor page, then the first iteration will move on
@@ -35,9 +32,6 @@

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                        HeapObjectCallback size_func) {
-  // Check that we actually can iterate this space.
-  ASSERT(space->is_iterable());
-
   // You can't actually iterate over the anchor page. It is not a real page,
   // just an anchor for the double linked page list. Initialize the current
   // address and end as NULL, then the first iteration will move on
@@ -72,6 +66,9 @@
                                     Address cur, Address end,
                                     HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
+  // Check that we actually can iterate this space.
+  ASSERT(space->is_iterable());
+
   space_ = space;
   cur_addr_ = cur;
   cur_end_ = end;
=======================================
--- /branches/bleeding_edge/src/store-buffer.cc Tue Jul  8 12:29:15 2014 UTC
+++ /branches/bleeding_edge/src/store-buffer.cc Thu Jul 10 12:22:01 2014 UTC
@@ -11,7 +11,6 @@
 #include "src/base/atomicops.h"
 #include "src/counters.h"
 #include "src/store-buffer-inl.h"
-#include "src/utils.h"

 namespace v8 {
 namespace internal {
@@ -23,13 +22,10 @@
       old_start_(NULL),
       old_limit_(NULL),
       old_top_(NULL),
-      old_regular_limit_(NULL),
       old_reserved_limit_(NULL),
-      old_virtual_memory_(NULL),
-      old_store_buffer_length_(0),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
-      allow_overflow_(false),
+      during_gc_(false),
       store_buffer_rebuilding_enabled_(false),
       callback_(NULL),
       may_move_store_buffer_entries_(true),
@@ -48,16 +44,8 @@
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);

-  // We set the maximum store buffer size to the maximum size of a semi-space.
-  // The store buffer may reach this limit during a full garbage collection.
-  // Note that half of the semi-space should be good enough since half of the
-  // memory in the semi-space are not object pointers.
-  old_store_buffer_length_ =
-      Max(static_cast<int>(heap_->MaxSemiSpaceSize() / sizeof(Address)),
-          kOldRegularStoreBufferLength);
-
   old_virtual_memory_ =
-      new base::VirtualMemory(old_store_buffer_length_ * kPointerSize);
+      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
   old_top_ = old_start_ =
       reinterpret_cast<Address*>(old_virtual_memory_->address());
   // Don't know the alignment requirements of the OS, but it is certainly not
@@ -66,12 +54,9 @@
   int initial_length =
       static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
   ASSERT(initial_length > 0);
-  ASSERT(initial_length <= kOldRegularStoreBufferLength);
-  ASSERT(initial_length <= old_store_buffer_length_);
-  ASSERT(kOldRegularStoreBufferLength <= old_store_buffer_length_);
+  ASSERT(initial_length <= kOldStoreBufferLength);
   old_limit_ = old_start_ + initial_length;
-  old_regular_limit_ = old_start_ + kOldRegularStoreBufferLength;
-  old_reserved_limit_ = old_start_ + old_store_buffer_length_;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

   CHECK(old_virtual_memory_->Commit(
             reinterpret_cast<void*>(old_start_),
@@ -108,13 +93,8 @@
   delete old_virtual_memory_;
   delete[] hash_set_1_;
   delete[] hash_set_2_;
-  old_start_ = NULL;
-  old_top_ = NULL;
-  old_limit_ = NULL;
-  old_reserved_limit_ = NULL;
-  old_regular_limit_ = NULL;
-  start_ = NULL;
-  limit_ = NULL;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
+  start_ = limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }

@@ -146,37 +126,11 @@
 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
   return old_limit_ - old_top_ >= space_needed;
 }
-
-
-template<StoreBuffer::ExemptPopularPagesMode mode>
-void StoreBuffer::IterativelyExemptPopularPages(intptr_t space_needed) {
-  // Sample 1 entry in 97 and filter out the pages where we estimate that more
-  // than 1 in 8 pointers are to new space.
-  static const int kSampleFinenesses = 5;
-  static const struct Samples {
-    int prime_sample_step;
-    int threshold;
-  } samples[kSampleFinenesses] =  {
-    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
-    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
-    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
-    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
-    { 1, 0}
-  };
-  for (int i = 0; i < kSampleFinenesses; i++) {
-    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-    // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (mode == ENSURE_SPACE && SpaceAvailable(space_needed)) return;
-    else if (mode == SHRINK_TO_REGULAR_SIZE && old_top_ < old_limit_) return;
-  }
-}


 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
-      ((!allow_overflow_ && old_limit_ < old_regular_limit_) ||
-          (allow_overflow_ && old_limit_ < old_reserved_limit_))) {
+         old_limit_ < old_reserved_limit_) {
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
@@ -208,8 +162,26 @@

   if (SpaceAvailable(space_needed)) return;

-  IterativelyExemptPopularPages<ENSURE_SPACE>(space_needed);
-  ASSERT(SpaceAvailable(space_needed));
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] =  {
+    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+    { 1, 0}
+  };
+  for (int i = 0; i < kSampleFinenesses; i++) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    if (SpaceAvailable(space_needed)) return;
+  }
+  UNREACHABLE();
 }


@@ -356,9 +328,9 @@
 }


-void StoreBuffer::GCPrologue(bool allow_overflow) {
+void StoreBuffer::GCPrologue() {
   ClearFilteringHashSets();
-  allow_overflow_ = allow_overflow;
+  during_gc_ = true;
 }


@@ -394,13 +366,7 @@


 void StoreBuffer::GCEpilogue() {
-  if (allow_overflow_ && old_limit_ > old_regular_limit_) {
-    IterativelyExemptPopularPages<SHRINK_TO_REGULAR_SIZE>(0);
-    ASSERT(old_limit_ < old_regular_limit_);
-    old_virtual_memory_->Uncommit(old_limit_, old_regular_limit_ - old_limit_);
-  }
-
-  allow_overflow_ = false;
+  during_gc_ = false;
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
@@ -522,22 +488,25 @@
           FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
-          ASSERT(page->owner() == heap_->map_space() ||
-                 page->owner() == heap_->old_pointer_space());
-          CHECK(!page->WasSweptConservatively());
-
-          HeapObjectIterator iterator(page, NULL);
-          for (HeapObject* heap_object = iterator.Next();
-               heap_object != NULL;
-               heap_object = iterator.Next()) {
-            // We iterate over objects that contain pointers only.
-            if (heap_object->ContainsPointers()) {
-              FindPointersToNewSpaceInRegion(
-                  heap_object->address() + HeapObject::kHeaderSize,
-                  heap_object->address() + heap_object->Size(),
-                  slot_callback,
-                  clear_maps);
+          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+          Address start = page->area_start();
+          Address end = page->area_end();
+          if (owner == heap_->map_space()) {
+            ASSERT(page->WasSweptPrecisely());
+            HeapObjectIterator iterator(page, NULL);
+            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+                 heap_object = iterator.Next()) {
+              // We skip free space objects.
+              if (!heap_object->IsFiller()) {
+                FindPointersToNewSpaceInRegion(
+                    heap_object->address() + HeapObject::kHeaderSize,
+                    heap_object->address() + heap_object->Size(), slot_callback,
+                    clear_maps);
+              }
             }
+          } else {
+            FindPointersToNewSpaceInRegion(
+                start, end, slot_callback, clear_maps);
           }
         }
       }
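
For scale, the sampling table restored in the EnsureSpace() hunk above works
out to the following thresholds if one assumes, purely for illustration, a
1 MB page and 8-byte pointers (neither value appears in this diff):

  #include <cstdio>

  int main() {
    const int kPageSize = 1 << 20;               // assumed 1 MB page
    const int kPointerSize = 8;                  // assumed 64-bit pointers
    const int slots = kPageSize / kPointerSize;  // 131072 slots per page
    // Mirrors the restored table: sample 1 entry in prime_sample_step and
    // exempt a page from the store buffer once its sampled hit count exceeds
    // the threshold; per the comment above, the {1, 0} row is the last resort
    // after which all pages end up exempt.
    const int kSampleFinenesses = 5;
    const struct { int prime_sample_step; int threshold; }
        samples[kSampleFinenesses] = {
      {97, (slots / 97) / 8},    // 168 with the assumed sizes
      {23, (slots / 23) / 16},   // 356
      {7,  (slots / 7) / 32},    // 585
      {3,  (slots / 3) / 256},   // 170
      {1,  0}
    };
    for (int i = 0; i < kSampleFinenesses; i++) {
      printf("sample step %2d -> threshold %d\n",
             samples[i].prime_sample_step, samples[i].threshold);
    }
    return 0;
  }
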
=======================================
--- /branches/bleeding_edge/src/store-buffer.h  Tue Jul  8 08:20:22 2014 UTC
+++ /branches/bleeding_edge/src/store-buffer.h  Thu Jul 10 12:22:01 2014 UTC
@@ -19,6 +19,11 @@

 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

+typedef void (StoreBuffer::*RegionCallback)(Address start,
+                                            Address end,
+                                            ObjectSlotCallback slot_callback,
+                                            bool clear_maps);
+
 // Used to implement the write barrier by collecting addresses of pointers
 // between spaces.
 class StoreBuffer {
@@ -63,13 +68,13 @@
   static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
-  static const int kOldRegularStoreBufferLength = kStoreBufferLength * 16;
+  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
   static const int kHashSetLengthLog2 = 12;
   static const int kHashSetLength = 1 << kHashSetLengthLog2;

   void Compact();

-  void GCPrologue(bool allow_overflow);
+  void GCPrologue();
   void GCEpilogue();

   Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
@@ -113,27 +118,12 @@
   Address* old_start_;
   Address* old_limit_;
   Address* old_top_;
-
-  // The regular limit specifies how big the store buffer may become during
-  // mutator execution or while scavenging.
-  Address* old_regular_limit_;
-
-  // The reserved limit is bigger then the regular limit. It should be the size
-  // of a semi-space to avoid new scan-on-scavenge during new space evacuation
-  // after sweeping in a full garbage collection.
   Address* old_reserved_limit_;
-
   base::VirtualMemory* old_virtual_memory_;
-  int old_store_buffer_length_;

   bool old_buffer_is_sorted_;
   bool old_buffer_is_filtered_;
-
-  // If allow_overflow_ is set, we allow the store buffer to grow until
-  // old_reserved_limit_. But we will shrink the store buffer in the epilogue to
-  // stay within the old_regular_limit_.
-  bool allow_overflow_;
-
+  bool during_gc_;
   // The garbage collector iterates over many pointers to new space that are not
   // handled by the store buffer.  This flag indicates whether the pointers
   // found by the callbacks should be added to the store buffer or not.
@@ -156,14 +146,6 @@
   void Uniq();
   void ExemptPopularPages(int prime_sample_step, int threshold);

-  enum ExemptPopularPagesMode {
-    ENSURE_SPACE,
-    SHRINK_TO_REGULAR_SIZE
-  };
-
-  template <ExemptPopularPagesMode mode>
-  void IterativelyExemptPopularPages(intptr_t space_needed);
-
   // Set the map field of the object to NULL if contains a map.
   inline void ClearDeadObject(HeapObject *object);

@@ -174,6 +156,17 @@
                                       ObjectSlotCallback slot_callback,
                                       bool clear_maps);

+  // For each region of pointers on a page in use from an old space call
+  // visit_pointer_region callback.
+  // If either visit_pointer_region or callback can cause an allocation
+  // in old space and changes in allocation watermark then
+  // can_preallocate_during_iteration should be set to true.
+  void IteratePointersOnPage(
+      PagedSpace* space,
+      Page* page,
+      RegionCallback region_callback,
+      ObjectSlotCallback slot_callback);
+
   void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                     bool clear_maps);
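
Plugging in a 64-bit build (kPointerSizeLog2 == 3 and sizeof(Address) == 8,
both assumptions rather than values from this diff), the constants in the
hunk above give a 128 KB new-space store buffer and a 2 MB reservation for
the old buffer:

  #include <cstdio>

  int main() {
    const int kPointerSizeLog2 = 3;  // assumed 64-bit build
    const int kPointerSize = 1 << kPointerSizeLog2;
    const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
    const int kStoreBufferSize = kStoreBufferOverflowBit;            // 131072
    const int kStoreBufferLength = kStoreBufferSize / kPointerSize;  // 16384
    const int kOldStoreBufferLength = kStoreBufferLength * 16;       // 262144
    printf("new buffer: %d bytes (%d slots)\n",
           kStoreBufferSize, kStoreBufferLength);
    printf("old buffer: %d slots (%d bytes reserved)\n",
           kOldStoreBufferLength, kOldStoreBufferLength * kPointerSize);
    return 0;
  }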
