Reviewers: jarin,

Description:
Remove guard page mechanism from promotion queue.

BUG=

Please review this at https://codereview.chromium.org/557243002/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files (+28, -33 lines):
  M src/heap/heap.h
  M src/heap/heap.cc
  M src/heap/heap-inl.h
  M src/heap/spaces.cc
  A test/mjsunit/regress/regress-411210.js


Index: src/heap/heap-inl.h
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index adb6e25bb710c581d8a6ae7b571b5eddc4732908..3b6a8033a291066905ee3807fcb8ca6266b3c2c2 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -31,18 +31,12 @@ void PromotionQueue::insert(HeapObject* target, int size) {
         NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
     DCHECK(!rear_page->prev_page()->is_anchor());
     rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
-    ActivateGuardIfOnTheSamePage();
   }

-  if (guard_) {
-    DCHECK(GetHeadPage() ==
-           Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
-
-    if ((rear_ - 2) < limit_) {
-      RelocateQueueHead();
-      emergency_stack_->Add(Entry(target, size));
-      return;
-    }
+  if ((rear_ - 2) < limit_) {
+    RelocateQueueHead();
+    emergency_stack_->Add(Entry(target, size));
+    return;
   }

   *(--rear_) = reinterpret_cast<intptr_t>(target);
@@ -55,13 +49,6 @@ void PromotionQueue::insert(HeapObject* target, int size) {
 }


-void PromotionQueue::ActivateGuardIfOnTheSamePage() {
-  guard_ = guard_ ||
-           heap_->new_space()->active_space()->current_page()->address() ==
-               GetHeadPage()->address();
-}
-
-
 template <>
 bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
   // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 1f7d8784f6b8fbf104e121212fc6e5c2b28a9196..f39ad43aa4c9a02be2036479d76c7fb3c56e8b6b 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1365,7 +1365,6 @@ void PromotionQueue::Initialize() {
   front_ = rear_ =
       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
   emergency_stack_ = NULL;
-  guard_ = false;
 }


@@ -1963,15 +1962,16 @@ class ScavengingVisitor : public StaticVisitorBase {

     HeapObject* target = NULL;  // Initialization to please compiler.
     if (allocation.To(&target)) {
+      // Order is important here: Set the promotion limit before storing a
+      // filler for double alignment or migrating the object. Otherwise we
+      // may end up overwriting promotion queue entries when we migrate the
+      // object.
+      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
       if (alignment != kObjectAlignment) {
         target = EnsureDoubleAligned(heap, target, allocation_size);
       }

-      // Order is important here: Set the promotion limit before migrating
-      // the object. Otherwise we may end up overwriting promotion queue
-      // entries when we migrate the object.
-      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-
       // Order is important: slot might be inside of the target if target
       // was allocated over a dead object and slot comes from the store
       // buffer.
Index: src/heap/heap.h
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 8b4a021e7595eb3e5cde01aa1ae774ca2ed37ba5..8a449cb4ae853a4fbebea1d4211bb4330ab1d332 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -383,18 +383,11 @@ class PromotionQueue {
     emergency_stack_ = NULL;
   }

-  inline void ActivateGuardIfOnTheSamePage();
-
   Page* GetHeadPage() {
     return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
   }

   void SetNewLimit(Address limit) {
-    if (!guard_) {
-      return;
-    }
-
-    DCHECK(GetHeadPage() == Page::FromAllocationTop(limit));
     limit_ = reinterpret_cast<intptr_t*>(limit);

     if (limit_ <= rear_) {
@@ -451,8 +444,6 @@ class PromotionQueue {
   intptr_t* rear_;
   intptr_t* limit_;

-  bool guard_;
-
   static const int kEntrySizeInWords = 2;

   struct Entry {
Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index ca0907359fece6101d110f3fd1dd5ed861b4c424..b762f863cd1edc252bcad00f5e7ad28eb74120ba 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1360,7 +1360,6 @@ bool NewSpace::AddFreshPage() {
   Address limit = NewSpacePage::FromLimit(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
-    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
   }

   int remaining_in_page = static_cast<int>(limit - top);
Index: test/mjsunit/regress/regress-411210.js
diff --git a/test/mjsunit/regress/regress-411210.js b/test/mjsunit/regress/regress-411210.js
new file mode 100644
index 0000000000000000000000000000000000000000..bb9d3a5fd7bb3aa8aa79ef91ec0fadb528f2aec1
--- /dev/null
+++ b/test/mjsunit/regress/regress-411210.js
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=439 --random-seed=-423594851
+
+var __v_3;
+function __f_2() {
+  var __v_1 = new Array(3);
+  __v_1[0] = 10;
+  __v_1[1] = 15.5;
+  __v_3 = __f_2();
+  __v_1[2] = 20;
+  return __v_1;
+}
+for (var __v_2 = 0; __v_2 < 3; ++__v_2) {
+  __v_3 = __f_2();
+}


--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to