Revision: 23824
Author:   [email protected]
Date:     Wed Sep 10 07:51:29 2014 UTC
Log:      Remove guard page mechanism from promotion queue.

BUG=chromium:411210
LOG=n
[email protected]

Review URL: https://codereview.chromium.org/557243002
https://code.google.com/p/v8/source/detail?r=23824

Added:
 /branches/bleeding_edge/test/mjsunit/regress/regress-411210.js
Modified:
 /branches/bleeding_edge/src/heap/heap-inl.h
 /branches/bleeding_edge/src/heap/heap.cc
 /branches/bleeding_edge/src/heap/heap.h
 /branches/bleeding_edge/src/heap/spaces.cc
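
For context before the diff: the promotion queue keeps two-word <target, size> entries at the unused end of to-space and grows downward, toward the scavenger's upward-growing allocation top (see ToSpaceEnd() in heap.cc and kEntrySizeInWords in heap.h below). Before this change, the collision check in PromotionQueue::insert was only armed once a guard flag noticed that allocation had reached the queue's page; with the guard removed, SetNewLimit records the allocation top unconditionally and insert always checks whether one more two-word entry would cross it, spilling to the emergency stack when it would. The toy model below is a minimal, standalone sketch of that simplified insert path; it is not V8 code, and ToyPromotionQueue and its members are invented names used only for illustration.

// Toy model (not V8 code): a queue that grows downward from the end of a
// buffer while allocation grows upward from the start. The insert path
// mirrors the patch's unconditional `(rear_ - 2) < limit_` check.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Entry {
  intptr_t target;
  intptr_t size;
};

class ToyPromotionQueue {
 public:
  ToyPromotionQueue(intptr_t* area_start, intptr_t* area_end)
      : limit_(area_start), rear_(area_end) {}

  // Called whenever the allocation top moves; the queue must never write
  // an entry below this address.
  void SetNewLimit(intptr_t* limit) { limit_ = limit; }

  void Insert(intptr_t target, intptr_t size) {
    // An entry is two words. If writing it would cross the allocation
    // limit, spill to the emergency stack instead of clobbering memory
    // that was just handed out.
    if (rear_ - 2 < limit_) {
      emergency_stack_.push_back(Entry{target, size});
      return;
    }
    *(--rear_) = target;
    *(--rear_) = size;
  }

  std::size_t spilled() const { return emergency_stack_.size(); }

 private:
  intptr_t* limit_;  // lowest address the queue may grow into
  intptr_t* rear_;   // next entry is written just below this address
  std::vector<Entry> emergency_stack_;
};

int main() {
  intptr_t buffer[8] = {0};
  ToyPromotionQueue queue(buffer, buffer + 8);

  // Pretend allocation has consumed the first four words of the page.
  queue.SetNewLimit(buffer + 4);

  for (intptr_t i = 0; i < 4; ++i) queue.Insert(0x1000 + i, 16);

  // Only two 2-word entries fit between buffer+4 and buffer+8; the rest
  // are spilled to the emergency stack.
  std::printf("spilled entries: %zu\n", queue.spilled());  // prints 2
  return 0;
}

In the sketch the limit is simply remembered; the real SetNewLimit (heap.h below) additionally relocates the queue head when the new limit has already passed rear_, which is why keeping the limit current at every call site is enough to drop the guard flag.
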

=======================================
--- /dev/null
+++ /branches/bleeding_edge/test/mjsunit/regress/regress-411210.js Wed Sep 10 07:51:29 2014 UTC
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=439 --random-seed=-423594851
+
+var __v_3;
+function __f_2() {
+  var __v_1 = new Array(3);
+  __v_1[0] = 10;
+  __v_1[1] = 15.5;
+  __v_3 = __f_2();
+  __v_1[2] = 20;
+  return __v_1;
+}
+for (var __v_2 = 0; __v_2 < 3; ++__v_2) {
+  __v_3 = __f_2();
+}
=======================================
--- /branches/bleeding_edge/src/heap/heap-inl.h Tue Sep  9 11:41:56 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap-inl.h Wed Sep 10 07:51:29 2014 UTC
@@ -31,18 +31,12 @@
         NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
     DCHECK(!rear_page->prev_page()->is_anchor());
    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
-    ActivateGuardIfOnTheSamePage();
   }

-  if (guard_) {
-    DCHECK(GetHeadPage() ==
-           Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
-
-    if ((rear_ - 2) < limit_) {
-      RelocateQueueHead();
-      emergency_stack_->Add(Entry(target, size));
-      return;
-    }
+  if ((rear_ - 2) < limit_) {
+    RelocateQueueHead();
+    emergency_stack_->Add(Entry(target, size));
+    return;
   }

   *(--rear_) = reinterpret_cast<intptr_t>(target);
@@ -53,13 +47,6 @@
                               reinterpret_cast<Address>(rear_));
 #endif
 }
-
-
-void PromotionQueue::ActivateGuardIfOnTheSamePage() {
-  guard_ = guard_ ||
-           heap_->new_space()->active_space()->current_page()->address() ==
-               GetHeadPage()->address();
-}


 template <>
=======================================
--- /branches/bleeding_edge/src/heap/heap.cc    Mon Sep  8 09:11:11 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap.cc    Wed Sep 10 07:51:29 2014 UTC
@@ -1365,7 +1365,6 @@
   front_ = rear_ =
       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
   emergency_stack_ = NULL;
-  guard_ = false;
 }


@@ -1963,14 +1962,15 @@

     HeapObject* target = NULL;  // Initialization to please compiler.
     if (allocation.To(&target)) {
+      // Order is important here: Set the promotion limit before storing a
+      // filler for double alignment or migrating the object. Otherwise we
+      // may end up overwriting promotion queue entries when we migrate the
+      // object.
+      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
       if (alignment != kObjectAlignment) {
         target = EnsureDoubleAligned(heap, target, allocation_size);
       }
-
-      // Order is important here: Set the promotion limit before migrating
-      // the object. Otherwise we may end up overwriting promotion queue
-      // entries when we migrate the object.
-      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());

       // Order is important: slot might be inside of the target if target
       // was allocated over a dead object and slot comes from the store
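
The comment relocated in the hunk above states the ordering constraint: the promotion limit must be raised to the new allocation top before the double-alignment filler is stored or the object is migrated, because both writes land in to-space where queue entries may still live. The sketch below is a hypothetical, self-contained illustration of that hazard; the array named space, the entry values, and the object contents are all made up for illustration and do not come from V8.

// Hypothetical model of the hazard: "new space" is a small word array,
// allocation grows upward from the front, queue entries sit at the end.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  intptr_t space[8] = {0};

  // Two queue entries already occupy the last four words of the page.
  space[7] = 0x1111; space[6] = 16;  // entry A: <target, size>
  space[5] = 0x2222; space[4] = 24;  // entry B: <target, size>

  // Allocation advances by three words to make room for a promoted object,
  // moving the top into the middle of entry B.
  intptr_t* old_top = space + 2;
  intptr_t* new_top = old_top + 3;

  // WRONG ORDER: copy the object (or store the alignment filler) before the
  // queue's limit is updated to new_top. The copy overwrites space[4].
  intptr_t object[3] = {0xAAAA, 0xBBBB, 0xCCCC};
  std::memcpy(old_top, object, sizeof(object));

  std::printf("allocation top moved from space+%td to space+%td\n",
              old_top - space, new_top - space);
  std::printf("entry B's size word (space[4]) is now %#lx, was 24\n",
              static_cast<unsigned long>(space[4]));
  return 0;
}

With the order the patch enforces, SetNewLimit(heap->new_space()->top()) runs first, so the queue can notice that the new limit has passed its entries and move them out of harm's way before the filler store or the object copy happens.
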
=======================================
--- /branches/bleeding_edge/src/heap/heap.h     Mon Aug 25 15:17:06 2014 UTC
+++ /branches/bleeding_edge/src/heap/heap.h     Wed Sep 10 07:51:29 2014 UTC
@@ -382,19 +382,12 @@
     delete emergency_stack_;
     emergency_stack_ = NULL;
   }
-
-  inline void ActivateGuardIfOnTheSamePage();

   Page* GetHeadPage() {
     return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
   }

   void SetNewLimit(Address limit) {
-    if (!guard_) {
-      return;
-    }
-
-    DCHECK(GetHeadPage() == Page::FromAllocationTop(limit));
     limit_ = reinterpret_cast<intptr_t*>(limit);

     if (limit_ <= rear_) {
@@ -451,8 +444,6 @@
   intptr_t* rear_;
   intptr_t* limit_;

-  bool guard_;
-
   static const int kEntrySizeInWords = 2;

   struct Entry {
=======================================
--- /branches/bleeding_edge/src/heap/spaces.cc  Tue Sep  2 13:36:35 2014 UTC
+++ /branches/bleeding_edge/src/heap/spaces.cc  Wed Sep 10 07:51:29 2014 UTC
@@ -1360,7 +1360,6 @@
   Address limit = NewSpacePage::FromLimit(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
-    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
   }

   int remaining_in_page = static_cast<int>(limit - top);
