Reviewers: Hannes Payer,

Message:
Committed patchset #1 manually as r21992 (tree was closed).

Description:
Revert "Update survival statistics correctly in the Scavenger."

This reverts r21991.

Reason: lots of test failures.

BUG=
[email protected]

Committed: https://code.google.com/p/v8/source/detail?r=21992

Please review this at https://codereview.chromium.org/352083002/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files (+44, -40 lines):
  M src/heap.cc


Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index 627ea7792e4a95c7532c16e8b9b8263be3b5ab7f..4dea51050d3f1a37e21b81ae7ea9ec53ceb4d341 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1986,14 +1986,17 @@ class ScavengingVisitor : public StaticVisitorBase {
     }

     Heap* heap = map->GetHeap();
-    AllocationResult allocation;
+    if (heap->ShouldBePromoted(object->address(), object_size)) {
+      AllocationResult allocation;

-    if (!heap->ShouldBePromoted(object->address(), object_size)) {
-      ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
-      allocation = heap->new_space()->AllocateRaw(allocation_size);
+      if (object_contents == DATA_OBJECT) {
+        ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+        allocation = heap->old_data_space()->AllocateRaw(allocation_size);
+      } else {
+        ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+        allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
+      }

-      // Allocation in the other semi-space may fail due to fragmentation.
-      // In that case we allocate in the old generation.
       HeapObject* target = NULL;  // Initialization to please compiler.
       if (allocation.To(&target)) {
         if (alignment != kObjectAlignment) {
@@ -2006,48 +2009,49 @@ class ScavengingVisitor : public StaticVisitorBase {
         *slot = target;
         MigrateObject(heap, object, target, object_size);

-        heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-        heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+        if (object_contents == POINTER_OBJECT) {
+          if (map->instance_type() == JS_FUNCTION_TYPE) {
+            heap->promotion_queue()->insert(
+                target, JSFunction::kNonWeakFieldsEndOffset);
+          } else {
+            heap->promotion_queue()->insert(target, object_size);
+          }
+        }
+
+        heap->IncrementPromotedObjectsSize(object_size);
         return;
       }
     }
-
-    if (object_contents == DATA_OBJECT) {
-      ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
-      allocation = heap->old_data_space()->AllocateRaw(allocation_size);
-    } else {
-      ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
-      allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
-    }
-
-    HeapObject* target = NULL;  // Initialization to please compiler.
-    if (allocation.To(&target)) {
-      if (alignment != kObjectAlignment) {
-        target = EnsureDoubleAligned(heap, target, allocation_size);
+    ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
+    AllocationResult allocation =
+        heap->new_space()->AllocateRaw(allocation_size);
+    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
+    // Allocation in the other semi-space may fail due to fragmentation.
+    // In that case we allocate in the old generation.
+    if (allocation.IsRetry()) {
+      if (object_contents == DATA_OBJECT) {
+        ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+        allocation = heap->old_data_space()->AllocateRaw(allocation_size);
+      } else {
+        ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+        allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
       }
+    }

-      // Order is important: slot might be inside of the target if target
-      // was allocated over a dead object and slot comes from the store
-      // buffer.
-      *slot = target;
-      MigrateObject(heap, object, target, object_size);
-
-      if (object_contents == POINTER_OBJECT) {
-        if (map->instance_type() == JS_FUNCTION_TYPE) {
-          heap->promotion_queue()->insert(target,
-              JSFunction::kNonWeakFieldsEndOffset);
-        } else {
-          heap->promotion_queue()->insert(target, object_size);
-        }
-      }
+    HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());

-      heap->IncrementPromotedObjectsSize(object_size);
-      return;
+    if (alignment != kObjectAlignment) {
+      target = EnsureDoubleAligned(heap, target, allocation_size);
     }

-    // The scavenger should always have enough space available in the old
-    // generation for promotion. Otherwise a full gc would have been triggered.
-    UNREACHABLE();
+    // Order is important: slot might be inside of the target if target
+    // was allocated over a dead object and slot comes from the store
+    // buffer.
+    *slot = target;
+    MigrateObject(heap, object, target, object_size);
+    heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+    return;
   }




--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to