Revision: 17856
Author: [email protected]
Date: Tue Nov 19 10:17:33 2013 UTC
Log: Bugfix: dependent code field in AllocationSite was keeping code
objects alive even after context death.
BUG=320532
LOG=Y
[email protected]
Review URL: https://codereview.chromium.org/62803008
http://code.google.com/p/v8/source/detail?r=17856
Added:
/branches/bleeding_edge/test/mjsunit/regress/regress-320532.js
Modified:
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/objects-visiting-inl.h
/branches/bleeding_edge/src/objects-visiting.h
/branches/bleeding_edge/src/objects.h
=======================================
--- /dev/null
+++ /branches/bleeding_edge/test/mjsunit/regress/regress-320532.js Tue Nov 19 10:17:33 2013 UTC
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --track-allocation-sites --noalways-opt
+// Flags: --stress-runs=8 --send-idle-notification --gc-global
+
+
+function bar() { return new Array(); }
+bar();
+bar();
+%OptimizeFunctionOnNextCall(bar);
+a = bar();
+function foo(len) { return new Array(len); }
+foo(0);
+foo(0);
+%OptimizeFunctionOnNextCall(bar);
+foo(0);
=======================================
--- /branches/bleeding_edge/src/heap.cc Mon Nov 18 11:44:06 2013 UTC
+++ /branches/bleeding_edge/src/heap.cc Tue Nov 19 10:17:33 2013 UTC
@@ -1784,6 +1784,8 @@
mark_compact_collector()->is_compacting();
ProcessArrayBuffers(retainer, record_slots);
ProcessNativeContexts(retainer, record_slots);
+ // TODO(mvstanton): AllocationSites only need to be processed during
+ // MARK_COMPACT, as they live in old space. Verify and address.
ProcessAllocationSites(retainer, record_slots);
}
@@ -1889,7 +1891,7 @@
}
static void VisitLiveObject(Heap* heap,
- AllocationSite* array_buffer,
+ AllocationSite* site,
WeakObjectRetainer* retainer,
bool record_slots) {}
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Mon Nov 11 17:46:08 2013 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Tue Nov 19 10:17:33 2013 UTC
@@ -2540,6 +2540,17 @@
ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
}
}
+
+ // Iterate over allocation sites, removing dependent code that is not
+ // otherwise kept alive by strong references.
+ Object* undefined = heap()->undefined_value();
+ for (Object* site = heap()->allocation_sites_list();
+ site != undefined;
+ site = AllocationSite::cast(site)->weak_next()) {
+ if (IsMarked(site)) {
+ ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
+ }
+ }
if (heap_->weak_object_to_code_table()->IsHashTable()) {
WeakHashTable* table =
=======================================
--- /branches/bleeding_edge/src/objects-visiting-inl.h Mon Oct 14 13:35:06 2013 UTC
+++ /branches/bleeding_edge/src/objects-visiting-inl.h Tue Nov 19 10:17:33 2013 UTC
@@ -189,10 +189,7 @@
table_.Register(kVisitNativeContext, &VisitNativeContext);
- table_.Register(kVisitAllocationSite,
- &FixedBodyVisitor<StaticVisitor,
- AllocationSite::BodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitAllocationSite, &VisitAllocationSite);
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
@@ -386,6 +383,31 @@
HeapObject::RawField(object,
PropertyCell::kPointerFieldsBeginOffset),
HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
}
+
+
+template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ Object** slot =
+ HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
+ if (FLAG_collect_maps) {
+ // Mark allocation site dependent codes array but do not push it onto
+ // marking stack, this will make references from it weak. We will clean
+ // dead codes when we iterate over allocation sites in
+ // ClearNonLiveReferences.
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ } else {
+ StaticVisitor::VisitPointer(heap, slot);
+ }
+
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
+}
template<typename StaticVisitor>
=======================================
--- /branches/bleeding_edge/src/objects-visiting.h Mon Oct 14 13:35:06 2013 UTC
+++ /branches/bleeding_edge/src/objects-visiting.h Tue Nov 19 10:17:33 2013 UTC
@@ -399,6 +399,7 @@
}
INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
+ INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
=======================================
--- /branches/bleeding_edge/src/objects.h Mon Nov 18 17:18:14 2013 UTC
+++ /branches/bleeding_edge/src/objects.h Tue Nov 19 10:17:33 2013 UTC
@@ -8189,6 +8189,12 @@
static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
static const int kSize = kWeakNextOffset + kPointerSize;
+ // During mark compact we need to take special care for the dependent code
+ // field.
+ static const int kPointerFieldsBeginOffset = kTransitionInfoOffset;
+ static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+
+ // For other visitors, use the fixed body descriptor below.
typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
kDependentCodeOffset + kPointerSize,
kSize> BodyDescriptor;
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.