Revision: 13490
Author: [email protected]
Date: Thu Jan 24 03:55:05 2013
Log: Make embedded maps in optimized code weak.
Each map has a weak array of dependent codes in which it tracks all the
optimized code objects that embed it.
Old-space GC either clears dead dependent codes from the array (when the
corresponding map is alive) or deoptimizes the live dependent codes (when
the map is dead).
BUG=v8:2073
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11575007
http://code.google.com/p/v8/source/detail?r=13490
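
The rule in the log can be pictured with a small standalone model. The sketch
below is illustrative only: the Map/Code structs and the SweepDependentCodes
function are simplified stand-ins, not V8 types or API.

    // Toy model of the clearing rule: a map keeps weak references to the
    // optimized code objects that embed it. After marking, the collector
    // either prunes dead code entries (live map) or requests deoptimization
    // of the surviving code (dead map).
    #include <algorithm>
    #include <vector>

    struct Code {
      bool alive;                          // survived marking
      bool marked_for_deoptimization;
    };

    struct Map {
      bool alive;                          // survived marking
      std::vector<Code*> dependent_codes;  // weak: not kept alive by the map
    };

    void SweepDependentCodes(Map* map) {
      if (map->alive) {
        // Live map: just drop entries whose code died.
        std::vector<Code*>& codes = map->dependent_codes;
        codes.erase(std::remove_if(codes.begin(), codes.end(),
                                   [](Code* c) { return !c->alive; }),
                    codes.end());
      } else {
        // Dead map: any surviving code that embeds it must be deoptimized.
        for (Code* c : map->dependent_codes) {
          if (c->alive) c->marked_for_deoptimization = true;
        }
        map->dependent_codes.clear();
      }
    }

    int main() {
      Code live_code{true, false}, dead_code{false, false};
      Map dead_map{false, {&live_code}};
      Map live_map{true, {&dead_code, &live_code}};
      SweepDependentCodes(&dead_map);  // live_code is now marked for deopt
      SweepDependentCodes(&live_map);  // only live_code stays in the list
      return live_code.marked_for_deoptimization ? 0 : 1;
    }
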
Added:
/branches/bleeding_edge/test/mjsunit/regress/regress-2073.js
Modified:
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/lithium.cc
/branches/bleeding_edge/src/lithium.h
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/mark-compact.h
/branches/bleeding_edge/src/objects-inl.h
/branches/bleeding_edge/src/objects-visiting-inl.h
/branches/bleeding_edge/src/objects.cc
/branches/bleeding_edge/src/objects.h
/branches/bleeding_edge/test/mjsunit/mjsunit.status
=======================================
--- /dev/null
+++ /branches/bleeding_edge/test/mjsunit/regress/regress-2073.js Thu Jan 24 03:55:05 2013
@@ -0,0 +1,99 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Running this test with --trace_gc will show heap size growth due to
+// leaking objects via embedded maps in optimized code.
+
+var counter = 0;
+
+function nextid() {
+ counter += 1;
+ return counter;
+}
+
+function Scope() {
+ this.id = nextid();
+ this.parent = null;
+ this.left = null;
+ this.right = null;
+ this.head = null;
+ this.tail = null;
+ this.counter = 0;
+}
+
+Scope.prototype = {
+ new: function() {
+ var Child,
+ child;
+ Child = function() {};
+ Child.prototype = this;
+ child = new Child();
+ child.id = nextid();
+ child.parent = this;
+ child.left = this.last;
+ child.right = null;
+ child.head = null;
+ child.tail = null;
+ child.counter = 0;
+ if (this.head) {
+ this.tail.right = child;
+ this.tail = child;
+ } else {
+ this.head = this.tail = child;
+ }
+ return child;
+ },
+
+ destroy: function() {
+ if ($root == this) return;
+ var parent = this.parent;
+ if (parent.head == this) parent.head = this.right;
+ if (parent.tail == this) parent.tail = this.left;
+ if (this.left) this.left.right = this.right;
+ if (this.right) this.right.left = this.left;
+ }
+};
+
+function inc(scope) {
+ scope.counter = scope.counter + 1;
+}
+
+var $root = new Scope();
+
+n = 100000;
+m = 10;
+
+function doit() {
+ var a = $root.new();
+ var b = a.new();
+ inc(b);
+ if (i > m) $root.head.destroy();
+}
+
+for (var i = 0; i < n; i++) {
+ doit();
+}
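
(The mjsunit.status change at the end of this patch skips this test in the
regular runner, so observing the growth mentioned in the comment above means
running it by hand with the V8 shell and --trace_gc, e.g. something like
"d8 --trace_gc test/mjsunit/regress/regress-2073.js"; the shell name and path
depend on the local build.)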
=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Jan 23 23:54:40 2013
+++ /branches/bleeding_edge/src/heap.cc Thu Jan 24 03:55:05 2013
@@ -2199,6 +2199,8 @@
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ map->set_dependent_codes(DependentCodes::cast(empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
@@ -2334,14 +2336,18 @@
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
+ meta_map()->set_dependent_codes(DependentCodes::cast(empty_fixed_array()));
meta_map()->init_back_pointer(undefined_value());
meta_map()->set_instance_descriptors(empty_descriptor_array());
fixed_array_map()->set_code_cache(empty_fixed_array());
+ fixed_array_map()->set_dependent_codes(
+ DependentCodes::cast(empty_fixed_array()));
fixed_array_map()->init_back_pointer(undefined_value());
fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
oddball_map()->set_code_cache(empty_fixed_array());
+ oddball_map()->set_dependent_codes(DependentCodes::cast(empty_fixed_array()));
oddball_map()->init_back_pointer(undefined_value());
oddball_map()->set_instance_descriptors(empty_descriptor_array());
@@ -3790,6 +3796,9 @@
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
code->set_prologue_offset(kPrologueOffsetNotSet);
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ code->set_marked_for_deoptimization(false);
+ }
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
=======================================
--- /branches/bleeding_edge/src/lithium.cc Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/lithium.cc Thu Jan 24 03:55:05 2013
@@ -429,6 +429,7 @@
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
+ RegisterDependentCodeForEmbeddedMaps(code);
CodeGenerator::PrintCode(code, info());
return code;
}
@@ -436,4 +437,22 @@
}
+void LChunk::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ if (map->CanTransition()) {
+ maps.Add(map, zone());
+ }
+ }
+ }
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(code);
+ }
+}
+
} } // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/lithium.h Fri Dec 28 08:25:38 2012
+++ /branches/bleeding_edge/src/lithium.h Thu Jan 24 03:55:05 2013
@@ -694,6 +694,8 @@
instructions_(32, graph->zone()),
pointer_maps_(8, graph->zone()),
inlined_closures_(1, graph->zone()) { }
+
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
int spill_slot_count_;
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Wed Jan 23 08:15:15 2013
+++ /branches/bleeding_edge/src/mark-compact.cc Thu Jan 24 03:55:05 2013
@@ -82,6 +82,15 @@
}
}
}
+
+ void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ if (rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
+ !rinfo->target_object()->IsMap() ||
+ !Map::cast(rinfo->target_object())->CanTransition()) {
+ VisitPointer(rinfo->target_object_address());
+ }
+ }
};
@@ -382,7 +391,7 @@
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
- if (FLAG_collect_maps) ClearNonLiveTransitions();
+ if (FLAG_collect_maps) ClearNonLiveReferences();
ClearWeakMaps();
@@ -823,6 +832,13 @@
#endif
}
+class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
+ public:
+ virtual bool TakeFunction(JSFunction* function) {
+ return function->code()->marked_for_deoptimization();
+ }
+};
+
void MarkCompactCollector::Finish() {
#ifdef DEBUG
@@ -834,6 +850,9 @@
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
heap()->isolate()->stub_cache()->Clear();
+
+ DeoptimizeMarkedCodeFilter filter;
+ Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
}
@@ -2165,7 +2184,7 @@
}
-void MarkCompactCollector::ClearNonLiveTransitions() {
+void MarkCompactCollector::ClearNonLiveReferences() {
HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
@@ -2177,9 +2196,7 @@
if (map->IsFreeSpace()) continue;
ASSERT(map->IsMap());
- // Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+ if (!map->CanTransition()) continue;
if (map_mark.Get() &&
map->attached_to_shared_function_info()) {
@@ -2191,6 +2208,12 @@
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
+
+ if (map_mark.Get()) {
+ ClearNonLiveDependentCodes(map);
+ } else {
+ ClearAndDeoptimizeDependentCodes(map);
+ }
}
}
@@ -2257,6 +2280,46 @@
parent->ClearNonLiveTransitions(heap());
}
}
+
+
+void MarkCompactCollector::ClearAndDeoptimizeDependentCodes(Map* map) {
+ AssertNoAllocation no_allocation_scope;
+ DependentCodes* codes = map->dependent_codes();
+ int number_of_codes = codes->number_of_codes();
+ if (number_of_codes == 0) return;
+ for (int i = 0; i < number_of_codes; i++) {
+ Code* code = codes->code_at(i);
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ }
+ codes->clear_code_at(i);
+ }
+ map->set_dependent_codes(DependentCodes::cast(heap()->empty_fixed_array()));
+}
+
+
+void MarkCompactCollector::ClearNonLiveDependentCodes(Map* map) {
+ AssertNoAllocation no_allocation_scope;
+ DependentCodes* codes = map->dependent_codes();
+ int number_of_codes = codes->number_of_codes();
+ if (number_of_codes == 0) return;
+ int new_number_of_codes = 0;
+ for (int i = 0; i < number_of_codes; i++) {
+ Code* code = codes->code_at(i);
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ if (new_number_of_codes != i) {
+ codes->set_code_at(new_number_of_codes, code);
+ }
+ Object** slot = codes->code_slot_at(new_number_of_codes);
+ RecordSlot(slot, slot, code);
+ new_number_of_codes++;
+ }
+ }
+ for (int i = new_number_of_codes; i < number_of_codes; i++) {
+ codes->clear_code_at(i);
+ }
+ codes->set_number_of_codes(new_number_of_codes);
+}
void MarkCompactCollector::ProcessWeakMaps() {
=======================================
--- /branches/bleeding_edge/src/mark-compact.h Tue Jan 15 02:00:48 2013
+++ /branches/bleeding_edge/src/mark-compact.h Thu Jan 24 03:55:05 2013
@@ -797,10 +797,13 @@
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
- void ClearNonLiveTransitions();
+ void ClearNonLiveReferences();
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
+ void ClearAndDeoptimizeDependentCodes(Map* map);
+ void ClearNonLiveDependentCodes(Map* map);
+
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps
// back after collection. This is either done during
=======================================
--- /branches/bleeding_edge/src/objects-inl.h Wed Jan 23 04:28:16 2013
+++ /branches/bleeding_edge/src/objects-inl.h Thu Jan 24 03:55:05 2013
@@ -582,6 +582,14 @@
if (FixedArray::cast(this)->length() % 2 != 0) return false;
return true;
}
+
+
+bool Object::IsDependentCodes() {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a dependent codes array.
+ return true;
+}
bool Object::IsTypeFeedbackCells() {
@@ -2374,6 +2382,7 @@
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
+CAST_ACCESSOR(DependentCodes)
CAST_ACCESSOR(TypeFeedbackCells)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(JSFunctionResultCache)
@@ -3406,6 +3415,47 @@
}
+void Map::AddDependentCode(Handle<Code> code) {
+ Handle<DependentCodes> codes =
+ DependentCodes::Append(Handle<DependentCodes>(dependent_codes()), code);
+ if (*codes != dependent_codes()) {
+ set_dependent_codes(*codes);
+ }
+}
+
+
+int DependentCodes::number_of_codes() {
+ if (length() == 0) return 0;
+ return Smi::cast(get(kNumberOfCodesIndex))->value();
+}
+
+
+void DependentCodes::set_number_of_codes(int value) {
+ set(kNumberOfCodesIndex, Smi::FromInt(value));
+}
+
+
+Code* DependentCodes::code_at(int i) {
+ return Code::cast(get(kCodesIndex + i));
+}
+
+
+void DependentCodes::set_code_at(int i, Code* value) {
+ set(kCodesIndex + i, value);
+}
+
+
+Object** DependentCodes::code_slot_at(int i) {
+ return HeapObject::RawField(
+ this, FixedArray::OffsetOfElementAt(kCodesIndex + i));
+}
+
+
+void DependentCodes::clear_code_at(int i) {
+ set_undefined(kCodesIndex + i);
+}
+
+
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
// Make sure that all call stubs have an arguments count.
@@ -3681,6 +3731,21 @@
int updated = HasFunctionCacheField::update(previous, flag);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
+
+
+bool Code::marked_for_deoptimization() {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ return MarkedForDeoptimizationField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::set_marked_for_deoptimization(bool flag) {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = MarkedForDeoptimizationField::update(previous, flag);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
bool Code::is_inline_cache_stub() {
@@ -4011,6 +4076,7 @@
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, dependent_codes, DependentCodes, kDependentCodesOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
=======================================
--- /branches/bleeding_edge/src/objects-visiting-inl.h Wed Jan 16 07:02:58 2013
+++ /branches/bleeding_edge/src/objects-visiting-inl.h Thu Jan 24 03:55:05 2013
@@ -175,8 +175,11 @@
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
- StaticVisitor::MarkObject(heap, object);
+ if (!FLAG_collect_maps || rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
+ !object->IsMap() || !Map::cast(object)->CanTransition()) {
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ StaticVisitor::MarkObject(heap, object);
+ }
}
@@ -262,12 +265,9 @@
map_object->ClearCodeCache(heap);
}
- // When map collection is enabled we have to mark through map's
- // transitions and back pointers in a special way to make these links
- // weak. Only maps for subclasses of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps &&
- map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ // When map collection is enabled we have to mark through map's transitions
+ // and back pointers in a special way to make these links weak.
+ if (FLAG_collect_maps && map_object->CanTransition()) {
MarkMapContents(heap, map_object);
} else {
StaticVisitor::VisitPointers(heap,
@@ -394,6 +394,14 @@
// Already marked by marking map->GetBackPointer() above.
ASSERT(transitions->IsMap() || transitions->IsUndefined());
}
+
+ // Mark prototype dependent codes array but do not push it onto marking
+ // stack, this will make references from it weak. We will clean dead
+ // codes when we iterate over maps in ClearNonLiveTransitions.
+ Object** slot = HeapObject::RawField(map, Map::kDependentCodesOffset);
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
@@ -639,7 +647,7 @@
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// There are two places where we iterate code bodies: here and the
- // templated CodeIterateBody (below). They should be kept in sync.
+ // templated CodeIterateBody (below). They should be kept in sync.
IteratePointer(v, kRelocationInfoOffset);
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
@@ -662,8 +670,8 @@
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- // There are two places where we iterate code bodies: here and the
- // non-templated CodeIterateBody (above). They should be kept in sync.
+ // There are two places where we iterate code bodies: here and the non-
+ // templated CodeIterateBody (above). They should be kept in sync.
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
=======================================
--- /branches/bleeding_edge/src/objects.cc Thu Jan 17 00:41:27 2013
+++ /branches/bleeding_edge/src/objects.cc Thu Jan 24 03:55:05 2013
@@ -9478,6 +9478,34 @@
GetHeap()->the_hole_value(),
proto_transitions->length());
}
+
+
+Handle<DependentCodes> DependentCodes::Append(Handle<DependentCodes> codes,
+ Handle<Code> value) {
+ int append_index = codes->number_of_codes();
+ if (append_index > 0 && codes->code_at(append_index - 1) == *value) {
+ // Do not append the code if it is already in the array.
+ // It is sufficient to just check only the last element because
+ // we process embedded maps of an optimized code in one batch.
+ return codes;
+ }
+ if (codes->length() < kCodesIndex + append_index + 1) {
+ Factory* factory = codes->GetIsolate()->factory();
+ int capacity = kCodesIndex + append_index + 1;
+ if (capacity > 5) capacity = capacity * 5 / 4;
+ Handle<DependentCodes> new_codes = Handle<DependentCodes>::cast(
+ factory->CopySizeFixedArray(codes, capacity));
+ // The number of codes can change after GC.
+ append_index = codes->number_of_codes();
+ for (int i = 0; i < append_index; i++) {
+ codes->clear_code_at(i);
+ }
+ codes = new_codes;
+ }
+ codes->set_code_at(append_index, *value);
+ codes->set_number_of_codes(append_index + 1);
+ return codes;
+}
MaybeObject* JSReceiver::SetPrototype(Object* value,
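
A side note on the growth policy in DependentCodes::Append above: the array is
kept exact while it is small and over-allocated by about 25% once more than
five slots are needed. The capacity arithmetic can be checked with a few lines
of plain C++ (this mirrors the expression in the patch; RequestedCapacity is an
illustrative name, not V8 code):

    #include <cstdio>

    // Capacity requested when appending at position append_index,
    // with kCodesIndex == 1 as in the patch.
    int RequestedCapacity(int append_index) {
      int capacity = 1 + append_index + 1;
      if (capacity > 5) capacity = capacity * 5 / 4;
      return capacity;
    }

    int main() {
      for (int i = 0; i < 8; i++) {
        std::printf("append_index %d -> capacity %d\n", i, RequestedCapacity(i));
      }
      // Prints 2, 3, 4, 5 for the first four appends, then 7, 8, 10, 11.
      return 0;
    }
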
=======================================
--- /branches/bleeding_edge/src/objects.h Wed Jan 23 04:28:16 2013
+++ /branches/bleeding_edge/src/objects.h Thu Jan 24 03:55:05 2013
@@ -862,6 +862,7 @@
V(TransitionArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
+ V(DependentCodes) \
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -4395,6 +4396,12 @@
inline bool has_function_cache();
inline void set_has_function_cache(bool flag);
+
+ // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
+ // the code is going to be deoptimized because of dead embedded maps.
+ inline bool marked_for_deoptimization();
+ inline void set_marked_for_deoptimization(bool flag);
+
bool allowed_in_shared_map_code_cache();
// Get the safepoint entry for the given pc.
@@ -4600,11 +4607,16 @@
static const int kHasFunctionCacheFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kHasFunctionCacheBitCount = 1;
+ static const int kMarkedForDeoptimizationFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount + 1;
+ static const int kMarkedForDeoptimizationBitCount = 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
+ STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
+ kMarkedForDeoptimizationBitCount <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
@@ -4614,6 +4626,9 @@
kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
+ class MarkedForDeoptimizationField: public BitField<bool,
+ kMarkedForDeoptimizationFirstBit,
+ kMarkedForDeoptimizationBitCount> {}; // NOLINT
// KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStubMajorKeyFirstBit = 0;
@@ -4661,6 +4676,27 @@
};
+// This class describes the layout of dependent codes array of a map. The
+// first element contains the number of codes as a Smi. The subsequent
+// elements contain code objects. The suffix of the array can be filled with the
+// undefined value if the number of codes is less than the length of the array.
+class DependentCodes: public FixedArray {
+ public:
+ inline int number_of_codes();
+ inline void set_number_of_codes(int value);
+ inline Code* code_at(int i);
+ inline void set_code_at(int i, Code* value);
+ inline Object** code_slot_at(int i);
+ inline void clear_code_at(int i);
+ static Handle<DependentCodes> Append(Handle<DependentCodes> codes,
+ Handle<Code> value);
+ static inline DependentCodes* cast(Object* object);
+ private:
+ static const int kNumberOfCodesIndex = 0;
+ static const int kCodesIndex = 1;
+};
+
+
// All heap objects have a Map that describes their structure.
// A Map contains information about:
// - Size information about the object
@@ -4890,6 +4926,9 @@
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
+ // [dependent codes]: list of optimized codes that have this map embedded.
+ DECL_ACCESSORS(dependent_codes, DependentCodes)
+
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with prototype transitions and the
// back pointer will be moved into the prototype transitions array if
@@ -5099,6 +5138,14 @@
void ZapPrototypeTransitions();
void ZapTransitions();
+ bool CanTransition() {
+ // Only JSObject and subtypes have map transitions and back pointers.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+ }
+
+ inline void AddDependentCode(Handle<Code> code);
+
// Dispatched behavior.
DECLARE_PRINTER(Map)
DECLARE_VERIFIER(Map)
@@ -5147,7 +5194,8 @@
static const int kDescriptorsOffset =
kTransitionsOrBackPointerOffset + kPointerSize;
static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
- static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
+ static const int kDependentCodesOffset = kCodeCacheOffset + kPointerSize;
+ static const int kBitField3Offset = kDependentCodesOffset + kPointerSize;
static const int kSize = kBitField3Offset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
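
For reference, putting the DependentCodes class comment and constants above
together: an array holding two code objects in a FixedArray of length four
would read [Smi(2), code_a, code_b, undefined], i.e. index 0 is the count
(kNumberOfCodesIndex), the codes start at index 1 (kCodesIndex), and any
unused tail slots are cleared to undefined.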
=======================================
--- /branches/bleeding_edge/test/mjsunit/mjsunit.status Fri Jan 18 05:05:03 2013
+++ /branches/bleeding_edge/test/mjsunit/mjsunit.status Thu Jan 24 03:55:05 2013
@@ -83,6 +83,10 @@
# This test is the same as math-floor-of-div for non ARM architectures.
math-floor-of-div-nosudiv: PASS, SKIP if ($arch != arm && $arch != android_arm)
+##############################################################################
+# Long running test that reproduces memory leak and should be run manually.
+regress/regress-2073: SKIP
+
##############################################################################
[ $arch == arm || $arch == android_arm ]