Revision: 5443
Author: [email protected]
Date: Fri Sep 10 11:52:02 2010
Log: [Isolates] Add heap pointer to all maps and use map->heap() more.
Other changes:
1. Spaces (NewSpace, PagedSpace, etc.) get a heap_ member.
2. MarkCompactCollector already has heap_ - use it more.
3. Made Heap::ScavengeObject() static to avoid pulling the heap pointer until
the slow path is taken.
4. Changed the HEAP_PROFILE(Call) macro to HEAP_PROFILE(heap, call) to avoid
the two Isolate::Current() calls it previously made (sketched below).
5. Heap::IterateDirtyRegions now passes the heap pointer to its callback.
6. ScavengingVisitor::EvacuateObject and others use map->heap() to retrieve
the heap during GC (sketched below).
Review URL: http://codereview.chromium.org/3301008
http://code.google.com/p/v8/source/detail?r=5443
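
For readers skimming the diff below: the central idea is that every Map now
stores a pointer to its owning Heap (read back via Map::heap()), so GC code
that already holds a map can reach the heap directly instead of going through
the Isolate::Current() thread-local lookup hidden behind the HEAP macro. A
minimal sketch of the pattern; only Map::heap() comes from the patch, the
visitor function here is invented for illustration:

  // Map::heap() just reads the stored pointer (see the objects-inl.h hunk).
  Heap* Map::heap() {
    Heap* heap = reinterpret_cast<Heap*>(READ_INTPTR_FIELD(this, kHeapOffset));
    ASSERT(heap != NULL);
    return heap;
  }

  // Hypothetical GC helper: the map argument is enough to find the heap,
  // so no Isolate::Current() lookup is needed on this hot path.
  static void VisitDuringGC(Map* map, HeapObject* obj) {
    Heap* heap = map->heap();
    if (heap->InNewSpace(obj)) {
      // ... scavenge or mark obj ...
    }
  }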
Modified:
/branches/experimental/isolates/src/heap-inl.h
/branches/experimental/isolates/src/heap-profiler.cc
/branches/experimental/isolates/src/heap-profiler.h
/branches/experimental/isolates/src/heap.cc
/branches/experimental/isolates/src/heap.h
/branches/experimental/isolates/src/mark-compact.cc
/branches/experimental/isolates/src/objects-inl.h
/branches/experimental/isolates/src/serialize.cc
/branches/experimental/isolates/src/spaces.cc
/branches/experimental/isolates/src/spaces.h
/branches/experimental/isolates/test/cctest/test-spaces.cc
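
The HEAP_PROFILE change (item 4) is easiest to see at a call site. The old
macro resolved the profiler through Isolate::Current() internally (twice); the
new one takes a heap the caller already holds and fetches the profiler through
heap->isolate()->heap_profiler() once. A rough before/after, using the
ObjectMoveEvent call that appears in mark-compact.cc below:

  // Old form: the macro itself performed the Isolate::Current() lookups.
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  // New form: the caller passes a heap it already has (heap_, map->heap(),
  // or a local Heap* heap), and the profiler is resolved from it once.
  HEAP_PROFILE(heap, ObjectMoveEvent(old_addr, new_addr));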
=======================================
--- /branches/experimental/isolates/src/heap-inl.h Thu Sep 9 17:53:48 2010
+++ /branches/experimental/isolates/src/heap-inl.h Fri Sep 10 11:52:02 2010
@@ -267,7 +267,7 @@
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
- if (Heap::InNewSpace(Memory::Object_at(dst))) {
+ if (InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}
@@ -311,10 +311,15 @@
CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
}
+
+
+void Heap::ScavengePointer(HeapObject** p) {
+ ScavengeObject(p, *p);
+}
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- ASSERT(InFromSpace(object));
+ ASSERT(HEAP->InFromSpace(object));
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
=======================================
--- /branches/experimental/isolates/src/heap-profiler.cc Wed Sep 1 10:01:38 2010
+++ /branches/experimental/isolates/src/heap-profiler.cc Fri Sep 10 11:52:02 2010
@@ -412,9 +412,7 @@
void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- ASSERT(profiler != NULL);
- profiler->snapshots_->ObjectMoveEvent(from, to);
+ snapshots_->ObjectMoveEvent(from, to);
}
=======================================
--- /branches/experimental/isolates/src/heap-profiler.h Wed Sep 1 10:01:38 2010
+++ /branches/experimental/isolates/src/heap-profiler.h Fri Sep 10 11:52:02 2010
@@ -39,14 +39,15 @@
class HeapSnapshot;
class HeapSnapshotsCollection;
-#define HEAP_PROFILE(Call) \
- do { \
- if (v8::internal::HeapProfiler::is_profiling()) { \
- v8::internal::HeapProfiler::Call; \
- } \
+#define HEAP_PROFILE(heap, call) \
+ do { \
+ v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \
+ if (profiler != NULL && profiler->is_profiling()) { \
+ profiler->call; \
+ } \
} while (false)
#else
-#define HEAP_PROFILE(Call) ((void) 0)
+#define HEAP_PROFILE(heap, call) ((void) 0)
#endif // ENABLE_LOGGING_AND_PROFILING
// The HeapProfiler writes data to the log files, which can be postprocessed
@@ -63,11 +64,10 @@
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
- static void ObjectMoveEvent(Address from, Address to);
-
- static INLINE(bool is_profiling()) {
- HeapProfiler* profiler = Isolate::Current()->heap_profiler();
- return profiler != NULL && profiler->snapshots_->is_tracking_objects();
+ void ObjectMoveEvent(Address from, Address to);
+
+ INLINE(bool is_profiling()) {
+ return snapshots_->is_tracking_objects();
}
// Obsolete interface.
=======================================
--- /branches/experimental/isolates/src/heap.cc Thu Sep 9 17:53:48 2010
+++ /branches/experimental/isolates/src/heap.cc Fri Sep 10 11:52:02 2010
@@ -88,6 +88,7 @@
always_allocate_scope_depth_(0),
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
+ new_space_(this),
old_pointer_space_(NULL),
old_data_space_(NULL),
code_space_(NULL),
@@ -826,7 +827,7 @@
void ScavengePointer(Object** p) {
Object* object = *p;
if (!HEAP->InNewSpace(object)) return;
- HEAP->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
};
@@ -940,7 +941,7 @@
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateDirtyRegions(old_pointer_space_,
- &IteratePointersInDirtyRegion,
+ &Heap::IteratePointersInDirtyRegion,
&ScavengePointer,
WATERMARK_CAN_BE_INVALID);
@@ -1037,7 +1038,7 @@
static inline void VisitPointer(Object** p) {
Object* object = *p;
if (!HEAP->InNewSpace(object)) return;
- HEAP->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
};
@@ -1127,7 +1128,7 @@
enum SizeRestriction { SMALL, UNKNOWN_SIZE };
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
- static void RecordCopiedObject(HeapObject* obj) {
+ static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
#ifdef DEBUG
should_record = FLAG_heap_stats;
@@ -1136,10 +1137,10 @@
should_record = should_record || FLAG_log_gc;
#endif
if (should_record) {
- if (HEAP->new_space()->Contains(obj)) {
- HEAP->new_space()->RecordAllocation(obj);
+ if (heap->new_space()->Contains(obj)) {
+ heap->new_space()->RecordAllocation(obj);
} else {
- HEAP->new_space()->RecordPromotion(obj);
+ heap->new_space()->RecordPromotion(obj);
}
}
}
@@ -1148,20 +1149,21 @@
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
- INLINE(static HeapObject* MigrateObject(HeapObject* source,
+ INLINE(static HeapObject* MigrateObject(Heap* heap,
+ HeapObject* source,
HeapObject* target,
int size)) {
// Copy the content of source to target.
- HEAP->CopyBlock(target->address(), source->address(), size);
+ heap->CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Update NewSpace stats if necessary.
- RecordCopiedObject(target);
+ RecordCopiedObject(heap, target);
#endif
- HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
+ HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
return target;
}
@@ -1176,35 +1178,36 @@
(object_size <= Page::kMaxHeapObjectSize));
ASSERT(object->Size() == object_size);
- if (HEAP->ShouldBePromoted(object->address(), object_size)) {
+ Heap* heap = map->heap();
+ if (heap->ShouldBePromoted(object->address(), object_size)) {
Object* result;
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxHeapObjectSize)) {
- result = HEAP->lo_space()->AllocateRawFixedArray(object_size);
+ result = heap->lo_space()->AllocateRawFixedArray(object_size);
} else {
if (object_contents == DATA_OBJECT) {
- result = HEAP->old_data_space()->AllocateRaw(object_size);
+ result = heap->old_data_space()->AllocateRaw(object_size);
} else {
- result = HEAP->old_pointer_space()->AllocateRaw(object_size);
+ result = heap->old_pointer_space()->AllocateRaw(object_size);
}
}
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
- *slot = MigrateObject(object, target, object_size);
+ *slot = MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) {
- HEAP->promotion_queue()->insert(target, object_size);
+ heap->promotion_queue()->insert(target, object_size);
}
- HEAP->tracer()->increment_promoted_objects_size(object_size);
+ heap->tracer()->increment_promoted_objects_size(object_size);
return;
}
}
- Object* result = HEAP->new_space()->AllocateRaw(object_size);
+ Object* result = heap->new_space()->AllocateRaw(object_size);
ASSERT(!result->IsFailure());
- *slot = MigrateObject(object, HeapObject::cast(result), object_size);
+ *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
return;
}
@@ -1255,13 +1258,14 @@
HeapObject* object) {
ASSERT(IsShortcutCandidate(map->instance_type()));
- if (ConsString::cast(object)->unchecked_second() == HEAP->empty_string()) {
+ if (ConsString::cast(object)->unchecked_second() ==
+ map->heap()->empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
*slot = first;
- if (!HEAP->InNewSpace(first)) {
+ if (!map->heap()->InNewSpace(first)) {
object->set_map_word(MapWord::FromForwardingAddress(first));
return;
}
@@ -1312,17 +1316,12 @@
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- ASSERT(InFromSpace(object));
+ ASSERT(HEAP->InFromSpace(object));
MapWord first_word = object->map_word();
ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
ScavengingVisitor::Scavenge(map, p, object);
}
-
-
-void Heap::ScavengePointer(HeapObject** p) {
- HEAP->ScavengeObject(p, *p);
-}
Object* Heap::AllocatePartialMap(InstanceType instance_type,
@@ -1354,7 +1353,6 @@
map->set_map(meta_map());
map->set_heap(this);
map->set_instance_type(instance_type);
- reinterpret_cast<Map*>(result)->set_heap(this);
map->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size));
map->set_prototype(null_value());
@@ -3621,7 +3619,8 @@
#endif // DEBUG
-bool Heap::IteratePointersInDirtyRegion(Address start,
+bool Heap::IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback copy_object_func) {
Address slot_address = start;
@@ -3629,10 +3628,10 @@
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- if (HEAP->InNewSpace(*slot)) {
+ if (heap->InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
copy_object_func(reinterpret_cast<HeapObject**>(slot));
- if (HEAP->InNewSpace(*slot)) {
+ if (heap->InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
pointers_to_new_space_found = true;
}
@@ -3666,14 +3665,16 @@
Address map_address = start;
bool pointers_to_new_space_found = false;
+ Heap* heap = HEAP;
while (map_address < end) {
- ASSERT(!HEAP->InNewSpace(Memory::Object_at(map_address)));
+ ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
ASSERT(Memory::Object_at(map_address)->IsMap());
Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
- if (HEAP->IteratePointersInDirtyRegion(pointer_fields_start,
+ if (Heap::IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)) {
pointers_to_new_space_found = true;
@@ -3687,6 +3688,7 @@
bool Heap::IteratePointersInDirtyMapsRegion(
+ Heap* heap,
Address start,
Address end,
ObjectSlotCallback copy_object_func) {
@@ -3706,7 +3708,8 @@
Min(prev_map + Map::kPointerFieldsEndOffset, end);
contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(pointer_fields_start,
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
@@ -3728,7 +3731,8 @@
Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
contains_pointers_to_new_space =
- IteratePointersInDirtyRegion(pointer_fields_start,
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
pointer_fields_end,
copy_object_func)
|| contains_pointers_to_new_space;
@@ -3790,7 +3794,7 @@
Address region_end = Min(second_region, area_end);
if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
newmarks |= mask;
}
}
@@ -3802,7 +3806,10 @@
while (region_end <= area_end) {
if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ if (visit_dirty_region(this,
+ region_start,
+ region_end,
+ copy_object_func)) {
newmarks |= mask;
}
}
@@ -3818,7 +3825,7 @@
// with region end. Check whether region covering last part of area is
// dirty.
if (marks & mask) {
- if (visit_dirty_region(region_start, area_end, copy_object_func)) {
+ if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
newmarks |= mask;
}
}
@@ -4268,13 +4275,19 @@
// Initialize old pointer space.
old_pointer_space_ =
- new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+ new OldSpace(this,
+ max_old_generation_size_,
+ OLD_POINTER_SPACE,
+ NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
if (!old_pointer_space_->Setup(NULL, 0)) return false;
// Initialize old data space.
old_data_space_ =
- new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+ new OldSpace(this,
+ max_old_generation_size_,
+ OLD_DATA_SPACE,
+ NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
if (!old_data_space_->Setup(NULL, 0)) return false;
@@ -4289,12 +4302,12 @@
}
code_space_ =
- new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
+ new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
- map_space_ = new MapSpace(FLAG_use_big_map_space
+ map_space_ = new MapSpace(this, FLAG_use_big_map_space
? max_old_generation_size_
: MapSpace::kMaxMapPageIndex * Page::kPageSize,
FLAG_max_map_space_pages,
@@ -4303,14 +4316,14 @@
if (!map_space_->Setup(NULL, 0)) return false;
// Initialize global property cell space.
- cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
+ cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
if (!cell_space_->Setup(NULL, 0)) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
- lo_space_ = new LargeObjectSpace(LO_SPACE);
+ lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
=======================================
--- /branches/experimental/isolates/src/heap.h Thu Sep 9 17:53:48 2010
+++ /branches/experimental/isolates/src/heap.h Fri Sep 10 11:52:02 2010
@@ -215,7 +215,8 @@
typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
-typedef bool (*DirtyRegionCallback)(Address start,
+typedef bool (*DirtyRegionCallback)(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback copy_object_func);
@@ -871,7 +872,8 @@
// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space was found.
- static bool IteratePointersInDirtyRegion(Address start,
+ static bool IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback callback);
@@ -879,7 +881,8 @@
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Return true if pointers to new space was found.
- static bool IteratePointersInDirtyMapsRegion(Address start,
+ static bool IteratePointersInDirtyMapsRegion(Heap* heap,
+ Address start,
Address end,
ObjectSlotCallback callback);
@@ -988,8 +991,8 @@
// necessary, the object might be promoted to an old space. The caller must
// ensure the precondition that the object is (a) a heap object and (b) in
// the heap's from space.
- static void ScavengePointer(HeapObject** p);
- inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ static inline void ScavengePointer(HeapObject** p);
+ static inline void ScavengeObject(HeapObject** p, HeapObject* object);
// Commits from space if it is uncommitted.
void EnsureFromSpaceIsCommitted();
@@ -1372,7 +1375,7 @@
#endif
// Slow part of scavenge object.
- void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+ static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Initializes a function with a shared part and prototype.
// Returns the function.
=======================================
--- /branches/experimental/isolates/src/mark-compact.cc Thu Sep 9 17:53:48 2010
+++ /branches/experimental/isolates/src/mark-compact.cc Fri Sep 10 11:52:02 2010
@@ -370,9 +370,9 @@
ASSERT(!obj->IsMarked());
#endif
Map* map = obj->map();
- HEAP->mark_compact_collector()->SetMark(obj);
+ map->heap()->mark_compact_collector()->SetMark(obj);
// Mark the map pointer and the body.
- HEAP->mark_compact_collector()->MarkObject(map);
+ map->heap()->mark_compact_collector()->MarkObject(map);
IterateBody(map, obj);
}
@@ -1613,7 +1613,7 @@
// Update pointers in old spaces.
heap->IterateDirtyRegions(heap->old_pointer_space(),
- &heap->IteratePointersInDirtyRegion,
+ &Heap::IteratePointersInDirtyRegion,
&UpdatePointerToNewGen,
heap->WATERMARK_SHOULD_BE_VALID);
@@ -2410,20 +2410,20 @@
ASSERT(live_news_size == live_young_objects_size_);
// Flip from and to spaces
- HEAP->new_space()->Flip();
-
- HEAP->new_space()->MCCommitRelocationInfo();
+ heap_->new_space()->Flip();
+
+ heap_->new_space()->MCCommitRelocationInfo();
// Set age_mark to bottom in to space
- Address mark = HEAP->new_space()->bottom();
- HEAP->new_space()->set_age_mark(mark);
+ Address mark = heap_->new_space()->bottom();
+ heap_->new_space()->set_age_mark(mark);
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
- HEAP->CheckNewSpaceExpansionCriteria();
- HEAP->IncrementYoungSurvivorsCounter(live_news_size);
+ heap_->CheckNewSpaceExpansionCriteria();
+ heap_->IncrementYoungSurvivorsCounter(live_news_size);
}
@@ -2490,8 +2490,8 @@
PagedSpace* space) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(HEAP->map_space());
- ASSERT(HEAP->map_space()->Contains(map_addr));
+ Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
+ ASSERT(heap_->map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer.
Address new_addr = GetForwardingAddressInOldSpace(obj);
@@ -2503,12 +2503,12 @@
if (new_addr != old_addr) {
// Move contents.
- if (space == HEAP->old_data_space()) {
- HEAP->MoveBlock(new_addr, old_addr, obj_size);
+ if (space == heap_->old_data_space()) {
+ heap_->MoveBlock(new_addr, old_addr, obj_size);
} else {
- HEAP->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
- old_addr,
- obj_size);
+ heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
}
}
@@ -2518,7 +2518,7 @@
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@@ -2548,20 +2548,21 @@
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(HEAP->map_space());
- ASSERT(HEAP->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ Heap* heap = HEAP;
+ Address map_addr = encoding.DecodeMapAddress(heap->map_space());
+ ASSERT(heap->map_space()->Contains(HeapObject::FromAddress(map_addr)));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
// Reset the map pointer.
- int obj_size = RestoreMap(obj, HEAP->code_space(), new_addr, map_addr);
+ int obj_size = RestoreMap(obj, heap->code_space(), new_addr, map_addr);
Address old_addr = obj->address();
if (new_addr != old_addr) {
// Move contents.
- HEAP->MoveBlock(new_addr, old_addr, obj_size);
+ heap->MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -2571,7 +2572,7 @@
// Notify the logger that compiled code has moved.
PROFILE(CodeMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap, ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
@@ -2582,26 +2583,27 @@
// Get forwarding address
Address old_addr = obj->address();
- int offset = HEAP->new_space()->ToSpaceOffsetForAddress(old_addr);
+ Heap* heap = HEAP;
+ int offset = heap->new_space()->ToSpaceOffsetForAddress(old_addr);
Address new_addr =
- Memory::Address_at(HEAP->new_space()->FromSpaceLow() + offset);
+ Memory::Address_at(heap->new_space()->FromSpaceLow() + offset);
#ifdef DEBUG
- if (HEAP->new_space()->FromSpaceContains(new_addr)) {
- ASSERT(HEAP->new_space()->FromSpaceOffsetForAddress(new_addr) <=
- HEAP->new_space()->ToSpaceOffsetForAddress(old_addr));
+ if (heap->new_space()->FromSpaceContains(new_addr)) {
+ ASSERT(heap->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+ heap->new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
- ASSERT(HEAP->TargetSpace(obj) == HEAP->old_pointer_space() ||
- HEAP->TargetSpace(obj) == HEAP->old_data_space());
+ ASSERT(heap->TargetSpace(obj) == heap->old_pointer_space() ||
+ heap->TargetSpace(obj) == heap->old_data_space());
}
#endif
// New and old addresses cannot overlap.
- if (HEAP->InNewSpace(HeapObject::FromAddress(new_addr))) {
- HEAP->CopyBlock(new_addr, old_addr, obj_size);
+ if (heap->InNewSpace(HeapObject::FromAddress(new_addr))) {
+ heap->CopyBlock(new_addr, old_addr, obj_size);
} else {
- HEAP->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ heap->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
old_addr,
obj_size);
}
@@ -2616,7 +2618,7 @@
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
- HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
+ HEAP_PROFILE(heap, ObjectMoveEvent(old_addr, new_addr));
return obj_size;
}
=======================================
--- /branches/experimental/isolates/src/objects-inl.h Thu Sep 9 17:53:48 2010
+++ /branches/experimental/isolates/src/objects-inl.h Fri Sep 10 11:52:02 2010
@@ -1072,7 +1072,7 @@
// Assert that we are not in GC, implement GC code in a way that it doesn't
// pull heap from the map.
ASSERT(HEAP->is_safe_to_read_maps());
- return map()->map()->heap();
+ return map()->heap();
}
@@ -2475,8 +2475,6 @@
Heap* Map::heap() {
- ASSERT(instance_type() == MAP_TYPE);
- ASSERT(this == map());
Heap* heap = reinterpret_cast<Heap*>(READ_INTPTR_FIELD(this, kHeapOffset));
ASSERT(heap != NULL);
ASSERT(heap->isolate() == Isolate::Current());
=======================================
--- /branches/experimental/isolates/src/serialize.cc Thu Sep 9 17:53:48 2010
+++ /branches/experimental/isolates/src/serialize.cc Fri Sep 10 11:52:02 2010
@@ -686,15 +686,11 @@
}
ReadChunk(current, limit, space_number, address);
- if (space == HEAP->map_space()) {
+ if (space == space->heap()->map_space()) {
ASSERT(size == Map::kSize);
HeapObject* obj = HeapObject::FromAddress(address);
Map* map = reinterpret_cast<Map*>(obj);
- if (map->instance_type() == MAP_TYPE) {
- // Meta map has Heap pointer instead of scavenger.
- ASSERT(map == map->map());
- map->set_heap(HEAP);
- }
+ map->set_heap(space->heap());
}
}
=======================================
--- /branches/experimental/isolates/src/spaces.cc Thu Sep 9 17:53:48 2010
+++ /branches/experimental/isolates/src/spaces.cc Fri Sep 10 11:52:02 2010
@@ -771,10 +771,11 @@
// -----------------------------------------------------------------------------
// PagedSpace implementation
-PagedSpace::PagedSpace(int max_capacity,
+PagedSpace::PagedSpace(Heap* heap,
+ int max_capacity,
AllocationSpace id,
Executability executable)
- : Space(id, executable) {
+ : Space(heap, id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
accounting_stats_.Clear();
@@ -878,7 +879,7 @@
Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called before or after mark-compact GC
// because it accesses map pointers.
- ASSERT(!HEAP->mark_compact_collector()->in_use());
+ ASSERT(!heap()->mark_compact_collector()->in_use());
if (!Contains(addr)) return Failure::Exception();
@@ -994,14 +995,14 @@
if (available_pages <= 0) return false;
int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
- Page* p = Isolate::Current()->memory_allocator()->AllocatePages(
+ Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
desired_pages, &desired_pages, this);
if (!p->is_valid()) return false;
accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- Isolate::Current()->memory_allocator()->SetNextPage(last_page, p);
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
// Sequentially clear region marks of new pages and and cache the
// new last page in the space.
@@ -1044,9 +1045,9 @@
}
// Free pages after top_page.
- Page* p = Isolate::Current()->memory_allocator()->
+ Page* p = heap()->isolate()->memory_allocator()->
FreePages(top_page->next_page());
- Isolate::Current()->memory_allocator()->SetNextPage(top_page, p);
+ heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
// Find out how many pages we failed to free and update last_page_.
// Please note pages can only be freed in whole chunks.
@@ -1068,7 +1069,7 @@
Page* last_page = AllocationTopPage();
Page* next_page = last_page->next_page();
while (next_page->is_valid()) {
- last_page = Isolate::Current()->memory_allocator()->
+ last_page = heap()->isolate()->memory_allocator()->
FindLastPageInSameChunk(next_page);
next_page = last_page->next_page();
}
@@ -1078,7 +1079,7 @@
if (!Expand(last_page)) return false;
ASSERT(last_page->next_page()->is_valid());
last_page =
- Isolate::Current()->memory_allocator()->FindLastPageInSameChunk(
+ heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
last_page->next_page());
} while (Capacity() < capacity);
@@ -1099,7 +1100,7 @@
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
- ASSERT(Isolate::Current()->memory_allocator()->IsPageInSpace(top_page, this));
+ ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
// Loop over all the pages.
bool above_allocation_top = false;
@@ -1124,7 +1125,7 @@
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(HEAP->map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -1160,8 +1161,8 @@
// start and size. The provided space is divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
- int initial_semispace_capacity = HEAP->InitialSemiSpaceSize();
- int maximum_semispace_capacity = HEAP->MaxSemiSpaceSize();
+ int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
+ int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -1177,7 +1178,7 @@
#undef SET_NAME
#endif
- ASSERT(size == 2 * HEAP->ReservedSemiSpaceSize());
+ ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(start, size, 0));
if (!to_space_.Setup(start,
@@ -1232,16 +1233,16 @@
#ifdef ENABLE_HEAP_PROTECTION
void NewSpace::Protect() {
- Isolate::Current()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
- Isolate::Current()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
+ heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
+ heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
}
void NewSpace::Unprotect() {
- Isolate::Current()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
- to_space_.executable());
- Isolate::Current()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
- from_space_.executable());
+ heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
+ to_space_.executable());
+ heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
+ from_space_.executable());
}
#endif
@@ -1335,7 +1336,7 @@
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(HEAP->map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// The object should not be code or a map.
ASSERT(!object->IsMap());
@@ -1360,7 +1361,7 @@
bool SemiSpace::Commit() {
ASSERT(!is_committed());
- if (!Isolate::Current()->memory_allocator()->CommitBlock(
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
start_, capacity_, executable())) {
return false;
}
@@ -1371,7 +1372,7 @@
bool SemiSpace::Uncommit() {
ASSERT(is_committed());
- if (!Isolate::Current()->memory_allocator()->UncommitBlock(
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
start_, capacity_)) {
return false;
}
@@ -1418,7 +1419,7 @@
int maximum_extra = maximum_capacity_ - capacity_;
int extra = Min(RoundUp(capacity_,
static_cast<int>(OS::AllocateAlignment())),
maximum_extra);
- if (!Isolate::Current()->memory_allocator()->CommitBlock(
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
high(), extra, executable())) {
return false;
}
@@ -1432,7 +1433,7 @@
ASSERT(new_capacity > capacity_);
size_t delta = new_capacity - capacity_;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!Isolate::Current()->memory_allocator()->CommitBlock(
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
high(), delta, executable())) {
return false;
}
@@ -1446,7 +1447,7 @@
ASSERT(new_capacity < capacity_);
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!Isolate::Current()->memory_allocator()->UncommitBlock(
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
high() - delta, delta)) {
return false;
}
@@ -2036,14 +2037,14 @@
first_page_ = last->next_page();
} else {
first = prev->next_page();
- Isolate::Current()->memory_allocator()->SetNextPage(
+ heap()->isolate()->memory_allocator()->SetNextPage(
prev, last->next_page());
}
// Attach it after the last page.
- Isolate::Current()->memory_allocator()->SetNextPage(last_page_, first);
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
last_page_ = last;
- Isolate::Current()->memory_allocator()->SetNextPage(last, NULL);
+ heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
// Clean them up.
do {
@@ -2082,7 +2083,7 @@
if (page_list_is_chunk_ordered_) return;
Page* new_last_in_use = Page::FromAddress(NULL);
- Isolate::Current()->memory_allocator()->RelinkPageListInChunkOrder(
+ heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
this, &first_page_, &last_page_, &new_last_in_use);
ASSERT(new_last_in_use->is_valid());
@@ -2100,7 +2101,7 @@
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
- HEAP->CreateFillerObjectAt(start, size_in_bytes);
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
}
@@ -2127,7 +2128,7 @@
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
- HEAP->CreateFillerObjectAt(start, size_in_bytes);
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
}
}
@@ -2156,7 +2157,7 @@
int bytes_left_to_reserve = bytes;
while (bytes_left_to_reserve > 0) {
if (!reserved_page->next_page()->is_valid()) {
- if (HEAP->OldGenerationAllocationLimitReached()) return false;
+ if (heap()->OldGenerationAllocationLimitReached()) return false;
Expand(reserved_page);
}
bytes_left_to_reserve -= Page::kPageSize;
@@ -2174,7 +2175,7 @@
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
- return HEAP->OldGenerationSpaceAvailable() >= bytes;
+ return heap()->OldGenerationSpaceAvailable() >= bytes;
}
@@ -2193,7 +2194,7 @@
// There is no next page in this space. Try free list allocation unless that
// is currently forbidden.
- if (!HEAP->linear_allocation()) {
+ if (!heap()->linear_allocation()) {
int wasted_bytes;
Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
@@ -2219,7 +2220,8 @@
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return NULL;
}
@@ -2381,7 +2383,7 @@
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = heap()->isolate();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
@@ -2494,7 +2496,7 @@
// There is no next page in this space. Try free list allocation unless
// that is currently forbidden. The fixed space free list implicitly assumes
// that all free blocks are of the fixed size.
- if (!HEAP->linear_allocation()) {
+ if (!heap()->linear_allocation()) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
@@ -2517,7 +2519,8 @@
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return NULL;
}
@@ -2617,7 +2620,7 @@
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
ASSERT(object->IsJSGlobalPropertyCell() ||
- object->map() == HEAP->two_pointer_filler_map());
+ object->map() == heap()->two_pointer_filler_map());
}
#endif
@@ -2682,8 +2685,8 @@
// -----------------------------------------------------------------------------
// LargeObjectSpace
-LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
- : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+ : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
first_chunk_(NULL),
size_(0),
page_count_(0) {}
@@ -2708,10 +2711,10 @@
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
size_t size = chunk->size();
- Isolate::Current()->memory_allocator()->FreeRawMemory(chunk->address(),
- size,
- executable);
- Isolate::Current()->memory_allocator()->PerformAllocationCallback(
+ heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
+ size,
+ executable);
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size);
}
@@ -2725,8 +2728,8 @@
void LargeObjectSpace::Protect() {
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
- Isolate::Current()->memory_allocator()->Protect(chunk->address(),
- chunk->size());
+ heap()->isolate()->memory_allocator()->Protect(chunk->address(),
+ chunk->size());
chunk = chunk->next();
}
}
@@ -2736,7 +2739,7 @@
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
bool is_code = chunk->GetObject()->IsCode();
- Isolate::Current()->memory_allocator()->Unprotect(chunk->address(),
+ heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
chunk = chunk->next();
}
@@ -2752,7 +2755,8 @@
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return Failure::RetryAfterGC(requested_size, identity());
}
@@ -2859,22 +2863,22 @@
// Iterate regions of the first normal page covering object.
uint32_t first_region_number = page->GetRegionNumberForAddress(start);
newmarks |=
- HEAP->IterateDirtyRegions(marks >> first_region_number,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object) << first_region_number;
+ heap()->IterateDirtyRegions(marks >> first_region_number,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object) << first_region_number;
start = end;
end = start + Page::kPageSize;
while (end <= object_end) {
// Iterate next 32 regions.
newmarks |=
- HEAP->IterateDirtyRegions(marks,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
+ heap()->IterateDirtyRegions(marks,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
start = end;
end = start + Page::kPageSize;
}
@@ -2883,11 +2887,11 @@
// Iterate the last piece of an object which is less than
// Page::kPageSize.
newmarks |=
- HEAP->IterateDirtyRegions(marks,
- start,
- object_end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
+ heap()->IterateDirtyRegions(marks,
+ start,
+ object_end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
}
page->SetRegionMarks(newmarks);
@@ -2904,7 +2908,7 @@
HeapObject* object = current->GetObject();
if (object->IsMarked()) {
object->ClearMark();
- HEAP->mark_compact_collector()->tracer()->decrement_marked_count();
+ heap()->mark_compact_collector()->tracer()->decrement_marked_count();
previous = current;
current = current->next();
} else {
@@ -2924,15 +2928,15 @@
}
// Free the chunk.
- HEAP->mark_compact_collector()->ReportDeleteIfNeeded(object);
+ heap()->mark_compact_collector()->ReportDeleteIfNeeded(object);
size_ -= static_cast<int>(chunk_size);
page_count_--;
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
- Isolate::Current()->memory_allocator()->FreeRawMemory(chunk_address,
- chunk_size,
- executable);
- Isolate::Current()->memory_allocator()->PerformAllocationCallback(
+ heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
+ chunk_size,
+ executable);
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size_);
LOG(DeleteEvent("LargeObjectChunk", chunk_address));
}
@@ -2942,7 +2946,7 @@
bool LargeObjectSpace::Contains(HeapObject* object) {
Address address = object->address();
- if (HEAP->new_space()->Contains(address)) {
+ if (heap()->new_space()->Contains(address)) {
return false;
}
Page* page = Page::FromAddress(address);
@@ -2971,7 +2975,7 @@
// in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(HEAP->map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
@@ -2998,9 +3002,9 @@
Object* element = array->get(j);
if (element->IsHeapObject()) {
HeapObject* element_object = HeapObject::cast(element);
- ASSERT(HEAP->Contains(element_object));
+ ASSERT(heap()->Contains(element_object));
ASSERT(element_object->map()->IsMap());
- if (HEAP->InNewSpace(element_object)) {
+ if (heap()->InNewSpace(element_object)) {
Address array_addr = object->address();
Address element_addr = array_addr + FixedArray::kHeaderSize +
j * kPointerSize;
@@ -3038,7 +3042,7 @@
void LargeObjectSpace::CollectCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = heap()->isolate();
LargeObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
=======================================
--- /branches/experimental/isolates/src/spaces.h Thu Sep 9 17:53:48 2010
+++ /branches/experimental/isolates/src/spaces.h Fri Sep 10 11:52:02 2010
@@ -355,10 +355,12 @@
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
- Space(AllocationSpace id, Executability executable)
- : id_(id), executable_(executable) {}
+ Space(Heap* heap, AllocationSpace id, Executability executable)
+ : heap_(heap), id_(id), executable_(executable) {}
virtual ~Space() {}
+
+ Heap* heap() const { return heap_; }
// Does the space need executable memory?
Executability executable() { return executable_; }
@@ -387,6 +389,7 @@
virtual bool ReserveSpace(int bytes) = 0;
private:
+ Heap* heap_;
AllocationSpace id_;
Executability executable_;
};
@@ -990,7 +993,10 @@
class PagedSpace : public Space {
public:
// Creates a space with a maximum capacity, and an id.
- PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
+ PagedSpace(Heap* heap,
+ int max_capacity,
+ AllocationSpace id,
+ Executability executable);
virtual ~PagedSpace() {}
@@ -1279,7 +1285,7 @@
class SemiSpace : public Space {
public:
// Constructor.
- SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
+ explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
start_ = NULL;
age_mark_ = NULL;
}
@@ -1446,7 +1452,10 @@
class NewSpace : public Space {
public:
// Constructor.
- NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
+ explicit NewSpace(Heap* heap)
+ : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+ to_space_(heap),
+ from_space_(heap) {}
// Sets up the new space using the given chunk.
bool Setup(Address start, int size);
@@ -1833,10 +1842,11 @@
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
- explicit OldSpace(int max_capacity,
- AllocationSpace id,
- Executability executable)
- : PagedSpace(max_capacity, id, executable), free_list_(id) {
+ OldSpace(Heap* heap,
+ int max_capacity,
+ AllocationSpace id,
+ Executability executable)
+ : PagedSpace(heap, max_capacity, id, executable), free_list_(id) {
page_extra_ = 0;
}
@@ -1903,11 +1913,12 @@
class FixedSpace : public PagedSpace {
public:
- FixedSpace(int max_capacity,
+ FixedSpace(Heap* heap,
+ int max_capacity,
AllocationSpace id,
int object_size_in_bytes,
const char* name)
- : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
+ : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name),
free_list_(id, object_size_in_bytes) {
@@ -1978,8 +1989,11 @@
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
- MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
- : FixedSpace(max_capacity, id, Map::kSize, "map"),
+ MapSpace(Heap* heap,
+ int max_capacity,
+ int max_map_space_pages,
+ AllocationSpace id)
+ : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) {
ASSERT(max_map_space_pages < kMaxMapPageIndex);
}
@@ -2083,8 +2097,9 @@
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
- CellSpace(int max_capacity, AllocationSpace id)
- : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
+ CellSpace(Heap* heap, int max_capacity, AllocationSpace id)
+ : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
+ {}
protected:
#ifdef DEBUG
@@ -2158,7 +2173,7 @@
class LargeObjectSpace : public Space {
public:
- explicit LargeObjectSpace(AllocationSpace id);
+ LargeObjectSpace(Heap* heap, AllocationSpace id);
virtual ~LargeObjectSpace() {}
// Initializes internal data structures.
=======================================
--- /branches/experimental/isolates/test/cctest/test-spaces.cc Thu Jul 1 14:55:46 2010
+++ /branches/experimental/isolates/test/cctest/test-spaces.cc Fri Sep 10 11:52:02 2010
@@ -96,7 +96,9 @@
CHECK(isolate->heap()->ConfigureHeapDefault());
CHECK(isolate->memory_allocator()->Setup(isolate->heap()->MaxReserved()));
- OldSpace faked_space(isolate->heap()->MaxReserved(), OLD_POINTER_SPACE,
+ OldSpace faked_space(isolate->heap(),
+ isolate->heap()->MaxReserved(),
+ OLD_POINTER_SPACE,
NOT_EXECUTABLE);
int total_pages = 0;
int requested = 2;
@@ -156,7 +158,7 @@
CHECK(HEAP->ConfigureHeapDefault());
CHECK(Isolate::Current()->memory_allocator()->Setup(HEAP->MaxReserved()));
- NewSpace new_space;
+ NewSpace new_space(HEAP);
void* chunk =
Isolate::Current()->memory_allocator()->ReserveInitialChunk(
@@ -183,7 +185,8 @@
CHECK(HEAP->ConfigureHeapDefault());
CHECK(Isolate::Current()->memory_allocator()->Setup(HEAP->MaxReserved()));
- OldSpace* s = new OldSpace(HEAP->MaxOldGenerationSize(),
+ OldSpace* s = new OldSpace(HEAP,
+ HEAP->MaxOldGenerationSize(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
CHECK(s != NULL);