Revision: 9002
Author: [email protected]
Date: Tue Aug 23 06:33:22 2011
Log: Perform TODO(gc) cleanup for TODO-lockdown.
[email protected]
Review URL: http://codereview.chromium.org/7639020
http://code.google.com/p/v8/source/detail?r=9002
Modified:
/branches/experimental/gc/src/arm/assembler-arm-inl.h
/branches/experimental/gc/src/arm/code-stubs-arm.cc
/branches/experimental/gc/src/arm/deoptimizer-arm.cc
/branches/experimental/gc/src/arm/macro-assembler-arm.cc
/branches/experimental/gc/src/heap-inl.h
/branches/experimental/gc/src/heap.cc
/branches/experimental/gc/src/heap.h
/branches/experimental/gc/src/ia32/assembler-ia32-inl.h
/branches/experimental/gc/src/ia32/code-stubs-ia32.cc
/branches/experimental/gc/src/ia32/deoptimizer-ia32.cc
/branches/experimental/gc/src/ia32/macro-assembler-ia32.cc
/branches/experimental/gc/src/ic-inl.h
/branches/experimental/gc/src/incremental-marking.h
/branches/experimental/gc/src/liveedit.cc
/branches/experimental/gc/src/mark-compact.cc
/branches/experimental/gc/src/mark-compact.h
/branches/experimental/gc/src/objects-inl.h
/branches/experimental/gc/src/objects.h
/branches/experimental/gc/src/platform.h
/branches/experimental/gc/src/profile-generator.cc
/branches/experimental/gc/src/serialize.cc
/branches/experimental/gc/src/spaces-inl.h
/branches/experimental/gc/src/spaces.cc
/branches/experimental/gc/src/spaces.h
/branches/experimental/gc/src/store-buffer.cc
/branches/experimental/gc/src/store-buffer.h
/branches/experimental/gc/src/x64/assembler-x64-inl.h
/branches/experimental/gc/src/x64/code-stubs-x64.cc
/branches/experimental/gc/src/x64/macro-assembler-x64.cc
/branches/experimental/gc/test/cctest/test-mark-compact.cc
/branches/experimental/gc/test/cctest/test-spaces.cc
=======================================
--- /branches/experimental/gc/src/arm/assembler-arm-inl.h Fri Jul 8
02:16:20 2011
+++ /branches/experimental/gc/src/arm/assembler-arm-inl.h Tue Aug 23
06:33:22 2011
@@ -79,7 +79,7 @@
Assembler::set_target_address_at(pc_, target);
if (code != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
- // TODO(gc) We do not compact code pages.
+ // TODO(1550) We do not compact code pages.
code->GetHeap()->incremental_marking()->RecordWrite(
code, NULL, HeapObject::cast(target_code));
}
=======================================
--- /branches/experimental/gc/src/arm/code-stubs-arm.cc Wed Aug 17 04:05:46
2011
+++ /branches/experimental/gc/src/arm/code-stubs-arm.cc Tue Aug 23 06:33:22
2011
@@ -6727,8 +6727,6 @@
}
__ mov(r2, Operand(ExternalReference::isolate_address()));
- // TODO(gc): Create a fast version of this C function that does not
duplicate
- // the checks done in the stub.
if (mode == INCREMENTAL_COMPACTION) {
__ CallCFunction(
ExternalReference::incremental_evacuation_record_write_function(
=======================================
--- /branches/experimental/gc/src/arm/deoptimizer-arm.cc Wed Aug 10
05:50:30 2011
+++ /branches/experimental/gc/src/arm/deoptimizer-arm.cc Tue Aug 23
06:33:22 2011
@@ -131,7 +131,6 @@
}
-// TODO(gc) make use of unoptimized_code when supporting incremental
marking.
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
=======================================
--- /branches/experimental/gc/src/arm/macro-assembler-arm.cc Wed Aug 17
04:05:46 2011
+++ /branches/experimental/gc/src/arm/macro-assembler-arm.cc Tue Aug 23
06:33:22 2011
@@ -1907,7 +1907,7 @@
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
- // TODO(gc): Fix this!
+ // TODO(1599): Do not call stubs from stubs that do not allow stub calls.
// ASSERT(allow_stub_calls()); // Stub calls are not allowed in some
stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
=======================================
--- /branches/experimental/gc/src/heap-inl.h Wed Aug 3 09:10:10 2011
+++ /branches/experimental/gc/src/heap-inl.h Tue Aug 23 06:33:22 2011
@@ -95,7 +95,7 @@
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRawData(size)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -128,7 +128,7 @@
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRawData(size)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -192,9 +192,7 @@
} else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
- // TODO(gc) Keep track of whether we are allocating a fixed array here
- // so that we can call AllocateRawFixedArray instead.
- result = lo_space_->AllocateRawData(size_in_bytes);
+ result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else {
=======================================
--- /branches/experimental/gc/src/heap.cc Fri Aug 19 06:14:48 2011
+++ /branches/experimental/gc/src/heap.cc Tue Aug 23 06:33:22 2011
@@ -381,6 +381,7 @@
#endif // DEBUG
LiveObjectList::GCPrologue();
+ store_buffer()->GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
@@ -393,6 +394,7 @@
}
void Heap::GarbageCollectionEpilogue() {
+ store_buffer()->GCEpilogue();
LiveObjectList::GCEpilogue();
#ifdef DEBUG
allow_allocation(true);
@@ -830,7 +832,7 @@
CompletelyClearInstanceofCache();
- // TODO(gc) select heuristic for flushing NumberString cache with
+ // TODO(1605) select heuristic for flushing NumberString cache with
// FlushNumberStringCache
if (FLAG_cleanup_code_caches_at_gc) {
polymorphic_code_cache()->set_cache(undefined_value());
@@ -1450,11 +1452,8 @@
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxHeapObjectSize)) {
- if (object_contents == DATA_OBJECT) {
- maybe_result = heap->lo_space()->AllocateRawData(object_size);
- } else {
- maybe_result =
heap->lo_space()->AllocateRawFixedArray(object_size);
- }
+ maybe_result = heap->lo_space()->AllocateRaw(object_size,
+ NOT_EXECUTABLE);
} else {
if (object_contents == DATA_OBJECT) {
maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -2833,7 +2832,7 @@
Object* result;
{ MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
? old_data_space_->AllocateRaw(size)
- : lo_space_->AllocateRawData(size);
+ : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -2916,7 +2915,7 @@
// Large code objects and code objects which should stay at a fixed
address
// are allocated in large object space.
if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
- maybe_result = lo_space_->AllocateRawCode(obj_size);
+ maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
@@ -2961,7 +2960,7 @@
int obj_size = code->Size();
MaybeObject* maybe_result;
if (obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRawCode(obj_size);
+ maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
@@ -3004,7 +3003,7 @@
MaybeObject* maybe_result;
if (new_obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRawCode(new_obj_size);
+ maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
} else {
maybe_result = code_space_->AllocateRaw(new_obj_size);
}
@@ -3702,7 +3701,7 @@
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRawData(size)
+ ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -3819,7 +3818,7 @@
int size = FixedArray::SizeFor(length);
return size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
+ : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
}
@@ -4510,19 +4509,20 @@
}
-static void CheckStoreBuffer(Object** current,
+static void CheckStoreBuffer(Heap* heap,
+ Object** current,
Object** limit,
Object**** store_buffer_position,
Object*** store_buffer_top,
CheckStoreBufferFilter filter,
Address special_garbage_start,
Address special_garbage_end) {
+ Map* free_space_map = heap->free_space_map();
for ( ; current < limit; current++) {
Object* o = *current;
Address current_address = reinterpret_cast<Address>(current);
// Skip free space.
- // TODO(gc) ISOLATES MERGE
- if (o == HEAP->free_space_map()) {
+ if (o == free_space_map) {
Address current_address = reinterpret_cast<Address>(current);
FreeSpace* free_space =
FreeSpace::cast(HeapObject::FromAddress(current_address));
@@ -4549,8 +4549,7 @@
// without trying to cast it to a heap object since the hash field of
// a string can contain values like 1 and 3 which are tagged null
// pointers.
- // TODO(gc) ISOLATES MERGE
- if (!HEAP->InNewSpace(o)) continue;
+ if (!heap->InNewSpace(o)) continue;
while (**store_buffer_position < current &&
*store_buffer_position < store_buffer_top) {
(*store_buffer_position)++;
@@ -4584,7 +4583,8 @@
Object*** store_buffer_top = store_buffer()->Top();
Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(current,
+ CheckStoreBuffer(this,
+ current,
limit,
&store_buffer_position,
store_buffer_top,
@@ -4611,7 +4611,8 @@
Object*** store_buffer_top = store_buffer()->Top();
Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(current,
+ CheckStoreBuffer(this,
+ current,
limit,
&store_buffer_position,
store_buffer_top,
@@ -4634,7 +4635,8 @@
Object** current = reinterpret_cast<Object**>(object->address());
Object** limit =
reinterpret_cast<Object**>(object->address() + object->Size());
- CheckStoreBuffer(current,
+ CheckStoreBuffer(this,
+ current,
limit,
&store_buffer_position,
store_buffer_top,
=======================================
--- /branches/experimental/gc/src/heap.h Tue Aug 16 01:56:33 2011
+++ /branches/experimental/gc/src/heap.h Tue Aug 23 06:33:22 2011
@@ -903,7 +903,6 @@
inline bool CollectGarbage(AllocationSpace space);
static const int kNoGCFlags = 0;
- // TODO(gc) we are ignoring this flag
static const int kForceCompactionMask = 1;
static const int kMakeHeapIterableMask = 2;
@@ -970,7 +969,7 @@
// Heap root getters. We have versions with and without type::cast()
here.
// You can't use type::cast during GC because the assert fails.
- // TODO(gc): Try removing the unchecked accessors, now that GC marking
does
+ // TODO(1490): Try removing the unchecked accessors, now that GC marking
does
// not corrupt the stack.
#define ROOT_ACCESSOR(type, name,
camel_name) \
type* name()
{ \
@@ -1365,7 +1364,6 @@
return &marking_;
}
- // TODO(gc) Rename to IncrementalMarker after merge.
IncrementalMarking* incremental_marking() {
return &incremental_marking_;
}
=======================================
--- /branches/experimental/gc/src/ia32/assembler-ia32-inl.h Wed Jul 6
13:56:48 2011
+++ /branches/experimental/gc/src/ia32/assembler-ia32-inl.h Tue Aug 23
06:33:22 2011
@@ -94,7 +94,8 @@
if (code != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
- // TODO(gc) We are not compacting code space.
+ // TODO(1550) We are passing NULL as a slot because code can never be
on
+ // an evacuation candidate.
code->GetHeap()->incremental_marking()->RecordWrite(
code, NULL, HeapObject::cast(target_code));
}
@@ -159,7 +160,8 @@
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (code != NULL) {
- // TODO(gc) We are not compacting cell space.
+ // TODO(1550) We are passing NULL as a slot because code can never be
on
+ // an evacuation candidate.
code->GetHeap()->incremental_marking()->RecordWrite(
code, NULL, cell);
}
=======================================
--- /branches/experimental/gc/src/ia32/code-stubs-ia32.cc Wed Aug 3
09:10:10 2011
+++ /branches/experimental/gc/src/ia32/code-stubs-ia32.cc Tue Aug 23
06:33:22 2011
@@ -6509,8 +6509,6 @@
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- // TODO(gc): Create a fast version of this C function that does not
duplicate
- // the checks done in the stub.
if (mode == INCREMENTAL_COMPACTION) {
__ CallCFunction(
ExternalReference::incremental_evacuation_record_write_function(
=======================================
--- /branches/experimental/gc/src/ia32/deoptimizer-ia32.cc Wed Aug 10
05:50:30 2011
+++ /branches/experimental/gc/src/ia32/deoptimizer-ia32.cc Tue Aug 23
06:33:22 2011
@@ -221,7 +221,6 @@
}
-// TODO(gc) make use of unoptimized_code when supporting incremental
marking.
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
@@ -253,7 +252,8 @@
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
- // TODO(gc) we are not compacting code space.
+ // TODO(1550) We are passing NULL as a slot because code can never be on
+ // an evacuation candidate.
unoptimized_code->GetHeap()->incremental_marking()->RecordWrite(
unoptimized_code, NULL, replacement_code);
}
@@ -274,8 +274,7 @@
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
- // TODO(gc) ISOLATES MERGE
- HEAP->incremental_marking()->RecordWriteOf(check_code);
+ check_code->GetHeap()->incremental_marking()->RecordWriteOf(check_code);
}
=======================================
--- /branches/experimental/gc/src/ia32/macro-assembler-ia32.cc Wed Aug 3
09:10:10 2011
+++ /branches/experimental/gc/src/ia32/macro-assembler-ia32.cc Tue Aug 23
06:33:22 2011
@@ -1405,8 +1405,8 @@
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
+ // TODO(1599): Do not call stubs from stubs that do not allow stub calls.
// ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
- // TODO(gc): Fix this!
call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
=======================================
--- /branches/experimental/gc/src/ic-inl.h Sun Apr 24 04:36:08 2011
+++ /branches/experimental/gc/src/ic-inl.h Tue Aug 23 06:33:22 2011
@@ -87,7 +87,6 @@
}
#endif
Assembler::set_target_address_at(address, target->instruction_start());
- // TODO(gc) ISOLATES MERGE code object should have heap() accessor.
target->GetHeap()->incremental_marking()->RecordWriteOf(target);
}
=======================================
--- /branches/experimental/gc/src/incremental-marking.h Thu Aug 18 09:12:57
2011
+++ /branches/experimental/gc/src/incremental-marking.h Tue Aug 23 06:33:22
2011
@@ -37,7 +37,6 @@
namespace internal {
-// TODO(gc) rename into IncrementalMarker after merge.
class IncrementalMarking {
public:
enum State {
=======================================
--- /branches/experimental/gc/src/liveedit.cc Tue Aug 16 01:56:33 2011
+++ /branches/experimental/gc/src/liveedit.cc Tue Aug 23 06:33:22 2011
@@ -982,8 +982,6 @@
Address substitution_entry = substitution->instruction_start();
for (int i = 0; i < reloc_infos_.length(); i++) {
reloc_infos_[i].set_target_address(substitution_entry, NULL);
- // TODO(gc) more precise barrier.
- // TODO(gc) ISOLATES MERGE: code object should have heap() accessor.
substitution->GetHeap()->incremental_marking()->RecordWriteOf(
substitution);
}
=======================================
--- /branches/experimental/gc/src/mark-compact.cc Tue Aug 23 00:49:32 2011
+++ /branches/experimental/gc/src/mark-compact.cc Tue Aug 23 06:33:22 2011
@@ -363,10 +363,6 @@
heap_->incremental_marking()->WhiteToGreyAndPush(
HeapObject::FromAddress(new_start), new_mark_bit);
heap_->incremental_marking()->RestartIfNotMarking();
- // TODO(gc): if we shift huge array in the loop we might end up
pushing
- // too much into the marking deque. Maybe we should check one or two
- // elements on top/bottom of the marking deque to see whether they
are
- // equal to old_start.
}
#ifdef DEBUG
@@ -402,15 +398,12 @@
void MarkCompactCollector::Prepare(GCTracer* tracer) {
- // TODO(gc) re-enable code flushing.
FLAG_flush_code = false;
FLAG_always_compact = false;
// Disable collection of maps if incremental marking is enabled.
- // TODO(gc) improve maps collection algorithm to work with incremental
- // marking.
- // TODO(gc) consider oscillating collect_maps_ on and off when possible.
This
- // will allow map transition trees to die from both root and leaves.
+ // Map collection algorithm relies on a special map transition tree
traversal
+ // order which is not implemented for incremental marking.
collect_maps_ = FLAG_collect_maps &&
!heap()->incremental_marking()->IsMarking();
@@ -654,7 +647,6 @@
// Since we don't have the object's start, it is impossible to update the
// page dirty marks. Therefore, we only replace the string with its left
// substring when page dirty marks do not change.
- // TODO(gc): Seems like we could relax this restriction with store
buffers.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
@@ -2173,8 +2165,6 @@
undefined,
SKIP_WRITE_BARRIER);
- // TODO(gc) we should not evacuate first page of data space.
- // but we are doing it now to increase coverage.
Object** undefined_slot =
prototype_transitions->data_start() + i;
RecordSlot(undefined_slot, undefined_slot, undefined);
@@ -2401,7 +2391,7 @@
if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
MaybeObject* maybe_result =
- heap()->lo_space()->AllocateRawFixedArray(object_size);
+ heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(),
@@ -3221,7 +3211,6 @@
case LAZY_CONSERVATIVE: {
Page* next_page = p->next_page();
freed_bytes += SweepConservatively(space, p);
- // TODO(gc): tweak the heuristic.
if (freed_bytes >= newspace_size && p != space->LastPage()) {
space->SetPagesToSweep(next_page, space->LastPage());
return;
@@ -3237,8 +3226,6 @@
}
}
}
-
- // TODO(gc): set up allocation top and limit using the free list.
}
@@ -3258,15 +3245,13 @@
SweepSpace(heap()->old_pointer_space(), how_to_sweep);
SweepSpace(heap()->old_data_space(), how_to_sweep);
SweepSpace(heap()->code_space(), PRECISE);
- // TODO(gc): implement specialized sweeper for cell space.
SweepSpace(heap()->cell_space(), PRECISE);
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
EvacuateNewSpaceAndCandidates();
}
- // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of
- // map space to detect whether unmarked map became dead in this
- // collection or in one of the previous ones.
- // TODO(gc): Implement specialized sweeper for map space.
+ // ClearNonLiveTransitions depends on precise sweeping of map space to
+ // detect whether unmarked map became dead in this collection or in one
+ // of the previous ones.
SweepSpace(heap()->map_space(), PRECISE);
ASSERT(live_map_objects_size_ <= heap()->map_space()->Size());
@@ -3276,69 +3261,7 @@
}
-// Iterate the live objects in a range of addresses (eg, a page or a
-// semispace). The live regions of the range have been linked into a list.
-// The first live region is [first_live_start, first_live_end), and the
last
-// address in the range is top. The callback function is used to get the
-// size of each live object.
-int MarkCompactCollector::IterateLiveObjectsInRange(
- Address start,
- Address end,
- LiveObjectCallback size_func) {
- int live_objects_size = 0;
- Address current = start;
- while (current < end) {
- uint32_t encoded_map = Memory::uint32_at(current);
- if (encoded_map == kSingleFreeEncoding) {
- current += kPointerSize;
- } else if (encoded_map == kMultiFreeEncoding) {
- current += Memory::int_at(current + kIntSize);
- } else {
- int size = (this->*size_func)(HeapObject::FromAddress(current));
- current += size;
- live_objects_size += size;
- }
- }
- return live_objects_size;
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
- NewSpace* space, LiveObjectCallback size_f) {
- ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
- int accumulator = 0;
- Address end = space->top();
- NewSpacePageIterator it(space->bottom(), end);
- // The bottom is at the start of its page.
- ASSERT_EQ(space->bottom(),
- NewSpacePage::FromAddress(space->bottom())->body());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- Address start = page->body();
- Address limit = it.has_next() ? page->body_limit() : end;
- accumulator += IterateLiveObjectsInRange(start, limit, size_f);
- }
- return accumulator;
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
- PagedSpace* space, LiveObjectCallback size_f) {
- ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
- // TODO(gc): Do a mark-sweep first with precise sweeping.
- int total = 0;
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
- p->ObjectAreaEnd(),
- size_f);
- }
- return total;
-}
-
-
-// TODO(gc) ReportDeleteIfNeeded is not called currently.
+// TODO(1466) ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects. We should either reenable it or change our tools.
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
@@ -3379,7 +3302,6 @@
SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer*
next_buffer) {
- // TODO(gc) Consider maintaining local cache of buffers.
return new SlotsBuffer(next_buffer);
}
=======================================
--- /branches/experimental/gc/src/mark-compact.h Wed Aug 10 05:50:30 2011
+++ /branches/experimental/gc/src/mark-compact.h Tue Aug 23 06:33:22 2011
@@ -652,18 +652,6 @@
// evacuation.
//
-
- // Iterates live objects in a space, passes live objects
- // to a callback function which returns the heap size of the object.
- // Returns the number of live objects iterated.
- int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
- int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
-
- // Iterates the live objects between a range of addresses, returning the
- // number of live objects.
- int IterateLiveObjectsInRange(Address start, Address end,
- LiveObjectCallback size_func);
-
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
=======================================
--- /branches/experimental/gc/src/objects-inl.h Wed Aug 10 05:50:30 2011
+++ /branches/experimental/gc/src/objects-inl.h Tue Aug 23 06:33:22 2011
@@ -1137,7 +1137,11 @@
Heap* HeapObject::GetHeap() {
- return MemoryChunk::FromAddress(address())->heap();
+ Heap* heap =
+ MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
+ ASSERT(heap != NULL);
+ ASSERT(heap->isolate() == Isolate::Current());
+ return heap;
}
@@ -1154,9 +1158,8 @@
void HeapObject::set_map(Map* value) {
set_map_word(MapWord::FromMap(value));
if (value != NULL) {
- // We are passing NULL as a slot because maps can never be on
evacuation
- // candidate.
- // TODO(gc) Maps are compacted by a separate (non-evacuation)
algorithm.
+ // TODO(1600) We are passing NULL as a slot because maps can never be
on
+ // an evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, NULL,
value);
}
}
@@ -3450,8 +3453,7 @@
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kCodeOffset, value);
- // TODO(gc) ISOLATESMERGE HEAP
- WRITE_BARRIER(HEAP, this, kCodeOffset, value);
+ WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value);
}
=======================================
--- /branches/experimental/gc/src/objects.h Tue Aug 16 01:56:33 2011
+++ /branches/experimental/gc/src/objects.h Tue Aug 23 06:33:22 2011
@@ -1077,8 +1077,8 @@
inline void set_map_word(MapWord map_word);
// The Heap the object was allocated in. Used also to access Isolate.
- // This method can not be used during GC, it ASSERTs this.
inline Heap* GetHeap();
+
// Convenience method to get current isolate. This method can be
// accessed only when its result is the same as
// Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
=======================================
--- /branches/experimental/gc/src/platform.h Wed Aug 10 05:50:30 2011
+++ /branches/experimental/gc/src/platform.h Tue Aug 23 06:33:22 2011
@@ -361,7 +361,6 @@
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
- // TODO(gc) this interface should be implemented for Windows platform as
well.
static void* ReserveRegion(size_t size);
static bool CommitRegion(void* base, size_t size, bool is_executable);
=======================================
--- /branches/experimental/gc/src/profile-generator.cc Tue Aug 16 01:56:33
2011
+++ /branches/experimental/gc/src/profile-generator.cc Tue Aug 23 06:33:22
2011
@@ -2701,7 +2701,7 @@
bool HeapSnapshotGenerator::GenerateSnapshot() {
v8_heap_explorer_.TagGlobalObjects();
- // TODO(gc) Profiler assumes that any object that is in the heap after
+ // TODO(1562) Profiler assumes that any object that is in the heap after
// full GC is reachable from the root when computing dominators.
// This is not true for weakly reachable objects.
// As a temporary solution we call GC twice.
=======================================
--- /branches/experimental/gc/src/serialize.cc Wed Aug 3 09:10:10 2011
+++ /branches/experimental/gc/src/serialize.cc Tue Aug 23 06:33:22 2011
@@ -639,14 +639,13 @@
ASSERT(SpaceIsLarge(space_index));
LargeObjectSpace* lo_space =
reinterpret_cast<LargeObjectSpace*>(space);
Object* new_allocation;
- if (space_index == kLargeData) {
- new_allocation =
lo_space->AllocateRawData(size)->ToObjectUnchecked();
- } else if (space_index == kLargeFixedArray) {
+ if (space_index == kLargeData || space_index == kLargeFixedArray) {
new_allocation =
- lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
+ lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
} else {
ASSERT_EQ(kLargeCode, space_index);
- new_allocation =
lo_space->AllocateRawCode(size)->ToObjectUnchecked();
+ new_allocation =
+ lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
}
HeapObject* new_object = HeapObject::cast(new_allocation);
// Record all large objects in the same space.
=======================================
--- /branches/experimental/gc/src/spaces-inl.h Mon Aug 1 07:06:30 2011
+++ /branches/experimental/gc/src/spaces-inl.h Tue Aug 23 06:33:22 2011
@@ -216,12 +216,11 @@
}
-// TODO(gc) ISOLATESMERGE HEAP
-PointerChunkIterator::PointerChunkIterator()
+PointerChunkIterator::PointerChunkIterator(Heap* heap)
: state_(kOldPointerState),
- old_pointer_iterator_(HEAP->old_pointer_space()),
- map_iterator_(HEAP->map_space()),
- lo_iterator_(HEAP->lo_space()) { }
+ old_pointer_iterator_(heap->old_pointer_space()),
+ map_iterator_(heap->map_space()),
+ lo_iterator_(heap->lo_space()) { }
Page* Page::next_page() {
@@ -354,10 +353,11 @@
bool FreeListNode::IsFreeListNode(HeapObject* object) {
- // TODO(gc) ISOLATES MERGE
- return object->map() == HEAP->raw_unchecked_free_space_map()
- || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
- || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
+ Map* map = object->map();
+ Heap* heap = object->GetHeap();
+ return map == heap->raw_unchecked_free_space_map()
+ || map == heap->raw_unchecked_one_pointer_filler_map()
+ || map == heap->raw_unchecked_two_pointer_filler_map();
}
} } // namespace v8::internal
=======================================
--- /branches/experimental/gc/src/spaces.cc Tue Aug 16 01:56:33 2011
+++ /branches/experimental/gc/src/spaces.cc Tue Aug 23 06:33:22 2011
@@ -758,7 +758,7 @@
void PagedSpace::Shrink() {
- // TODO(gc) release half of pages?
+ // TODO(1614) Not implemented.
}
@@ -2310,8 +2310,8 @@
}
-MaybeObject* LargeObjectSpace::AllocateRawInternal(int object_size,
- Executability
executable) {
+MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
+ Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->always_allocate() &&
@@ -2319,7 +2319,6 @@
return Failure::RetryAfterGC(identity());
}
- // TODO(gc) isolates merge
LargePage* page = heap()->isolate()->memory_allocator()->
AllocateLargePage(object_size, executable, this);
if (page == NULL) return Failure::RetryAfterGC(identity());
@@ -2335,24 +2334,6 @@
heap()->incremental_marking()->OldSpaceStep(object_size);
return page->GetObject();
}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes, EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes, NOT_EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawData(int size_in_bytes) {
- ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes, NOT_EXECUTABLE);
-}
// GC support
=======================================
--- /branches/experimental/gc/src/spaces.h Fri Aug 12 02:21:47 2011
+++ /branches/experimental/gc/src/spaces.h Tue Aug 23 06:33:22 2011
@@ -120,8 +120,6 @@
class FreeList;
class MemoryChunk;
-// TODO(gc): Check that this all gets inlined and register allocated on
-// all platforms.
class MarkBit {
public:
typedef uint32_t CellType;
@@ -916,9 +914,6 @@
bool MemoryAllocationCallbackRegistered(
MemoryAllocationCallback callback);
-
- // TODO(gc) ISOLATSE
-
private:
Isolate* isolate_;
@@ -1072,8 +1067,6 @@
#ifdef DEBUG
bool VerifyPagedAllocation() {
- // TODO(gc): Make this type-correct. NewSpacePage isn't a Page,
- // but NewSpace still uses AllocationInfo.
return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
&& (top <= limit);
}
@@ -2268,19 +2261,8 @@
}
// Given an index, returns the page address.
- // TODO(gc): this limit is artifical just to keep code compilable
+ // TODO(1600): this limit is artificial just to keep code compilable
static const int kMaxMapPageIndex = 1 << 16;
-
- // Are map pointers encodable into map word?
- bool MapPointersEncodable() {
- return false;
- }
-
- // Should be called after forced sweep to find out if map space needs
- // compaction.
- bool NeedsCompaction(int live_maps) {
- return false; // TODO(gc): Bring back map compaction.
- }
virtual int RoundSizeDownToObjectAlignment(int size) {
if (IsPowerOf2(Map::kSize)) {
@@ -2356,17 +2338,15 @@
// Releases internal resources, frees objects in this space.
void TearDown();
- // Allocates a (non-FixedArray, non-Code) large object.
- MUST_USE_RESULT MaybeObject* AllocateRawData(int size_in_bytes);
- // Allocates a large Code object.
- MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
- // Allocates a large FixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
-
static intptr_t ObjectSizeFor(intptr_t chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return
0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
+
+ // Shared implementation of AllocateRaw, AllocateRawCode and
+ // AllocateRawFixedArray.
+ MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
+ Executability executable);
// Available bytes for objects in this space.
inline intptr_t Available();
@@ -2425,11 +2405,6 @@
int page_count_; // number of chunks
intptr_t objects_size_; // size of objects
- // Shared implementation of AllocateRaw, AllocateRawCode and
- // AllocateRawFixedArray.
- MUST_USE_RESULT MaybeObject* AllocateRawInternal(int object_size,
- Executability
executable);
-
friend class LargeObjectIterator;
public:
@@ -2457,7 +2432,7 @@
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
public:
- inline PointerChunkIterator();
+ inline explicit PointerChunkIterator(Heap* heap);
// Return NULL when the iterator is done.
MemoryChunk* next() {
=======================================
--- /branches/experimental/gc/src/store-buffer.cc Fri Jul 1 04:28:38 2011
+++ /branches/experimental/gc/src/store-buffer.cc Tue Aug 23 06:33:22 2011
@@ -84,9 +84,6 @@
hash_map_1_ = new uintptr_t[kHashMapLength];
hash_map_2_ = new uintptr_t[kHashMapLength];
- heap_->AddGCPrologueCallback(&GCPrologue, kGCTypeAll);
- heap_->AddGCEpilogueCallback(&GCEpilogue, kGCTypeAll);
-
ZapHashTables();
}
@@ -161,7 +158,7 @@
old_buffer_is_filtered_ = true;
bool page_has_scan_on_scavenge_flag = false;
- PointerChunkIterator it;
+ PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != NULL) {
if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
@@ -201,7 +198,7 @@
// Sample the store buffer to see if some pages are taking up a lot of
space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold)
{
- PointerChunkIterator it;
+ PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != NULL) {
chunk->set_store_buffer_counter(0);
@@ -267,7 +264,7 @@
bool StoreBuffer::PrepareForIteration() {
Compact();
- PointerChunkIterator it;
+ PointerChunkIterator it(heap_);
MemoryChunk* chunk;
bool page_has_scan_on_scavenge_flag = false;
while ((chunk = it.next()) != NULL) {
@@ -343,10 +340,9 @@
}
-void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) {
- // TODO(gc) ISOLATES MERGE
- HEAP->store_buffer()->ZapHashTables();
- HEAP->store_buffer()->during_gc_ = true;
+void StoreBuffer::GCPrologue() {
+ ZapHashTables();
+ during_gc_ = true;
}
@@ -403,10 +399,9 @@
}
-void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) {
- // TODO(gc) ISOLATES MERGE
- HEAP->store_buffer()->during_gc_ = false;
- HEAP->store_buffer()->Verify();
+void StoreBuffer::GCEpilogue() {
+ during_gc_ = false;
+ Verify();
}
@@ -604,7 +599,7 @@
if (callback_ != NULL) {
(*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
}
- PointerChunkIterator it;
+ PointerChunkIterator it(heap_);
MemoryChunk* chunk;
while ((chunk = it.next()) != NULL) {
if (chunk->scan_on_scavenge()) {
=======================================
--- /branches/experimental/gc/src/store-buffer.h Mon Aug 1 07:06:30 2011
+++ /branches/experimental/gc/src/store-buffer.h Tue Aug 23 06:33:22 2011
@@ -89,8 +89,9 @@
static const int kHashMapLength = 1 << kHashMapLengthLog2;
void Compact();
- static void GCPrologue(GCType type, GCCallbackFlags flags);
- static void GCEpilogue(GCType type, GCCallbackFlags flags);
+
+ void GCPrologue();
+ void GCEpilogue();
Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
=======================================
--- /branches/experimental/gc/src/x64/assembler-x64-inl.h Fri Jul 8
02:16:20 2011
+++ /branches/experimental/gc/src/x64/assembler-x64-inl.h Tue Aug 23
06:33:22 2011
@@ -244,7 +244,8 @@
Assembler::set_target_address_at(pc_, target);
Object* target_code = Code::GetCodeFromTargetAddress(target);
if (code != NULL) {
- // TODO(gc) We do not compact code pages.
+ // TODO(1550): We are passing NULL as a slot because code can never
be on an
+ // evacuation candidate.
code->GetHeap()->incremental_marking()->RecordWrite(
code, NULL, HeapObject::cast(target_code));
}
=======================================
--- /branches/experimental/gc/src/x64/code-stubs-x64.cc Thu Aug 18 09:12:57
2011
+++ /branches/experimental/gc/src/x64/code-stubs-x64.cc Tue Aug 23 06:33:22
2011
@@ -5494,8 +5494,6 @@
__ movq(arg2, Operand(address, 0));
}
__ LoadAddress(arg3, ExternalReference::isolate_address());
- // TODO(gc): Create a fast version of this C function that does not
duplicate
- // the checks done in the stub.
int argument_count = 3;
__ PrepareCallCFunction(argument_count);
if (mode == INCREMENTAL_COMPACTION) {
=======================================
--- /branches/experimental/gc/src/x64/macro-assembler-x64.cc Thu Aug 18
09:12:57 2011
+++ /branches/experimental/gc/src/x64/macro-assembler-x64.cc Tue Aug 23
06:33:22 2011
@@ -484,8 +484,8 @@
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
+ // TODO(1599): Do not call stubs from stubs that do not allow stub calls.
// ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
- // TODO(gc): Fix this!
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
=======================================
--- /branches/experimental/gc/test/cctest/test-mark-compact.cc Wed May 18
08:02:58 2011
+++ /branches/experimental/gc/test/cctest/test-mark-compact.cc Tue Aug 23
06:33:22 2011
@@ -231,7 +231,7 @@
}
-// TODO(gc): compaction of map space is temporary removed from GC.
+// TODO(1600): compaction of map space is temporarily removed from GC.
#if 0
static Handle<Map> CreateMap() {
return FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
=======================================
--- /branches/experimental/gc/test/cctest/test-spaces.cc Wed Aug 10
05:50:30 2011
+++ /branches/experimental/gc/test/cctest/test-spaces.cc Tue Aug 23
06:33:22 2011
@@ -241,7 +241,7 @@
int lo_size = Page::kPageSize;
- Object* obj = lo->AllocateRawData(lo_size)->ToObjectUnchecked();
+ Object* obj = lo->AllocateRaw(lo_size,
NOT_EXECUTABLE)->ToObjectUnchecked();
CHECK(obj->IsHeapObject());
HeapObject* ho = HeapObject::cast(obj);
@@ -254,7 +254,7 @@
while (true) {
intptr_t available = lo->Available();
- { MaybeObject* maybe_obj = lo->AllocateRawData(lo_size);
+ { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
if (!maybe_obj->ToObject(&obj)) break;
}
CHECK(lo->Available() < available);
@@ -262,5 +262,5 @@
CHECK(!lo->IsEmpty());
- CHECK(lo->AllocateRawData(lo_size)->IsFailure());
-}
+ CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
+}
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev