Revision: 7889
Author: [email protected]
Date: Fri May 13 04:32:05 2011
Log: Do inline object filtering (via page flags) before call to
RecordWriteStub.
Review URL: http://codereview.chromium.org/7000023
http://code.google.com/p/v8/source/detail?r=7889
Modified:
/branches/experimental/gc/src/ia32/code-stubs-ia32.cc
/branches/experimental/gc/src/ia32/full-codegen-ia32.cc
/branches/experimental/gc/src/ia32/macro-assembler-ia32-inl.h
/branches/experimental/gc/src/ia32/macro-assembler-ia32.cc
/branches/experimental/gc/src/ia32/macro-assembler-ia32.h
/branches/experimental/gc/src/incremental-marking.cc
/branches/experimental/gc/src/incremental-marking.h
/branches/experimental/gc/src/spaces-inl.h
/branches/experimental/gc/src/spaces.cc
/branches/experimental/gc/src/spaces.h
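The core idea of this change: rather than unconditionally calling RecordWriteStub at every store site, the generated code first masks the relevant addresses down to their page (MemoryChunk) headers and tests bits in each page's flags word, so uninteresting stores never leave the inline fast path. A minimal standalone C++ sketch of that filter follows; the flag bit positions, the page size, and the PageFlagSet helper are illustrative assumptions for the demo, not V8's actual layout (only kFlagsOffset = kPointerSize * 3 comes from the diff).

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Flag bits in the spirit of the MemoryChunk flags this patch adds. Bit
// positions are illustrative; the real ones follow the enum in spaces.h.
enum MemoryChunkFlags {
  POINTERS_TO_HERE_ARE_INTERESTING = 3,
  POINTERS_FROM_HERE_ARE_INTERESTING = 4,
  SCAN_ON_SCAVENGE = 5,
};

// Assumed layout: pages are 2^kPageSizeBits-aligned, and the flags word
// lives kFlagsOffset bytes into the page header (kPointerSize * 3 in the
// diff). The page size here is a demo placeholder, not V8's value.
constexpr int kPageSizeBits = 13;
constexpr std::uintptr_t kPageAlignmentMask =
    (std::uintptr_t{1} << kPageSizeBits) - 1;
constexpr std::size_t kFlagsOffset = sizeof(void*) * 3;

// The inline filter: clear the low address bits to reach the page header,
// then test one bit of its flags word. CheckPageFlag emits this very
// computation as an and/test/jcc sequence.
bool PageFlagSet(const void* object, MemoryChunkFlags flag) {
  std::uintptr_t chunk =
      reinterpret_cast<std::uintptr_t>(object) & ~kPageAlignmentMask;
  const std::uintptr_t* flags =
      reinterpret_cast<const std::uintptr_t*>(chunk + kFlagsOffset);
  return (*flags & (std::uintptr_t{1} << flag)) != 0;
}

int main() {
  // Simulate one aligned page whose header carries a flags word.
  const std::size_t kPageSize = kPageAlignmentMask + 1;
  char* page = static_cast<char*>(std::aligned_alloc(kPageSize, kPageSize));
  *reinterpret_cast<std::uintptr_t*>(page + kFlagsOffset) =
      std::uintptr_t{1} << POINTERS_FROM_HERE_ARE_INTERESTING;

  // Any object address within the page maps back to the same header.
  void* object = page + 256;
  assert(PageFlagSet(object, POINTERS_FROM_HERE_ARE_INTERESTING));
  assert(!PageFlagSet(object, SCAN_ON_SCAVENGE));
  std::free(page);
  return 0;
}

The and/test/jcc sequence that CheckPageFlag emits in macro-assembler-ia32-inl.h below is exactly this computation, with the page-alignment mask applied as an immediate.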
=======================================
--- /branches/experimental/gc/src/ia32/code-stubs-ia32.cc Tue May 10 04:21:11 2011
+++ /branches/experimental/gc/src/ia32/code-stubs-ia32.cc Fri May 13 04:32:05 2011
@@ -5987,25 +5987,16 @@
ASSERT(masm->get_opcode(-2) == kSkipNonIncrementalPartInstruction);
masm->set_opcode(-2, kTwoByteNopInstruction);
}
-
- if (FLAG_debug_code) {
- NearLabel ok;
- __ cmp(value_, Operand(address_, 0));
- __ j(equal, &ok);
- __ Abort("Registers did not match in write barrier");
- __ bind(&ok);
- }
if (emit_remembered_set_ == EMIT_REMEMBERED_SET) {
NearLabel skip;
- __ HasScanOnScavenge(object_, value_, &skip);
__ RememberedSetHelper(address_, value_, save_fp_regs_mode_);
__ bind(&skip);
}
__ ret(0);
__ bind(&skip_non_incremental_part);
-
+ __ mov(value_, Operand(address_, 0));
GenerateIncremental(masm);
}
@@ -6060,7 +6051,11 @@
// the scan_on_scavenge flag on the object's page?
if (emit_remembered_set_ == EMIT_REMEMBERED_SET) {
Label scan_on_scavenge;
- __ HasScanOnScavenge(regs_.object(), regs_.scratch0(), &scan_on_scavenge);
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &scan_on_scavenge);
GenerateIncrementalValueIsInNewSpaceObjectIsInOldSpaceRememberedSet(masm);
__ bind(&scan_on_scavenge);
}
=======================================
--- /branches/experimental/gc/src/ia32/full-codegen-ia32.cc Mon May 9 14:11:15 2011
+++ /branches/experimental/gc/src/ia32/full-codegen-ia32.cc Fri May 13 04:32:05 2011
@@ -3186,7 +3186,11 @@
NearLabel no_remembered_set;
__ InNewSpace(elements, temp, equal, &no_remembered_set);
- __ HasScanOnScavenge(elements, temp, &no_remembered_set);
+ __ CheckPageFlag(elements,
+ temp,
+ MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &no_remembered_set);
__ mov(object, elements);
// Since we are swapping two objects, the incremental marker is not disturbed,
=======================================
--- /branches/experimental/gc/src/ia32/macro-assembler-ia32-inl.h Mon May 9 14:11:15 2011
+++ /branches/experimental/gc/src/ia32/macro-assembler-ia32-inl.h Fri May 13 04:32:05 2011
@@ -33,14 +33,18 @@
namespace v8 {
namespace internal {
+
template<typename LabelType>
-void MacroAssembler::HasScanOnScavenge(Register object,
- Register scratch,
- LabelType* scan_on_scavenge) {
+void MacroAssembler::CheckPageFlag(
+ Register object,
+ Register scratch,
+ MemoryChunk::MemoryChunkFlags flag,
+ Condition cc,
+ LabelType* condition_met) {
Move(scratch, object);
and_(scratch, ~Page::kPageAlignmentMask);
- cmpb(Operand(scratch, MemoryChunk::kScanOnScavengeOffset), 0);
- j(not_equal, scan_on_scavenge);
+ test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(1 << flag));
+ j(cc, condition_met);
}
=======================================
--- /branches/experimental/gc/src/ia32/macro-assembler-ia32.cc Mon May 9 14:11:15 2011
+++ /branches/experimental/gc/src/ia32/macro-assembler-ia32.cc Fri May 13 04:32:05 2011
@@ -32,6 +32,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
+#include "macro-assembler-ia32-inl.h"
#include "runtime.h"
#include "serialize.h"
@@ -216,6 +217,15 @@
FLAG_incremental_marking == false) {
return;
}
+
+ if (FLAG_debug_code) {
+ NearLabel ok;
+ cmp(value, Operand(address, 0));
+ j(equal, &ok);
+ Abort("Registers did not match in write barrier");
+ bind(&ok);
+ }
+
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
NearLabel done;
@@ -226,13 +236,22 @@
test(value, Immediate(kSmiTagMask));
j(zero, &done);
}
+
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING,
+ zero,
+ &done);
+ CheckPageFlag(object,
+ value, // Used as scratch.
+ MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING,
+ zero,
+ &done);
RecordWriteStub stub(object, value, address, emit_remembered_set, fp_mode);
CallStub(&stub);
- if (smi_check == INLINE_SMI_CHECK) {
- bind(&done);
- }
+ bind(&done);
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
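Taken together, the two CheckPageFlag calls above put a two-test fast path in front of the stub call. A hypothetical C++ restatement, reusing the illustrative PageFlagSet helper from the sketch near the top of this mail (note the real generated code clobbers the value register as scratch, which is why the code-stubs hunk above reloads value_ from address_):

// Sketch only: fall through to RecordWriteStub iff the value's page
// tracks incoming pointers AND the object's page tracks outgoing ones.
bool NeedsRecordWriteStub(const void* object, const void* value) {
  return PageFlagSet(value, POINTERS_TO_HERE_ARE_INTERESTING) &&
         PageFlagSet(object, POINTERS_FROM_HERE_ARE_INTERESTING);
}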
=======================================
--- /branches/experimental/gc/src/ia32/macro-assembler-ia32.h Mon May 9 14:11:15 2011
+++ /branches/experimental/gc/src/ia32/macro-assembler-ia32.h Fri May 13 04:32:05 2011
@@ -88,11 +88,12 @@
Condition cc, // equal for new space, not_equal otherwise.
LabelType* branch);
- // Check if old-space object is on a scan-on-scavenge page.
- template <typename LabelType>
- void HasScanOnScavenge(Register object,
- Register scratch,
- LabelType* scan_on_scavenge);
+ template<typename LabelType>
+ void CheckPageFlag(Register object,
+ Register scratch,
+ MemoryChunk::MemoryChunkFlags flag,
+ Condition cc,
+ LabelType* condition_met);
// Check if an object has a given incremental marking colour. Also uses ecx!
// The colour bits are found by splitting the address at the bit offset
=======================================
--- /branches/experimental/gc/src/incremental-marking.cc Tue May 10 05:50:32 2011
+++ /branches/experimental/gc/src/incremental-marking.cc Fri May 13 04:32:05 2011
@@ -135,12 +135,65 @@
};
-void IncrementalMarking::ClearMarkbits(PagedSpace* space) {
+void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
+ bool is_marking) {
+ if (is_marking) {
+ chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else if (chunk->owner()->identity() == CELL_SPACE ||
+ chunk->scan_on_scavenge()) {
+ chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
+}
+
+
+void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
+ bool is_marking) {
+ chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+ if (is_marking) {
+ chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ } else {
+ chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+ }
+}
+
+
+void IncrementalMarking::DeactivateWriteBarrierForSpace(PagedSpace* space) {
PageIterator it(space);
-
+ while (it.has_next()) {
+ Page* p = it.next();
+ SetOldSpacePageFlags(p, false);
+ }
+}
+
+
+void IncrementalMarking::DeactivateWriteBarrier() {
+ DeactivateWriteBarrierForSpace(heap_->old_pointer_space());
+ DeactivateWriteBarrierForSpace(heap_->old_data_space());
+ DeactivateWriteBarrierForSpace(heap_->cell_space());
+ DeactivateWriteBarrierForSpace(heap_->map_space());
+ DeactivateWriteBarrierForSpace(heap_->code_space());
+
+ SetNewSpacePageFlags(heap_->new_space()->ActivePage(), false);
+
+ LargePage* lop = heap_->lo_space()->first_page();
+ while (lop->is_valid()) {
+ SetOldSpacePageFlags(lop, false);
+ lop = lop->next_page();
+ }
+}
+
+
+void IncrementalMarking::ClearMarkbits(PagedSpace* space) {
+ PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
p->markbits()->Clear();
+ SetOldSpacePageFlags(p, true);
}
}
@@ -152,6 +205,14 @@
ClearMarkbits(heap_->cell_space());
ClearMarkbits(heap_->map_space());
ClearMarkbits(heap_->code_space());
+
+ SetNewSpacePageFlags(heap_->new_space()->ActivePage(), true);
+
+ LargePage* lop = heap_->lo_space()->first_page();
+ while (lop->is_valid()) {
+ SetOldSpacePageFlags(lop, true);
+ lop = lop->next_page();
+ }
}
@@ -368,7 +429,10 @@
heap_->new_space()->LowerInlineAllocationLimit(0);
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
- if (IsMarking()) PatchIncrementalMarkingRecordWriteStubs(false);
+ if (IsMarking()) {
+ PatchIncrementalMarkingRecordWriteStubs(false);
+ DeactivateWriteBarrier();
+ }
heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
state_ = STOPPED;
}
@@ -381,6 +445,7 @@
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
PatchIncrementalMarkingRecordWriteStubs(false);
+ DeactivateWriteBarrier();
ASSERT(marking_deque_.IsEmpty());
heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}
=======================================
--- /branches/experimental/gc/src/incremental-marking.h Tue May 10 04:21:11 2011
+++ /branches/experimental/gc/src/incremental-marking.h Fri May 13 04:32:05 2011
@@ -189,6 +189,15 @@
return steps_took_;
}
+ inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
+ SetOldSpacePageFlags(chunk, IsMarking());
+ }
+
+ inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
+ SetNewSpacePageFlags(chunk, IsMarking());
+ }
+
+
private:
void set_should_hurry(bool val) {
should_hurry_ = val;
@@ -210,6 +219,11 @@
void StartMarking();
+ void DeactivateWriteBarrierForSpace(PagedSpace* space);
+ void DeactivateWriteBarrier();
+
+ static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking);
+ static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
Heap* heap_;
=======================================
--- /branches/experimental/gc/src/spaces-inl.h Mon May 9 14:11:15 2011
+++ /branches/experimental/gc/src/spaces-inl.h Fri May 13 04:32:05 2011
@@ -127,6 +127,9 @@
owner->IncreaseCapacity(Page::kObjectAreaSize);
owner->Free(page->ObjectAreaStart(),
page->ObjectAreaEnd() - page->ObjectAreaStart());
+
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
return page;
}
@@ -140,11 +143,13 @@
void MemoryChunk::set_scan_on_scavenge(bool scan) {
if (scan) {
- if (!scan_on_scavenge_) heap_->increment_scan_on_scavenge_pages();
+ if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
+ SetFlag(SCAN_ON_SCAVENGE);
} else {
- if (scan_on_scavenge_) heap_->decrement_scan_on_scavenge_pages();
- }
- scan_on_scavenge_ = scan;
+ if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
+ ClearFlag(SCAN_ON_SCAVENGE);
+ }
+ heap_->incremental_marking()->SetOldSpacePageFlags(this);
}
@@ -267,6 +272,12 @@
return obj;
}
+
+
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+ heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+ return static_cast<LargePage*>(chunk);
+}
intptr_t LargeObjectSpace::Available() {
=======================================
--- /branches/experimental/gc/src/spaces.cc Tue May 10 04:21:11 2011
+++ /branches/experimental/gc/src/spaces.cc Fri May 13 04:32:05 2011
@@ -398,6 +398,7 @@
NOT_EXECUTABLE,
heap->new_space());
chunk->initialize_scan_on_scavenge(true);
+ heap->incremental_marking()->SetNewSpacePageFlags(chunk);
return static_cast<NewSpacePage*>(chunk);
}
@@ -417,7 +418,6 @@
chunk->set_owner(owner);
chunk->markbits()->Clear();
chunk->initialize_scan_on_scavenge(false);
- ASSERT(OFFSET_OF(MemoryChunk, scan_on_scavenge_) == kScanOnScavengeOffset);
ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
@@ -930,6 +930,10 @@
SemiSpace tmp = from_space_;
from_space_ = to_space_;
to_space_ = tmp;
+
+ NewSpacePage* old_active_page = from_space_.current_page();
+ NewSpacePage* new_active_page = to_space_.current_page();
+ new_active_page->CopyFlagsFrom(old_active_page);
}
=======================================
--- /branches/experimental/gc/src/spaces.h Tue May 10 04:21:11 2011
+++ /branches/experimental/gc/src/spaces.h Fri May 13 04:32:05 2011
@@ -339,8 +339,14 @@
kFailureTag);
}
- bool scan_on_scavenge() { return scan_on_scavenge_; }
- void initialize_scan_on_scavenge(bool scan) { scan_on_scavenge_ = scan; }
+ bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
+ void initialize_scan_on_scavenge(bool scan) {
+ if (scan) {
+ SetFlag(SCAN_ON_SCAVENGE);
+ } else {
+ ClearFlag(SCAN_ON_SCAVENGE);
+ }
+ }
inline void set_scan_on_scavenge(bool scan);
int store_buffer_counter() { return store_buffer_counter_; }
@@ -360,6 +366,9 @@
IS_EXECUTABLE,
WAS_SWEPT_CONSERVATIVELY,
CONTAINS_ONLY_DATA,
+ POINTERS_TO_HERE_ARE_INTERESTING,
+ POINTERS_FROM_HERE_ARE_INTERESTING,
+ SCAN_ON_SCAVENGE,
NUM_MEMORY_CHUNK_FLAGS
};
@@ -374,6 +383,10 @@
bool IsFlagSet(int flag) {
return (flags_ & (1 << flag)) != 0;
}
+
+ void CopyFlagsFrom(MemoryChunk* chunk) {
+ flags_ = chunk->flags_;
+ }
static const intptr_t kAlignment = (1 << kPageSizeBits);
@@ -448,7 +461,6 @@
inline Heap* heap() { return heap_; }
static const int kFlagsOffset = kPointerSize * 3;
- static const int kScanOnScavengeOffset = kPointerSize * 6;
protected:
MemoryChunk* next_chunk_;
@@ -460,10 +472,6 @@
// in a fixed array.
Address owner_;
Heap* heap_;
- // This flag indicates that the page is not being tracked by the store buffer.
- // At any point where we have to iterate over pointers to new space, we must
- // search this page for pointers to new space.
- bool scan_on_scavenge_;
// Used by the store buffer to keep track of which pages to mark scan-on-
// scavenge.
int store_buffer_counter_;
@@ -586,11 +594,7 @@
set_next_chunk(page);
}
private:
- static LargePage* Initialize(Heap* heap,
- MemoryChunk* chunk) {
- // TODO(gc) ISOLATESMERGE initialize chunk to point to heap?
- return static_cast<LargePage*>(chunk);
- }
+ static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
friend class MemoryAllocator;
};
@@ -1861,6 +1865,10 @@
inline intptr_t inline_alloction_limit_step() {
return inline_alloction_limit_step_;
}
+
+ NewSpacePage* ActivePage() {
+ return to_space_.current_page();
+ }
private:
Address chunk_base_;
@@ -2130,6 +2138,8 @@
void Protect();
void Unprotect();
#endif
+
+ LargePage* first_page() { return first_page_; }
#ifdef DEBUG
virtual void Verify();