Revision: 8270
Author: [email protected]
Date: Fri Jun 10 14:58:26 2011
Log: Incremental mode now works for x64. The only remaining difference
between IA32 and x64 is that on IA32 we can mark pointer-free
objects black without going into runtime code.
Also fixed a bug where spaces with heightened alignment requirements
could get corrupted by the free-list chopping that was implemented
to get timely marking increments.
Review URL: http://codereview.chromium.org/7104107
http://code.google.com/p/v8/source/detail?r=8270
Modified:
/branches/experimental/gc/src/arm/code-stubs-arm.h
/branches/experimental/gc/src/arm/full-codegen-arm.cc
/branches/experimental/gc/src/arm/macro-assembler-arm.cc
/branches/experimental/gc/src/arm/macro-assembler-arm.h
/branches/experimental/gc/src/flag-definitions.h
/branches/experimental/gc/src/heap.cc
/branches/experimental/gc/src/ia32/code-stubs-ia32.cc
/branches/experimental/gc/src/ia32/code-stubs-ia32.h
/branches/experimental/gc/src/ia32/full-codegen-ia32.cc
/branches/experimental/gc/src/ia32/macro-assembler-ia32.cc
/branches/experimental/gc/src/ia32/macro-assembler-ia32.h
/branches/experimental/gc/src/ia32/stub-cache-ia32.cc
/branches/experimental/gc/src/mark-compact.h
/branches/experimental/gc/src/spaces.cc
/branches/experimental/gc/src/spaces.h
/branches/experimental/gc/src/x64/code-stubs-x64.cc
/branches/experimental/gc/src/x64/code-stubs-x64.h
/branches/experimental/gc/src/x64/full-codegen-x64.cc
/branches/experimental/gc/src/x64/lithium-codegen-x64.cc
/branches/experimental/gc/src/x64/macro-assembler-x64.cc
/branches/experimental/gc/src/x64/macro-assembler-x64.h
/branches/experimental/gc/src/x64/stub-cache-x64.cc
/branches/experimental/gc/test/cctest/test-serialize.cc
=======================================
--- /branches/experimental/gc/src/arm/code-stubs-arm.h Mon Jun 6 04:22:38 2011
+++ /branches/experimental/gc/src/arm/code-stubs-arm.h Fri Jun 10 14:58:26 2011
@@ -533,7 +533,17 @@
friend class RecordWriteStub;
};
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
=======================================
--- /branches/experimental/gc/src/arm/full-codegen-arm.cc Tue Jun 7 02:28:08 2011
+++ /branches/experimental/gc/src/arm/full-codegen-arm.cc Fri Jun 10 14:58:26 2011
@@ -3275,6 +3275,10 @@
// Possible optimization: check that both values are Smis
// (OR them together and test against the Smi mask).
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(
index1, scratch2, kDontSaveFPRegs, MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(
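
A minimal sketch of the optimization suggested in the comment above, assuming
V8's pointer tagging where a Smi has a clear low bit; BothSmi is an
illustrative name, not V8 API (kSmiTagMask does match V8's value of 1):

  #include <stdint.h>

  // Two tagged words are both Smis exactly when OR-ing them leaves the
  // heap-object tag bit clear.
  static inline bool BothSmi(intptr_t a, intptr_t b) {
    const intptr_t kSmiTagMask = 1;  // assumed tagging: low bit 0 == Smi
    return ((a | b) & kSmiTagMask) == 0;
  }
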
=======================================
--- /branches/experimental/gc/src/arm/macro-assembler-arm.cc Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/arm/macro-assembler-arm.cc Fri Jun 10 14:58:26 2011
@@ -3128,11 +3128,11 @@
}
-void MacroAssembler::IsBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* is_black) {
- HasColor(object, scratch0, scratch1, is_black, 1, 0); // kBlackBitPattern.
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black) {
+ HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}
@@ -3164,6 +3164,27 @@
b(second_bit == 1 ? ne : eq, has_color);
bind(&other_color);
}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object) {
+ Label is_data_object;
+ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ b(eq, &is_data_object);
+ ASSERT(kConsStringTag == 1 && kIsConsStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ tst(scratch, Operand(kIsConsStringMask | kIsNotStringMask));
+ b(ne, not_data_object);
+ bind(&is_data_object);
+}
void MacroAssembler::GetMarkBits(Register addr_reg,
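
For reference, a hedged C++ restatement of the predicate the new assembler
sequence computes, using the tag values asserted above; the function and
parameter names below are illustrative, not V8 API:

  static const int kIsConsStringMask = 1;    // matches the ASSERT above
  static const int kIsNotStringMask = 0x80;  // matches the ASSERT above

  // Conservative: recognizes only heap numbers and non-cons strings, the
  // pointer-free cases the incremental write barrier cares about.
  static bool LooksLikeDataObject(bool has_heap_number_map, int instance_type) {
    if (has_heap_number_map) return true;  // heap numbers hold no GC pointers
    // A string (kIsNotStringMask bit clear) that is not a cons string
    // (kIsConsStringMask bit clear) contains no GC pointers.
    return (instance_type & (kIsConsStringMask | kIsNotStringMask)) == 0;
  }
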
=======================================
--- /branches/experimental/gc/src/arm/macro-assembler-arm.h Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/arm/macro-assembler-arm.h Fri Jun 10 14:58:26 2011
@@ -191,18 +191,23 @@
Condition cc,
Label* condition_met);
- // Check if object is in new space.
- // scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
- Label* branch);
-
- // Check if an object has a given incremental marking color. The color bits
- // are found by splitting the address at the bit offset indicated by the
- // mask: bits that are zero in the mask are used for the address of the
- // bitmap, and bits that are one in the mask are used for the index of the
- // bit.
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, ne, branch);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch) {
+ InNewSpace(object, scratch, eq, branch);
+ }
+
+ // Check if an object has a given incremental marking color.
void HasColor(Register object,
Register scratch0,
Register scratch1,
@@ -210,10 +215,10 @@
int first_bit,
int second_bit);
- void IsBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* is_black);
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
@@ -226,12 +231,11 @@
Label* object_is_white_and_not_data,
Label::Distance distance);
- // Checks whether an object is data-only, ie it does need to be scanned by the
- // garbage collector.
- void IsDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance);
+ // Detects conservatively whether an object is data-only, i.e. it does not
+ // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -1138,6 +1142,12 @@
Register scratch1,
Register scratch2);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cond, // eq for new space, ne otherwise.
+ Label* branch);
+
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Leaves addr_reg unchanged.
=======================================
--- /branches/experimental/gc/src/flag-definitions.h Tue May 24 05:03:26 2011
+++ /branches/experimental/gc/src/flag-definitions.h Fri Jun 10 14:58:26 2011
@@ -266,10 +266,10 @@
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, false,
"flush code that we expect not to use again before full gc")
-#ifdef V8_TARGET_ARCH_IA32
-DEFINE_bool(incremental_marking, true, "use incremental marking")
-#else
+#ifdef V8_TARGET_ARCH_ARM
DEFINE_bool(incremental_marking, false, "use incremental marking")
+#else
+DEFINE_bool(incremental_marking, true, "use incremental marking")
#endif
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
=======================================
--- /branches/experimental/gc/src/heap.cc Tue Jun 7 02:39:09 2011
+++ /branches/experimental/gc/src/heap.cc Fri Jun 10 14:58:26 2011
@@ -4626,6 +4626,10 @@
Object* two_pointer_filler_map = HEAP->two_pointer_filler_map();
while (visitable_end < end_of_page) {
+#ifdef DEBUG
+ int offset = visitable_start - page->ObjectAreaStart();
+ ASSERT(offset == space->RoundSizeDownToObjectAlignment(offset));
+#endif
Object* o = *reinterpret_cast<Object**>(visitable_end);
// Skip fillers but not things that look like fillers in the special
// garbage section which can contain anything.
=======================================
--- /branches/experimental/gc/src/ia32/code-stubs-ia32.cc Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/ia32/code-stubs-ia32.cc Fri Jun 10 14:58:26 2011
@@ -6267,10 +6267,9 @@
Label dont_need_remembered_set;
__ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ InNewSpace(regs_.scratch0(),
- regs_.scratch0(),
- zero,
- &dont_need_remembered_set);
+ __ JumpIfNotInNewSpace(regs_.scratch0(),
+ regs_.scratch0(),
+ &dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
@@ -6281,7 +6280,7 @@
// First notify the incremental marker if necessary, then update the
// remembered set.
CheckNeedsToInformIncrementalMarker(
- masm, kRememberedSetOnNoNeedToInformIncrementalMarker);
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker);
InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(
@@ -6300,8 +6299,6 @@
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
- // TODO(gc) we are assuming that xmm registers are not modified by
- // the C function we are calling.
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
@@ -6326,14 +6323,14 @@
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
- __ IsBlack(regs_.object(),
- regs_.scratch0(),
- regs_.scratch1(),
- &object_is_black,
- Label::kNear);
+ __ JumpIfBlack(regs_.object(),
+ regs_.scratch0(),
+ regs_.scratch1(),
+ &object_is_black,
+ Label::kNear);
regs_.Restore(masm);
- if (on_no_need == kRememberedSetOnNoNeedToInformIncrementalMarker) {
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(
address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
} else {
@@ -6356,7 +6353,7 @@
__ pop(regs_.object());
regs_.Restore(masm);
- if (on_no_need == kRememberedSetOnNoNeedToInformIncrementalMarker) {
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(
address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
} else {
=======================================
--- /branches/experimental/gc/src/ia32/code-stubs-ia32.h Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/ia32/code-stubs-ia32.h Fri Jun 10 14:58:26 2011
@@ -707,7 +707,7 @@
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
- kRememberedSetOnNoNeedToInformIncrementalMarker
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void Generate(MacroAssembler* masm);
=======================================
--- /branches/experimental/gc/src/ia32/full-codegen-ia32.cc Mon Jun 6 04:22:38 2011
+++ /branches/experimental/gc/src/ia32/full-codegen-ia32.cc Fri Jun 10 14:58:26 2011
@@ -3203,10 +3203,13 @@
not_zero,
&no_remembered_set,
Label::kNear);
-
- __ mov(object, elements);
- // Since we are swapping two objects, the incremental marker is not disturbed,
- // so we don't call the stub that handles this.
+ // Possible optimization: check that both values are Smis
+ // (OR them together and test against the Smi mask).
+
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(
index_1, temp, kDontSaveFPRegs, MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(
=======================================
--- /branches/experimental/gc/src/ia32/macro-assembler-ia32.cc Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/ia32/macro-assembler-ia32.cc Fri Jun 10 14:58:26 2011
@@ -2289,13 +2289,13 @@
}
-void MacroAssembler::IsBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* is_black,
- Label::Distance is_black_near) {
+void MacroAssembler::JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_near) {
HasColor(object, scratch0, scratch1,
- is_black, is_black_near,
+ on_black, on_black_near,
1, 0); // kBlackBitPattern.
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}
@@ -2329,21 +2329,24 @@
}
-void MacroAssembler::IsDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance) {
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+ Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance) {
Label is_data_object;
mov(scratch, FieldOperand(value, HeapObject::kMapOffset));
cmp(scratch, FACTORY->heap_number_map());
j(equal, &is_data_object, Label::kNear);
ASSERT(kConsStringTag == 1 && kIsConsStringMask == 1);
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object that
- // doesn't need scanning.
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
kIsConsStringMask | kIsNotStringMask);
- // Jump if we need to mark it grey and push it.
j(not_zero, not_data_object, not_data_object_distance);
bind(&is_data_object);
}
@@ -2356,10 +2359,11 @@
mov(bitmap_reg, Operand(addr_reg));
and_(bitmap_reg, ~Page::kPageAlignmentMask);
mov(ecx, Operand(addr_reg));
- shr(ecx, Bitmap::kBitsPerCellLog2);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shr(ecx, shift);
and_(ecx,
- (Page::kPageAlignmentMask >> Bitmap::kBitsPerCellLog2) &
- ~(kPointerSize - 1));
+ (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
add(bitmap_reg, Operand(ecx));
mov(ecx, Operand(addr_reg));
@@ -2406,7 +2410,7 @@
}
// Value is white. We check whether it is data that doesn't need scanning.
- IsDataObject(value, ecx, value_is_white_and_not_data, distance);
+ JumpIfDataObject(value, ecx, value_is_white_and_not_data, distance);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
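
To make the new shift concrete: on IA32, kPointerSizeLog2 == 2,
Bitmap::kBitsPerCellLog2 == 5 and Bitmap::kBytesPerCellLog2 == 2, so the
shift is 5. A self-contained sketch of the arithmetic GetMarkBits now
performs (function and parameter names are illustrative, not V8 API):

  #include <stdint.h>

  // For an address on a page, compute the byte offset of the 32-bit bitmap
  // cell holding its mark bit, and the bit's index within that cell. One
  // mark bit per pointer-sized word; cells stay 4-byte aligned.
  static void MarkBitPosition(uint32_t addr, uint32_t page_alignment_mask,
                              uint32_t* cell_offset, uint32_t* bit_index) {
    const int kPointerSizeLog2 = 2;
    const int kBitsPerCellLog2 = 5;
    const int kBytesPerCellLog2 = 2;
    const int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
    // Byte offset of the cell, masked to stay inside the page, cell-aligned.
    *cell_offset = (addr >> shift) &
        ((page_alignment_mask >> shift) & ~((1u << kBytesPerCellLog2) - 1));
    // Index of this word's mark bit within the 32-bit cell.
    *bit_index = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  }
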
=======================================
--- /branches/experimental/gc/src/ia32/macro-assembler-ia32.h Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/ia32/macro-assembler-ia32.h Fri Jun 10 14:58:26 2011
@@ -93,17 +93,25 @@
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance = Label::kFar);
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, zero, branch, distance);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, not_zero, branch, distance);
+ }
// Check if an object has a given incremental marking color. Also uses ecx!
- // The color bits are found by splitting the address at the bit offset
- // indicated by the mask: bits that are zero in the mask are used for the
- // address of the bitmap, and bits that are one in the mask are used for the
- // index of the bit.
void HasColor(Register object,
Register scratch0,
Register scratch1,
@@ -112,11 +120,11 @@
int first_bit,
int second_bit);
- void IsBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* is_black,
- Label::Distance is_black_distance = Label::kFar);
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_distance = Label::kFar);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
@@ -129,12 +137,12 @@
Label* object_is_white_and_not_data,
Label::Distance distance);
- // Checks whether an object is data-only, ie it does need to be scanned by the
- // garbage collector.
- void IsDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance);
+ // Detects conservatively whether an object is data-only, i.e. it does not
+ // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -785,6 +793,13 @@
Register scratch,
bool gc_allowed);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* condition_met,
+ Label::Distance condition_met_distance = Label::kFar);
+
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Uses ecx as scratch and leaves addr_reg
=======================================
--- /branches/experimental/gc/src/ia32/stub-cache-ia32.cc Wed May 25 07:05:16 2011
+++ /branches/experimental/gc/src/ia32/stub-cache-ia32.cc Fri Jun 10 14:58:26 2011
@@ -1547,7 +1547,6 @@
Immediate(Smi::FromInt(kAllocationDelta)));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
}
=======================================
--- /branches/experimental/gc/src/mark-compact.h Wed May 18 08:02:58 2011
+++ /branches/experimental/gc/src/mark-compact.h Fri Jun 10 14:58:26 2011
@@ -129,6 +129,16 @@
IMPOSSIBLE_COLOR
};
+ static const char* ColorName(ObjectColor color) {
+ switch (color) {
+ case BLACK_OBJECT: return "black";
+ case WHITE_OBJECT: return "white";
+ case GREY_OBJECT: return "grey";
+ case IMPOSSIBLE_COLOR: return "impossible";
+ }
+ return "error";
+ }
+
static ObjectColor Color(HeapObject* obj) {
return Color(Marking::MarkBitFrom(obj));
}
=======================================
--- /branches/experimental/gc/src/spaces.cc Tue Jun 7 05:30:48 2011
+++ /branches/experimental/gc/src/spaces.cc Fri Jun 10 14:58:26 2011
@@ -1736,13 +1736,14 @@
if (new_node_size - size_in_bytes > kThreshold &&
HEAP->incremental_marking()->IsMarkingIncomplete() &&
FLAG_incremental_marking_steps) {
+ int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
- owner_->Free(new_node->address() + size_in_bytes + kThreshold,
- new_node_size - size_in_bytes - kThreshold);
+ owner_->Free(new_node->address() + size_in_bytes + linear_size,
+ new_node_size - size_in_bytes - linear_size);
owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + kThreshold);
+ new_node->address() + size_in_bytes + linear_size);
} else {
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
@@ -1843,7 +1844,7 @@
bool PagedSpace::ReserveSpace(int size_in_bytes) {
ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
- ASSERT(size_in_bytes == RoundUp(size_in_bytes, kPointerSize));
+ ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
Address current_top = allocation_info_.top;
Address new_top = current_top + size_in_bytes;
if (new_top <= allocation_info_.limit) return true;
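
A made-up numeric illustration of the bug this hunk fixes: chopping the
free-list node at a fixed kThreshold can land mid-object in a space whose
object size is not a power of two, so the threshold is now rounded down to
the owner's object alignment first. All constants below are illustrative,
not V8's actual values:

  static int AlignedLinearSize() {
    const int kThreshold = 32 * 1024;  // hypothetical chop size
    const int kObjectSize = 88;        // hypothetical fixed object size
    // 32768 is not a multiple of 88; chopping there would leave a free block
    // whose boundary falls mid-object. Rounding down first keeps both the
    // linear area and the freed tail object-aligned.
    return (kThreshold / kObjectSize) * kObjectSize;  // == 32736
  }
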
=======================================
--- /branches/experimental/gc/src/spaces.h Tue Jun 7 02:39:09 2011
+++ /branches/experimental/gc/src/spaces.h Fri Jun 10 14:58:26 2011
@@ -170,6 +170,8 @@
static const uint32_t kBitsPerCell = 32;
static const uint32_t kBitsPerCellLog2 = 5;
static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+ static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
+ static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
static const size_t kLength =
(1 << kPageSizeBits) >> (kPointerSizeLog2);
@@ -652,6 +654,14 @@
// Returns size of objects. Can differ from the allocated size
// (e.g. see LargeObjectSpace).
virtual intptr_t SizeOfObjects() { return Size(); }
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (id_ == CODE_SPACE) {
+ return RoundDown(size, kCodeAlignment);
+ } else {
+ return RoundDown(size, kPointerSize);
+ }
+ }
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
@@ -2200,6 +2210,14 @@
bool NeedsCompaction(int live_maps) {
return false; // TODO(gc): Bring back map compaction.
}
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (IsPowerOf2(Map::kSize)) {
+ return RoundDown(size, Map::kSize);
+ } else {
+ return (size / Map::kSize) * Map::kSize;
+ }
+ }
protected:
#ifdef DEBUG
@@ -2230,6 +2248,14 @@
CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
: FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
{}
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
+ return RoundDown(size, JSGlobalPropertyCell::kSize);
+ } else {
+ return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
+ }
+ }
protected:
#ifdef DEBUG
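
The overrides above all share one pattern; a minimal standalone sketch
(RoundDownToObjectSize is an illustrative name), where the power-of-two case
can use bit masking and the general case falls back to integer division:

  // Round size down to a multiple of object_size (object_size > 0).
  static inline int RoundDownToObjectSize(int size, int object_size) {
    bool power_of_two = (object_size & (object_size - 1)) == 0;
    if (power_of_two) {
      return size & ~(object_size - 1);  // equivalent to RoundDown()
    }
    return (size / object_size) * object_size;
  }
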
=======================================
--- /branches/experimental/gc/src/x64/code-stubs-x64.cc Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/x64/code-stubs-x64.cc Fri Jun 10 14:58:26 2011
@@ -298,24 +298,7 @@
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
- // We don't allow a GC during a store buffer overflow so there is no need to
- // store the registers in any particular way, but we do have to store and
- // restore them.
- Register saved_regs[] =
- { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
- const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
- for (int i = 0; i < kNumberOfSavedRegs; i++) {
- __ push(saved_regs[i]);
- }
- // R12 to r15 are callee save on all platforms.
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- __ subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movsd(Operand(rsp, i * kDoubleSize), reg);
- }
- }
+ __ PushCallerSaved(save_doubles_);
const int argument_count = 1;
__ PrepareCallCFunction(argument_count);
#ifdef _WIN64
@@ -326,17 +309,7 @@
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
- if (save_doubles_ == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
- XMMRegister reg = XMMRegister::from_code(i);
- __ movsd(reg, Operand(rsp, i * kDoubleSize));
- }
- __ addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- }
- for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
- __ pop(saved_regs[i]);
- }
+ __ PopCallerSaved(save_doubles_);
__ ret(0);
}
@@ -5206,7 +5179,106 @@
}
__ bind(&skip_non_incremental_part);
- __ int3();
+ GenerateIncremental(masm);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm) {
+ regs_.Save(masm);
+
+ if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+ Label dont_need_remembered_set;
+
+ __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ JumpIfNotInNewSpace(regs_.scratch0(),
+ regs_.scratch0(),
+ &dont_need_remembered_set);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch0(),
+ MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &dont_need_remembered_set);
+
+ // First notify the incremental marker if necessary, then update the
+ // remembered set.
+ CheckNeedsToInformIncrementalMarker(
+ masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ RememberedSetHelper(
+ address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+
+ __ bind(&dont_need_remembered_set);
+ }
+
+ CheckNeedsToInformIncrementalMarker(
+ masm, kReturnOnNoNeedToInformIncrementalMarker);
+ InformIncrementalMarker(masm);
+ regs_.Restore(masm);
+ __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+ regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+#ifdef _WIN64
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+ bool save_address = arg1.is(regs_.address());
+ if (save_address) {
+ __ movq(arg3, regs_.address());
+ }
+ __ Move(arg1, regs_.object());
+ if (save_address) {
+ __ movq(arg2, Operand(arg3, 0));
+ } else {
+ __ movq(arg2, Operand(regs_.address(), 0));
+ }
+ __ LoadAddress(arg3, ExternalReference::isolate_address());
+ // TODO(gc): Create a fast version of this C function that does not duplicate
+ // the checks done in the stub.
+ int argument_count = 3;
+ __ PrepareCallCFunction(argument_count);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ RecordWriteStub::OnNoNeedToInformIncrementalMarker on_no_need) {
+ Label on_black;
+
+ // Let's look at the color of the object: If it is not black we don't have
+ // to inform the incremental marker.
+ __ JumpIfBlack(regs_.object(),
+ regs_.scratch0(),
+ regs_.scratch1(),
+ &on_black,
+ Label::kNear);
+
+ regs_.Restore(masm);
+ if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+ __ RememberedSetHelper(
+ address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+ } else {
+ __ ret(0);
+ }
+
+ __ bind(&on_black);
+
+ // TODO(gc): Add call to EnsureNotWhite here.
+ // Fall through when we need to inform the incremental marker.
}
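
For orientation, the control flow this stub now generates, restated as a
hedged C++ sketch; every helper below is a descriptive placeholder, not V8
API:

  struct HeapObject;
  struct Object;
  bool InNewSpace(Object* value);
  bool PageIsScanOnScavenge(HeapObject* object);
  bool IsBlack(HeapObject* object);
  void InformIncrementalMarker(HeapObject* object, Object** slot, Object* value);
  void RememberedSetInsert(Object** slot);

  // object is the holder being written into; value == *slot.
  void RecordWriteIncremental(bool emit_remembered_set, HeapObject* object,
                              Object** slot, Object* value) {
    if (emit_remembered_set && InNewSpace(value) &&
        !PageIsScanOnScavenge(object)) {
      // Old-to-new store: inform the marker only if the holder is already
      // black, then record the slot in the store buffer and return.
      if (IsBlack(object)) InformIncrementalMarker(object, slot, value);
      RememberedSetInsert(slot);
      return;
    }
    // No remembered-set work needed; only the marker may still care.
    if (IsBlack(object)) InformIncrementalMarker(object, slot, value);
  }
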
=======================================
--- /branches/experimental/gc/src/x64/code-stubs-x64.h Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/x64/code-stubs-x64.h Fri Jun 10 14:58:26 2011
@@ -645,35 +645,16 @@
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved.
- // The three scratch registers (incl. rcx)
- // will be restored by other means so we don't bother pushing them here.
+ // The three scratch registers (incl. rcx) will be restored by other means
+ // so we don't bother pushing them here. Rbx, rbp and r12-15 are callee
+ // save and don't need to be preserved.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->int3(); // TODO(gc): Save the caller save registers.
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- masm->subq(rsp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
- // Save all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movsd(Operand(rsp, (i - 1) * kDoubleSize), reg);
- }
- }
+ masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
}
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- CpuFeatures::Scope scope(SSE2);
- // Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movsd(reg, Operand(rsp, (i - 1) * kDoubleSize));
- }
- masm->addq(rsp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
- }
- masm->int3(); // TODO(gc): Restore the caller save registers.
+ masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
}
inline Register object() { return object_; }
@@ -708,8 +689,17 @@
friend class RecordWriteStub;
};
+ enum OnNoNeedToInformIncrementalMarker {
+ kReturnOnNoNeedToInformIncrementalMarker,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+ };
+
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm);
+ void CheckNeedsToInformIncrementalMarker(
+ MacroAssembler* masm,
+ OnNoNeedToInformIncrementalMarker on_no_need);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
=======================================
--- /branches/experimental/gc/src/x64/full-codegen-x64.cc Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/x64/full-codegen-x64.cc Fri Jun 10 14:58:26 2011
@@ -3145,17 +3145,27 @@
__ movq(Operand(index_2, 0), object);
__ movq(Operand(index_1, 0), temp);
- Label new_space;
- __ InNewSpace(elements, temp, equal, &new_space);
-
- // Since we are swapping two objects, the incremental marker is not disturbed,
- // so we don't call the stub that handles this.
+ Label no_remembered_set;
+ __ CheckPageFlag(elements,
+ temp,
+ MemoryChunk::SCAN_ON_SCAVENGE,
+ not_zero,
+ &no_remembered_set,
+ Label::kNear);
+ // Possible optimization: check that both values are Smis
+ // (OR them together and test against the Smi mask).
+
+ // We are swapping two objects in an array and the incremental marker never
+ // pauses in the middle of scanning a single object. Therefore the
+ // incremental marker is not disturbed, so we don't need to call the
+ // RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(
index_1, temp, kDontSaveFPRegs, MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(
index_2, temp, kDontSaveFPRegs, MacroAssembler::kFallThroughAtEnd);
- __ bind(&new_space);
+ __ bind(&no_remembered_set);
+
// We are done. Drop elements from the stack, and return undefined.
__ addq(rsp, Immediate(3 * kPointerSize));
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
=======================================
--- /branches/experimental/gc/src/x64/lithium-codegen-x64.cc Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/x64/lithium-codegen-x64.cc Fri Jun 10 14:58:26 2011
@@ -2221,7 +2221,6 @@
ASSERT(!value.is(object));
Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
- int offset = JSGlobalPropertyCell::kValueOffset;
__ movq(address, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
// If the cell we are storing to contains the hole it could have
@@ -2239,6 +2238,7 @@
Label smi_store;
__ JumpIfSmi(value, &smi_store, Label::kNear);
+ int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag;
__ lea(object, Operand(address, -offset));
// Cells are always in the remembered set.
__ RecordWrite(object,
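
The offset fix above accounts for V8's heap-object tag: 'address' holds the
raw address of the cell's value slot, and a tagged pointer p addresses its
fields at p + offset - kHeapObjectTag, so the tagged cell is recovered by
subtracting kValueOffset - kHeapObjectTag. A tiny sketch, assuming only that
kHeapObjectTag == 1 as in V8 (kValueOffset here is a parameter, not the real
constant):

  #include <stdint.h>

  // slot_address == cell_tagged + kValueOffset - kHeapObjectTag, so:
  static uintptr_t TaggedCellFromValueSlot(uintptr_t slot_address,
                                           int kValueOffset) {
    const int kHeapObjectTag = 1;  // low-bit tag on heap object pointers
    return slot_address - (kValueOffset - kHeapObjectTag);
  }
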
=======================================
--- /branches/experimental/gc/src/x64/macro-assembler-x64.cc Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/x64/macro-assembler-x64.cc Fri Jun 10 14:58:26 2011
@@ -200,6 +200,12 @@
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then) {
+ if (FLAG_debug_code) {
+ Label ok;
+ JumpIfNotInNewSpace(addr, scratch, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
// Load store buffer top.
LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Store pointer to buffer.
@@ -237,7 +243,7 @@
Register scratch,
Condition cc,
Label* branch,
- Label::Distance near_jump) {
+ Label::Distance distance) {
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
@@ -252,7 +258,7 @@
}
movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
- j(cc, branch, near_jump);
+ j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
intptr_t new_space_start =
@@ -264,7 +270,7 @@
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
- j(cc, branch, near_jump);
+ j(cc, branch, distance);
}
}
@@ -328,10 +334,6 @@
// context register, so we check that none of the clobbered
// registers are rsi.
ASSERT(!value.is(rsi) && !address.is(rsi));
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
- return;
- }
ASSERT(!object.is(value));
ASSERT(!object.is(address));
@@ -481,7 +483,6 @@
void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
// ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
// TODO(gc): Fix this!
- // TODO(gc): Fix this!
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
@@ -838,6 +839,57 @@
GetBuiltinFunction(rdi, id);
movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
+
+
+static const Register saved_regs[] =
+ { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ for (int i = 0; i < kNumberOfSavedRegs; i++) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ push(reg);
+ }
+ }
+ // R12 to r15 are callee save on all platforms.
+ if (fp_mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(Operand(rsp, i * kDoubleSize), reg);
+ }
+ }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) {
+ if (fp_mode == kSaveFPRegs) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(reg, Operand(rsp, i * kDoubleSize));
+ }
+ addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ }
+ for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ pop(reg);
+ }
+ }
+}
void MacroAssembler::Set(Register dst, int64_t x) {
@@ -3792,6 +3844,75 @@
}
j(cc, condition_met, condition_met_distance);
}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+ Register bitmap_scratch,
+ Register mask_scratch,
+ Label* on_black,
+ Label::Distance on_black_distance) {
+ ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+ GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ // The mask_scratch register contains a 1 at the position of the first bit
+ // and a 0 at all other positions, including the position of the second bit.
+ movq(rcx, mask_scratch);
+ // Make rcx into a mask that covers both marking bits using the operation
+ // rcx = mask | (mask << 1).
+ lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ // Note that we are using a 4-byte aligned 8-byte load.
+ and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpq(mask_scratch, rcx);
+ j(equal, on_black, on_black_distance);
+}
+
+
+// Detect some, but not all, common pointer-free objects. This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+ Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance) {
+ Label is_data_object;
+ movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ j(equal, &is_data_object, Label::kNear);
+ ASSERT(kConsStringTag == 1 && kIsConsStringMask == 1);
+ ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ // If it's a string and it's not a cons string then it's an object containing
+ // no GC pointers.
+ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+ Immediate(kIsConsStringMask | kIsNotStringMask));
+ j(not_zero, not_data_object, not_data_object_distance);
+ bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg) {
+ ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+ movq(bitmap_reg, addr_reg);
+ // Sign extended 32 bit immediate.
+ and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ movq(rcx, addr_reg);
+ int shift =
+ Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+ shrl(rcx, Immediate(shift));
+ and_(rcx,
+ Immediate((Page::kPageAlignmentMask >> shift) &
+ ~(Bitmap::kBytesPerCell - 1)));
+
+ addq(bitmap_reg, rcx);
+ movq(rcx, addr_reg);
+ shrl(rcx, Immediate(kPointerSizeLog2));
+ and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ movl(mask_reg, Immediate(1));
+ shl_cl(mask_reg);
+}
} } // namespace v8::internal
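
A bit-level restatement of the black check in JumpIfBlack above, as a hedged
standalone sketch: black is the mark-bit pattern "10", so with m the
single-bit mask for the first mark bit, the covering mask m | (m << 1)
selects both bits and the object is black iff only the first is set
(IsBlackSketch is an illustrative name):

  #include <stdint.h>

  static bool IsBlackSketch(uint32_t bitmap_word, uint32_t first_bit_mask) {
    uint32_t both_bits = first_bit_mask | (first_bit_mask << 1);
    // Pattern "10": first mark bit set, second clear.
    return (bitmap_word & both_bits) == first_bit_mask;
  }
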
=======================================
--- /branches/experimental/gc/src/x64/macro-assembler-x64.h Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/x64/macro-assembler-x64.h Fri Jun 10 14:58:26 2011
@@ -139,6 +139,18 @@
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
+ // These functions do not arrange the registers in any particular order so
+ // they are not useful for calls that can cause a GC. The caller can
+ // exclude up to 3 registers that do not need to be saved and restored.
+ void PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ void PopCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
// ---------------------------------------------------------------------------
// GC Support
@@ -162,14 +174,37 @@
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
- // Check if object is in new space. The condition cc can be equal or
- // not_equal. If it is equal a jump will be done if the object is on new
- // space. The register scratch can be object itself, but it will be clobbered.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
- Label* branch,
- Label::Distance near_jump = Label::kFar);
+ // Check if object is in new space. Jumps if the object is not in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfNotInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, not_equal, branch, distance);
+ }
+
+ // Check if object is in new space. Jumps if the object is in new space.
+ // The register scratch can be object itself, but it will be clobbered.
+ void JumpIfInNewSpace(Register object,
+ Register scratch,
+ Label* branch,
+ Label::Distance distance = Label::kFar) {
+ InNewSpace(object, scratch, equal, branch, distance);
+ }
+
+ // Check if an object has the black incremental marking color. Also uses rcx!
+ void JumpIfBlack(Register object,
+ Register scratch0,
+ Register scratch1,
+ Label* on_black,
+ Label::Distance on_black_distance = Label::kFar);
+
+ // Detects conservatively whether an object is data-only, i.e. it does not
+ // need to be scanned by the garbage collector.
+ void JumpIfDataObject(Register value,
+ Register scratch,
+ Label* not_data_object,
+ Label::Distance not_data_object_distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -1237,6 +1272,20 @@
Register scratch,
bool gc_allowed);
+ // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch,
+ Label::Distance distance = Label::kFar);
+
+ // Helper for finding the mark bits for an address. Afterwards, the
+ // bitmap register points at the word with the mark bits and the mask
+ // the position of the first bit. Uses rcx as scratch and leaves addr_reg
+ // unchanged.
+ inline void GetMarkBits(Register addr_reg,
+ Register bitmap_reg,
+ Register mask_reg);
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
=======================================
--- /branches/experimental/gc/src/x64/stub-cache-x64.cc Wed Jun 8 04:29:52 2011
+++ /branches/experimental/gc/src/x64/stub-cache-x64.cc Fri Jun 10 14:58:26 2011
@@ -1414,7 +1414,7 @@
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label exit, attempt_to_grow_elements, with_write_barrier;
+ Label attempt_to_grow_elements, with_write_barrier;
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1443,7 +1443,6 @@
__ JumpIfNotSmi(rcx, &with_write_barrier);
- __ bind(&exit);
__ ret((argc + 1) * kPointerSize);
__ bind(&with_write_barrier);
@@ -1490,6 +1489,13 @@
for (int i = 1; i < kAllocationDelta; i++) {
__ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
}
+
+ // We know the elements array is in new space so we don't need the
+ // remembered set, but we just pushed a value onto it so we may have to
+ // tell the incremental marker to rescan the object that we just grew. We
+ // don't need to worry about the holes because they are in old space and
+ // already marked black.
+ __ RecordWrite(rbx, rdx, rcx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
// Restore receiver to rdx as finish sequence assumes it's here.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1502,7 +1508,6 @@
__ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
}
@@ -2441,19 +2446,36 @@
Handle<Map>(object->map()));
__ j(not_equal, &miss);
+ // Compute the cell operand to use.
+ __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
+
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
- __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
- Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
// Store the value in the cell.
- __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
+ __ movq(cell_operand, rax);
+ Label done;
+ __ JumpIfSmi(rax, &done);
+
+ __ movq(rcx, rax);
+ __ lea(rdx, cell_operand);
+ // Cells are always in the remembered set.
+ __ RecordWrite(rbx, // Object.
+ rdx, // Address.
+ rcx, // Value.
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
// Return the value (register rax).
+ __ bind(&done);
+
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
=======================================
--- /branches/experimental/gc/test/cctest/test-serialize.cc Mon May 9 14:11:15 2011
+++ /branches/experimental/gc/test/cctest/test-serialize.cc Fri Jun 10 14:58:26 2011
@@ -569,9 +569,9 @@
new_space_size,
paged_space_size, // Old pointer space.
paged_space_size, // Old data space.
- paged_space_size, // Code space.
- paged_space_size, // Map space.
- paged_space_size, // Cell space.
+ HEAP->code_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+ HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+ HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
size); // Large object space.
LinearAllocationScope linear_allocation_scope;
const int kSmallFixedArrayLength = 4;