Revision: 5122
Author: [email protected]
Date: Fri Jul 23 01:47:15 2010
Log: Fixed a couple of issues with store inlining on arm.
Spill and merge virtual frames explicitly in the deferred code.
Account for the fact that the inlined write barrier size depends on
the size of the new space masks.
Review URL: http://codereview.chromium.org/3018015
http://code.google.com/p/v8/source/detail?r=5122
Modified:
/branches/bleeding_edge/src/arm/assembler-arm.cc
/branches/bleeding_edge/src/arm/codegen-arm.cc
/branches/bleeding_edge/src/arm/codegen-arm.h
/branches/bleeding_edge/src/arm/ic-arm.cc
/branches/bleeding_edge/src/v8-counters.h
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.cc Thu Jul 22 01:17:40 2010
+++ /branches/bleeding_edge/src/arm/assembler-arm.cc Fri Jul 23 01:47:15 2010
@@ -827,9 +827,10 @@
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
}
emit(instr | rn.code()*B16 | rd.code()*B12);
- if (rn.is(pc) || x.rm_.is(pc))
+ if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize);
+ }
}
=======================================
--- /branches/bleeding_edge/src/arm/codegen-arm.cc Fri Jul 23 01:25:48 2010
+++ /branches/bleeding_edge/src/arm/codegen-arm.cc Fri Jul 23 01:47:15 2010
@@ -151,6 +151,8 @@
// -------------------------------------------------------------------------
// CodeGenerator implementation
+int CodeGenerator::inlined_write_barrier_size_ = -1;
+
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
masm_(masm),
@@ -6225,7 +6227,13 @@
};
+// Takes value in r0, receiver in r1 and returns the result (the
+// value) in r0.
void DeferredReferenceSetNamedValue::Generate() {
+ // Record the entry frame and spill.
+ VirtualFrame copied_frame(*frame_state()->frame());
+ copied_frame.SpillAll();
+
// Ensure value in r0, receiver in r1 to match store ic calling
// convention.
ASSERT(value_.is(r0) && receiver_.is(r1));
@@ -6241,6 +6249,12 @@
// named store has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
+ // Go back to the frame we entered with. The instructions
+ // generated by this merge are skipped over by the inline store
+ // patching mechanism when looking for the branch instruction that
+ // tells it where the code to patch is.
+ copied_frame.MergeTo(frame_state()->frame());
+
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
@@ -6365,11 +6379,38 @@
int offset = 0;
__ str(value, MemOperand(receiver, offset));
- // Update the write barrier.
- __ RecordWrite(receiver, Operand(offset), scratch0, scratch1);
+ // Update the write barrier and record its size. We do not use
+ // the RecordWrite macro here because we want the offset
+ // addition instruction first to make it easy to patch.
+ Label record_write_start, record_write_done;
+ __ bind(&record_write_start);
+ // Add offset into the object.
+ __ add(scratch0, receiver, Operand(offset));
+ // Test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ __ InNewSpace(receiver, scratch1, eq, &record_write_done);
+ // Record the actual write.
+ __ RecordWriteHelper(receiver, scratch0, scratch1);
+ __ bind(&record_write_done);
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
+ __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ }
+ // Check that this is the first inlined write barrier or that
+ // this inlined write barrier has the same size as all the other
+ // inlined write barriers.
+ ASSERT((inlined_write_barrier_size_ == -1) ||
+ (inlined_write_barrier_size_ ==
+ masm()->InstructionsGeneratedSince(&record_write_start)));
+ inlined_write_barrier_size_ =
+ masm()->InstructionsGeneratedSince(&record_write_start);
+
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
- masm_->InstructionsGeneratedSince(&check_inlined_codesize));
+ masm()->InstructionsGeneratedSince(&check_inlined_codesize));
}
deferred->BindExit();
}
=======================================
--- /branches/bleeding_edge/src/arm/codegen-arm.h Thu Jul 22 01:17:40 2010
+++ /branches/bleeding_edge/src/arm/codegen-arm.h Fri Jul 23 01:47:15 2010
@@ -282,7 +282,8 @@
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
- return FLAG_debug_code ? 33 : 14;
+ ASSERT(inlined_write_barrier_size_ != -1);
+ return inlined_write_barrier_size_ + 4;
}
private:
@@ -589,6 +590,9 @@
// to some unlinking code).
bool function_return_is_shadowed_;
+ // Size of inlined write barriers generated by EmitNamedStore.
+ static int inlined_write_barrier_size_;
+
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
=======================================
--- /branches/bleeding_edge/src/arm/ic-arm.cc Thu Jul 22 01:17:40 2010
+++ /branches/bleeding_edge/src/arm/ic-arm.cc Fri Jul 23 01:47:15 2010
@@ -1016,7 +1016,7 @@
// Patch the offset in the add instruction that is part of the
// write barrier.
Address add_offset_instr_address =
- str_property_instr_address + 4 * Assembler::kInstrSize;
+ str_property_instr_address + Assembler::kInstrSize;
Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
@@ -1024,7 +1024,7 @@
Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
// Indicate that code has changed.
- CPU::FlushICache(str_property_instr_address, 5 * Assembler::kInstrSize);
+ CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
}
// Patch the map check.
=======================================
--- /branches/bleeding_edge/src/v8-counters.h Fri Jul 2 07:15:04 2010
+++ /branches/bleeding_edge/src/v8-counters.h Fri Jul 23 01:47:15 2010
@@ -169,7 +169,7 @@
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(negative_lookups, V8.NegativeLookups) \
- SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
+ SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev