Reviewers: Rico,
Description:
Optimize loads from root-array in X64.
Move the value of the root-array register to offset 128 from the start of
the root array. A signed 8-bit displacement covers -128..127 bytes, so
indices 0..15 stay reachable through negative displacements while indices
16..31 become reachable through positive ones, saving three bytes
(disp8 instead of disp32) on each such access.
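For illustration only (not part of this change): a minimal standalone C++
sketch of the displacement arithmetic, using the same constants the patch
introduces. The file name and loop bound are made up; indices 0..31 land in
the signed 8-bit range once the bias is applied, and index 32 and up fall
back to a 32-bit displacement.

  // disp8_check.cc -- hypothetical standalone sketch, not part of the CL.
  #include <cstdio>

  int main() {
    const int kPointerSizeLog2 = 3;     // 8-byte pointers on x64
    const int kRootRegisterBias = 128;  // matches macro-assembler-x64.h
    for (int index = 0; index < 34; index++) {
      int disp = (index << kPointerSizeLog2) - kRootRegisterBias;
      // A signed 8-bit displacement covers -128..127.
      const char* kind = (disp >= -128 && disp <= 127) ? "disp8" : "disp32";
      printf("index %2d -> displacement %4d (%s)\n", index, disp, kind);
    }
    return 0;
  }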
Please review this at http://codereview.chromium.org/6594115/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files:
M src/x64/code-stubs-x64.cc
M src/x64/codegen-x64.cc
M src/x64/lithium-codegen-x64.cc
M src/x64/macro-assembler-x64.h
M src/x64/macro-assembler-x64.cc
Index: src/x64/code-stubs-x64.cc
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 42373e3debbb9746a6186a41bdb35b45e1d6cf87..03b7840e8b57c0e4335af6ca0afe32741a907adc 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -3561,8 +3561,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Set up the roots and smi constant registers.
// Needs to be done before any further smi loads.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
+ __ InitializeRootRegister();
__ InitializeSmiConstantRegister();
#ifdef ENABLE_LOGGING_AND_PROFILING
Index: src/x64/codegen-x64.cc
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index ad114c243108bbf89e2e336f873398a8ddfcfea5..d1bdd005538b71c857df1ca10c915a46ac0636ca 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -8217,7 +8217,8 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
- masm()->Move(kScratchRegister, Factory::null_value());
+ masm()->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
// This branch is always a forwards branch so it's always a fixed
@@ -8293,7 +8294,8 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
// the __ macro for the following two instructions because it
// might introduce extra instructions.
__ bind(&patch_site);
- masm()->Move(kScratchRegister, Factory::null_value());
+ masm()->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
// This branch is always a forwards branch so it's always a fixed size
Index: src/x64/lithium-codegen-x64.cc
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 0ae8a003c9f3489a66ea701afbb366f87ece7434..e4f7cc124490fb897f01329dae69a3f7afb66681 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1426,7 +1426,7 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
__ j(equal, &load);
__ movl(result, Immediate(Heap::kFalseValueRootIndex));
__ bind(&load);
- __ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
+ __ LoadRootIndexed(result, result, 0);
} else {
NearLabel true_value, false_value, done;
__ j(equal, &true_value);
@@ -1557,8 +1557,7 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {
}
// result is zero if input is a smi, and one otherwise.
ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
- __ movq(result, Operand(kRootRegister, result, times_pointer_size,
- Heap::kTrueValueRootIndex * kPointerSize));
+ __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
}
Index: src/x64/macro-assembler-x64.cc
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 8845bbb77cc33d11a55b8756dc83d7b3e3de4ca5..9604bd796e97fd3f5a4f12f02de9a409015a2d1d 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -49,22 +49,35 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- movq(destination, Operand(kRootRegister, index << kPointerSizeLog2));
+ movq(destination, Operand(kRootRegister,
+ (index << kPointerSizeLog2) - kRootRegisterBias));
+}
+
+
+void MacroAssembler::LoadRootIndexed(Register destination,
+ Register variable_offset,
+ int fixed_offset) {
+ movq(destination,
+ Operand(kRootRegister,
+ variable_offset, times_pointer_size,
+ (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- movq(Operand(kRootRegister, index << kPointerSizeLog2), source);
+ movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- push(Operand(kRootRegister, index << kPointerSizeLog2));
+ push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2));
+ cmpq(with, Operand(kRootRegister,
+ (index << kPointerSizeLog2) - kRootRegisterBias));
}
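A side note on LoadRootIndexed (illustration only, not part of the patch):
the bias is folded into the fixed displacement, so the effective address is
still root-array start plus (fixed_offset + variable_offset) * kPointerSize.
A hypothetical standalone check, with a made-up base address:

  // ea_check.cc -- hypothetical standalone sketch, not part of the CL.
  #include <cassert>
  #include <cstdint>

  int main() {
    const int kPointerSizeLog2 = 3;
    const intptr_t kRootRegisterBias = 128;
    intptr_t roots_start = 0x100000;  // made-up root-array start address
    intptr_t root_register = roots_start + kRootRegisterBias;
    for (intptr_t fixed = 0; fixed < 8; fixed++) {
      for (intptr_t variable = 0; variable < 8; variable++) {
        // Mirrors Operand(kRootRegister, variable_offset, times_pointer_size,
        //                 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias).
        intptr_t ea = root_register + (variable << kPointerSizeLog2) +
                      ((fixed << kPointerSizeLog2) - kRootRegisterBias);
        assert(ea == roots_start + ((fixed + variable) << kPointerSizeLog2));
      }
    }
    return 0;
  }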
Index: src/x64/macro-assembler-x64.h
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 4cf59c4e89cf5dcc72d396127e4bf1ddea3327ec..7f07d9696cd39300af73ecf67d0c89cdf7d96cc5 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -52,6 +52,9 @@ static const Register kSmiConstantRegister = { 15 }; // r15 (callee save).
static const Register kRootRegister = { 13 }; // r13 (callee save).
// Value of smi in kSmiConstantRegister.
static const int kSmiConstantRegisterValue = 1;
+// Actual value of root register is offset from the root array's start
+// to take advantage of negative 8-bit displacement values.
+static const int kRootRegisterBias = 128;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
@@ -74,6 +77,12 @@ class MacroAssembler: public Assembler {
MacroAssembler(void* buffer, int size);
void LoadRoot(Register destination, Heap::RootListIndex index);
+ // Load a root value where the index (or part of it) is variable.
+ // The variable_offset register is added to the fixed_offset value
+ // to get the index into the root-array.
+ void LoadRootIndexed(Register destination,
+ Register variable_offset,
+ int fixed_offset);
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
@@ -176,6 +185,12 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Register src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ void InitializeRootRegister() {
+ ExternalReference roots_address = ExternalReference::roots_address();
+ movq(kRootRegister, roots_address);
+ addq(kRootRegister, Immediate(kRootRegisterBias));
+ }
+
// ---------------------------------------------------------------------------
// JavaScript invokes
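Illustration only (not part of the patch): the bias added once in
InitializeRootRegister cancels against the bias subtracted in every
LoadRoot/StoreRoot/PushRoot/CompareRoot operand, so each access still
reaches roots_address + index * kPointerSize. A hypothetical standalone
check, with a made-up roots_address:

  // bias_cancel.cc -- hypothetical standalone sketch, not part of the CL.
  #include <cassert>
  #include <cstdint>

  int main() {
    const int kPointerSizeLog2 = 3;
    const intptr_t kRootRegisterBias = 128;
    intptr_t roots_address = 0x200000;  // stand-in for roots_address()
    // InitializeRootRegister: load roots_address, then add the bias once.
    intptr_t root_register = roots_address + kRootRegisterBias;
    for (int index = 0; index < 32; index++) {
      // Each root access subtracts the bias back out of the displacement.
      intptr_t slot =
          root_register + ((index << kPointerSizeLog2) - kRootRegisterBias);
      assert(slot == roots_address + (index << kPointerSizeLog2));
    }
    return 0;
  }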