Reviewers: danno,
Message:
The movq instruction will be translated to movl by the x32 script
generator, but
X32 has to use 8-byte moves for doubles, 64-bit integers and return addresses.
One approach is to introduce MoveDouble, MoveInteger64 and
MoveReturnAddress,
the other is to use MoveQuadword for all of them. I'd like to know your
recommendation on this. If you prefer MoveQuadword, I will change the title
and
upload the integer64 and return address changes in this CL.
Description:
Introduce MoveDouble to the X64 MacroAssembler
Please review this at https://codereview.chromium.org/26216008/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files (+8, -6 lines):
M src/x64/code-stubs-x64.cc
M src/x64/codegen-x64.cc
M src/x64/lithium-codegen-x64.cc
M src/x64/macro-assembler-x64.h
M src/x64/stub-cache-x64.cc
Index: src/x64/code-stubs-x64.cc
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index
7e6b63c56143ca9680820ccb16fa551dfc48036e..2136c1ddf1d00a75d9194ed7103adb291e44f5e3
100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -593,7 +593,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler*
masm) {
// Input is a HeapNumber. Push it on the FPU stack and load its
// bits into rbx.
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rdx, rbx);
__ bind(&loaded);
Index: src/x64/codegen-x64.cc
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index
53738276561b1efa4a6f90127bb5cbfac016095b..5f14f88c46facc438b6d969fdd4d659cede6db9e
100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -466,7 +466,7 @@ void
ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
__ AllocateHeapNumber(rax, r15, &gc_required);
// rax: new heap number
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
+ __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
__ movq(FieldOperand(r11,
r9,
times_pointer_size,
Index: src/x64/lithium-codegen-x64.cc
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index
6fe64c553ec39222eb7741696892cc436b5d5799..b3066af4eca0ffea8e9528fac2dbe06d8ccf38e1
100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3335,10 +3335,10 @@ void
LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
- __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ shl(tmp2, Immediate(1));
__ shr(tmp2, Immediate(1));
- __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
Index: src/x64/macro-assembler-x64.h
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index
d1c24343628d6f3d37e89c9b0a66b244875544d8..3e37e825bdfcd4d1c354d825ecb8794bec799bbb
100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -846,6 +846,8 @@ class MacroAssembler: public Assembler {
void Pop(Register dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
+ void MoveDouble(Register dst, const Operand& src) { movq(dst, src); }
+ void MoveDouble(const Operand& dst, Register src) { movq(dst, src); }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
Index: src/x64/stub-cache-x64.cc
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index
94650fd49a28483023572ca79a94d9b4adf4a873..b066079877f6775f1de570a3dad67f837027151f
100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2440,7 +2440,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Check if the argument is a heap number and load its value.
__ bind(&not_smi);
__ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
@@ -2458,7 +2458,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
__ bind(&negative_sign);
__ xor_(rbx, rdi);
__ AllocateHeapNumber(rax, rdx, &slow);
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
+ __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx);
__ ret(2 * kPointerSize);
// Tail call the full function. We do not have to patch the receiver
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.