Revision: 12711
Author: [email protected]
Date: Fri Oct 12 03:58:25 2012
Log: DoNumberTagD performance improvement
Allocate heap entry untagged and tag at end to avoid having to subtract off
the tag offset before storing the value.
BUG=
Review URL: https://codereview.chromium.org/11028115
Patch from Anthony Berent <[email protected]>.
http://code.google.com/p/v8/source/detail?r=12711
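For context, a minimal sketch of the effect on the sequence emitted by DoNumberTagD (register names are taken from the patch below; this sketch is illustrative, not part of the patch):

  // Before: the allocation returns a tagged pointer, so the value store
  // first has to subtract the tag.
  __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
  __ sub(ip, r5, Operand(kHeapObjectTag));
  __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);

  // After: keep the raw address, store directly, and tag once at the end.
  __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
  __ vstr(dbl_scratch, r5, HeapNumber::kValueOffset);
  __ add(r5, r5, Operand(kHeapObjectTag));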
Modified:
/branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.h
/branches/bleeding_edge/src/arm/stub-cache-arm.cc
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Mon Oct 8 05:50:15 2012
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Fri Oct 12 03:58:25 2012
@@ -4446,7 +4446,7 @@
if (FLAG_inline_new) {
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ Move(dst, r5);
__ b(&done);
}
@@ -4461,12 +4461,13 @@
__ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, r0);
+ __ sub(dst, dst, Operand(kHeapObjectTag));
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sub(ip, dst, Operand(kHeapObjectTag));
- __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ __ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4491,13 +4492,16 @@
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ // We want the untagged address first for performance.
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+ DONT_TAG_RESULT);
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ sub(ip, reg, Operand(kHeapObjectTag));
- __ vstr(input_reg, ip, HeapNumber::kValueOffset);
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ // Now that we have finished with the object's real address, tag it.
+ __ add(reg, reg, Operand(kHeapObjectTag));
}
@@ -4510,6 +4514,7 @@
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, reg);
}
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Thu Oct 11 05:01:19 2012
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Fri Oct 12 03:58:25 2012
@@ -3124,7 +3124,8 @@
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required) {
+ Label* gc_required,
+ TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize,
@@ -3132,11 +3133,16 @@
scratch1,
scratch2,
gc_required,
- TAG_OBJECT);
+ tagging_mode == TAG_RESULT ? TAG_OBJECT :
+ NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ if (tagging_mode == TAG_RESULT) {
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
}
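Note on the MemOperand change above (assuming the usual V8 definition, in which FieldMemOperand subtracts the heap-object tag):

  // FieldMemOperand(result, HeapObject::kMapOffset)
  //   == MemOperand(result, HeapObject::kMapOffset - kHeapObjectTag)

With DONT_TAG_RESULT the result register still holds the raw, untagged address, so the map is stored through a plain MemOperand instead.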
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.h Tue Oct 9 08:12:39 2012
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.h Fri Oct 12 03:58:25 2012
@@ -68,6 +68,13 @@
SIZE_IN_WORDS = 1 << 2
};
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag
+ DONT_TAG_RESULT
+};
// Flags used for the ObjectToDoubleVFPRegister function.
enum ObjectToDoubleFlags {
@@ -731,7 +738,8 @@
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required);
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
Register scratch1,
=======================================
--- /branches/bleeding_edge/src/arm/stub-cache-arm.cc Mon Oct 8 05:50:15 2012
+++ /branches/bleeding_edge/src/arm/stub-cache-arm.cc Fri Oct 12 03:58:25 2012
@@ -3787,22 +3787,28 @@
__ Ret();
__ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
-
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ add(r0, r5, Operand(kHeapObjectTag));
__ vmov(s0, value);
__ vcvt_f64_s32(d0, s0);
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ vstr(d0, r5, HeapNumber::kValueOffset);
__ Ret();
} else {
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ mov(r0, r5);
Register dst1 = r1;
Register dst2 = r3;
FloatingPointHelper::Destination dest =
@@ -3838,13 +3844,12 @@
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_u32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
@@ -3876,7 +3881,7 @@
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -3893,19 +3898,18 @@
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
__ vcvt_f64_f32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
@@ -3961,18 +3965,17 @@
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ __ vstr(d0, r2, HeapNumber::kValueOffset);
- __ mov(r0, r2);
+ __ add(r0, r2, Operand(kHeapObjectTag));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
__ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
__ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
@@ -4439,7 +4442,7 @@
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber);
+ heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
// Don't need to reload the upper 32 bits of the double, it's already in
// scratch.