Revision: 21172
Author: [email protected]
Date: Tue May 6 14:28:29 2014 UTC
Log: ARM64: Sign extension on MemOperand for keyed ops
SXTW extend mode is usually cheaper on loads and stores than arithmetic,
so move it to the memory accesses where possible for Keyed loads and
stores.
BUG=
[email protected]
Review URL: https://codereview.chromium.org/268483002
http://code.google.com/p/v8/source/detail?r=21172
Modified:
/branches/bleeding_edge/src/arm64/assembler-arm64-inl.h
/branches/bleeding_edge/src/arm64/assembler-arm64.h
/branches/bleeding_edge/src/arm64/lithium-arm64.cc
/branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc
/branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h
=======================================
--- /branches/bleeding_edge/src/arm64/assembler-arm64-inl.h Tue May 6 11:25:37 2014 UTC
+++ /branches/bleeding_edge/src/arm64/assembler-arm64-inl.h Tue May 6 14:28:29 2014 UTC
@@ -409,6 +409,12 @@
}
return Operand(smi);
}
+
+
+MemOperand::MemOperand()
+ : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+}
MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
=======================================
--- /branches/bleeding_edge/src/arm64/assembler-arm64.h Tue May 6 11:00:28 2014 UTC
+++ /branches/bleeding_edge/src/arm64/assembler-arm64.h Tue May 6 14:28:29 2014 UTC
@@ -669,6 +669,7 @@
// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
public:
+ inline explicit MemOperand();
inline explicit MemOperand(Register base,
ptrdiff_t offset = 0,
AddrMode addrmode = Offset);
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-arm64.cc Tue May 6 12:11:00 2014 UTC
+++ /branches/bleeding_edge/src/arm64/lithium-arm64.cc Tue May 6 14:28:29 2014 UTC
@@ -1666,10 +1666,9 @@
ASSERT(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* elements = UseRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
if (!instr->is_typed_elements()) {
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
if (instr->representation().IsDouble()) {
LOperand* temp = (!instr->key()->IsConstant() ||
instr->RequiresHoleCheck())
@@ -1697,7 +1696,6 @@
(instr->representation().IsDouble() &&
IsDoubleOrFloatElementsKind(instr->elements_kind())));
- LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
LInstruction* result = DefineAsRegister(
new(zone()) LLoadKeyedExternal(elements, key, temp));
@@ -2301,6 +2299,7 @@
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* temp = NULL;
LOperand* elements = NULL;
LOperand* val = NULL;
@@ -2327,19 +2326,16 @@
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
instr->elements()->representation().IsExternal()));
- LOperand* key = UseRegisterOrConstant(instr->key());
return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
} else if (instr->value()->representation().IsDouble()) {
ASSERT(instr->elements()->representation().IsTagged());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
} else {
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->value()->representation().IsSmiOrTagged() ||
instr->value()->representation().IsInteger32());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
}
}
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc Tue May 6 11:00:28 2014 UTC
+++ /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc Tue May 6 14:28:29 2014 UTC
@@ -3481,11 +3481,14 @@
}
-void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
- Register elements,
- Register key,
- bool key_is_tagged,
- ElementsKind elements_kind) {
+MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind,
+                                              Representation representation,
+                                              int additional_index) {
+  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
int element_size_shift = ElementsKindToShiftSize(elements_kind);
// Even though the HLoad/StoreKeyed instructions force the input
@@ -3494,11 +3497,28 @@
// can be tagged, so that case must be handled here, too.
if (key_is_tagged) {
    __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+ if (representation.IsInteger32()) {
+ ASSERT(elements_kind == FAST_SMI_ELEMENTS);
+    // Read or write only the most-significant 32 bits in the case of fast smi
+ // arrays.
+ return UntagSmiFieldMemOperand(base, additional_index);
+ } else {
+ return FieldMemOperand(base, additional_index);
+ }
} else {
    // Sign extend key because it could be a 32-bit negative value or contain
    // garbage in the top 32-bits. The address computation happens in 64-bit.
ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
- __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ if (representation.IsInteger32()) {
+ ASSERT(elements_kind == FAST_SMI_ELEMENTS);
+      // Read or write only the most-significant 32 bits in the case of fast smi
+ // arrays.
+ __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ return UntagSmiFieldMemOperand(base, additional_index);
+ } else {
+ __ Add(base, elements, additional_index - kHeapObjectTag);
+ return MemOperand(base, key, SXTW, element_size_shift);
+ }
}
}
@@ -3506,8 +3526,7 @@
void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
Register elements = ToRegister(instr->elements());
DoubleRegister result = ToDoubleRegister(instr->result());
- Register load_base;
- int offset = 0;
+ MemOperand mem_op;
if (instr->key()->IsConstantOperand()) {
ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
@@ -3517,27 +3536,30 @@
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
-    offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
-                                                 instr->additional_index());
-    load_base = elements;
+    int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+                                                     instr->additional_index());
+ mem_op = FieldMemOperand(elements, offset);
} else {
- load_base = ToRegister(instr->temp());
+ Register load_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
-    offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+    int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ instr->hydrogen()->representation(),
+ offset);
}
- __ Ldr(result, FieldMemOperand(load_base, offset));
+
+ __ Ldr(result, mem_op);
if (instr->hydrogen()->RequiresHoleCheck()) {
Register scratch = ToRegister(instr->temp());
-
-  // TODO(all): Is it faster to reload this value to an integer register, or
- // move from fp to integer?
- __ Fmov(scratch, result);
- __ Cmp(scratch, kHoleNanInt64);
- DeoptimizeIf(eq, instr->environment());
+  // Detect the hole NaN by adding one to the integer representation of the
+ // result, and checking for overflow.
+ STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
+ __ Ldr(scratch, mem_op);
+ __ Cmn(scratch, 1);
+ DeoptimizeIf(vs, instr->environment());
}
}
@@ -3545,34 +3567,34 @@
void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
Register elements = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
- Register load_base;
- int offset = 0;
+ MemOperand mem_op;
+ Representation representation = instr->hydrogen()->representation();
if (instr->key()->IsConstantOperand()) {
ASSERT(instr->temp() == NULL);
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- load_base = elements;
+ int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ if (representation.IsInteger32()) {
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+ (kSmiTag == 0));
+ mem_op = UntagSmiFieldMemOperand(elements, offset);
+ } else {
+ mem_op = FieldMemOperand(elements, offset);
+ }
} else {
- load_base = ToRegister(instr->temp());
+ Register load_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+
+    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ representation, offset);
}
- Representation representation = instr->hydrogen()->representation();
- if (representation.IsInteger32() &&
- instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
- __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
- Representation::Integer32());
- } else {
- __ Load(result, FieldMemOperand(load_base, offset),
- representation);
- }
+ __ Load(result, mem_op, representation);
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
@@ -5169,31 +5191,32 @@
void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
Register elements = ToRegister(instr->elements());
DoubleRegister value = ToDoubleRegister(instr->value());
- Register store_base = no_reg;
- int offset = 0;
+ MemOperand mem_op;
if (instr->key()->IsConstantOperand()) {
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
-    offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
-                                                 instr->additional_index());
-    store_base = elements;
+    int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+                                                     instr->additional_index());
+ mem_op = FieldMemOperand(elements, offset);
} else {
- store_base = ToRegister(instr->temp());
+ Register store_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
-    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
-                               instr->hydrogen()->elements_kind());
-    offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+    int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ instr->hydrogen()->representation(),
+ offset);
}
if (instr->NeedsCanonicalization()) {
__ CanonicalizeNaN(double_scratch(), value);
- __ Str(double_scratch(), FieldMemOperand(store_base, offset));
+ __ Str(double_scratch(), mem_op);
} else {
- __ Str(value, FieldMemOperand(store_base, offset));
+ __ Str(value, mem_op);
}
}
@@ -5204,36 +5227,40 @@
Register scratch = no_reg;
Register store_base = no_reg;
Register key = no_reg;
- int offset = 0;
+ MemOperand mem_op;
if (!instr->key()->IsConstantOperand() ||
instr->hydrogen()->NeedsWriteBarrier()) {
scratch = ToRegister(instr->temp());
}
+  Representation representation = instr->hydrogen()->value()->representation();
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
store_base = elements;
+ if (representation.IsInteger32()) {
+      ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+ (kSmiTag == 0));
+ mem_op = UntagSmiFieldMemOperand(store_base, offset);
+ } else {
+ mem_op = FieldMemOperand(store_base, offset);
+ }
} else {
store_base = scratch;
key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
-  Representation representation = instr->hydrogen()->value()->representation();
- if (representation.IsInteger32()) {
- ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
- __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
- Representation::Integer32());
- } else {
- __ Store(value, FieldMemOperand(store_base, offset), representation);
+ int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+
+    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ representation, offset);
}
+
+ __ Store(value, mem_op, representation);
if (instr->hydrogen()->NeedsWriteBarrier()) {
ASSERT(representation.IsTagged());
@@ -5243,7 +5270,7 @@
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ Add(element_addr, store_base, offset - kHeapObjectTag);
+ __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
__ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
}
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h Tue May 6 11:00:28 2014 UTC
+++ /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h Tue May 6 14:28:29 2014 UTC
@@ -256,11 +256,13 @@
int constant_key,
ElementsKind elements_kind,
int additional_index);
- void CalcKeyedArrayBaseRegister(Register base,
- Register elements,
- Register key,
- bool key_is_tagged,
- ElementsKind elements_kind);
+ MemOperand PrepareKeyedArrayOperand(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind,
+ Representation representation,
+ int additional_index);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.