Revision: 5744
Author: [email protected]
Date: Mon Nov 1 01:55:40 2010
Log: Landing for Rodolph Perfetta.
Reduces the number of movw/movt instructions generated in ProbeTable. This
shrinks the code generated for CompileCallMegamorphic by about 10%.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/4220004/show
http://code.google.com/p/v8/source/detail?r=5744
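Background on where the saving comes from (an illustrative sketch, not literal
assembler output): on ARMv7, materializing a full 32-bit constant such as a stub
cache table address with mov(reg, Operand(imm)) expands to a movw/movt pair. The
old ProbeTable did this three times per probe (the key table once, the value
table twice, since the code entry is reloaded after the flags check) and also
pushed/popped the offset register. The distance between the two tables is a small
constant (the new asserts in stub-cache-arm.cc bound it to 256 * 4 bytes), so it
fits a single add immediate instead, roughly:

  Before, per ProbeTable call:
    movw  ip, #:lower16:key_table      @ mov(ip, Operand(key_offset))
    movt  ip, #:upper16:key_table
    ...
    movw  ip, #:lower16:value_table    @ mov(ip, Operand(value_offset))
    movt  ip, #:upper16:value_table
    ...
    movw  ip, #:lower16:value_table    @ reload the code entry after the flags check
    movt  ip, #:upper16:value_table

  After:
    movw  rB, #:lower16:key_table      @ mov(offsets_base_addr, Operand(key_offset))
    movt  rB, #:upper16:key_table
    ...
    add   rB, rB, #(value_table - key_table)   @ single instruction, small immediate

Here rB stands for one of the extra scratch registers now threaded through
GenerateProbe; since GenerateProbe calls ProbeTable for both the primary and the
secondary table, the saving applies twice per generated stub.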
Modified:
/branches/bleeding_edge/src/arm/ic-arm.cc
/branches/bleeding_edge/src/arm/stub-cache-arm.cc
/branches/bleeding_edge/src/ia32/stub-cache-ia32.cc
/branches/bleeding_edge/src/mips/stub-cache-mips.cc
/branches/bleeding_edge/src/stub-cache.h
/branches/bleeding_edge/src/x64/stub-cache-x64.cc
=======================================
--- /branches/bleeding_edge/src/arm/ic-arm.cc Wed Oct 20 05:01:17 2010
+++ /branches/bleeding_edge/src/arm/ic-arm.cc Mon Nov 1 01:55:40 2010
@@ -544,7 +544,7 @@
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@@ -583,7 +583,7 @@
// Probe the stub cache for the value object.
__ bind(&probe);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
__ bind(&miss);
}
@@ -858,7 +858,7 @@
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);
+ StubCache::GenerateProbe(masm, flags, r0, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -2163,7 +2163,7 @@
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
- StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
=======================================
--- /branches/bleeding_edge/src/arm/stub-cache-arm.cc Mon Oct 25 08:22:03 2010
+++ /branches/bleeding_edge/src/arm/stub-cache-arm.cc Mon Nov 1 01:55:40 2010
@@ -43,43 +43,49 @@
Code::Flags flags,
StubCache::Table table,
Register name,
- Register offset) {
+ Register offset,
+ Register scratch,
+ Register scratch2) {
ExternalReference key_offset(SCTableReference::keyReference(table));
ExternalReference value_offset(SCTableReference::valueReference(table));
- Label miss;
-
- // Save the offset on the stack.
- __ push(offset);
+ uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+ uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+
+ // Check the relative positions of the address fields.
+ ASSERT(value_off_addr > key_off_addr);
+ ASSERT((value_off_addr - key_off_addr) % 4 == 0);
+ ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+
+ Label miss;
+ Register offsets_base_addr = scratch;
// Check that the key in the entry matches the name.
- __ mov(ip, Operand(key_offset));
- __ ldr(ip, MemOperand(ip, offset, LSL, 1));
+ __ mov(offsets_base_addr, Operand(key_offset));
+ __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
__ cmp(name, ip);
__ b(ne, &miss);
// Get the code entry from the cache.
- __ mov(ip, Operand(value_offset));
- __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+ __ add(offsets_base_addr, offsets_base_addr,
+ Operand(value_off_addr - key_off_addr));
+ __ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
// Check that the flags match what we're looking for.
- __ ldr(offset, FieldMemOperand(offset, Code::kFlagsOffset));
- __ and_(offset, offset, Operand(~Code::kFlagsNotUsedInLookup));
- __ cmp(offset, Operand(flags));
+ __ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+ __ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
+ __ cmp(scratch2, Operand(flags));
__ b(ne, &miss);
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(ip, Operand(value_offset));
- __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+ // Re-load code entry from cache.
+ __ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
// Jump to the first instruction in the code stub.
__ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(offset);
- // Miss: Restore offset and fall through.
+ // Miss: fall through.
__ bind(&miss);
- __ pop(offset);
}
@@ -201,7 +207,8 @@
Register receiver,
Register name,
Register scratch,
- Register extra) {
+ Register extra,
+ Register extra2) {
Label miss;
// Make sure that code is valid. The shifting code relies on the
@@ -214,6 +221,18 @@
// Make sure that there are no register conflicts.
ASSERT(!scratch.is(receiver));
ASSERT(!scratch.is(name));
+ ASSERT(!extra.is(receiver));
+ ASSERT(!extra.is(name));
+ ASSERT(!extra.is(scratch));
+ ASSERT(!extra2.is(receiver));
+ ASSERT(!extra2.is(name));
+ ASSERT(!extra2.is(scratch));
+ ASSERT(!extra2.is(extra));
+
+ // Check scratch, extra and extra2 registers are valid.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(!extra.is(no_reg));
+ ASSERT(!extra2.is(no_reg));
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
@@ -229,7 +248,7 @@
Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch);
+ ProbeTable(masm, flags, kPrimary, name, scratch, extra, extra2);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name));
@@ -239,7 +258,7 @@
Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch);
+ ProbeTable(masm, flags, kSecondary, name, scratch, extra, extra2);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
=======================================
--- /branches/bleeding_edge/src/ia32/stub-cache-ia32.cc Mon Oct 25 08:29:06 2010
+++ /branches/bleeding_edge/src/ia32/stub-cache-ia32.cc Mon Nov 1 01:55:40 2010
@@ -206,8 +206,10 @@
Register receiver,
Register name,
Register scratch,
- Register extra) {
+ Register extra,
+ Register extra2) {
Label miss;
+ USE(extra2); // The register extra2 is not used on the ia32 platform.
// Make sure that code is valid. The shifting code relies on the
// entry size being 8.
@@ -223,6 +225,11 @@
ASSERT(!extra.is(name));
ASSERT(!extra.is(scratch));
+ // Check scratch and extra registers are valid, and extra2 is unused.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(!extra.is(no_reg));
+ ASSERT(extra2.is(no_reg));
+
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
=======================================
--- /branches/bleeding_edge/src/mips/stub-cache-mips.cc Mon May 17 08:41:35 2010
+++ /branches/bleeding_edge/src/mips/stub-cache-mips.cc Mon Nov 1 01:55:40 2010
@@ -44,7 +44,8 @@
Register receiver,
Register name,
Register scratch,
- Register extra) {
+ Register extra,
+ Register extra2) {
UNIMPLEMENTED_MIPS();
}
=======================================
--- /branches/bleeding_edge/src/stub-cache.h Mon Oct 25 08:22:03 2010
+++ /branches/bleeding_edge/src/stub-cache.h Mon Nov 1 01:55:40 2010
@@ -241,13 +241,15 @@
static void Clear();
// Generate code for probing the stub cache table.
- // If extra != no_reg it might be used as am extra scratch register.
+ // Arguments extra and extra2 may be used to pass additional scratch
+ // registers. Set to no_reg if not needed.
static void GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
Register scratch,
- Register extra);
+ Register extra,
+ Register extra2 = no_reg);
enum Table {
kPrimary,
=======================================
--- /branches/bleeding_edge/src/x64/stub-cache-x64.cc Mon Oct 25 08:22:03 2010
+++ /branches/bleeding_edge/src/x64/stub-cache-x64.cc Mon Nov 1 01:55:40 2010
@@ -273,9 +273,11 @@
Register receiver,
Register name,
Register scratch,
- Register extra) {
+ Register extra,
+ Register extra2) {
Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
+ USE(extra); // The register extra is not used on the X64 platform.
+ USE(extra2); // The register extra2 is not used on the X64 platform.
// Make sure that code is valid. The shifting code relies on the
// entry size being 16.
ASSERT(sizeof(Entry) == 16);
@@ -287,6 +289,11 @@
ASSERT(!scratch.is(receiver));
ASSERT(!scratch.is(name));
+ // Check scratch register is valid, extra and extra2 are unused.
+ ASSERT(!scratch.is(no_reg));
+ ASSERT(extra.is(no_reg));
+ ASSERT(extra2.is(no_reg));
+
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);