Revision: 2516
Author: [email protected]
Date: Tue Jul 21 06:30:46 2009
Log: Make stub cache hash work on X64 platform. Stub cache now works.
Switch arguments of 32-bit arithmetic instructions so they are consistent
with 64-bit arithmetic instructions (all on X64 platforms).
Review URL: http://codereview.chromium.org/155849
http://code.google.com/p/v8/source/detail?r=2516
Modified:
/branches/bleeding_edge/src/stub-cache.h
/branches/bleeding_edge/src/x64/assembler-x64.cc
/branches/bleeding_edge/src/x64/assembler-x64.h
/branches/bleeding_edge/src/x64/stub-cache-x64.cc
=======================================
--- /branches/bleeding_edge/src/stub-cache.h Fri Jul 10 02:40:47 2009
+++ /branches/bleeding_edge/src/stub-cache.h Tue Jul 21 06:30:46 2009
@@ -256,11 +256,14 @@
}
// Compute the entry for a given offset in exactly the same way as
- // we done in generated code. This makes it a lot easier to avoid
- // making mistakes in the hashed offset computations.
+ // we do in generated code. We generate a hash code that already
+ // ends in String::kHashShift 0s. Then we shift it so it is a multiple
+ // of sizeof(Entry). This makes it easier to avoid making mistakes
+ // in the hashed offset computations.
static Entry* entry(Entry* table, int offset) {
+ const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
return reinterpret_cast<Entry*>(
- reinterpret_cast<Address>(table) + (offset << 1));
+ reinterpret_cast<Address>(table) + (offset << shift_amount));
}
};
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.cc Wed Jul 15 05:30:28 2009
+++ /branches/bleeding_edge/src/x64/assembler-x64.cc Tue Jul 21 06:30:46 2009
@@ -456,13 +456,13 @@
void Assembler::arithmetic_op_32(byte opcode,
- const Operand& dst,
- Register src) {
+ Register reg,
+ const Operand& rm_reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
- emit_optional_rex_32(src, dst);
+ emit_optional_rex_32(reg, rm_reg);
emit(opcode);
- emit_operand(src, dst);
+ emit_operand(reg, rm_reg);
}
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.h Tue Jul 14 04:39:45 2009
+++ /branches/bleeding_edge/src/x64/assembler-x64.h Tue Jul 21 06:30:46 2009
@@ -521,10 +521,6 @@
void xchg(Register dst, Register src);
// Arithmetics
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
void addl(Register dst, Register src) {
arithmetic_op_32(0x03, dst, src);
}
@@ -532,15 +528,22 @@
void addl(Register dst, Immediate src) {
immediate_arithmetic_op_32(0x0, dst, src);
}
+
+ void addl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x03, dst, src);
+ }
void addl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x0, dst, src);
}
+
+ void addq(Register dst, Register src) {
+ arithmetic_op(0x03, dst, src);
+ }
void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}
-
void addq(const Operand& dst, Register src) {
arithmetic_op(0x01, src, dst);
@@ -567,11 +570,11 @@
}
void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, src, dst);
+ arithmetic_op_32(0x3B, dst, src);
}
void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, dst, src);
+ arithmetic_op_32(0x39, src, dst);
}
void cmpl(Register dst, Immediate src) {
@@ -1118,8 +1121,8 @@
// ModR/M byte.
void arithmetic_op(byte opcode, Register dst, Register src);
void arithmetic_op_32(byte opcode, Register dst, Register src);
- void arithmetic_op_32(byte opcode, const Operand& dst, Register src);
- void arithmetic_op(byte opcode, Register reg, const Operand& op);
+ void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
+ void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
// Operate on a 32-bit word in memory or register.
=======================================
--- /branches/bleeding_edge/src/x64/stub-cache-x64.cc Tue Jul 21 04:06:24 2009
+++ /branches/bleeding_edge/src/x64/stub-cache-x64.cc Tue Jul 21 06:30:46 2009
@@ -562,6 +562,36 @@
// StubCompiler static helper functions
#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset) {
+ ExternalReference key_offset(SCTableReference::keyReference(table));
+ Label miss;
+
+ __ movq(kScratchRegister, key_offset);
+ // Check that the key in the entry matches the name.
+ __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+ __ j(not_equal, &miss);
+ // Get the code entry from the cache.
+ // Use key_offset + kPointerSize, rather than loading value_offset.
+ __ movq(kScratchRegister,
+ Operand(kScratchRegister, offset, times_4, kPointerSize));
+ // Check that the flags match what we're looking for.
+ __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+ __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ cmpl(offset, Immediate(flags));
+ __ j(not_equal, &miss);
+
+ // Jump to the first instruction in the code stub.
+ __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(kScratchRegister);
+
+ __ bind(&miss);
+}
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
@@ -625,7 +655,43 @@
Register scratch,
Register extra) {
Label miss;
- // TODO(X64): Probe the primary and secondary StubCache tables.
+ USE(extra); // The register extra is not used on the X64 platform.
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 16.
+ ASSERT(sizeof(Entry) == 16);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ // Use only the low 32 bits of the map pointer.
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the primary table.
+ ProbeTable(masm, flags, kPrimary, name, scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ subl(scratch, name);
+ __ addl(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the secondary table.
+ ProbeTable(masm, flags, kSecondary, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
--~--~---------~--~----~------------~-------~--~----~
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
-~----------~----~----~----~------~----~------~--~---