Revision: 4787
Author: [email protected]
Date: Wed Jun 2 07:37:47 2010
Log: Change keyed load IC interface on x64 to pass arguments in registers.
Review URL: http://codereview.chromium.org/2470001
http://code.google.com/p/v8/source/detail?r=4787
Modified:
/branches/bleeding_edge/src/ia32/codegen-ia32.cc
/branches/bleeding_edge/src/ia32/ic-ia32.cc
/branches/bleeding_edge/src/x64/builtins-x64.cc
/branches/bleeding_edge/src/x64/codegen-x64.cc
/branches/bleeding_edge/src/x64/debug-x64.cc
/branches/bleeding_edge/src/x64/full-codegen-x64.cc
/branches/bleeding_edge/src/x64/ic-x64.cc
/branches/bleeding_edge/src/x64/stub-cache-x64.cc
/branches/bleeding_edge/src/x64/virtual-frame-x64.cc
=======================================
--- /branches/bleeding_edge/src/ia32/codegen-ia32.cc Tue Jun 1 07:08:19
2010
+++ /branches/bleeding_edge/src/ia32/codegen-ia32.cc Wed Jun 2 07:37:47
2010
@@ -8766,6 +8766,9 @@
key.ToRegister();
receiver.ToRegister();
+ // If key and receiver are shared registers on the frame, their values will
+ // be automatically saved and restored when going to deferred code.
+ // The result is in elements, which is guaranteed non-shared.
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
=======================================
--- /branches/bleeding_edge/src/ia32/ic-ia32.cc Tue Jun 1 14:11:38 2010
+++ /branches/bleeding_edge/src/ia32/ic-ia32.cc Wed Jun 2 07:37:47 2010
@@ -454,11 +454,10 @@
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size,
cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ cmp(edi, Operand(ecx));
+ __ sub(edi, Operand(ecx));
__ j(above_equal, &slow);
// Load in-object property.
- __ sub(edi, Operand(ecx));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
@@ -690,7 +689,7 @@
__ fincstp();
// Fall through to slow case.
- // Slow case: Load key and receiver from stack and jump to runtime.
+ // Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
=======================================
--- /branches/bleeding_edge/src/x64/builtins-x64.cc Thu May 27 05:30:45 2010
+++ /branches/bleeding_edge/src/x64/builtins-x64.cc Wed Jun 2 07:37:47 2010
@@ -418,9 +418,7 @@
__ movq(rax, Operand(rbp, kIndexOffset));
__ jmp(&entry);
__ bind(&loop);
- __ movq(rcx, Operand(rbp, kArgumentsOffset)); // load arguments
- __ push(rcx);
- __ push(rax);
+ __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments
// Use inline caching to speed up access to arguments.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -430,8 +428,7 @@
// we have generated an inline version of the keyed load. In this
// case, we know that we are not generating a test instruction next.
- // Remove IC arguments from the stack and push the nth argument.
- __ addq(rsp, Immediate(2 * kPointerSize));
+ // Push the nth argument.
__ push(rax);
// Update the index on the stack and in register rax.
=======================================
--- /branches/bleeding_edge/src/x64/codegen-x64.cc Tue Jun 1 07:08:19 2010
+++ /branches/bleeding_edge/src/x64/codegen-x64.cc Wed Jun 2 07:37:47 2010
@@ -660,9 +660,25 @@
void DeferredReferenceGetKeyedValue::Generate() {
- __ push(receiver_); // First IC argument.
- __ push(key_); // Second IC argument.
-
+ if (receiver_.is(rdx)) {
+ if (!key_.is(rax)) {
+ __ movq(rax, key_);
+ } // else do nothing.
+ } else if (receiver_.is(rax)) {
+ if (key_.is(rdx)) {
+ __ xchg(rax, rdx);
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rdx, receiver_);
+ __ movq(rax, key_);
+ }
+ } else if (key_.is(rax)) {
+ __ movq(rdx, receiver_);
+ } else {
+ __ movq(rax, key_);
+ __ movq(rdx, receiver_);
+ }
// Calculate the delta from the IC call instruction to the map check
// movq instruction in the inlined version. This delta is stored in
// a test(rax, delta) instruction after the call so that we can find
@@ -686,8 +702,6 @@
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(key_);
- __ pop(receiver_);
}
@@ -5852,7 +5866,6 @@
frame_->Push(&arguments);
frame_->Push(key_literal->handle());
*result = EmitKeyedLoad();
- frame_->Drop(2); // Drop key and receiver.
done->Jump(result);
}
}
@@ -7447,6 +7460,9 @@
key.ToRegister();
receiver.ToRegister();
+ // If key and receiver are shared registers on the frame, their values will
+ // be automatically saved and restored when going to deferred code.
+ // The result is returned in elements, which is not shared.
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
@@ -7459,9 +7475,9 @@
// initialization code.
__ bind(deferred->patch_site());
// Use masm-> here instead of the double underscore macro since extra
- // coverage code can interfere with the patching. Do not use
- // root array to load null_value, since it must be patched with
- // the expected receiver map.
+ // coverage code can interfere with the patching. Do not use a load
+ // from the root array to load null_value, since the load must be patched
+ // with the expected receiver map, which is not in the root array.
masm_->movq(kScratchRegister, Factory::null_value(),
RelocInfo::EMBEDDED_OBJECT);
masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
@@ -7504,8 +7520,6 @@
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
- frame_->Push(&receiver);
- frame_->Push(&key);
} else {
Comment cmnt(masm_, "[ Load from keyed Property");
result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
@@ -7516,7 +7530,7 @@
// the push that follows might be peep-hole optimized away.
__ nop();
}
- ASSERT(frame()->height() == original_height);
+ ASSERT(frame()->height() == original_height - 2);
return result;
}
@@ -7560,7 +7574,6 @@
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- if (!persist_after_get_) set_unloaded();
break;
}
@@ -7573,27 +7586,28 @@
}
Result result = cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->Push(&result);
- if (!persist_after_get_) {
- set_unloaded();
- }
break;
}
case KEYED: {
// A load of a bare identifier (load from global) cannot be keyed.
ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
-
+ if (persist_after_get_) {
+ cgen_->frame()->PushElementAt(1);
+ cgen_->frame()->PushElementAt(1);
+ }
Result value = cgen_->EmitKeyedLoad();
cgen_->frame()->Push(&value);
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
break;
}
default:
UNREACHABLE();
}
+
+ if (!persist_after_get_) {
+ set_unloaded();
+ }
}
=======================================
--- /branches/bleeding_edge/src/x64/debug-x64.cc Mon May 31 06:26:12 2010
+++ /branches/bleeding_edge/src/x64/debug-x64.cc Wed Jun 2 07:37:47 2010
@@ -124,9 +124,10 @@
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- rax : key
+ // -- rdx : receiver
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), false);
}
=======================================
--- /branches/bleeding_edge/src/x64/full-codegen-x64.cc Mon May 31 06:26:12
2010
+++ /branches/bleeding_edge/src/x64/full-codegen-x64.cc Wed Jun 2 07:37:47
2010
@@ -1176,7 +1176,7 @@
// Load the object.
MemOperand object_loc = EmitSlotSearch(object_slot, rax);
- __ push(object_loc);
+ __ movq(rdx, object_loc);
// Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
@@ -1184,7 +1184,7 @@
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- __ Push(key_literal->handle());
+ __ Move(rax, key_literal->handle());
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -1192,8 +1192,7 @@
// Notice: We must not have a "test rax, ..." instruction after the
// call. It is treated specially by the LoadIC code.
__ nop();
- // Drop key and object left on the stack by IC, and push the result.
- DropAndApply(2, context, rax);
+ Apply(context, rax);
}
}
@@ -1699,10 +1698,10 @@
Apply(context_, rax);
} else {
VisitForValue(expr->obj(), kStack);
- VisitForValue(expr->key(), kStack);
+ VisitForValue(expr->key(), kAccumulator);
+ __ pop(rdx);
EmitKeyedPropertyLoad(expr);
- // Drop key and receiver left on the stack by IC.
- DropAndApply(2, context_, rax);
+ Apply(context_, rax);
}
}
@@ -1824,7 +1823,8 @@
// Call to a keyed property, use keyed load IC followed by function
// call.
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
+ __ movq(rdx, Operand(rsp, 0));
// Record source code position for IC call.
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -1832,8 +1832,6 @@
// By emitting a nop we make sure that we do not have a "test rax,..."
// instruction after the call it is treated specially by the LoadIC code.
__ nop();
- // Drop key left on the stack by IC.
- __ Drop(1);
// Pop receiver.
__ pop(rbx);
// Push result (function).
@@ -2865,7 +2863,9 @@
EmitNamedPropertyLoad(prop);
} else {
VisitForValue(prop->obj(), kStack);
- VisitForValue(prop->key(), kStack);
+ VisitForValue(prop->key(), kAccumulator);
+ __ movq(rdx, Operand(rsp, 0)); // Leave receiver on stack
+ __ push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
=======================================
--- /branches/bleeding_edge/src/x64/ic-x64.cc Tue Jun 1 14:11:38 2010
+++ /branches/bleeding_edge/src/x64/ic-x64.cc Wed Jun 2 07:37:47 2010
@@ -313,14 +313,14 @@
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
__ pop(rbx);
- __ push(Operand(rsp, 1 * kPointerSize)); // receiver
- __ push(Operand(rsp, 1 * kPointerSize)); // name
+ __ push(rdx); // receiver
+ __ push(rax); // name
__ push(rbx); // return address
// Perform tail call to the entry.
@@ -331,14 +331,14 @@
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
__ pop(rbx);
- __ push(Operand(rsp, 1 * kPointerSize)); // receiver
- __ push(Operand(rsp, 1 * kPointerSize)); // name
+ __ push(rdx); // receiver
+ __ push(rax); // name
__ push(rbx); // return address
// Perform tail call to the entry.
@@ -348,30 +348,26 @@
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary, check_number_dictionary;
- // Load name and receiver.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
-
// Check that the object isn't a smi.
- __ JumpIfSmi(rcx, &slow);
+ __ JumpIfSmi(rdx, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing
// into string objects work as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(below, &slow);
// Check bit field.
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(kSlowCaseBitFieldMask));
__ j(not_zero, &slow);
@@ -380,7 +376,7 @@
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
@@ -389,92 +385,99 @@
__ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
// Fast case: Do the load.
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rax, FieldOperand(rcx,
+ SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
+ __ movq(rbx, FieldOperand(rcx,
index.reg,
index.scale,
FixedArray::kHeaderSize));
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, &slow);
+ __ movq(rax, rbx);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
- // Check whether the elements is a pixel array.
+ __ bind(&check_pixel_array);
+ // Check whether the elements object is a pixel array.
+ // rdx: receiver
// rax: key
// rcx: elements array
- __ bind(&check_pixel_array);
+ __ SmiToInteger32(rbx, rax); // Used on both directions of next branch.
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &check_number_dictionary);
- __ SmiToInteger32(rax, rax);
- __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
+ __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
- __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
- __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
+ __ movq(rax, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
+ __ movzxbq(rax, Operand(rax, rbx, times_1, 0));
__ Integer32ToSmi(rax, rax);
__ ret(0);
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
+ // rdx: receiver
// rax: key
+ // rbx: key as untagged int32
// rcx: elements
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
- __ SmiToInteger32(rbx, rax);
- GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, rdx, rdi);
+ GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi);
__ ret(0);
- // Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
+ // Slow case: Jump to runtime.
+ // rdx: receiver
+ // rax: key
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
GenerateRuntimeGetProperty(masm);
+
__ bind(&check_string);
// The key is not a smi.
// Is it a string?
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+ // rdx: receiver
+ // rax: key
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
__ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
+ __ j(not_zero, &index_string); // The value in rbx is used at jump target.
// Is the string a symbol?
- // rcx: key map.
- __ j(not_zero, &index_string); // The value in rbx is used at jump target.
ASSERT(kSymbolTag != 0);
- __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
+ __ testb(FieldOperand(rcx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
- __ movq(rbx, FieldOperand(rcx, JSObject::kPropertiesOffset));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
Factory::hash_table_map());
+ __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kHashTableMapRootIndex);
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
- __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movl(rdx, rbx);
- __ shr(rdx, Immediate(KeyedLookupCache::kMapHashShift));
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- __ shr(rax, Immediate(String::kHashShift));
- __ xor_(rdx, rax);
- __ and_(rdx, Immediate(KeyedLookupCache::kCapacityMask));
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movl(rcx, rbx);
+ __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
+ __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
+ __ shr(rdi, Immediate(String::kHashShift));
+ __ xor_(rcx, rdi);
+ __ and_(rcx, Immediate(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys();
- __ movq(rdi, rdx);
+ __ movq(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ movq(kScratchRegister, cache_keys);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
__ j(not_equal, &slow);
- __ movq(rdi, Operand(kScratchRegister, rdi, times_1, kPointerSize));
- __ cmpq(Operand(rsp, kPointerSize), rdi);
+ __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
__ j(not_equal, &slow);
// Get field offset which is a 32-bit integer and check that it is
@@ -482,31 +485,32 @@
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
__ movq(kScratchRegister, cache_field_offsets);
- __ movl(rax, Operand(kScratchRegister, rdx, times_4, 0));
- __ movzxbq(rdx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ cmpq(rax, rdx);
+ __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
+ __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subq(rdi, rcx);
__ j(above_equal, &slow);
// Load in-object property.
- __ subq(rax, rdx);
- __ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rax, rdx);
- __ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
+ __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addq(rcx, rdi);
+ __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
+ // rdx: receiver
+ // rax: key
GenerateDictionaryLoad(masm,
&slow,
rbx,
- rcx,
rdx,
+ rcx,
rax,
rdi,
DICTIONARY_CHECK_DONE);
- __ movq(rax, rcx);
+ __ movq(rax, rdx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
// If the hash field contains an array index pick it out. The assert checks
@@ -516,10 +520,11 @@
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- // We want the smi-tagged index in rax.
- // rax: key (string).
- // rbx: hash field.
- // rdx: receiver.
+ // We want the smi-tagged index in rax. Even if we subsequently go to
+ // the slow case, converting the key to a smi is always valid.
+ // rdx: receiver
+ // rax: key (a string)
+ // rbx: key's hash field, including its array index value.
__ and_(rbx, Immediate(String::kArrayIndexValueMask));
__ shr(rbx, Immediate(String::kHashShift));
// Here we actually clobber the key (rax) which will be used if calling into
@@ -533,9 +538,9 @@
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name (index)
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
Label index_out_of_range;
@@ -546,9 +551,6 @@
Register scratch2 = rcx;
Register result = rax;
- __ movq(index, Operand(rsp, 1 * kPointerSize));
- __ movq(receiver, Operand(rsp, 2 * kPointerSize));
-
StringCharAtGenerator char_at_generator(receiver,
index,
scratch1,
@@ -576,80 +578,80 @@
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label slow, failed_allocation;
- // Load name and receiver.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
-
// Check that the object isn't a smi.
- __ JumpIfSmi(rcx, &slow);
+ __ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Check that the object is a JS object.
- __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
__ j(not_equal, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks. The map is already in rdx.
- __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// rax: index (as a smi)
- // rcx: JSObject
- __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ // rdx: JSObject
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::RootIndexForExternalArrayType(array_type));
__ j(not_equal, &slow);
// Check that the index is in range.
- __ SmiToInteger32(rax, rax);
- __ cmpl(rax, FieldOperand(rcx, ExternalArray::kLengthOffset));
+ __ SmiToInteger32(rcx, rax);
+ __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
- // rax: untagged index
- // rcx: elements array
- __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
- // rcx: base pointer of external storage
+ // rax: index (as a smi)
+ // rdx: receiver (JSObject)
+ // rcx: untagged index
+ // rbx: elements array
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
- __ movsxbq(rax, Operand(rcx, rax, times_1, 0));
+ __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalUnsignedByteArray:
- __ movzxbq(rax, Operand(rcx, rax, times_1, 0));
+ __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
break;
case kExternalShortArray:
- __ movsxwq(rax, Operand(rcx, rax, times_2, 0));
+ __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalUnsignedShortArray:
- __ movzxwq(rax, Operand(rcx, rax, times_2, 0));
+ __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
break;
case kExternalIntArray:
- __ movsxlq(rax, Operand(rcx, rax, times_4, 0));
+ __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalUnsignedIntArray:
- __ movl(rax, Operand(rcx, rax, times_4, 0));
+ __ movl(rcx, Operand(rbx, rcx, times_4, 0));
break;
case kExternalFloatArray:
- __ fld_s(Operand(rcx, rax, times_4, 0));
+ __ fld_s(Operand(rbx, rcx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
+ // rax: index
+ // rdx: receiver
// For integer array types:
- // rax: value
+ // rcx: value
// For floating-point array type:
// FP(0): value
@@ -660,42 +662,45 @@
// it to a HeapNumber.
Label box_int;
if (array_type == kExternalIntArray) {
- __ JumpIfNotValidSmiValue(rax, &box_int);
+ __ JumpIfNotValidSmiValue(rcx, &box_int);
} else {
ASSERT_EQ(array_type, kExternalUnsignedIntArray);
- __ JumpIfUIntNotValidSmiValue(rax, &box_int);
+ __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
}
- __ Integer32ToSmi(rax, rax);
+ __ Integer32ToSmi(rax, rcx);
__ ret(0);
__ bind(&box_int);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
- __ push(rax);
+ __ push(rcx);
if (array_type == kExternalIntArray) {
__ fild_s(Operand(rsp, 0));
} else {
ASSERT(array_type == kExternalUnsignedIntArray);
- // Need to zero-extend the value.
+ // The value is zero-extended on the stack, because all pushes are
+ // 64-bit and we loaded the value from memory with movl.
__ fild_d(Operand(rsp, 0));
}
- __ pop(rax);
+ __ pop(rcx);
// FP(0): value
- __ AllocateHeapNumber(rax, rbx, &failed_allocation);
+ __ AllocateHeapNumber(rcx, rbx, &failed_allocation);
// Set the value.
+ __ movq(rax, rcx);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0);
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
- __ AllocateHeapNumber(rax, rbx, &failed_allocation);
+ __ AllocateHeapNumber(rcx, rbx, &failed_allocation);
// Set the value.
+ __ movq(rax, rcx);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ ret(0);
} else {
- __ Integer32ToSmi(rax, rax);
+ __ Integer32ToSmi(rax, rcx);
__ ret(0);
}
@@ -706,7 +711,7 @@
__ fincstp();
// Fall through to slow case.
- // Slow case: Load name and receiver from stack and jump to runtime.
+ // Slow case: Jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
GenerateRuntimeGetProperty(masm);
@@ -715,37 +720,33 @@
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : key
- // -- rsp[16] : receiver
// -----------------------------------
Label slow;
- // Load key and receiver.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
-
// Check that the receiver isn't a smi.
- __ JumpIfSmi(rcx, &slow);
+ __ JumpIfSmi(rdx, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &slow);
// Get the map of the receiver.
- __ movq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
- __ movb(rdx, FieldOperand(rdx, Map::kBitFieldOffset));
- __ andb(rdx, Immediate(kSlowCaseBitFieldMask));
- __ cmpb(rdx, Immediate(1 << Map::kHasIndexedInterceptor));
+ __ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset));
+ __ andb(rcx, Immediate(kSlowCaseBitFieldMask));
+ __ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
- __ pop(rdx);
- __ push(rcx); // receiver
+ __ pop(rcx);
+ __ push(rdx); // receiver
__ push(rax); // key
- __ push(rdx); // return address
+ __ push(rcx); // return address
// Perform tail call to the entry.
__ TailCallExternalReference(ExternalReference(
=======================================
--- /branches/bleeding_edge/src/x64/stub-cache-x64.cc Mon May 31 06:26:12
2010
+++ /branches/bleeding_edge/src/x64/stub-cache-x64.cc Wed Jun 2 07:37:47
2010
@@ -1505,14 +1505,12 @@
JSObject* holder,
AccessorInfo* callback)
{
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_callback, 1);
// Check that the name has not changed.
@@ -1520,7 +1518,7 @@
__ j(not_equal, &miss);
Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
+ bool success = GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx,
callback, name, &miss, &failure);
if (!success) return failure;
@@ -1535,21 +1533,19 @@
Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_array_length, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadArrayLength(masm(), rcx, rdx, &miss);
+ GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_array_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1564,21 +1560,19 @@
JSObject* holder,
Object* value) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_constant_function, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadConstant(receiver, holder, rcx, rbx, rdx,
+ GenerateLoadConstant(receiver, holder, rdx, rbx, rcx,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
@@ -1591,21 +1585,19 @@
Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadFunctionPrototype(masm(), rcx, rdx, rbx, &miss);
+ GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1619,14 +1611,12 @@
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
// -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_interceptor, 1);
// Check that the name has not changed.
@@ -1638,9 +1628,9 @@
GenerateLoadInterceptor(receiver,
holder,
&lookup,
- rcx,
- rax,
rdx,
+ rax,
+ rcx,
rbx,
name,
&miss);
@@ -1655,21 +1645,19 @@
Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_string_length, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadStringLength(masm(), rcx, rdx, rbx, &miss);
+ GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1847,21 +1835,19 @@
JSObject* holder,
int index) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16] : receiver
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
- __ movq(rcx, Operand(rsp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_field, 1);
// Check that the name has not changed.
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
+ GenerateLoadField(receiver, holder, rdx, rbx, rcx, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);
=======================================
--- /branches/bleeding_edge/src/x64/virtual-frame-x64.cc Mon May 31
06:26:12 2010
+++ /branches/bleeding_edge/src/x64/virtual-frame-x64.cc Wed Jun 2
07:37:47 2010
@@ -1077,7 +1077,7 @@
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
Result name = Pop();
Result receiver = Pop();
- PrepareForCall(0, 0); // One stack arg, not callee-dropped.
+ PrepareForCall(0, 0);
MoveResultsToRegisters(&name, &receiver, rcx, rax);
return RawCallCodeObject(ic, mode);
@@ -1088,7 +1088,10 @@
// Key and receiver are on top of the frame. Pop them into the registers
// the keyed load IC expects: rax (key) and rdx (receiver).
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
+ Result name = Pop();
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
+ MoveResultsToRegisters(&name, &receiver, rax, rdx);
return RawCallCodeObject(ic, mode);
}
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev