Revision: 10229
Author: [email protected]
Date: Fri Dec 9 05:19:57 2011
Log: Merge patches to 3.7 to make MIPS work.
Includes r10107, r10109, r10124, r10128, and r10129.
Review URL: http://codereview.chromium.org/8883049
http://code.google.com/p/v8/source/detail?r=10229
Modified:
/branches/3.7/src/mips/code-stubs-mips.cc
/branches/3.7/src/mips/codegen-mips.cc
/branches/3.7/src/mips/codegen-mips.h
/branches/3.7/src/mips/full-codegen-mips.cc
/branches/3.7/src/mips/lithium-codegen-mips.cc
/branches/3.7/src/mips/lithium-codegen-mips.h
/branches/3.7/src/mips/lithium-mips.cc
/branches/3.7/src/mips/lithium-mips.h
/branches/3.7/src/version.cc
=======================================
--- /branches/3.7/src/mips/code-stubs-mips.cc Thu Dec 1 00:22:35 2011
+++ /branches/3.7/src/mips/code-stubs-mips.cc Fri Dec 9 05:19:57 2011
@@ -255,21 +255,61 @@
}
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
+static void GenerateFastCloneShallowArrayCommon(
+ MacroAssembler* masm,
+ int length,
+ FastCloneShallowArrayStub::Mode mode,
+ Label* fail) {
+ // Registers on entry:
+ // a3: boilerplate literal array.
+ ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
// All sizes here are multiples of kPointerSize.
int elements_size = 0;
- if (length_ > 0) {
- elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length_)
- : FixedArray::SizeFor(length_);
+ if (length > 0) {
+ elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
}
int size = JSArray::kSize + elements_size;
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size,
+ v0,
+ a1,
+ a2,
+ fail,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ __ lw(a1, FieldMemOperand(a3, i));
+ __ sw(a1, FieldMemOperand(v0, i));
+ }
+ }
+
+ if (length > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ Addu(a2, v0, Operand(JSArray::kSize));
+ __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ ASSERT((elements_size % kPointerSize) == 0);
+ __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+ }
+}
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: constant elements.
+ // [sp + kPointerSize]: literal index.
+ // [sp + (2 * kPointerSize)]: literals array.
+
// Load boilerplate object into r3 and check if we need to create a
// boilerplate.
Label slow_case;
@@ -281,18 +321,43 @@
__ lw(a3, MemOperand(t0));
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
__ Branch(&slow_case, eq, a3, Operand(t1));
+
+ FastCloneShallowArrayStub::Mode mode = mode_;
+ if (mode == CLONE_ANY_ELEMENTS) {
+ Label double_elements, check_fast_elements;
+ __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
+ __ Branch(&check_fast_elements, ne, v0, Operand(t1));
+ GenerateFastCloneShallowArrayCommon(masm, 0,
+ COPY_ON_WRITE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ DropAndRet(3);
+
+ __ bind(&check_fast_elements);
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&double_elements, ne, v0, Operand(t1));
+ GenerateFastCloneShallowArrayCommon(masm, length_,
+ CLONE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ DropAndRet(3);
+
+ __ bind(&double_elements);
+ mode = CLONE_DOUBLE_ELEMENTS;
+ // Fall through to generate the code to handle double elements.
+ }
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
+ if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
+ } else if (mode == CLONE_DOUBLE_ELEMENTS) {
message = "Expected (writable) fixed double array";
expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
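A note on the CLONE_ANY_ELEMENTS path added above: the elements kind is only
known at run time, so the stub inspects the boilerplate's elements map and
falls into one of the specialized clone paths. A rough standalone C++ model
of that three-way dispatch (the enum values here are invented for
illustration and are not V8's real map constants):

    enum Mode { CLONE_ELEMENTS, CLONE_DOUBLE_ELEMENTS, COPY_ON_WRITE_ELEMENTS };
    enum ElementsMap { kFixedArrayMap, kFixedDoubleArrayMap, kFixedCOWArrayMap };

    // Mirrors the branches on t1 above: COW arrays are shared (copied with
    // length 0), plain fixed arrays are cloned, and anything else falls
    // through to the double-elements path.
    Mode Dispatch(ElementsMap map) {
      if (map == kFixedCOWArrayMap) return COPY_ON_WRITE_ELEMENTS;
      if (map == kFixedArrayMap) return CLONE_ELEMENTS;
      return CLONE_DOUBLE_ELEMENTS;
    }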
@@ -304,42 +369,59 @@
__ pop(a3);
}
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- // Return new object in v0.
- __ AllocateInNewSpace(size,
- v0,
- a1,
- a2,
- &slow_case,
- TAG_OBJECT);
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(v0, i));
- }
- }
-
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ Addu(a2, v0, Operand(JSArray::kSize));
- __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
+
+
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: object literal flags.
+ // [sp + kPointerSize]: constant properties.
+ // [sp + (2 * kPointerSize)]: literal index.
+ // [sp + (3 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into a3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ lw(a3, MemOperand(sp, 3 * kPointerSize));
+ __ lw(a0, MemOperand(sp, 2 * kPointerSize));
+ __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, t0, a3);
+ __ lw(a3, MemOperand(a3));
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case, eq, a3, Operand(t0));
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+ __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ lw(a1, FieldMemOperand(a3, i));
+ __ sw(a1, FieldMemOperand(a0, i));
}
// Return and remove the on-stack parameters.
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ Drop(4);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
__ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}
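For intuition on the new FastCloneShallowObjectStub above: it only fires when
the boilerplate's instance size is statically known, then clones the header
and in-object properties with a plain word copy. A minimal portable-C++
sketch of that copy loop (toy allocation, not V8's new-space allocator):

    #include <cstdint>

    const int kPointerSize = 4;  // 32-bit MIPS

    // Copies 'size' bytes of the boilerplate one pointer-sized word at a
    // time, matching the stub's lw/sw pairs; 'size' is a multiple of
    // kPointerSize, as the generated code guarantees.
    uint32_t* FastShallowClone(const uint32_t* boilerplate, int size) {
      uint32_t* result = new uint32_t[size / kPointerSize];
      for (int i = 0; i < size; i += kPointerSize) {
        result[i / kPointerSize] = boilerplate[i / kPointerSize];
      }
      return result;
    }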
@@ -4759,8 +4841,12 @@
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
// First check for flat string. None of the following string type tests will
- // succeed if kIsNotStringTag is set.
- __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ // succeed if subject is not a string or a short external string.
+ __ And(a1,
+ a0,
+ Operand(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ Branch(&seq_string, eq, a1, Operand(zero_reg));
@@ -4774,16 +4860,17 @@
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, check_encoding;
+ Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
- __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
-
- // Catch non-string subject (should already have been guarded against).
- STATIC_ASSERT(kNotStringTag != 0);
- __ And(at, a1, Operand(kIsNotStringMask));
+ __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
+
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ Branch(&runtime, ne, at, Operand(zero_reg));
// String is sliced.
@@ -4804,7 +4891,7 @@
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ Branch(&external_string, ne, at, Operand(zero_reg));
__ bind(&seq_string);
// subject: Subject string
@@ -5030,6 +5117,29 @@
__ Addu(sp, sp, Operand(4 * kPointerSize));
__ Ret();
+ // External string. Short external strings have already been ruled out.
+ // a0: scratch
+ __ bind(&external_string);
+ __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ And(at, a0, Operand(kIsIndirectStringMask));
+ __ Assert(eq,
+ "external string expected, but not found",
+ at,
+ Operand(zero_reg));
+ }
+ __ lw(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ Subu(subject,
+ subject,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ jmp(&seq_string);
+
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -5288,77 +5398,14 @@
__ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
__ Branch(index_out_of_range_, ls, t0, Operand(index_));
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t0, result_, Operand(kStringRepresentationMask));
- __ Branch(&flat_string, eq, t0, Operand(zero_reg));
-
- // Handle non-flat strings.
- __ And(result_, result_, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
- __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- Label assure_seq_string;
- __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
- __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
- __ Branch(&call_runtime_, ne, result_, Operand(t0));
-
- // Get the first of the two parts.
- __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
- __ jmp(&assure_seq_string);
-
- // SlicedString, unpack and add offset.
- __ bind(&sliced_string);
- __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
- __ Addu(index_, index_, result_);
- __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
- // Assure that we are dealing with a sequential string. Go to runtime if not.
- __ bind(&assure_seq_string);
- __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // Check that parent is not an external string. Go to runtime otherwise.
- // Note that if the original string is a cons or slice with an external
- // string as underlying string, we pass that unpacked underlying string with
- // the adjusted index to the runtime function.
- STATIC_ASSERT(kSeqStringTag == 0);
-
- __ And(t0, result_, Operand(kStringRepresentationMask));
- __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ And(t0, result_, Operand(kStringEncodingMask));
- __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
-
- // 2-byte string.
- // Load the 2-byte character code into the result register. We can
- // add without shifting since the smi tag size is the log2 of the
- // number of bytes in a two-byte character.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ Addu(index_, object_, Operand(index_));
- __ lhu(result_, FieldMemOperand(index_, SeqTwoByteString::kHeaderSize));
- __ Branch(&got_char_code);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
-
- __ srl(t0, index_, kSmiTagSize);
- __ Addu(index_, object_, t0);
-
- __ lbu(result_, FieldMemOperand(index_, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
+ __ sra(index_, index_, kSmiTagSize);
+
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_,
+ result_,
+ &call_runtime_);
+
__ sll(result_, result_, kSmiTagSize);
__ bind(&exit_);
}
@@ -5407,6 +5454,7 @@
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
+ __ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
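The sra/sll pair in the two hunks above untags and re-tags the index: on
32-bit V8 a small integer (smi) is stored shifted left by one bit
(kSmiTag == 0, kSmiTagSize == 1), and StringCharLoadGenerator now expects an
untagged index. A small C++ sketch of the convention:

    #include <cstdint>

    // kSmiTagSize == 1 on 32-bit targets; a low bit of 0 marks a smi.
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // __ sra(index_, index_, kSmiTagSize)
    int32_t SmiTag(int32_t value) { return value << 1; }  // __ sll(index_, index_, kSmiTagSize)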
@@ -7463,7 +7511,8 @@
// Update the write barrier for the array store.
__ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
@@ -7472,14 +7521,16 @@
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t2, t1, t2);
__ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
&slow_elements);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
}
=======================================
--- /branches/3.7/src/mips/codegen-mips.cc Thu Nov 10 03:38:15 2011
+++ /branches/3.7/src/mips/codegen-mips.cc Fri Dec 9 05:19:57 2011
@@ -309,6 +309,98 @@
OMIT_SMI_CHECK);
__ pop(ra);
}
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into result register.
+ __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ And(at, result, Operand(kIsIndirectStringMask));
+ __ Branch(&check_sequential, eq, at, Operand(zero_reg));
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ And(at, result, Operand(kSlicedNotConsMask));
+ __ Branch(&cons_string, eq, at, Operand(zero_reg));
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ sra(at, result, kSmiTagSize);
+ __ Addu(index, index, at);
+ __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(at, Heap::kEmptyStringRootIndex);
+ __ Branch(call_runtime, ne, result, Operand(at));
+ // Get the first of the two strings and load its instance type.
+ __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(at, result, Operand(kStringRepresentationMask));
+ __ Branch(&external_string, ne, at, Operand(zero_reg));
+
+ // Prepare sequential strings
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ Addu(string,
+ string,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ jmp(&check_encoding);
+
+ // Handle external strings.
+ __ bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ And(at, result, Operand(kIsIndirectStringMask));
+ __ Assert(eq, "external string expected, but not found",
+ at, Operand(zero_reg));
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ And(at, result, Operand(kShortExternalStringMask));
+ __ Branch(call_runtime, ne, at, Operand(zero_reg));
+ __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ And(at, result, Operand(kStringEncodingMask));
+ __ Branch(&ascii, ne, at, Operand(zero_reg));
+ // Two-byte string.
+ __ sll(at, index, 1);
+ __ Addu(at, string, at);
+ __ lhu(result, MemOperand(at));
+ __ jmp(&done);
+ __ bind(&ascii);
+ // Ascii string.
+ __ Addu(at, string, index);
+ __ lbu(result, MemOperand(at));
+ __ bind(&done);
+}
#undef __
=======================================
--- /branches/3.7/src/mips/codegen-mips.h Thu Oct 27 00:38:48 2011
+++ /branches/3.7/src/mips/codegen-mips.h Fri Dec 9 05:19:57 2011
@@ -75,6 +75,21 @@
};
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_
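The contract above (untagged |index| in, character code in |result| out)
hides a fair amount of dispatch. A toy C++ model of the control flow the
generator emits, with invented types standing in for V8's string
representations; throwing stands in for the call_runtime label:

    #include <stdexcept>
    #include <string>

    enum Shape { kSeq, kCons, kSliced, kExternal, kShortExternal };

    struct Str {  // toy stand-in for V8's string hierarchy
      Shape shape;
      std::u16string chars;   // payload for kSeq / kExternal
      Str* first = nullptr;   // cons: left part
      Str* second = nullptr;  // cons: right part (empty on the fast path)
      Str* parent = nullptr;  // slice: underlying string
      int offset = 0;         // slice: start offset into the parent
    };

    char16_t CharAt(Str* s, int index) {
      if (s->shape == kSliced) {        // unwrap slice, adjust index
        index += s->offset;
        s = s->parent;
      } else if (s->shape == kCons) {   // flat cons: second part must be empty
        if (!s->second->chars.empty()) throw std::runtime_error("call_runtime");
        s = s->first;
      }
      // Slices and cons strings always wrap a sequential or external string,
      // so one unwrap suffices; short external strings still go to the runtime.
      if (s->shape == kShortExternal) throw std::runtime_error("call_runtime");
      return s->chars[index];           // seq and external now look alike
    }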
=======================================
--- /branches/3.7/src/mips/full-codegen-mips.cc Thu Dec 1 00:22:35 2011
+++ /branches/3.7/src/mips/full-codegen-mips.cc Fri Dec 9 05:19:57 2011
@@ -1424,10 +1424,11 @@
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a1, Operand(expr->constant_properties()));
+ __ li(a1, Operand(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1436,10 +1437,15 @@
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
__ Push(a3, a2, a1, a0);
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
@@ -1540,6 +1546,7 @@
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1549,7 +1556,7 @@
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
__ Push(a3, a2, a1);
- if (constant_elements_values->map() ==
+ if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1564,10 +1571,9 @@
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub::Mode mode = has_fast_elements
+ ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
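Both literal sites in this file now pick between the runtime and a stub based
on depth and shape. A sketch of the object-literal side of that decision; the
cutoff constant mirrors FastCloneShallowObjectStub::kMaximumClonedProperties,
and its value here is assumed:

    enum CloneCall { kRuntimeDeep, kRuntimeShallow, kFastStub };

    const int kMaximumClonedProperties = 6;  // assumed; the stub header defines it

    CloneCall ChooseObjectLiteralCall(int depth, bool fast_elements,
                                      int properties_count) {
      if (depth > 1) return kRuntimeDeep;     // nested literals: full runtime
      if (!fast_elements ||
          properties_count > kMaximumClonedProperties) {
        return kRuntimeShallow;               // too large or odd elements
      }
      return kFastStub;                       // FastCloneShallowObjectStub
    }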
@@ -1589,65 +1595,30 @@
__ push(v0);
result_saved = true;
}
+
VisitForAccumulatorValue(subexpr);
- __ lw(t6, MemOperand(sp)); // Copy of array literal.
- __ lw(a1, FieldMemOperand(t6, JSObject::kElementsOffset));
- __ lw(a2, FieldMemOperand(t6, JSObject::kMapOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
- __ CheckFastElements(a2, a3, &double_elements);
-
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
- __ JumpIfSmi(result_register(), &smi_element);
- __ CheckFastSmiOnlyElements(a2, a3, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- __ push(t6); // Copy of array literal.
- __ li(a1, Operand(Smi::FromInt(i)));
- __ li(a2, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ li(a3, Operand(Smi::FromInt(strict_mode_flag))); // Strict mode.
- __ Push(a1, result_register(), a2, a3);
- __ CallRuntime(Runtime::kSetProperty, 5);
- __ Branch(&element_done);
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ li(a3, Operand(Smi::FromInt(i)));
- __ StoreNumberToDoubleElements(result_register(), a3, t6, a1, t0, t1, t5,
- t3, &slow_elements);
- __ Branch(&element_done);
-
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ sw(result_register(), FieldMemOperand(a1, offset));
- // Update the write barrier for the array store.
-
- __ RecordWriteField(
- a1, offset, result_register(), a2, kRAHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Branch(&element_done);
-
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ sw(result_register(), FieldMemOperand(a1, offset));
- // Fall through
-
- __ bind(&element_done);
+ if (constant_elements_kind == FAST_ELEMENTS) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ lw(t2, MemOperand(sp)); // Copy of array literal.
+ __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
+ __ sw(result_register(), FieldMemOperand(a1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(a1, offset, result_register(), a2,
+ kRAHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ lw(a1, MemOperand(sp)); // Copy of array literal.
+ __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+ __ li(a3, Operand(Smi::FromInt(i)));
+ __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(a0, result_register());
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
-
if (result_saved) {
context()->PlugTOS();
} else {
=======================================
--- /branches/3.7/src/mips/lithium-codegen-mips.cc Wed Dec 7 05:07:25 2011
+++ /branches/3.7/src/mips/lithium-codegen-mips.cc Fri Dec 9 05:19:57 2011
@@ -3520,89 +3520,13 @@
LStringCharCodeAt* instr_;
};
- Register temp = scratch1();
- Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
-
- // Fetch the instance type of the receiver into result register.
- __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ And(temp, result, kIsIndirectStringMask);
- __ Branch(&check_sequential, eq, temp, Operand(zero_reg));
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ And(temp, result, kSlicedNotConsMask);
- __ Branch(&cons_string, eq, temp, Operand(zero_reg));
-
- // Handle slices.
- Label indirect_string_loaded;
- __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ sra(temp, result, kSmiTagSize);
- __ addu(index, index, temp);
- __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle conses.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(temp, Heap::kEmptyStringRootIndex);
- __ Branch(deferred->entry(), ne, result, Operand(temp));
- // Get the first of the two strings and load its instance type.
- __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // Check whether the string is sequential. The only non-sequential
- // shapes we support have just been unwrapped above.
- // Note that if the original string is a cons or slice with an external
- // string as underlying string, we pass that unpacked underlying string with
- // the adjusted index to the runtime function.
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(temp, result, Operand(kStringRepresentationMask));
- __ Branch(deferred->entry(), ne, temp, Operand(zero_reg));
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii_string;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ And(temp, result, Operand(kStringEncodingMask));
- __ Branch(&ascii_string, ne, temp, Operand(zero_reg));
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- Label done;
- __ Addu(result,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ sll(temp, index, 1);
- __ Addu(result, result, temp);
- __ lhu(result, MemOperand(result, 0));
- __ Branch(&done);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ Addu(result,
- string,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ Addu(result, result, index);
- __ lbu(result, MemOperand(result, 0));
-
- __ bind(&done);
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
__ bind(deferred->exit());
}
@@ -4250,20 +4174,99 @@
}
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset) {
+ ASSERT(!source.is(a2));
+ ASSERT(!result.is(a2));
+
+ // Increase the offset so that subsequent objects end up right after
+ // this one.
+ int current_offset = *offset;
+ int size = object->map()->instance_size();
+ *offset += size;
+
+ // Copy object header.
+ ASSERT(object->properties()->length() == 0);
+ ASSERT(object->elements()->length() == 0 ||
+ object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+ int inobject_properties = object->map()->inobject_properties();
+ int header_size = size - inobject_properties * kPointerSize;
+ for (int i = 0; i < header_size; i += kPointerSize) {
+ __ lw(a2, FieldMemOperand(source, i));
+ __ sw(a2, FieldMemOperand(result, current_offset + i));
+ }
+
+ // Copy in-object properties.
+ for (int i = 0; i < inobject_properties; i++) {
+ int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ Addu(a2, result, Operand(*offset));
+ __ sw(a2, FieldMemOperand(result, total_offset));
+ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+ __ sw(a2, FieldMemOperand(result, total_offset));
+ } else {
+ __ li(a2, Operand(value));
+ __ sw(a2, FieldMemOperand(result, total_offset));
+ }
+ }
+}
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+ int size = instr->hydrogen()->total_size();
+
+ // Allocate all objects that are part of the literal in one big
+ // allocation. This avoids multiple limit checks.
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ li(a0, Operand(Smi::FromInt(size)));
+ __ push(a0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+ __ bind(&allocated);
+ int offset = 0;
+ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
+ ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
+
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
__ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
__ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a2, Operand(instr->hydrogen()->constant_properties()));
- __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+ __ li(a2, Operand(constant_properties));
+ int flags = instr->hydrogen()->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ __ li(a1, Operand(Smi::FromInt(flags)));
__ Push(t0, a3, a2, a1);
// Pick the right runtime function to call.
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
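The trick in EmitDeepCopy above is that the whole literal graph shares one
allocation: *offset reserves each object's words before its nested objects
are copied, so parent slots can be filled with forward pointers. A compact
standalone model (toy one-word header, not V8's object layout):

    #include <vector>

    struct Node { std::vector<Node*> children; };  // toy object graph

    // One word of header plus one pointer slot per child.
    int SizeOf(const Node* n) { return 1 + static_cast<int>(n->children.size()); }

    // Lays the graph out contiguously. Each child slot receives the child's
    // buffer index *before* the child is copied, just like the generated
    // code storing result + *offset ahead of the recursive EmitDeepCopy.
    void DeepCopy(const Node* n, std::vector<int>* out, int* offset) {
      int current = *offset;
      *offset += SizeOf(n);                  // reserve this object's words
      out->resize(*offset, 0);
      (*out)[current] = SizeOf(n);           // toy header word
      for (int i = 0; i < static_cast<int>(n->children.size()); ++i) {
        (*out)[current + 1 + i] = *offset;   // where the child will land
        DeepCopy(n->children[i], out, offset);
      }
    }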
=======================================
--- /branches/3.7/src/mips/lithium-codegen-mips.h Thu Dec 1 00:22:35 2011
+++ /branches/3.7/src/mips/lithium-codegen-mips.h Fri Dec 9 05:19:57 2011
@@ -316,6 +316,13 @@
Handle<Map> type,
Handle<String> name);
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset);
+
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
=======================================
--- /branches/3.7/src/mips/lithium-mips.cc Wed Nov 23 04:13:52 2011
+++ /branches/3.7/src/mips/lithium-mips.cc Fri Dec 9 05:19:57 2011
@@ -2071,8 +2071,14 @@
}
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralFast, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+ HObjectLiteralGeneric* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, v0), instr);
}
=======================================
--- /branches/3.7/src/mips/lithium-mips.h Tue Nov 29 06:28:56 2011
+++ /branches/3.7/src/mips/lithium-mips.h Fri Dec 9 05:19:57 2011
@@ -134,7 +134,8 @@
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteral) \
+ V(ObjectLiteralFast) \
+ V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -1899,10 +1900,17 @@
};
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
};
=======================================
--- /branches/3.7/src/version.cc Fri Dec 9 04:01:59 2011
+++ /branches/3.7/src/version.cc Fri Dec 9 05:19:57 2011
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 7
#define BUILD_NUMBER 12
-#define PATCH_LEVEL 9
+#define PATCH_LEVEL 10
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0