Revision: 11102
Author: [email protected]
Date: Wed Mar 21 02:52:17 2012
Log: Version 3.9.23
Use correct arguments adaptation environment when inlining function
containing arguments. (Issue 2014)
Performance and stability improvements on all platforms.
http://code.google.com/p/v8/source/detail?r=11102
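For context, a minimal JavaScript sketch of the scenario behind Issue 2014 (illustrative only; it assumes V8's --allow-natives-syntax flag for %OptimizeFunctionOnNextCall and the assertArrayEquals helper from the mjsunit test harness, and it mirrors the regression test added to test/mjsunit/compiler/inline-arguments.js below):

  // G should see exactly the actual arguments passed to the adaptor,
  // not the adaptor's declared parameter count.
  function G() {
    assertArrayEquals([1, 2], arguments);
  }

  // Declared with four parameters but called with two, so an arguments
  // adaptor frame sits between the caller and this frame.
  function adapt2to4(a, b, c, d) {
    G.apply(this, arguments);
  }

  function test() { adapt2to4(1, 2); }

  test();
  test();
  %OptimizeFunctionOnNextCall(test);
  // Before this change, inlining the G.apply(this, arguments) call could
  // read the parameters from the wrong (non-adaptor) environment.
  test();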
Modified:
/trunk/ChangeLog
/trunk/src/builtins.cc
/trunk/src/heap.cc
/trunk/src/heap.h
/trunk/src/hydrogen.cc
/trunk/src/hydrogen.h
/trunk/src/mips/builtins-mips.cc
/trunk/src/mips/code-stubs-mips.cc
/trunk/src/mips/debug-mips.cc
/trunk/src/mips/ic-mips.cc
/trunk/src/mips/lithium-codegen-mips.cc
/trunk/src/mips/macro-assembler-mips.cc
/trunk/src/mips/macro-assembler-mips.h
/trunk/src/mips/simulator-mips.h
/trunk/src/mips/stub-cache-mips.cc
/trunk/src/objects.cc
/trunk/src/runtime-profiler.cc
/trunk/src/version.cc
/trunk/test/cctest/test-heap-profiler.cc
/trunk/test/cctest/test-log-stack-tracer.cc
/trunk/test/mjsunit/compiler/inline-arguments.js
=======================================
--- /trunk/ChangeLog Tue Mar 20 06:01:16 2012
+++ /trunk/ChangeLog Wed Mar 21 02:52:17 2012
@@ -1,3 +1,11 @@
+2012-03-21: Version 3.9.23
+
+ Use correct arguments adaptation environment when inlining function
+ containing arguments. (Issue 2014)
+
+ Performance and stability improvements on all platforms.
+
+
2012-03-20: Version 3.9.22
Enabled count-based profiler by default.
=======================================
--- /trunk/src/builtins.cc Mon Mar 19 04:01:52 2012
+++ /trunk/src/builtins.cc Wed Mar 21 02:52:17 2012
@@ -33,6 +33,7 @@
#include "builtins.h"
#include "gdb-jit.h"
#include "ic-inl.h"
+#include "heap-profiler.h"
#include "mark-compact.h"
#include "vm-state-inl.h"
@@ -380,6 +381,8 @@
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(),
-size_delta);
}
+ HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
+ elms->address() + size_delta));
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
}
=======================================
--- /trunk/src/heap.cc Mon Mar 19 04:01:52 2012
+++ /trunk/src/heap.cc Wed Mar 21 02:52:17 2012
@@ -4812,13 +4812,56 @@
}
ASSERT(IsHeapIterable());
}
+
+
+void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
+  // This flag prevents incremental marking from requesting GC via stack guard
+ idle_notification_will_schedule_next_gc_ = true;
+ incremental_marking()->Step(step_size);
+ idle_notification_will_schedule_next_gc_ = false;
+
+ if (incremental_marking()->IsComplete()) {
+ bool uncommit = false;
+ if (gc_count_at_last_idle_gc_ == gc_count_) {
+ // No GC since the last full GC, the mutator is probably not active.
+ isolate_->compilation_cache()->Clear();
+ uncommit = true;
+ }
+    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+ gc_count_at_last_idle_gc_ = gc_count_;
+ if (uncommit) {
+ new_space_.Shrink();
+ UncommitFromSpace();
+ }
+ }
+}
bool Heap::IdleNotification(int hint) {
- if (hint >= 1000) return IdleGlobalGC();
- if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
+ intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
+ // The size factor is in range [3..100].
+  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+
+ if (contexts_disposed_ > 0) {
+ int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
+ if (hint >= mark_sweep_time && !FLAG_expose_gc) {
+ HistogramTimerScope scope(isolate_->counters()->gc_context());
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ "idle notification: contexts disposed");
+ } else {
+ AdvanceIdleIncrementalMarking(step_size);
+ contexts_disposed_ = 0;
+ }
+ // Make sure that we have no pending context disposals.
+    // Take into account that we might have decided to delay full collection
+ // because incremental marking is in progress.
+    ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
+ return false;
+ }
+
+ if (hint >= 1000 || !FLAG_incremental_marking ||
FLAG_expose_gc || Serializer::enabled()) {
- return true;
+ return IdleGlobalGC();
}
// By doing small chunks of GC work in each IdleNotification,
@@ -4830,9 +4873,6 @@
// 3. many lazy sweep steps.
// Use mark-sweep-compact events to count incremental GCs in a round.
- intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
- // The size factor is in range [3..100].
-  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
if (incremental_marking()->IsStopped()) {
if (!IsSweepingComplete() &&
@@ -4859,32 +4899,14 @@
}
if (incremental_marking()->IsStopped()) {
- if (hint < 1000 && !WorthStartingGCWhenIdle()) {
+ if (!WorthStartingGCWhenIdle()) {
FinishIdleRound();
return true;
}
incremental_marking()->Start();
}
-  // This flag prevents incremental marking from requesting GC via stack guard
- idle_notification_will_schedule_next_gc_ = true;
- incremental_marking()->Step(step_size);
- idle_notification_will_schedule_next_gc_ = false;
-
- if (incremental_marking()->IsComplete()) {
- bool uncommit = false;
- if (gc_count_at_last_idle_gc_ == gc_count_) {
- // No GC since the last full GC, the mutator is probably not active.
- isolate_->compilation_cache()->Clear();
- uncommit = true;
- }
-    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
- gc_count_at_last_idle_gc_ = gc_count_;
- if (uncommit) {
- new_space_.Shrink();
- UncommitFromSpace();
- }
- }
+ AdvanceIdleIncrementalMarking(step_size);
return false;
}
@@ -4917,13 +4939,7 @@
}
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
- if (contexts_disposed_ > 0) {
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: contexts disposed");
- } else {
- CollectGarbage(NEW_SPACE, "idle notification");
- }
+ CollectGarbage(NEW_SPACE, "idle notification");
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
} else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
@@ -4942,23 +4958,6 @@
last_idle_notification_gc_count_ = gc_count_;
number_idle_notifications_ = 0;
finished = true;
- } else if (contexts_disposed_ > 0) {
- if (FLAG_expose_gc) {
- contexts_disposed_ = 0;
- } else {
- HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kReduceMemoryFootprintMask,
- "idle notification: contexts disposed");
- last_idle_notification_gc_count_ = gc_count_;
- }
- // If this is the first idle notification, we reset the
- // notification count to avoid letting idle notifications for
- // context disposal garbage collections start a potentially too
- // aggressive idle GC cycle.
- if (number_idle_notifications_ <= 1) {
- number_idle_notifications_ = 0;
- uncommit = false;
- }
} else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
// If we have received more than kIdlesBeforeMarkCompact idle
// notifications we do not perform any cleanup because we don't
@@ -4966,11 +4965,6 @@
finished = true;
}
- // Make sure that we have no pending context disposals and
- // conditionally uncommit from space.
- // Take into account that we might have decided to delay full collection
- // because incremental marking is in progress.
- ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
if (uncommit) UncommitFromSpace();
return finished;
=======================================
--- /trunk/src/heap.h Mon Mar 19 04:01:52 2012
+++ /trunk/src/heap.h Wed Mar 21 02:52:17 2012
@@ -1965,10 +1965,25 @@
}
return incremental_marking()->WorthActivating();
}
+
+ // Estimates how many milliseconds a Mark-Sweep would take to complete.
+ // In idle notification handler we assume that this function will return:
+ // - a number less than 10 for small heaps, which are less than 8Mb.
+  // - a number greater than 10 for large heaps, which are greater than 32Mb.
+ int TimeMarkSweepWouldTakeInMs() {
+    // Rough estimate of how many megabytes of heap can be processed in 1 ms.
+ static const int kMbPerMs = 2;
+
+ int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
+ return heap_size_mb / kMbPerMs;
+ }
// Returns true if no more GC work is left.
bool IdleGlobalGC();
+ void AdvanceIdleIncrementalMarking(intptr_t step_size);
+
+
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
=======================================
--- /trunk/src/hydrogen.cc Tue Mar 20 06:01:16 2012
+++ /trunk/src/hydrogen.cc Wed Mar 21 02:52:17 2012
@@ -4523,7 +4523,7 @@
HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
Property* expr) {
- if (expr->IsUninitialized()) {
+ if (expr->IsUninitialized() && !FLAG_always_opt) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
}
@@ -5749,20 +5749,11 @@
AddInstruction(new(zone()) HWrapReceiver(receiver, function));
PushAndAdd(new(zone()) HPushArgument(wrapped_receiver));
- int parameter_count = environment()->parameter_count();
- for (int i = 1; i < environment()->parameter_count(); i++) {
- PushAndAdd(new(zone()) HPushArgument(environment()->Lookup(i)));
- }
-
- if (environment()->outer()->frame_type() == ARGUMENTS_ADAPTOR) {
- HEnvironment* adaptor = environment()->outer();
- parameter_count = adaptor->parameter_count();
-
- for (int i = environment()->parameter_count();
- i < adaptor->parameter_count();
- i++) {
- PushAndAdd(new(zone()) HPushArgument(adaptor->Lookup(i)));
- }
+ HEnvironment* arguments_env = environment()->arguments_environment();
+
+ int parameter_count = arguments_env->parameter_count();
+ for (int i = 1; i < arguments_env->parameter_count(); i++) {
+ PushAndAdd(new(zone()) HPushArgument(arguments_env->Lookup(i)));
}
HInvokeFunction* call = new(zone()) HInvokeFunction(
=======================================
--- /trunk/src/hydrogen.h Mon Mar 19 04:01:52 2012
+++ /trunk/src/hydrogen.h Wed Mar 21 02:52:17 2012
@@ -399,6 +399,10 @@
if (drop_extra) outer->Drop(1);
return outer;
}
+
+ HEnvironment* arguments_environment() {
+ return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
+ }
// Simple accessors.
Handle<JSFunction> closure() const { return closure_; }
=======================================
--- /trunk/src/mips/builtins-mips.cc Fri Mar 9 02:52:05 2012
+++ /trunk/src/mips/builtins-mips.cc Wed Mar 21 02:52:17 2012
@@ -67,9 +67,11 @@
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
- // JumpToExternalReference expects a0 to contain the number of arguments
+ // JumpToExternalReference expects s0 to contain the number of arguments
// including the receiver and the extra arguments.
- __ Addu(a0, a0, Operand(num_extra_args + 1));
+ __ Addu(s0, a0, num_extra_args + 1);
+ __ sll(s1, s0, kPointerSizeLog2);
+ __ Subu(s1, s1, kPointerSize);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
=======================================
--- /trunk/src/mips/code-stubs-mips.cc Mon Mar 19 04:01:52 2012
+++ /trunk/src/mips/code-stubs-mips.cc Wed Mar 21 02:52:17 2012
@@ -70,13 +70,13 @@
// The ToNumber stub takes one argument in a0.
Label check_heap_number, call_builtin;
__ JumpIfNotSmi(a0, &check_heap_number);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
__ bind(&check_heap_number);
EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
- __ Ret();
__ bind(&call_builtin);
__ push(a0);
@@ -128,9 +128,9 @@
// found in the shared function info object.
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
__ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
// Return result. The argument function info has been popped already.
+ __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
__ Ret();
// Create a new closure through the slower runtime call.
@@ -179,8 +179,7 @@
// Remove the on-stack argument and return.
__ mov(cp, v0);
- __ Pop();
- __ Ret();
+ __ DropAndRet(1);
// Need to collect. Call into runtime system.
__ bind(&gc);
@@ -242,8 +241,7 @@
// Remove the on-stack argument and return.
__ mov(cp, v0);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
// Need to collect. Call into runtime system.
__ bind(&gc);
@@ -368,8 +366,7 @@
GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
// Return and remove the on-stack parameters.
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
@@ -405,16 +402,14 @@
// Allocate the JS object and copy header together with all in-object
// properties from the boilerplate.
- __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
+ __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
for (int i = 0; i < size; i += kPointerSize) {
__ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(a0, i));
+ __ sw(a1, FieldMemOperand(v0, i));
}
// Return and remove the on-stack parameters.
- __ Drop(4);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
+ __ DropAndRet(4);
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
@@ -492,8 +487,8 @@
__ Or(at, exponent, Operand(exponent_word_for_1));
__ Movn(exponent, at, source_); // Write exp when source not 0.
// 1, 0 and -1 all have 0 for the second word.
+ __ Ret(USE_DELAY_SLOT);
__ mov(mantissa, zero_reg);
- __ Ret();
__ bind(&not_special);
// Count leading zeros.
@@ -514,9 +509,9 @@
__ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
// And the top (top 20 bits).
__ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
- __ or_(exponent, exponent, source_);
-
- __ Ret();
+
+ __ Ret(USE_DELAY_SLOT);
+ __ or_(exponent, exponent, source_);
}
@@ -1025,9 +1020,9 @@
__ sw(v0, FieldMemOperand(heap_number_result,
HeapNumber::kMantissaOffset));
}
// Place heap_number_result in v0 and return to the pushed return address.
- __ mov(v0, heap_number_result);
__ pop(ra);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, heap_number_result);
}
@@ -1163,6 +1158,7 @@
}
__ bind(&return_equal);
+
if (cc == less) {
__ li(v0, Operand(GREATER)); // Things aren't less than themselves.
} else if (cc == greater) {
@@ -1234,8 +1230,8 @@
if (strict) {
// If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal (lhs is already not zero).
+ __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
__ mov(v0, lhs);
- __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
@@ -1273,8 +1269,8 @@
if (strict) {
// If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal.
+ __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
__ li(v0, Operand(1));
- __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
@@ -1354,12 +1350,13 @@
__ bind(&one_is_nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
+
if (cc == lt || cc == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
}
- __ Ret(); // Return.
+ __ Ret();
__ bind(&neither_is_nan);
}
@@ -1408,6 +1405,7 @@
__ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
__ bind(&return_result_equal);
+
__ li(v0, Operand(EQUAL));
__ Ret();
}
@@ -1439,6 +1437,7 @@
__ BranchF(&less_than, NULL, lt, f12, f14);
// Not equal, not less, not NaN, must be greater.
+
__ li(v0, Operand(GREATER));
__ Ret();
@@ -1469,8 +1468,8 @@
// Return non-zero.
Label return_not_equal;
__ bind(&return_not_equal);
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1));
- __ Ret();
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
@@ -1549,8 +1548,8 @@
// Both are symbols. We already checked they weren't the same pointer
// so they are not equal.
+ __ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1)); // Non-zero indicates not equal.
- __ Ret();
__ bind(&object_test);
__ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
@@ -1565,8 +1564,8 @@
__ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
__ and_(a0, a2, a3);
__ And(a0, a0, Operand(1 << Map::kIsUndetectable));
- __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ xori(v0, a0, 1 << Map::kIsUndetectable);
}
@@ -1673,8 +1672,7 @@
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false,
&runtime);
- __ Addu(sp, sp, Operand(1 * kPointerSize));
- __ Ret();
+ __ DropAndRet(1);
__ bind(&runtime);
// Handle number to string in the runtime system if not found in the cache.
@@ -1696,8 +1694,8 @@
__ JumpIfNotSmi(a2, &not_two_smis);
__ sra(a1, a1, 1);
__ sra(a0, a0, 1);
- __ Subu(v0, a1, a0);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a1, a0);
__ bind(&not_two_smis);
} else if (FLAG_debug_code) {
__ Or(a2, a1, a0);
@@ -1916,8 +1914,8 @@
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
Label skip;
__ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+ __ Ret(USE_DELAY_SLOT); // the string length is OK as the return value
__ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- __ Ret(); // the string length is OK as the return value
__ bind(&skip);
}
@@ -2092,8 +2090,8 @@
__ Branch(slow, eq, t0, Operand(zero_reg));
// Return '0 - value'.
- __ Subu(v0, zero_reg, a0);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, zero_reg, a0);
}
@@ -2423,8 +2421,8 @@
// Negating it results in 'lt'.
__ Branch(&skip, lt, scratch2, Operand(zero_reg));
ASSERT(Smi::FromInt(0) == 0);
- __ mov(v0, zero_reg);
- __ Ret(); // Return smi 0 if the non-zero one was positive.
+ __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, zero_reg);  // Return smi 0 if the non-zero one was positive.
__ bind(&skip);
// We fall through here if we multiplied a negative number with 0, because
// that would mean we should produce -0.
@@ -2479,23 +2477,23 @@
}
break;
case Token::BIT_OR:
- __ Or(v0, left, Operand(right));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ or_(v0, left, right);
break;
case Token::BIT_AND:
- __ And(v0, left, Operand(right));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ and_(v0, left, right);
break;
case Token::BIT_XOR:
- __ Xor(v0, left, Operand(right));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ xor_(v0, left, right);
break;
case Token::SAR:
// Remove tags from right operand.
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ srav(scratch1, left, scratch1);
// Smi tag result.
- __ And(v0, scratch1, Operand(~kSmiTagMask));
+ __ And(v0, scratch1, ~kSmiTagMask);
__ Ret();
break;
case Token::SHR:
@@ -2607,8 +2605,8 @@
// kValueOffset. On MIPS this workaround is built into sdc1 so
// there's no point in generating even more instructions.
__ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, result);
- __ Ret();
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
@@ -3482,8 +3480,8 @@
__ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
__ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, cache_entry);
- __ Ret();
__ bind(&invalid_cache);
// The cache is invalid. Call runtime which will recreate the
@@ -3662,7 +3660,7 @@
ne,
double_exponent,
double_scratch);
-
+ // double_scratch can be overwritten in the delay slot.
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
@@ -3682,7 +3680,7 @@
ne,
double_exponent,
double_scratch);
-
+ // double_scratch can be overwritten in the delay slot.
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
@@ -3866,9 +3864,10 @@
__ sw(a1, MemOperand(a0));
}
- // Prepare arguments for C routine: a0 = argc, a1 = argv
+ // Prepare arguments for C routine.
+ // a0 = argc
__ mov(a0, s0);
- __ mov(a1, s1);
+ // a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
// also need to reserve the 4 argument slots on the stack.
@@ -3888,29 +3887,27 @@
// coverage code can interfere with the proper calculation of ra.
Label find_ra;
masm->bal(&find_ra); // bal exposes branch delay slot.
- masm->nop(); // Branch delay slot nop.
+ masm->mov(a1, s1);
masm->bind(&find_ra);
// Adjust the value in ra to point to the correct return location, 2nd
// instruction past the real call into C code (the jalr(t9)), and push it.
// This is the return address of the exit frame.
- const int kNumInstructionsToJump = 6;
+ const int kNumInstructionsToJump = 5;
masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
- masm->Subu(sp, sp, kCArgsSlotsSize);
+ // Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
// Call the C routine.
masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
masm->jalr(t9);
- masm->nop(); // Branch delay slot nop.
+ // Set up sp in the delay slot.
+ masm->addiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
ASSERT_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
-
- // Restore stack (remove arg slots).
- __ Addu(sp, sp, kCArgsSlotsSize);
if (always_allocate) {
// It's okay to clobber a2 and a3 here. v0 & v1 contain result.
@@ -3925,14 +3922,16 @@
STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
__ addiu(a2, v0, 1);
__ andi(t0, a2, kFailureTagMask);
- __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
+ __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
+ // Restore stack (remove arg slots) in branch delay slot.
+ __ addiu(sp, sp, kCArgsSlotsSize);
+
// Exit C frame and return.
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_, s0);
- __ Ret();
+ __ LeaveExitFrame(save_doubles_, s0, true);
// Check if we should retry or throw exception.
Label retry;
@@ -3943,8 +3942,10 @@
// Special handling of out of memory exceptions.
Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(throw_out_of_memory_exception, eq,
+ __ Branch(USE_DELAY_SLOT, throw_out_of_memory_exception, eq,
v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ // If we throw the OOM exception, the value of a3 doesn't matter.
+ // Any instruction can be in the delay slot that's not a jump.
// Retrieve the pending exception and clear the variable.
__ li(a3, Operand(isolate->factory()->the_hole_value()));
@@ -3968,8 +3969,9 @@
void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
- // a0: number of arguments including receiver
- // a1: pointer to builtin function
+ // s0: number of arguments including receiver
+ // s1: size of arguments excluding receiver
+ // s2: pointer to builtin function
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
@@ -3979,19 +3981,18 @@
// this by performing a garbage collection and retrying the
// builtin once.
+ // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
+  // The reason for this is that these arguments would need to be saved anyway
+ // so it's faster to set them up directly.
+ // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
+
// Compute the argv pointer in a callee-saved register.
- __ sll(s1, a0, kPointerSizeLog2);
__ Addu(s1, sp, s1);
- __ Subu(s1, s1, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
- // Set up argc and the builtin function in callee-saved registers.
- __ mov(s0, a0);
- __ mov(s2, a1);
-
// s0: number of arguments (C callee-saved)
// s1: pointer to first argument (C callee-saved)
// s2: pointer to builtin function (C callee-saved)
@@ -4693,8 +4694,7 @@
__ Branch(&arguments_loop, lt, t5, Operand(a2));
// Return and remove the on-stack parameters.
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
// Do the runtime call to allocate the arguments object.
// a2 = argument count (tagged)
@@ -4799,8 +4799,7 @@
// Return and remove the on-stack parameters.
__ bind(&done);
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
@@ -5149,8 +5148,7 @@
__ bind(&failure);
// For failure and exception return null.
__ li(v0, Operand(isolate->factory()->null_value()));
- __ Addu(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
+ __ DropAndRet(4);
// Process the result from the native regexp code.
__ bind(&success);
@@ -5211,14 +5209,13 @@
__ sll(a3, a3, kSmiTagSize); // Convert to Smi.
__ sw(a3, MemOperand(a0, 0));
__ Branch(&next_capture, USE_DELAY_SLOT);
- __ addiu(a0, a0, kPointerSize); // In branch delay slot.
+ __ addiu(a0, a0, kPointerSize); // In branch delay slot.
__ bind(&done);
// Return last match info.
__ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
- __ Addu(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
+ __ DropAndRet(4);
// External string. Short external strings have already been ruled out.
// a0: scratch
@@ -5330,8 +5327,7 @@
__ addiu(a3, a3, kPointerSize); // In branch delay slot.
__ bind(&done);
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ DropAndRet(3);
__ bind(&slowcase);
__ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
@@ -6136,7 +6132,7 @@
STATIC_ASSERT(kIsIndirectStringMask != 0);
__ And(t0, a1, Operand(kIsIndirectStringMask));
__ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0,
Operand(zero_reg));
-
+  // t0 is used as a scratch register and can be overwritten in either case.
__ And(t0, a1, Operand(kSlicedNotConsMask));
__ Branch(&sliced_string, ne, t0, Operand(zero_reg));
// Cons string. Check whether it is flat, then fetch first part.
@@ -6409,8 +6405,7 @@
STATIC_ASSERT(kSmiTag == 0);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
__ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
- __ Ret();
+ __ DropAndRet(2);
__ bind(&not_same);
@@ -6815,16 +6810,16 @@
__ BranchF(&fpu_lt, NULL, lt, f0, f2);
// Otherwise it's greater, so just fall thru, and return.
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(GREATER)); // In delay slot.
+ __ li(v0, Operand(GREATER));
+ __ Ret();
__ bind(&fpu_eq);
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(EQUAL)); // In delay slot.
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
__ bind(&fpu_lt);
- __ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(LESS)); // In delay slot.
+ __ li(v0, Operand(LESS));
+ __ Ret();
}
__ bind(&unordered);
@@ -6924,9 +6919,9 @@
Label left_ne_right;
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
- __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
+ __ Branch(&left_ne_right, ne, left, Operand(right));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, zero_reg); // In the delay slot.
- __ Ret();
__ bind(&left_ne_right);
// Handle not identical strings.
@@ -6939,12 +6934,12 @@
__ And(tmp3, tmp1, Operand(tmp2));
__ And(tmp5, tmp3, Operand(kIsSymbolMask));
Label is_symbol;
- __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
- __ mov(v0, a0); // In the delay slot.
+ __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(a0));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // In the delay slot.
__ bind(&is_symbol);
}
@@ -6988,8 +6983,8 @@
__ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
ASSERT(GetCondition() == eq);
- __ Subu(v0, a0, Operand(a1));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a0, a1);
__ bind(&miss);
GenerateMiss(masm);
@@ -7022,8 +7017,9 @@
__ push(ra);
__ Push(a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
- __ push(t0);
- __ CallExternalReference(miss, 3);
+ __ addiu(sp, sp, -kPointerSize);
+ __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
+ __ sw(t0, MemOperand(sp)); // In the delay slot.
// Compute the entry point of the rewritten stub.
__ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -7333,17 +7329,17 @@
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
if (mode_ == POSITIVE_LOOKUP) {
+ __ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
- __ Ret();
}
__ bind(&in_dictionary);
+ __ Ret(USE_DELAY_SLOT);
__ li(result, 1);
- __ Ret();
__ bind(&not_in_dictionary);
+ __ Ret(USE_DELAY_SLOT);
__ mov(result, zero_reg);
- __ Ret();
}
=======================================
--- /trunk/src/mips/debug-mips.cc Wed Feb 1 02:48:36 2012
+++ /trunk/src/mips/debug-mips.cc Wed Mar 21 02:52:17 2012
@@ -152,8 +152,8 @@
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(a0, zero_reg); // No arguments.
- __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+ __ PrepareCEntryArgs(0); // No arguments.
+  __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
CEntryStub ceb(1);
__ CallStub(&ceb);
=======================================
--- /trunk/src/mips/ic-mips.cc Wed Mar 14 04:16:03 2012
+++ /trunk/src/mips/ic-mips.cc Wed Mar 21 02:52:17 2012
@@ -512,8 +512,8 @@
__ Push(a3, a2);
// Call the entry.
- __ li(a0, Operand(2));
- __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+ __ PrepareCEntryArgs(2);
+ __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
CEntryStub stub(1);
__ CallStub(&stub);
@@ -844,8 +844,8 @@
Label slow, notin;
MemOperand mapped_location =
GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin,
&slow);
+ __ Ret(USE_DELAY_SLOT);
__ lw(v0, mapped_location);
- __ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in a2.
MemOperand unmapped_location =
@@ -853,8 +853,8 @@
__ lw(a2, unmapped_location);
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
__ Branch(&slow, eq, a2, Operand(a3));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
- __ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
}
=======================================
--- /trunk/src/mips/lithium-codegen-mips.cc Tue Mar 20 06:01:16 2012
+++ /trunk/src/mips/lithium-codegen-mips.cc Wed Mar 21 02:52:17 2012
@@ -634,13 +634,9 @@
__ bind(&skip);
}
- if (cc == al) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- // TODO(plind): The Arm port is a little different here, due to their
- // DeOpt jump table, which is not used for Mips yet.
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
- }
+ // TODO(plind): The Arm port is a little different here, due to their
+ // DeOpt jump table, which is not used for Mips yet.
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
}
=======================================
--- /trunk/src/mips/macro-assembler-mips.cc Wed Mar 14 04:16:03 2012
+++ /trunk/src/mips/macro-assembler-mips.cc Wed Mar 21 02:52:17 2012
@@ -2438,8 +2438,15 @@
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ Label skip;
+ if (cond != cc_always) {
+ Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+ }
+ // The first instruction of 'li' may be placed in the delay slot.
+ // This is not an issue, t9 is expected to be clobbered anyway.
li(t9, Operand(target, rmode));
- Jump(t9, cond, rs, rt, bd);
+ Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+ bind(&skip);
}
@@ -2569,7 +2576,7 @@
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
- ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
@@ -2639,14 +2646,16 @@
nop();
}
+void MacroAssembler::DropAndRet(int drop) {
+ Ret(USE_DELAY_SLOT);
+ addiu(sp, sp, drop * kPointerSize);
+}
void MacroAssembler::DropAndRet(int drop,
Condition cond,
Register r1,
const Operand& r2) {
- // This is a workaround to make sure only one branch instruction is
- // generated. It relies on Drop and Ret not creating branches if
- // cond == cc_always.
+ // Both Drop and Ret need to be conditional.
Label skip;
if (cond != cc_always) {
Branch(&skip, NegateCondition(cond), r1, r2);
@@ -2713,8 +2722,8 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- mov(a0, zero_reg);
- li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ PrepareCEntryArgs(0);
+  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
@@ -3876,10 +3885,13 @@
// -----------------------------------------------------------------------------
// Runtime calls.
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
- Register r1, const Operand& r2) {
+void MacroAssembler::CallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
}
@@ -3962,8 +3974,7 @@
lw(t1, MemOperand(at));
Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0);
- Ret();
+ LeaveExitFrame(false, s0, true);
bind(&promote_scheduled_exception);
TailCallExternalReference(
@@ -4161,8 +4172,8 @@
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- li(a0, num_arguments);
- li(a1, Operand(ExternalReference(f, isolate())));
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ExternalReference(f, isolate()));
CEntryStub stub(1);
CallStub(&stub);
}
@@ -4170,8 +4181,8 @@
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
- li(a0, Operand(function->nargs));
- li(a1, Operand(ExternalReference(function, isolate())));
+ PrepareCEntryArgs(function->nargs);
+ PrepareCEntryFunction(ExternalReference(function, isolate()));
CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -4183,12 +4194,13 @@
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- li(a0, Operand(num_arguments));
- li(a1, Operand(ext));
+ int num_arguments,
+ BranchDelaySlot bd) {
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ext);
CEntryStub stub(1);
- CallStub(&stub);
+ CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}
@@ -4199,7 +4211,7 @@
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- li(a0, Operand(num_arguments));
+ PrepareCEntryArgs(num_arguments);
JumpToExternalReference(ext);
}
@@ -4213,10 +4225,16 @@
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
- li(a1, Operand(builtin));
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ BranchDelaySlot bd) {
+ PrepareCEntryFunction(builtin);
CEntryStub stub(1);
- Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ Jump(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ al,
+ zero_reg,
+ Operand(zero_reg),
+ bd);
}
@@ -4563,7 +4581,8 @@
void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
+ Register argument_count,
+ bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
@@ -4589,11 +4608,17 @@
mov(sp, fp); // Respect ABI stack constraint.
lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
- addiu(sp, sp, 8);
+
if (argument_count.is_valid()) {
sll(t8, argument_count, kPointerSizeLog2);
addu(sp, sp, t8);
}
+
+ if (do_return) {
+ Ret(USE_DELAY_SLOT);
+    // If returning, the instruction in the delay slot will be the addiu below.
+ }
+ addiu(sp, sp, 8);
}
=======================================
--- /trunk/src/mips/macro-assembler-mips.h Mon Mar 19 04:01:52 2012
+++ /trunk/src/mips/macro-assembler-mips.h Wed Mar 21 02:52:17 2012
@@ -193,10 +193,14 @@
Register reg = no_reg,
const Operand& op = Operand(no_reg));
- void DropAndRet(int drop = 0,
- Condition cond = cc_always,
- Register reg = no_reg,
- const Operand& op = Operand(no_reg));
+ // Trivial case of DropAndRet that utilizes the delay slot and only emits
+ // 2 instructions.
+ void DropAndRet(int drop);
+
+ void DropAndRet(int drop,
+ Condition cond,
+ Register reg,
+ const Operand& op);
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
@@ -773,7 +777,9 @@
int stack_space = 0);
// Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles, Register arg_count);
+ void LeaveExitFrame(bool save_doubles,
+ Register arg_count,
+ bool do_return = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -1083,10 +1089,23 @@
// -------------------------------------------------------------------------
// Runtime calls.
+
+ // See comments at the beginning of CEntryStub::Generate.
+ inline void PrepareCEntryArgs(int num_args) {
+ li(s0, num_args);
+ li(s1, (num_args - 1) * kPointerSize);
+ }
+
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(s2, Operand(ref));
+ }
// Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = cc_always,
-                Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ void CallStub(CodeStub* stub,
+ Condition cond = cc_always,
+ Register r1 = zero_reg,
+ const Operand& r2 = Operand(zero_reg),
+ BranchDelaySlot bd = PROTECT);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
@@ -1102,7 +1121,8 @@
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
- int num_arguments);
+ int num_arguments,
+ BranchDelaySlot bd = PROTECT);
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
@@ -1168,7 +1188,8 @@
void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
// Jump to the builtin routine.
- void JumpToExternalReference(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin,
+ BranchDelaySlot bd = PROTECT);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
=======================================
--- /trunk/src/mips/simulator-mips.h Thu Nov 10 03:38:15 2011
+++ /trunk/src/mips/simulator-mips.h Wed Mar 21 02:52:17 2012
@@ -309,6 +309,14 @@
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
void BranchDelayInstructionDecode(Instruction* instr) {
+ if (instr->InstructionBits() == nopInstr) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ set_register(pc, reinterpret_cast<int32_t>(instr) +
+ Instruction::kInstrSize);
+ return;
+ }
+
if (instr->IsForbiddenInBranchDelay()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
=======================================
--- /trunk/src/mips/stub-cache-mips.cc Wed Mar 14 04:16:03 2012
+++ /trunk/src/mips/stub-cache-mips.cc Wed Mar 21 02:52:17 2012
@@ -577,8 +577,8 @@
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
masm->isolate());
- __ li(a0, Operand(5));
- __ li(a1, Operand(ref));
+ __ PrepareCEntryArgs(5);
+ __ PrepareCEntryFunction(ref);
CEntryStub stub(1);
__ CallStub(&stub);
@@ -4107,7 +4107,8 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ JumpIfNotSmi(a0, &miss_force_generic);
+ __ JumpIfNotSmi(a0, &miss_force_generic, at, USE_DELAY_SLOT);
+ // The delay slot can be safely used here, a1 is an object pointer.
// Get the elements array.
__ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
@@ -4115,7 +4116,7 @@
// Check that the key is within bounds.
__ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Branch(&miss_force_generic, hs, a0, Operand(a3));
+ __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
// Load the result and make sure it's not the hole.
__ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4125,8 +4126,8 @@
__ lw(t0, MemOperand(t0));
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ Branch(&miss_force_generic, eq, t0, Operand(t1));
+ __ Ret(USE_DELAY_SLOT);
__ mov(v0, t0);
- __ Ret();
__ bind(&miss_force_generic);
Handle<Code> stub =
=======================================
--- /trunk/src/objects.cc Mon Mar 19 04:01:52 2012
+++ /trunk/src/objects.cc Wed Mar 21 02:52:17 2012
@@ -7380,7 +7380,6 @@
ASSERT(is_compiled() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
- ASSERT(!shared()->optimization_disabled());
Builtins* builtins = GetIsolate()->builtins();
ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
}
=======================================
--- /trunk/src/runtime-profiler.cc Tue Mar 13 01:09:54 2012
+++ /trunk/src/runtime-profiler.cc Wed Mar 21 02:52:17 2012
@@ -268,6 +268,7 @@
// Do not record non-optimizable functions.
if (!function->IsOptimizable()) continue;
+ if (function->shared()->optimization_disabled()) continue;
// Only record top-level code on top of the execution stack and
// avoid optimizing excessively large scripts since top-level code
=======================================
--- /trunk/src/version.cc Tue Mar 20 06:01:16 2012
+++ /trunk/src/version.cc Wed Mar 21 02:52:17 2012
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
-#define BUILD_NUMBER 22
+#define BUILD_NUMBER 23
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
=======================================
--- /trunk/test/cctest/test-heap-profiler.cc Fri Mar 9 02:52:05 2012
+++ /trunk/test/cctest/test-heap-profiler.cc Wed Mar 21 02:52:17 2012
@@ -352,6 +352,59 @@
#define CHECK_NE_UINT64_T(a, b) \
CHECK((a) != (b)) // NOLINT
+TEST(HeapEntryIdsAndArrayShift) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileRun(
+ "function AnObject() {\n"
+ " this.first = 'first';\n"
+ " this.second = 'second';\n"
+ "}\n"
+ "var a = new Array();\n"
+ "for (var i = 0; i < 10; ++i)\n"
+ " a.push(new AnObject());\n");
+ const v8::HeapSnapshot* snapshot1 =
+ v8::HeapProfiler::TakeSnapshot(v8_str("s1"));
+
+ CompileRun(
+ "for (var i = 0; i < 1; ++i)\n"
+ " a.shift();\n");
+
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+ const v8::HeapSnapshot* snapshot2 =
+ v8::HeapProfiler::TakeSnapshot(v8_str("s2"));
+
+ const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
+ const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
+ CHECK_NE_UINT64_T(0, global1->GetId());
+ CHECK_EQ_UINT64_T(global1->GetId(), global2->GetId());
+
+ const v8::HeapGraphNode* a1 =
+ GetProperty(global1, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, a1);
+ const v8::HeapGraphNode* e1 =
+ GetProperty(a1, v8::HeapGraphEdge::kHidden, "1");
+ CHECK_NE(NULL, e1);
+ const v8::HeapGraphNode* k1 =
+ GetProperty(e1, v8::HeapGraphEdge::kInternal, "elements");
+ CHECK_NE(NULL, k1);
+ const v8::HeapGraphNode* a2 =
+ GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, a2);
+ const v8::HeapGraphNode* e2 =
+ GetProperty(a2, v8::HeapGraphEdge::kHidden, "1");
+ CHECK_NE(NULL, e2);
+ const v8::HeapGraphNode* k2 =
+ GetProperty(e2, v8::HeapGraphEdge::kInternal, "elements");
+ CHECK_NE(NULL, k2);
+
+ CHECK_EQ_UINT64_T(a1->GetId(), a2->GetId());
+ CHECK_EQ_UINT64_T(e1->GetId(), e2->GetId());
+ CHECK_EQ_UINT64_T(k1->GetId(), k2->GetId());
+}
+
TEST(HeapEntryIdsAndGC) {
v8::HandleScope scope;
LocalContext env;
=======================================
--- /trunk/test/cctest/test-log-stack-tracer.cc Tue Mar 20 06:01:16 2012
+++ /trunk/test/cctest/test-log-stack-tracer.cc Wed Mar 21 02:52:17 2012
@@ -277,9 +277,7 @@
TEST(CFromJSStackTrace) {
// BUG(1303) Inlining of JSFuncDoTrace() in JSTrace below breaks this test.
i::FLAG_use_inlining = false;
-  // This test does not work with --always-opt because we don't replace the code
- // in the JSFunction at deoptimization in that case.
- i::FLAG_always_opt = false;
+
TickSample sample;
InitTraceEnv(&sample);
=======================================
--- /trunk/test/mjsunit/compiler/inline-arguments.js Wed Mar 14 04:16:03 2012
+++ /trunk/test/mjsunit/compiler/inline-arguments.js Wed Mar 21 02:52:17 2012
@@ -80,3 +80,36 @@
F4(1);
%OptimizeFunctionOnNextCall(F4);
F4(1);
+
+
+// Test correct adaptation of arguments.
+// Strict mode prevents arguments object from shadowing parameters.
+(function () {
+ "use strict";
+
+ function G2() {
+ assertArrayEquals([1,2], arguments);
+ }
+
+ function G4() {
+ assertArrayEquals([1,2,3,4], arguments);
+ }
+
+ function adapt2to4(a, b, c, d) {
+ G2.apply(this, arguments);
+ }
+
+ function adapt4to2(a, b) {
+ G4.apply(this, arguments);
+ }
+
+ function test_adaptation() {
+ adapt2to4(1, 2);
+ adapt4to2(1, 2, 3, 4);
+ }
+
+ test_adaptation();
+ test_adaptation();
+ %OptimizeFunctionOnNextCall(test_adaptation);
+ test_adaptation();
+})();