Revision: 4783
Author: [email protected]
Date: Wed Jun 2 02:37:02 2010
Log: ARM: Track Smis on the top 4 stack positions and Smi loop variables.
Improve code generation for known Smis and suspected Smis.
Review URL: http://codereview.chromium.org/2452002
http://code.google.com/p/v8/source/detail?r=4783
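
Background for readers of the diff: V8 tags a small integer (Smi) with a 0 in
the low bit, so a single 'tst reg, #kSmiTagMask' instruction decides between
the inline integer path and the generic stub. What this revision adds is a
4-bit map on the virtual frame recording which of the top four stack
positions are known to hold Smis, so that test can often be skipped. The
standalone C++ sketch below models only that bookkeeping; it is a simplified
illustration whose names mirror RaiseHeight, LowerHeight and KnownSmiAt from
the patch, not actual V8 code.

// Simplified model of the known-Smi bookkeeping introduced in this
// revision. Assumptions: a Smi is tagged with a low 0 bit (kSmiTag == 0),
// and only the top kTOSKnownSmiMapSize frame slots are tracked.
#include <cassert>
#include <cstdio>

static const unsigned kSmiTagMask = 1;
static const int kTOSKnownSmiMapSize = 4;

// A word passes the Smi test when its low tag bit is 0; this is what the
// generated 'tst reg, #kSmiTagMask' checks.
static bool LooksLikeSmi(unsigned raw_word) {
  return (raw_word & kSmiTagMask) == 0;
}

class FrameModel {
 public:
  FrameModel() : element_count_(0), tos_known_smi_map_(0) {}

  // Mirrors VirtualFrame::RaiseHeight: a push shifts the map left and
  // records whether the new top elements are known Smis.
  void RaiseHeight(int count, unsigned known_smi_map) {
    assert(known_smi_map < (1u << count));
    element_count_ += count;
    if (count >= kTOSKnownSmiMapSize) {
      tos_known_smi_map_ = known_smi_map;
    } else {
      tos_known_smi_map_ = (tos_known_smi_map_ << count) | known_smi_map;
    }
  }

  // Mirrors VirtualFrame::LowerHeight: a pop shifts knowledge out.
  void LowerHeight(int count) {
    element_count_ -= count;
    if (count >= kTOSKnownSmiMapSize) {
      tos_known_smi_map_ = 0;
    } else {
      tos_known_smi_map_ >>= count;
    }
  }

  // Mirrors VirtualFrame::KnownSmiAt: bit 0 describes the top of stack.
  bool KnownSmiAt(int index) const {
    if (index >= kTOSKnownSmiMapSize) return false;
    return (tos_known_smi_map_ & (1u << index)) != 0;
  }

  int height() const { return element_count_; }

 private:
  int element_count_;
  unsigned tos_known_smi_map_;
};

int main() {
  FrameModel frame;
  frame.RaiseHeight(1, 1);  // push a literal Smi
  frame.RaiseHeight(1, 0);  // push a value of unknown type
  assert(!frame.KnownSmiAt(0) && frame.KnownSmiAt(1));
  frame.LowerHeight(1);         // pop the unknown value
  assert(frame.KnownSmiAt(0));  // the Smi is on top again
  assert(LooksLikeSmi(6u) && !LooksLikeSmi(7u));
  std::printf("height %d, ok\n", frame.height());
  return 0;
}

When both operands are known Smis, the new GenericBinaryOperation falls
through with cond == al and never reaches the stub call; see the
BIT_OR/BIT_AND/BIT_XOR case in the codegen-arm.cc hunk below.
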
Modified:
/branches/bleeding_edge/src/arm/codegen-arm.cc
/branches/bleeding_edge/src/arm/codegen-arm.h
/branches/bleeding_edge/src/arm/jump-target-arm.cc
/branches/bleeding_edge/src/arm/virtual-frame-arm-inl.h
/branches/bleeding_edge/src/arm/virtual-frame-arm.cc
/branches/bleeding_edge/src/arm/virtual-frame-arm.h
/branches/bleeding_edge/src/data-flow.cc
/branches/bleeding_edge/src/type-info.h
/branches/bleeding_edge/src/virtual-frame-light-inl.h
/branches/bleeding_edge/src/virtual-frame-light.cc
=======================================
--- /branches/bleeding_edge/src/arm/codegen-arm.cc Tue Jun 1 07:08:19 2010
+++ /branches/bleeding_edge/src/arm/codegen-arm.cc Wed Jun 2 02:37:02 2010
@@ -109,21 +109,28 @@
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- true_target_(NULL),
- false_target_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
+ previous_(owner->state()) {
+ owner->set_state(this);
}
-CodeGenState::CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target)
- : owner_(owner),
+ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
+ JumpTarget* true_target,
+ JumpTarget* false_target)
+ : CodeGenState(owner),
true_target_(true_target),
- false_target_(false_target),
- previous_(owner->state()) {
- owner_->set_state(this);
+ false_target_(false_target) {
+ owner->set_state(this);
+}
+
+
+TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
+ Slot* slot,
+ TypeInfo type_info)
+ : CodeGenState(owner),
+ slot_(slot) {
+ owner->set_state(this);
+ old_type_info_ = owner->set_type_info(slot, type_info);
}
@@ -133,6 +140,10 @@
}
+TypeInfoCodeGenState::~TypeInfoCodeGenState() {
+ owner()->set_type_info(slot_, old_type_info_);
+}
+
// -------------------------------------------------------------------------
// CodeGenerator implementation
@@ -145,6 +156,7 @@
cc_reg_(al),
state_(NULL),
loop_nesting_(0),
+ type_info_(NULL),
function_return_is_shadowed_(false) {
}
@@ -162,6 +174,11 @@
// Initialize state.
info_ = info;
+
+ int slots = scope()->num_parameters() + scope()->num_stack_slots();
+ ScopedVector<TypeInfo> type_info_array(slots);
+ type_info_ = &type_info_array;
+
ASSERT(allocator_ == NULL);
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
@@ -393,6 +410,21 @@
}
allocator_ = NULL;
+ type_info_ = NULL;
+}
+
+
+int CodeGenerator::NumberOfSlot(Slot* slot) {
+ if (slot == NULL) return kInvalidSlotNumber;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return slot->index();
+ case Slot::LOCAL:
+ return slot->index() + scope()->num_parameters();
+ default:
+ break;
+ }
+ return kInvalidSlotNumber;
}
@@ -490,7 +522,7 @@
ASSERT(!has_cc());
int original_height = frame_->height();
- { CodeGenState new_state(this, true_target, false_target);
+ { ConditionCodeGenState new_state(this, true_target, false_target);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -791,63 +823,92 @@
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
+ GenerateInlineSmi inline_smi,
int constant_rhs) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- // sp[0] : y
- // sp[1] : x
- // result : r0
+ // top of virtual frame: y
+ // 2nd elt. on virtual frame : x
+ // result : top of virtual frame
// Stub is entered with a call: 'return address' is in lr.
switch (op) {
case Token::ADD:
case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
+ if (inline_smi) {
+ JumpTarget done;
+ Register rhs = frame_->PopToRegister();
+ Register lhs = frame_->PopToRegister(rhs);
+ Register scratch = VirtualFrame::scratch0();
+ __ orr(scratch, rhs, Operand(lhs));
+ // Check they are both small and positive.
+ __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
+ ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
+ ASSERT_EQ(0, kSmiTag);
+ if (op == Token::ADD) {
+ __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
+ } else {
+ __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
+ }
+ done.Branch(eq);
+ GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+ frame_->SpillAll();
+ frame_->CallStub(&stub, 0);
+ done.Bind();
+ frame_->EmitPush(r0);
+ break;
+ } else {
+ // Fall through!
+ }
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR: {
- frame_->EmitPop(r0); // r0 : y
- frame_->EmitPop(r1); // r1 : x
- GenericBinaryOpStub stub(op, overwrite_mode, r1, r0, constant_rhs);
- frame_->CallStub(&stub, 0);
- break;
- }
-
- case Token::COMMA:
- frame_->EmitPop(r0);
- // Simply discard left value.
- frame_->Drop();
- break;
-
- default:
- // Other cases should have been handled before this point.
- UNREACHABLE();
- break;
- }
-}
-
-
-void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- int constant_rhs) {
- // top of virtual frame: y
- // 2nd elt. on virtual frame : x
- // result : top of virtual frame
-
- // Stub is entered with a call: 'return address' is in lr.
- switch (op) {
- case Token::ADD: // fall through.
- case Token::SUB: // fall through.
+ if (inline_smi) {
+ bool rhs_is_smi = frame_->KnownSmiAt(0);
+ bool lhs_is_smi = frame_->KnownSmiAt(1);
+ Register rhs = frame_->PopToRegister();
+ Register lhs = frame_->PopToRegister(rhs);
+ Register smi_test_reg;
+ Condition cond;
+ if (!rhs_is_smi || !lhs_is_smi) {
+ if (!rhs_is_smi) {
+ smi_test_reg = rhs;
+ } else if (!lhs_is_smi) {
+ smi_test_reg = lhs;
+ } else {
+ smi_test_reg = VirtualFrame::scratch0();
+ __ orr(smi_test_reg, rhs, Operand(lhs));
+ }
+ // Check they are both Smis.
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ cond = eq;
+ } else {
+ cond = al;
+ }
+ ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
+ if (op == Token::BIT_OR) {
+ __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
+ } else if (op == Token::BIT_AND) {
+ __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
+ } else {
+ ASSERT(op == Token::BIT_XOR);
+ ASSERT_EQ(0, kSmiTag);
+ __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
+ }
+ if (cond != al) {
+ JumpTarget done;
+ done.Branch(cond);
+ GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+ frame_->SpillAll();
+ frame_->CallStub(&stub, 0);
+ done.Bind();
+ }
+ frame_->EmitPush(r0);
+ break;
+ } else {
+ // Fall through!
+ }
case Token::MUL:
case Token::DIV:
case Token::MOD:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
case Token::SHL:
case Token::SHR:
case Token::SAR: {
@@ -972,7 +1033,8 @@
rhs = r1;
}
} else {
- UNREACHABLE(); // Should have been handled in SmiOperation.
+ ASSERT(op_ == Token::SHL);
+ __ mov(r1, Operand(Smi::FromInt(value_)));
}
break;
}
@@ -1020,6 +1082,8 @@
OverwriteMode mode) {
int int_value = Smi::cast(*value)->value();
+ bool both_sides_are_smi = frame_->KnownSmiAt(0);
+
bool something_to_inline;
switch (op) {
case Token::ADD:
@@ -1030,7 +1094,10 @@
something_to_inline = true;
break;
}
- case Token::SHL:
+ case Token::SHL: {
+ something_to_inline = (both_sides_are_smi || !reversed);
+ break;
+ }
case Token::SHR:
case Token::SAR: {
if (reversed) {
@@ -1067,17 +1134,18 @@
// Push the rhs onto the virtual frame by putting it in a TOS register.
Register rhs = frame_->GetTOSRegister();
__ mov(rhs, Operand(value));
- frame_->EmitPush(rhs);
- VirtualFrameBinaryOperation(op, mode, int_value);
+ frame_->EmitPush(rhs, TypeInfo::Smi());
+ GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
} else {
// Pop the rhs, then push lhs and rhs in the right order. Only performs
// at most one pop, the rest takes place in TOS registers.
Register lhs = frame_->GetTOSRegister(); // Get reg for pushing.
Register rhs = frame_->PopToRegister(lhs); // Don't use lhs for this.
__ mov(lhs, Operand(value));
- frame_->EmitPush(lhs);
- frame_->EmitPush(rhs);
- VirtualFrameBinaryOperation(op, mode, kUnknownIntValue);
+ frame_->EmitPush(lhs, TypeInfo::Smi());
+ TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
+ frame_->EmitPush(rhs, t);
+ GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue);
}
return;
}
@@ -1097,8 +1165,10 @@
__ add(tos, tos, Operand(value), SetCC);
deferred->Branch(vs);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ if (!both_sides_are_smi) {
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ }
deferred->BindExit();
frame_->EmitPush(tos);
break;
@@ -1114,8 +1184,10 @@
__ sub(tos, tos, Operand(value), SetCC);
}
deferred->Branch(vs);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
+ if (!both_sides_are_smi) {
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ }
deferred->BindExit();
frame_->EmitPush(tos);
break;
@@ -1125,25 +1197,65 @@
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
- __ tst(tos, Operand(kSmiTagMask));
- deferred->Branch(ne);
- switch (op) {
- case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
- case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
- default: UNREACHABLE();
- }
- deferred->BindExit();
- frame_->EmitPush(tos);
+ if (both_sides_are_smi) {
+ switch (op) {
+ case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
+ case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ default: UNREACHABLE();
+ }
+ frame_->EmitPush(tos, TypeInfo::Smi());
+ } else {
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ __ tst(tos, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ switch (op) {
+ case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
+ case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ default: UNREACHABLE();
+ }
+ deferred->BindExit();
+ TypeInfo result_type =
+ (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
+ frame_->EmitPush(tos, result_type);
+ }
break;
}
case Token::SHL:
+ if (reversed) {
+ ASSERT(both_sides_are_smi);
+ int max_shift = 0;
+ int max_result = int_value == 0 ? 1 : int_value;
+ while (Smi::IsValid(max_result << 1)) {
+ max_shift++;
+ max_result <<= 1;
+ }
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
+ // Mask off the last 5 bits of the shift operand (rhs). This is part
+ // of the definition of shift in JS and we know we have a Smi so we
+ // can safely do this. The masked version gets passed to the
+ // deferred code, but that makes no difference.
+ __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
+ __ cmp(tos, Operand(Smi::FromInt(max_shift)));
+ deferred->Branch(ge);
+ Register scratch = VirtualFrame::scratch0();
+ __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Untag.
+ __ mov(tos, Operand(Smi::FromInt(int_value))); // Load constant.
+ __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant.
+ deferred->BindExit();
+ TypeInfo result = TypeInfo::Integer32();
+ frame_->EmitPush(tos, result);
+ break;
+ }
+ // Fall through!
case Token::SHR:
case Token::SAR: {
ASSERT(!reversed);
+ TypeInfo result = TypeInfo::Integer32();
Register scratch = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
int shift_value = int_value & 0x1f; // least significant 5 bits
@@ -1151,9 +1263,15 @@
new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
uint32_t problematic_mask = kSmiTagMask;
// For unsigned shift by zero all negative smis are problematic.
- if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
- __ tst(tos, Operand(problematic_mask));
- deferred->Branch(ne); // Go slow for problematic input.
+ bool skip_smi_test = both_sides_are_smi;
+ if (shift_value == 0 && op == Token::SHR) {
+ problematic_mask |= 0x80000000;
+ skip_smi_test = false;
+ }
+ if (!skip_smi_test) {
+ __ tst(tos, Operand(problematic_mask));
+ deferred->Branch(ne); // Go slow for problematic input.
+ }
switch (op) {
case Token::SHL: {
if (shift_value != 0) {
@@ -1188,6 +1306,9 @@
// by 0 or 1 when handed a valid smi
__ tst(scratch, Operand(0xc0000000));
deferred->Branch(ne);
+ } else {
+ ASSERT(shift_value >= 2);
+ result = TypeInfo::Smi(); // SHR by at least 2 gives a Smi.
}
__ mov(tos, Operand(scratch, LSL, kSmiTagSize));
}
@@ -1204,13 +1325,15 @@
__ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
// Put tag back.
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
+ // SAR by at least 1 gives a Smi.
+ result = TypeInfo::Smi();
}
break;
}
default: UNREACHABLE();
}
deferred->BindExit();
- frame_->EmitPush(tos);
+ frame_->EmitPush(tos, result);
break;
}
@@ -1219,21 +1342,24 @@
ASSERT(int_value >= 2);
ASSERT(IsPowerOf2(int_value));
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
unsigned mask = (0x80000000u | kSmiTagMask);
__ tst(tos, Operand(mask));
deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
mask = (int_value << kSmiTagSize) - 1;
__ and_(tos, tos, Operand(mask));
deferred->BindExit();
- frame_->EmitPush(tos);
+ // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
+ frame_->EmitPush(
+ tos,
+ both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
break;
}
case Token::MUL: {
ASSERT(IsEasyToMultiplyBy(int_value));
DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
max_smi_that_wont_overflow <<= kSmiTagSize;
unsigned mask = 0x80000000u;
@@ -1279,45 +1405,66 @@
Register lhs;
Register rhs;
+ bool lhs_is_smi;
+ bool rhs_is_smi;
+
// We load the top two stack positions into registers chosen by the virtual
// frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
+ lhs_is_smi = frame_->KnownSmiAt(0);
+ rhs_is_smi = frame_->KnownSmiAt(1);
lhs = frame_->PopToRegister();
rhs = frame_->PopToRegister(lhs); // Don't pop to the same register again!
} else {
+ rhs_is_smi = frame_->KnownSmiAt(0);
+ lhs_is_smi = frame_->KnownSmiAt(1);
rhs = frame_->PopToRegister();
lhs = frame_->PopToRegister(rhs); // Don't pop to the same register again!
}
+
+ bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
ASSERT(rhs.is(r0) || rhs.is(r1));
ASSERT(lhs.is(r0) || lhs.is(r1));
- // Now we have the two sides in r0 and r1. We flush any other registers
- // because the stub doesn't know about register allocation.
- frame_->SpillAll();
- Register scratch = VirtualFrame::scratch0();
- __ orr(scratch, lhs, Operand(rhs));
- __ tst(scratch, Operand(kSmiTagMask));
- JumpTarget smi;
- smi.Branch(eq);
-
- // Perform non-smi comparison by stub.
- // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
- // We call with 0 args because there are 0 on the stack.
- if (!rhs.is(r0)) {
- __ Swap(rhs, lhs, ip);
- }
-
- CompareStub stub(cc, strict);
- frame_->CallStub(&stub, 0);
- __ cmp(r0, Operand(0));
- JumpTarget exit;
- exit.Jump();
+ JumpTarget exit;
+
+ if (!both_sides_are_smi) {
+ // Now we have the two sides in r0 and r1. We flush any other registers
+ // because the stub doesn't know about register allocation.
+ frame_->SpillAll();
+ Register scratch = VirtualFrame::scratch0();
+ Register smi_test_reg;
+ if (lhs_is_smi) {
+ smi_test_reg = rhs;
+ } else if (rhs_is_smi) {
+ smi_test_reg = lhs;
+ } else {
+ __ orr(scratch, lhs, Operand(rhs));
+ smi_test_reg = scratch;
+ }
+ __ tst(smi_test_reg, Operand(kSmiTagMask));
+ JumpTarget smi;
+ smi.Branch(eq);
+
+ // Perform non-smi comparison by stub.
+ // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
+ // We call with 0 args because there are 0 on the stack.
+ if (!rhs.is(r0)) {
+ __ Swap(rhs, lhs, ip);
+ }
+
+ CompareStub stub(cc, strict);
+ frame_->CallStub(&stub, 0);
+ __ cmp(r0, Operand(0));
+ exit.Jump();
+
+ smi.Bind();
+ }
// Do smi comparisons by pointer comparison.
- smi.Bind();
__ cmp(lhs, Operand(rhs));
exit.Bind();
@@ -2090,6 +2237,17 @@
node->break_target()->SetExpectedHeight();
IncrementLoopNesting();
+ // We know that the loop index is a smi if it is not modified in the
+ // loop body and it is checked against a constant limit in the loop
+ // condition. In this case, we reset the static type information of the
+ // loop index to smi before compiling the body, the update expression, and
+ // the bottom check of the loop condition.
+ TypeInfoCodeGenState type_info_scope(this,
+ node->is_fast_smi_loop() ?
+ node->loop_variable()->slot() :
+ NULL,
+ TypeInfo::Smi());
+
// If there is no update statement, label the top of the loop with the
// continue target, otherwise with the loop target.
JumpTarget loop(JumpTarget::BIDIRECTIONAL);
@@ -2810,7 +2968,8 @@
} else {
Register scratch = VirtualFrame::scratch0();
- frame_->EmitPush(SlotOperand(slot, scratch));
+ TypeInfo info = type_info(slot);
+ frame_->EmitPush(SlotOperand(slot, scratch), info);
if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
@@ -3100,8 +3259,9 @@
#endif
Comment cmnt(masm_, "[ Literal");
Register reg = frame_->GetTOSRegister();
+ bool is_smi = node->handle()->IsSmi();
__ mov(reg, Operand(node->handle()));
- frame_->EmitPush(reg);
+ frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -3332,9 +3492,16 @@
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ GenerateInlineSmi inline_smi =
+ loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+ if (literal != NULL) {
+ ASSERT(!literal->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
Load(node->value());
- VirtualFrameBinaryOperation(
- node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ GenericBinaryOperation(node->binary_op(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+ inline_smi);
}
} else {
Load(node->value());
@@ -3425,9 +3592,16 @@
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ GenerateInlineSmi inline_smi =
+ loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+ if (literal != NULL) {
+ ASSERT(!literal->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
Load(node->value());
- VirtualFrameBinaryOperation(
- node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ GenericBinaryOperation(node->binary_op(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+ inline_smi);
}
} else {
// For non-compound assignment just load the right-hand side.
@@ -3532,9 +3706,16 @@
false,
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ GenerateInlineSmi inline_smi =
+ loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+ if (literal != NULL) {
+ ASSERT(!literal->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
Load(node->value());
- VirtualFrameBinaryOperation(
- node->binary_op(), overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ GenericBinaryOperation(node->binary_op(),
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
+ inline_smi);
}
} else {
// For non-compound assignment just load the right-hand side.
@@ -5086,9 +5267,36 @@
Variable* var = node->expression()->AsVariableProxy()->AsVariable();
bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
- if (is_postfix) {
+ bool is_slot = (var != NULL && var->mode() == Variable::VAR);
+
+ if (!is_const && is_slot && type_info(var->slot()).IsSmi()) {
+ // The type info declares that this variable is always a Smi. That
+ // means it is a Smi both before and after the increment/decrement.
+ // Let's make use of that to generate a very minimal count.
+ Reference target(this, node->expression(), !is_const);
+ ASSERT(!target.is_illegal());
+ target.GetValue(); // Pushes the value.
+ Register value = frame_->PopToRegister();
+ if (is_postfix) frame_->EmitPush(value);
+ if (is_increment) {
+ __ add(value, value, Operand(Smi::FromInt(1)));
+ } else {
+ __ sub(value, value, Operand(Smi::FromInt(1)));
+ }
+ frame_->EmitPush(value);
+ target.SetValue(NOT_CONST_INIT);
+ if (is_postfix) frame_->Pop();
+ ASSERT_EQ(original_height + 1, frame_->height());
+ return;
+ }
+
+ // If it's a postfix expression and its result is not ignored and the
+ // reference is non-trivial, then push a placeholder on the stack now
+ // to hold the result of the expression.
+ bool placeholder_pushed = false;
+ if (!is_slot && is_postfix) {
frame_->EmitPush(Operand(Smi::FromInt(0)));
+ placeholder_pushed = true;
}
// A constant reference is not saved to, so a constant reference is not a
@@ -5097,12 +5305,11 @@
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
- if (!is_postfix) {
- frame_->EmitPush(Operand(Smi::FromInt(0)));
- }
+ if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
ASSERT_EQ(original_height + 1, frame_->height());
return;
}
+
// This pushes 0, 1 or 2 words on the object to be used later when updating
// the target. It also pushes the current value of the target.
target.GetValue();
@@ -5110,15 +5317,20 @@
JumpTarget slow;
JumpTarget exit;
- // Check for smi operand.
Register value = frame_->PopToRegister();
- __ tst(value, Operand(kSmiTagMask));
- slow.Branch(ne);
// Postfix: Store the old value as the result.
- if (is_postfix) {
+ if (placeholder_pushed) {
frame_->SetElementAt(value, target.size());
- }
+ } else if (is_postfix) {
+ frame_->EmitPush(value);
+ __ mov(VirtualFrame::scratch0(), value);
+ value = VirtualFrame::scratch0();
+ }
+
+ // Check for smi operand.
+ __ tst(value, Operand(kSmiTagMask));
+ slow.Branch(ne);
// Perform optimistic increment/decrement.
if (is_increment) {
@@ -5300,18 +5512,30 @@
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
Load(node->left());
+ if (frame_->KnownSmiAt(0)) overwrite_left = false;
SmiOperation(node->op(),
rliteral->handle(),
false,
- overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
VirtualFrame::RegisterAllocationScope scope(this);
Load(node->right());
+ if (frame_->KnownSmiAt(0)) overwrite_right = false;
SmiOperation(node->op(),
lliteral->handle(),
true,
- overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
+ overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else {
+ GenerateInlineSmi inline_smi =
+ loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
+ if (lliteral != NULL) {
+ ASSERT(!lliteral->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
+ if (rliteral != NULL) {
+ ASSERT(!rliteral->handle()->IsSmi());
+ inline_smi = DONT_GENERATE_INLINE_SMI;
+ }
VirtualFrame::RegisterAllocationScope scope(this);
OverwriteMode overwrite_mode = NO_OVERWRITE;
if (overwrite_left) {
@@ -5321,7 +5545,7 @@
}
Load(node->left());
Load(node->right());
- VirtualFrameBinaryOperation(node->op(), overwrite_mode);
+ GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
}
}
ASSERT(!has_valid_frame() ||
@@ -5813,6 +6037,7 @@
frame_->scratch0(), frame_->scratch1());
// Load the key and receiver from the stack.
+ bool key_is_known_smi = frame_->KnownSmiAt(0);
Register key = frame_->PopToRegister();
Register receiver = frame_->PopToRegister(key);
VirtualFrame::SpilledScope spilled(frame_);
@@ -5835,18 +6060,21 @@
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+ // Check that the key is a smi.
+ if (!key_is_known_smi) {
+ __ tst(key, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ }
+
#ifdef DEBUG
- Label check_inlined_codesize;
- masm_->bind(&check_inlined_codesize);
+ Label check_inlined_codesize;
+ masm_->bind(&check_inlined_codesize);
#endif
__ mov(scratch2, Operand(Factory::null_value()));
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
- // Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- deferred->Branch(ne);
-
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
=======================================
--- /branches/bleeding_edge/src/arm/codegen-arm.h Mon May 31 01:52:57 2010
+++ /branches/bleeding_edge/src/arm/codegen-arm.h Wed Jun 2 02:37:02 2010
@@ -43,6 +43,7 @@
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
// -------------------------------------------------------------------------
@@ -129,24 +130,55 @@
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
- // Create a code generator state based on a code generator's current
- // state. The new state has its own pair of branch labels.
- CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
-
// Destroy a code generator state and restore the owning code generator's
// previous state.
- ~CodeGenState();
-
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
+ virtual ~CodeGenState();
+
+ virtual JumpTarget* true_target() const { return NULL; }
+ virtual JumpTarget* false_target() const { return NULL; }
+
+ protected:
+ inline CodeGenerator* owner() { return owner_; }
+ inline CodeGenState* previous() const { return previous_; }
private:
CodeGenerator* owner_;
+ CodeGenState* previous_;
+};
+
+
+class ConditionCodeGenState : public CodeGenState {
+ public:
+ // Create a code generator state based on a code generator's current
+ // state. The new state has its own pair of branch labels.
+ ConditionCodeGenState(CodeGenerator* owner,
+ JumpTarget* true_target,
+ JumpTarget* false_target);
+
+ virtual JumpTarget* true_target() const { return true_target_; }
+ virtual JumpTarget* false_target() const { return false_target_; }
+
+ private:
JumpTarget* true_target_;
JumpTarget* false_target_;
- CodeGenState* previous_;
+};
+
+
+class TypeInfoCodeGenState : public CodeGenState {
+ public:
+ TypeInfoCodeGenState(CodeGenerator* owner,
+ Slot* slot_number,
+ TypeInfo info);
+ ~TypeInfoCodeGenState();
+
+ virtual JumpTarget* true_target() const { return previous()->true_target(); }
+ virtual JumpTarget* false_target() const {
+ return previous()->false_target();
+ }
+
+ private:
+ Slot* slot_;
+ TypeInfo old_type_info_;
};
@@ -215,6 +247,23 @@
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
+
+ TypeInfo type_info(Slot* slot) {
+ int index = NumberOfSlot(slot);
+ if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
+ return (*type_info_)[index];
+ }
+
+ TypeInfo set_type_info(Slot* slot, TypeInfo info) {
+ int index = NumberOfSlot(slot);
+ ASSERT(index >= kInvalidSlotNumber);
+ if (index != kInvalidSlotNumber) {
+ TypeInfo previous_value = (*type_info_)[index];
+ (*type_info_)[index] = info;
+ return previous_value;
+ }
+ return TypeInfo::Unknown();
+ }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
@@ -225,7 +274,7 @@
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
- static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
+ static const int kInlinedKeyedLoadInstructionsAfterPatch = 17;
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
private:
@@ -239,6 +288,10 @@
// Generating deferred code.
void ProcessDeferred();
+ static const int kInvalidSlotNumber = -1;
+
+ int NumberOfSlot(Slot* slot);
+
// State
bool has_cc() const { return cc_reg_ != al; }
JumpTarget* true_target() const { return state_->true_target(); }
@@ -351,10 +404,8 @@
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
+ GenerateInlineSmi inline_smi,
int known_rhs = kUnknownIntValue);
- void VirtualFrameBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode,
- int known_rhs = kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
@@ -511,6 +562,8 @@
CodeGenState* state_;
int loop_nesting_;
+ Vector<TypeInfo>* type_info_;
+
// Jump targets
BreakTarget function_return_;
=======================================
--- /branches/bleeding_edge/src/arm/jump-target-arm.cc Thu May 27 06:48:52 2010
+++ /branches/bleeding_edge/src/arm/jump-target-arm.cc Wed Jun 2 02:37:02 2010
@@ -50,6 +50,11 @@
ASSERT(cgen()->HasValidEntryRegisters());
if (entry_frame_set_) {
+ if (entry_label_.is_bound()) {
+ // If we already bound and generated code at the destination then it
+ // is too late to ask for less optimistic type assumptions.
+ ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
+ }
// There is already a frame expectation at the target.
cgen()->frame()->MergeTo(&entry_frame_);
cgen()->DeleteFrame();
@@ -67,8 +72,12 @@
ASSERT(cgen()->has_valid_frame());
if (entry_frame_set_) {
- // Backward branch. We have an expected frame to merge to on the
- // backward edge.
+ if (entry_label_.is_bound()) {
+ // If we already bound and generated code at the destination then it
+ // is too late to ask for less optimistic type assumptions.
+ ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
+ }
+ // We have an expected frame to merge to on the backward edge.
cgen()->frame()->MergeTo(&entry_frame_, cc);
} else {
// Clone the current frame to use as the expected one at the target.
=======================================
--- /branches/bleeding_edge/src/arm/virtual-frame-arm-inl.h Mon May 10 04:32:25 2010
+++ /branches/bleeding_edge/src/arm/virtual-frame-arm-inl.h Wed Jun 2 02:37:02 2010
@@ -47,6 +47,12 @@
MemOperand VirtualFrame::Receiver() {
return ParameterAt(-1);
}
+
+
+void VirtualFrame::Forget(int count) {
+ SpillAll();
+ LowerHeight(count);
+}
} } // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/arm/virtual-frame-arm.cc Fri May 28 04:23:07 2010
+++ /branches/bleeding_edge/src/arm/virtual-frame-arm.cc Wed Jun 2 02:37:02 2010
@@ -43,7 +43,7 @@
// Shuffle things around so the top of stack is in r0 and r1.
MergeTOSTo(R0_R1_TOS);
// Pop the two registers off the stack so they are detached from the frame.
- element_count_ -= 2;
+ LowerHeight(2);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
@@ -52,7 +52,7 @@
// Shuffle things around so the top of stack is only in r1.
MergeTOSTo(R1_TOS);
// Pop the register off the stack so it is detached from the frame.
- element_count_ -= 1;
+ LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
@@ -61,16 +61,25 @@
// Shuffle things around so the top of stack is only in r0.
MergeTOSTo(R0_TOS);
// Pop the register off the stack so it is detached from the frame.
- element_count_ -= 1;
+ LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
+ ASSERT(expected->IsCompatibleWith(this));
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
+ if (Equals(expected)) return;
+ expected->tos_known_smi_map_ &= tos_known_smi_map_;
+ MergeTOSTo(expected->top_of_stack_state_, cond);
+ ASSERT(register_allocation_map_ == expected->register_allocation_map_);
+}
void VirtualFrame::MergeTOSTo(
@@ -420,7 +429,7 @@
}
if (count == 0) return;
__ add(sp, sp, Operand(count * kPointerSize));
- element_count_ -= count;
+ LowerHeight(count);
}
@@ -430,7 +439,7 @@
} else {
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
- element_count_--;
+ LowerHeight(1);
}
@@ -442,7 +451,7 @@
__ mov(reg, kTopRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
- element_count_--;
+ LowerHeight(1);
}
@@ -550,7 +559,7 @@
UNREACHABLE();
}
}
- element_count_++;
+ RaiseHeight(1, tos_known_smi_map_ & 1);
}
@@ -589,7 +598,7 @@
UNREACHABLE();
}
}
- element_count_ += 2;
+ RaiseHeight(2, tos_known_smi_map_ & 3);
}
@@ -597,7 +606,7 @@
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
but_not_to_this_one.is(no_reg));
- element_count_--;
+ LowerHeight(1);
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (but_not_to_this_one.is(r0)) {
__ pop(r1);
@@ -625,8 +634,8 @@
}
-void VirtualFrame::EmitPush(Register reg) {
- element_count_++;
+void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
+ RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (reg.is(cp)) {
// If we are pushing cp then we are about to make a call and things have to
// be pushed to the physical stack. There's nothing to be gained by moving
@@ -659,6 +668,9 @@
void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
+ if (this_far_down < kTOSKnownSmiMapSize) {
+ tos_known_smi_map_ &= ~(1 << this_far_down);
+ }
if (this_far_down == 0) {
Pop();
Register dest = GetTOSRegister();
@@ -699,8 +711,8 @@
}
-void VirtualFrame::EmitPush(Operand operand) {
- element_count_++;
+void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
+ RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ mov(r0, operand);
__ push(r0);
@@ -712,8 +724,8 @@
}
-void VirtualFrame::EmitPush(MemOperand operand) {
- element_count_++;
+void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
+ RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ ldr(r0, operand);
__ push(r0);
@@ -726,7 +738,7 @@
void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
- element_count_++;
+ RaiseHeight(1, 0);
if (SpilledScope::is_spilled()) {
__ LoadRoot(r0, index);
__ push(r0);
=======================================
--- /branches/bleeding_edge/src/arm/virtual-frame-arm.h Fri May 28 04:23:07 2010
+++ /branches/bleeding_edge/src/arm/virtual-frame-arm.h Wed Jun 2 02:37:02 2010
@@ -154,10 +154,7 @@
// Forget elements from the top of the frame to match an actual frame (e.g.,
// the frame after a runtime call). No code is emitted except to bring the
// frame to a spilled state.
- void Forget(int count) {
- SpillAll();
- element_count_ -= count;
- }
+ void Forget(int count);
// Spill all values from the frame to memory.
void SpillAll();
@@ -184,7 +181,13 @@
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
+ void MergeTo(VirtualFrame* expected, Condition cond = al);
void MergeTo(const VirtualFrame* expected, Condition cond = al);
+
+ // Checks whether this frame can be branched to by the other frame.
+ bool IsCompatibleWith(const VirtualFrame* other) const {
+ return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
+ }
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
@@ -233,6 +236,11 @@
ASSERT(adjusted_index >= 0);
return MemOperand(sp, adjusted_index * kPointerSize);
}
+
+ bool KnownSmiAt(int index) {
+ if (index >= kTOSKnownSmiMapSize) return false;
+ return (tos_known_smi_map_ & (1 << index)) != 0;
+ }
// A frame-allocated local as an assembly operand.
inline MemOperand LocalAt(int index);
@@ -352,9 +360,9 @@
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
- void EmitPush(Register reg);
- void EmitPush(Operand operand);
- void EmitPush(MemOperand operand);
+ void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
+ void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
+ void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
void EmitPushRoot(Heap::RootListIndex index);
// Overwrite the nth thing on the stack. If the nth position is in a
@@ -419,6 +427,8 @@
int element_count_;
TopOfStack top_of_stack_state_:3;
int register_allocation_map_:kNumberOfAllocatedRegisters;
+ static const int kTOSKnownSmiMapSize = 4;
+ unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
// The index of the element that is at the processor's stack pointer
// (the sp register). For now since everything is in memory it is given
@@ -473,6 +483,25 @@
inline bool Equals(const VirtualFrame* other);
+ inline void LowerHeight(int count) {
+ element_count_ -= count;
+ if (count >= kTOSKnownSmiMapSize) {
+ tos_known_smi_map_ = 0;
+ } else {
+ tos_known_smi_map_ >>= count;
+ }
+ }
+
+ inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
+ ASSERT(known_smi_map < (1u << count));
+ element_count_ += count;
+ if (count >= kTOSKnownSmiMapSize) {
+ tos_known_smi_map_ = known_smi_map;
+ } else {
+ tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
+ }
+ }
+
friend class JumpTarget;
};
=======================================
--- /branches/bleeding_edge/src/data-flow.cc Mon Mar 29 07:23:55 2010
+++ /branches/bleeding_edge/src/data-flow.cc Wed Jun 2 02:37:02 2010
@@ -318,6 +318,9 @@
Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
+ // Don't try to get clever with const or dynamic variables.
+ if (loop_var->mode() != Variable::VAR) return NULL;
+
// The initial value has to be a smi.
Literal* init_lit = init->value()->AsLiteral();
if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
=======================================
--- /branches/bleeding_edge/src/type-info.h Mon Mar 29 04:48:57 2010
+++ /branches/bleeding_edge/src/type-info.h Wed Jun 2 02:37:02 2010
@@ -47,7 +47,7 @@
class TypeInfo {
public:
- TypeInfo() { }
+ TypeInfo() : type_(kUnknownType) { }
static inline TypeInfo Unknown();
// We know it's a primitive type.
=======================================
--- /branches/bleeding_edge/src/virtual-frame-light-inl.h Thu May 27 06:48:52 2010
+++ /branches/bleeding_edge/src/virtual-frame-light-inl.h Wed Jun 2 02:37:02 2010
@@ -42,7 +42,8 @@
VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
: element_count_(0),
top_of_stack_state_(NO_TOS_REGISTERS),
- register_allocation_map_(0) { }
+ register_allocation_map_(0),
+ tos_known_smi_map_(0) { }
// On entry to a function, the virtual frame already contains the receiver,
@@ -50,20 +51,23 @@
VirtualFrame::VirtualFrame()
: element_count_(parameter_count() + 2),
top_of_stack_state_(NO_TOS_REGISTERS),
- register_allocation_map_(0) { }
+ register_allocation_map_(0),
+ tos_known_smi_map_(0) { }
// When cloned, a frame is a deep copy of the original.
VirtualFrame::VirtualFrame(VirtualFrame* original)
: element_count_(original->element_count()),
top_of_stack_state_(original->top_of_stack_state_),
- register_allocation_map_(original->register_allocation_map_) { }
+ register_allocation_map_(original->register_allocation_map_),
+ tos_known_smi_map_(0) { }
bool VirtualFrame::Equals(const VirtualFrame* other) {
ASSERT(element_count() == other->element_count());
if (top_of_stack_state_ != other->top_of_stack_state_) return false;
if (register_allocation_map_ != other->register_allocation_map_) return false;
+ if (tos_known_smi_map_ != other->tos_known_smi_map_) return false;
return true;
}
=======================================
--- /branches/bleeding_edge/src/virtual-frame-light.cc Mon May 10 04:32:25 2010
+++ /branches/bleeding_edge/src/virtual-frame-light.cc Wed Jun 2 02:37:02 2010
@@ -36,7 +36,7 @@
void VirtualFrame::Adjust(int count) {
ASSERT(count >= 0);
- element_count_ += count;
+ RaiseHeight(count, 0);
}