Revision: 24578
Author: [email protected]
Date: Tue Oct 14 07:51:07 2014 UTC
Log: Version 3.30.9 (based on bleeding_edge revision r24572)
Performance and stability improvements on all platforms.
https://code.google.com/p/v8/source/detail?r=24578
Modified:
/trunk/BUILD.gn
/trunk/ChangeLog
/trunk/src/arm/assembler-arm.cc
/trunk/src/arm/assembler-arm.h
/trunk/src/arm/full-codegen-arm.cc
/trunk/src/arm/macro-assembler-arm.cc
/trunk/src/arm/macro-assembler-arm.h
/trunk/src/arm64/full-codegen-arm64.cc
/trunk/src/arm64/macro-assembler-arm64.cc
/trunk/src/arm64/macro-assembler-arm64.h
/trunk/src/assembler.cc
/trunk/src/assembler.h
/trunk/src/code-stubs-hydrogen.cc
/trunk/src/code-stubs.cc
/trunk/src/compiler/arm/instruction-selector-arm.cc
/trunk/src/compiler/arm64/instruction-selector-arm64.cc
/trunk/src/compiler/code-generator.h
/trunk/src/compiler/common-node-cache.h
/trunk/src/compiler/ia32/code-generator-ia32.cc
/trunk/src/compiler/ia32/instruction-selector-ia32.cc
/trunk/src/compiler/instruction-selector-impl.h
/trunk/src/compiler/instruction-selector.cc
/trunk/src/compiler/instruction-selector.h
/trunk/src/compiler/instruction.cc
/trunk/src/compiler/instruction.h
/trunk/src/compiler/js-graph.cc
/trunk/src/compiler/js-graph.h
/trunk/src/compiler/js-typed-lowering.cc
/trunk/src/compiler/machine-operator-reducer.cc
/trunk/src/compiler/machine-operator-reducer.h
/trunk/src/compiler/mips/instruction-selector-mips.cc
/trunk/src/compiler/scheduler.cc
/trunk/src/compiler/scheduler.h
/trunk/src/compiler/x64/code-generator-x64.cc
/trunk/src/compiler/x64/instruction-selector-x64.cc
/trunk/src/execution.cc
/trunk/src/flag-definitions.h
/trunk/src/heap/gc-idle-time-handler.cc
/trunk/src/heap/gc-idle-time-handler.h
/trunk/src/heap/heap.cc
/trunk/src/heap/heap.h
/trunk/src/heap/incremental-marking.cc
/trunk/src/heap/incremental-marking.h
/trunk/src/heap/mark-compact.cc
/trunk/src/heap/mark-compact.h
/trunk/src/ia32/assembler-ia32.cc
/trunk/src/ia32/assembler-ia32.h
/trunk/src/ia32/full-codegen-ia32.cc
/trunk/src/ia32/macro-assembler-ia32.cc
/trunk/src/ia32/macro-assembler-ia32.h
/trunk/src/macro-assembler.h
/trunk/src/mips/full-codegen-mips.cc
/trunk/src/mips/macro-assembler-mips.cc
/trunk/src/mips/macro-assembler-mips.h
/trunk/src/mips64/full-codegen-mips64.cc
/trunk/src/mips64/macro-assembler-mips64.cc
/trunk/src/mips64/macro-assembler-mips64.h
/trunk/src/objects-inl.h
/trunk/src/objects.h
/trunk/src/scanner-character-streams.cc
/trunk/src/serialize.cc
/trunk/src/version.cc
/trunk/src/x64/assembler-x64.cc
/trunk/src/x64/assembler-x64.h
/trunk/src/x64/full-codegen-x64.cc
/trunk/src/x64/macro-assembler-x64.cc
/trunk/src/x64/macro-assembler-x64.h
/trunk/src/x87/full-codegen-x87.cc
/trunk/src/x87/lithium-codegen-x87.cc
/trunk/src/x87/macro-assembler-x87.cc
/trunk/src/x87/macro-assembler-x87.h
/trunk/test/benchmarks/testcfg.py
/trunk/test/cctest/compiler/test-instruction.cc
/trunk/test/cctest/compiler/test-run-machops.cc
/trunk/test/cctest/test-api.cc
/trunk/test/cctest/test-disasm-ia32.cc
/trunk/test/cctest/test-disasm-x64.cc
/trunk/test/cctest/test-heap.cc
/trunk/test/mjsunit/asm/int32-tmod.js
/trunk/test/mjsunit/harmony/super.js
/trunk/test/mjsunit/harmony/typedarrays.js
/trunk/test/test262/test262.status
/trunk/test/unittests/compiler/graph-unittest.cc
/trunk/test/unittests/compiler/graph-unittest.h
/trunk/test/unittests/compiler/js-typed-lowering-unittest.cc
/trunk/test/unittests/compiler/machine-operator-reducer-unittest.cc
/trunk/test/unittests/heap/gc-idle-time-handler-unittest.cc
/trunk/tools/gyp/v8.gyp
/trunk/tools/push-to-trunk/common_includes.py
/trunk/tools/push-to-trunk/releases.py
=======================================
--- /trunk/BUILD.gn Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/BUILD.gn Tue Oct 14 07:51:07 2014 UTC
@@ -1127,6 +1127,10 @@
"src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
+ "src/compiler/mips/code-generator-mips.cc",
+ "src/compiler/mips/instruction-codes-mips.h",
+ "src/compiler/mips/instruction-selector-mips.cc",
+ "src/compiler/mips/linkage-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
=======================================
--- /trunk/ChangeLog Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/ChangeLog Tue Oct 14 07:51:07 2014 UTC
@@ -1,3 +1,8 @@
+2014-10-14: Version 3.30.9
+
+ Performance and stability improvements on all platforms.
+
+
2014-10-13: Version 3.30.8
AST nodes have at most one bailout/typefeedback ID now, saving lots of
=======================================
--- /trunk/src/arm/assembler-arm.cc Wed Sep 3 08:32:14 2014 UTC
+++ /trunk/src/arm/assembler-arm.cc Tue Oct 14 07:51:07 2014 UTC
@@ -472,7 +472,6 @@
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
- constant_pool_available_ = !FLAG_enable_ool_constant_pool;
ClearRecordedAstId();
}
@@ -1056,7 +1055,8 @@
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
- if (assembler != NULL && !assembler->is_constant_pool_available()) {
+ if (FLAG_enable_ool_constant_pool && assembler != NULL &&
+ !assembler->is_ool_constant_pool_available()) {
return true;
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) {
@@ -1137,7 +1137,7 @@
mov(rd, target, LeaveCC, cond);
}
} else {
- DCHECK(is_constant_pool_available());
+ DCHECK(!FLAG_enable_ool_constant_pool || is_ool_constant_pool_available());
ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
if (section == ConstantPoolArray::EXTENDED_SECTION) {
DCHECK(FLAG_enable_ool_constant_pool);
@@ -2492,7 +2492,7 @@
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
+ } else if (FLAG_enable_vldr_imm && is_ool_constant_pool_available()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
=======================================
--- /trunk/src/arm/assembler-arm.h Fri Sep 26 00:05:23 2014 UTC
+++ /trunk/src/arm/assembler-arm.h Tue Oct 14 07:51:07 2014 UTC
@@ -1487,8 +1487,6 @@
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
-
- bool is_constant_pool_available() const { return constant_pool_available_; }
bool use_extended_constant_pool() const {
return constant_pool_builder_.current_section() ==
@@ -1548,10 +1546,6 @@
return (const_pool_blocked_nesting_ > 0) ||
(pc_offset() < no_const_pool_before_);
}
-
- void set_constant_pool_available(bool available) {
- constant_pool_available_ = available;
- }
private:
int next_buffer_check_; // pc offset of next buffer check
@@ -1615,10 +1609,6 @@
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- // Indicates whether the constant pool can be accessed, which is only possible
- // if the pp register points to the current code object's constant pool.
- bool constant_pool_available_;
-
// Code emission
inline void CheckBuffer();
void GrowBuffer();
@@ -1654,9 +1644,6 @@
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
- friend class FrameAndConstantPoolScope;
- friend class ConstantPoolUnavailableScope;
-
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;
=======================================
--- /trunk/src/arm/full-codegen-arm.cc Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/arm/full-codegen-arm.cc Tue Oct 14 07:51:07 2014 UTC
@@ -2514,16 +2514,8 @@
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
+ LhsKind assign_type = GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
@@ -2542,6 +2534,42 @@
CallStoreIC();
break;
}
+ case NAMED_SUPER_PROPERTY: {
+ __ Push(r0);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ // stack: value, this; r0: home_object
+ Register scratch = r2;
+ Register scratch2 = r3;
+ __ mov(scratch, result_register()); // home_object
+ __ ldr(r0, MemOperand(sp, kPointerSize)); // value
+ __ ldr(scratch2, MemOperand(sp, 0)); // this
+ __ str(scratch2, MemOperand(sp, kPointerSize)); // this
+ __ str(scratch, MemOperand(sp, 0)); // home_object
+ // stack: this, home_object; r0: value
+ EmitNamedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ __ Push(r0);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(prop->key());
+ Register scratch = r2;
+ Register scratch2 = r3;
+ __ ldr(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
+ // stack: value, this, home_object; r0: key, r3: value
+ __ ldr(scratch, MemOperand(sp, kPointerSize)); // this
+ __ str(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ ldr(scratch, MemOperand(sp, 0)); // home_object
+ __ str(scratch, MemOperand(sp, kPointerSize));
+ __ str(r0, MemOperand(sp, 0));
+ __ Move(r0, scratch2);
+ // stack: this, home_object, key; r0: value.
+ EmitKeyedSuperPropertyStore(prop);
+ break;
+ }
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
VisitForStackValue(prop->obj());
=======================================
--- /trunk/src/arm/macro-assembler-arm.cc Fri Sep 26 00:05:23 2014 UTC
+++ /trunk/src/arm/macro-assembler-arm.cc Tue Oct 14 07:51:07 2014 UTC
@@ -967,7 +967,7 @@
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
if (FLAG_enable_ool_constant_pool) {
LoadConstantPoolPointerRegister();
- set_constant_pool_available(true);
+ set_ool_constant_pool_available(true);
}
}
@@ -992,16 +992,16 @@
}
if (FLAG_enable_ool_constant_pool) {
LoadConstantPoolPointerRegister();
- set_constant_pool_available(true);
+ set_ool_constant_pool_available(true);
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
- bool load_constant_pool) {
+ bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
PushFixedFrame();
- if (FLAG_enable_ool_constant_pool && load_constant_pool) {
+ if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
}
mov(ip, Operand(Smi::FromInt(type)));
=======================================
--- /trunk/src/arm/macro-assembler-arm.h Thu Sep 25 00:05:09 2014 UTC
+++ /trunk/src/arm/macro-assembler-arm.h Tue Oct 14 07:51:07 2014 UTC
@@ -1401,7 +1401,8 @@
}
// Activation support.
- void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
+ void EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
@@ -1530,71 +1531,6 @@
};
-class FrameAndConstantPoolScope {
- public:
- FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
- : masm_(masm),
- type_(type),
- old_has_frame_(masm->has_frame()),
- old_constant_pool_available_(masm->is_constant_pool_available()) {
- // We only want to enable constant pool access for non-manual frame scopes
- // to ensure the constant pool pointer is valid throughout the scope.
- DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
- masm->set_has_frame(true);
- masm->set_constant_pool_available(true);
- masm->EnterFrame(type, !old_constant_pool_available_);
- }
-
- ~FrameAndConstantPoolScope() {
- masm_->LeaveFrame(type_);
- masm_->set_has_frame(old_has_frame_);
- masm_->set_constant_pool_available(old_constant_pool_available_);
- }
-
- // Normally we generate the leave-frame code when this object goes
- // out of scope. Sometimes we may need to generate the code somewhere else
- // in addition. Calling this will achieve that, but the object stays in
- // scope, the MacroAssembler is still marked as being in a frame scope, and
- // the code will be generated again when it goes out of scope.
- void GenerateLeaveFrame() {
- DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
- masm_->LeaveFrame(type_);
- }
-
- private:
- MacroAssembler* masm_;
- StackFrame::Type type_;
- bool old_has_frame_;
- bool old_constant_pool_available_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
-};
-
-
-// Class for scoping the the unavailability of constant pool access.
-class ConstantPoolUnavailableScope {
- public:
- explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
- : masm_(masm),
- old_constant_pool_available_(masm->is_constant_pool_available()) {
- if (FLAG_enable_ool_constant_pool) {
- masm_->set_constant_pool_available(false);
- }
- }
- ~ConstantPoolUnavailableScope() {
- if (FLAG_enable_ool_constant_pool) {
- masm_->set_constant_pool_available(old_constant_pool_available_);
- }
- }
-
- private:
- MacroAssembler* masm_;
- int old_constant_pool_available_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
-};
-
-
// -----------------------------------------------------------------------------
// Static helper functions.
=======================================
--- /trunk/src/arm64/full-codegen-arm64.cc Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/arm64/full-codegen-arm64.cc Tue Oct 14 07:51:07 2014 UTC
@@ -2172,16 +2172,8 @@
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
- // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
+ LhsKind assign_type = GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
@@ -2202,6 +2194,42 @@
CallStoreIC();
break;
}
+ case NAMED_SUPER_PROPERTY: {
+ __ Push(x0);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ // stack: value, this; x0: home_object
+ Register scratch = x10;
+ Register scratch2 = x11;
+ __ mov(scratch, result_register()); // home_object
+ __ Peek(x0, kPointerSize); // value
+ __ Peek(scratch2, 0); // this
+ __ Poke(scratch2, kPointerSize); // this
+ __ Poke(scratch, 0); // home_object
+ // stack: this, home_object; x0: value
+ EmitNamedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ __ Push(x0);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ Push(result_register());
+ VisitForAccumulatorValue(prop->key());
+ Register scratch = x10;
+ Register scratch2 = x11;
+ __ Peek(scratch2, 2 * kPointerSize); // value
+ // stack: value, this, home_object; x0: key, x11: value
+ __ Peek(scratch, kPointerSize); // this
+ __ Poke(scratch, 2 * kPointerSize);
+ __ Peek(scratch, 0); // home_object
+ __ Poke(scratch, kPointerSize);
+ __ Poke(x0, 0);
+ __ Move(x0, scratch2);
+ // stack: this, home_object, key; x0: value.
+ EmitKeyedSuperPropertyStore(prop);
+ break;
+ }
case KEYED_PROPERTY: {
__ Push(x0); // Preserve value.
VisitForStackValue(prop->obj());
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.cc Fri Sep 26 00:05:23 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.cc Tue Oct 14 07:51:07 2014 UTC
@@ -3062,6 +3062,13 @@
__ EmitFrameSetupForCodeAgePatching();
}
}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on arm64.
+ UNREACHABLE();
+}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.h Thu Sep 25 00:05:09 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.h Tue Oct 14 07:51:07 2014 UTC
@@ -1627,6 +1627,7 @@
// Activation support.
void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
// Returns map with validated enum cache in object register.
=======================================
--- /trunk/src/assembler.cc Wed Oct 8 00:05:11 2014 UTC
+++ /trunk/src/assembler.cc Tue Oct 14 07:51:07 2014 UTC
@@ -131,7 +131,8 @@
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
// We may use the assembler without an isolate.
- serializer_enabled_(isolate && isolate->serializer_enabled()) {
+ serializer_enabled_(isolate && isolate->serializer_enabled()),
+ ool_constant_pool_available_(false) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
=======================================
--- /trunk/src/assembler.h Wed Oct 8 00:05:11 2014 UTC
+++ /trunk/src/assembler.h Tue Oct 14 07:51:07 2014 UTC
@@ -78,6 +78,16 @@
bool IsEnabled(CpuFeature f) {
return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0;
}
+
+ bool is_ool_constant_pool_available() const {
+ if (FLAG_enable_ool_constant_pool) {
+ return ool_constant_pool_available_;
+ } else {
+ // Out-of-line constant pool not supported on this architecture.
+ UNREACHABLE();
+ return false;
+ }
+ }
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
@@ -98,6 +108,15 @@
int buffer_size_;
bool own_buffer_;
+ void set_ool_constant_pool_available(bool available) {
+ if (FLAG_enable_ool_constant_pool) {
+ ool_constant_pool_available_ = available;
+ } else {
+ // Out-of-line constant pool not supported on this architecture.
+ UNREACHABLE();
+ }
+ }
+
// The program counter, which points into the buffer above and moves forward.
byte* pc_;
@@ -108,6 +127,14 @@
bool emit_debug_code_;
bool predictable_code_size_;
bool serializer_enabled_;
+
+ // Indicates whether the constant pool can be accessed, which is only possible
+ // if the pp register points to the current code object's constant pool.
+ bool ool_constant_pool_available_;
+
+ // Constant pool.
+ friend class FrameAndConstantPoolScope;
+ friend class ConstantPoolUnavailableScope;
};
=======================================
--- /trunk/src/code-stubs-hydrogen.cc Wed Oct 8 00:05:11 2014 UTC
+++ /trunk/src/code-stubs-hydrogen.cc Tue Oct 14 07:51:07 2014 UTC
@@ -233,6 +233,8 @@
// Generate the code for the stub.
masm.set_generating_stub(true);
+ // TODO(yangguo): remove this once we can serialize IC stubs.
+ masm.enable_serializer();
NoCurrentFrameScope scope(&masm);
GenerateLightweightMiss(&masm, miss);
}
=======================================
--- /trunk/src/code-stubs.cc Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/code-stubs.cc Tue Oct 14 07:51:07 2014 UTC
@@ -111,6 +111,8 @@
// Generate the code for the stub.
masm.set_generating_stub(true);
+ // TODO(yangguo): remove this once we can serialize IC stubs.
+ masm.enable_serializer();
NoCurrentFrameScope scope(&masm);
Generate(&masm);
}
=======================================
--- /trunk/src/compiler/arm/instruction-selector-arm.cc Thu Oct 2 00:05:29 2014 UTC
+++ /trunk/src/compiler/arm/instruction-selector-arm.cc Tue Oct 14 07:51:07 2014 UTC
@@ -792,15 +792,14 @@
}
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
- BasicBlock* deoptimization) {
+void InstructionSelector::VisitCall(Node* node) {
ArmOperandGenerator g(this);
- CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
FrameStateDescriptor* frame_state_descriptor = NULL;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
- GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
@@ -809,7 +808,7 @@
// TODO(turbofan): on ARM64 it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(call, &buffer, true, false);
+ InitializeCallBuffer(node, &buffer, true, false);
// TODO(dcarney): might be possible to use claim/poke instead
// Push any stack arguments.
@@ -838,31 +837,35 @@
Instruction* call_instr =
Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
buffer.instruction_args.size(), &buffer.instruction_args.front());
-
call_instr->MarkAsCall();
- if (deoptimization != NULL) {
- DCHECK(continuation != NULL);
- call_instr->MarkAsControl();
- }
}
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
- FlagsContinuation* cont) {
- VisitBinop(this, node, kArmAdd, kArmAdd, cont);
-}
+namespace {
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
- FlagsContinuation* cont) {
- VisitBinop(this, node, kArmSub, kArmRsb, cont);
+// Shared routine for multiple float compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(kArmVcmpF64), nullptr,
+ g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(
+ cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+ }
}
-// Shared routine for multiple compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont,
- bool commutative) {
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[5];
@@ -903,63 +906,231 @@
}
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
- switch (node->opcode()) {
- case IrOpcode::kInt32Add:
- return VisitWordCompare(this, node, kArmCmn, cont, true);
- case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, node, kArmCmp, cont, false);
- case IrOpcode::kWord32And:
- return VisitWordCompare(this, node, kArmTst, cont, true);
- case IrOpcode::kWord32Or:
- return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
- case IrOpcode::kWord32Xor:
- return VisitWordCompare(this, node, kArmTeq, cont, true);
- case IrOpcode::kWord32Sar:
- return VisitShift(this, node, TryMatchASR, cont);
- case IrOpcode::kWord32Shl:
- return VisitShift(this, node, TryMatchLSL, cont);
- case IrOpcode::kWord32Shr:
- return VisitShift(this, node, TryMatchLSR, cont);
- case IrOpcode::kWord32Ror:
- return VisitShift(this, node, TryMatchROR, cont);
- default:
- break;
- }
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kArmCmp, cont, false);
+}
- ArmOperandGenerator g(this);
+
+void VisitWordTest(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(selector);
InstructionCode opcode =
cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
if (cont->IsBranch()) {
- Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ selector->Emit(opcode, nullptr, g.UseRegister(node), g.UseRegister(node),
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()))->MarkAsControl();
} else {
- Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
- g.UseRegister(node));
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()),
+ g.UseRegister(node), g.UseRegister(node));
}
}
+} // namespace
-void InstructionSelector::VisitWord32Compare(Node* node,
- FlagsContinuation* cont) {
- VisitWordCompare(this, node, kArmCmp, cont, false);
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ ArmOperandGenerator g(this);
+ Node* user = branch;
+ Node* value = branch->InputAt(0);
+
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+ // If we can fall through to the true block, invert the branch.
+ if (IsNextInAssemblyOrder(tbranch)) {
+ cont.Negate();
+ cont.SwapBlocks();
+ }
+
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ }
+
+ // Try to combine the branch with a comparison.
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(this, value, &cont);
+ case IrOpcode::kInt32LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(this, value, &cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(this, value, &cont);
+ case IrOpcode::kUint32LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(this, value, &cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(this, value, &cont);
+ case IrOpcode::kFloat64Equal:
+ cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kFloat64LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (OpParameter<size_t>(value) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation> is either NULL, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch).
+ Node* node = value->InputAt(0);
+ Node* result = node->FindProjection(0);
+ if (!result || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Add:
+ return VisitWordCompare(this, value, kArmCmn, &cont, true);
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, value, kArmCmp, &cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, value, kArmTst, &cont, true);
+ case IrOpcode::kWord32Or:
+ return VisitBinop(this, value, kArmOrr, kArmOrr, &cont);
+ case IrOpcode::kWord32Xor:
+ return VisitWordCompare(this, value, kArmTeq, &cont, true);
+ case IrOpcode::kWord32Sar:
+ return VisitShift(this, value, TryMatchASR, &cont);
+ case IrOpcode::kWord32Shl:
+ return VisitShift(this, value, TryMatchLSL, &cont);
+ case IrOpcode::kWord32Shr:
+ return VisitShift(this, value, TryMatchLSR, &cont);
+ case IrOpcode::kWord32Ror:
+ return VisitShift(this, value, TryMatchROR, &cont);
+ default:
+ break;
+ }
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ return VisitWordTest(this, value, &cont);
}
-void InstructionSelector::VisitFloat64Compare(Node* node,
- FlagsContinuation* cont) {
- ArmOperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (cont->IsBranch()) {
- Emit(cont->Encode(kArmVcmpF64), NULL, g.UseRegister(m.left().node()),
- g.UseRegister(m.right().node()), g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
- } else {
- DCHECK(cont->IsSet());
- Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
- g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ Node* const user = node;
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(user);
+ if (m.right().Is(0)) {
+ Node* const value = m.left().node();
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt32Add:
+ return VisitWordCompare(this, value, kArmCmn, &cont, true);
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, value, kArmCmp, &cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, value, kArmTst, &cont, true);
+ case IrOpcode::kWord32Or:
+ return VisitBinop(this, value, kArmOrr, kArmOrr, &cont);
+ case IrOpcode::kWord32Xor:
+ return VisitWordCompare(this, value, kArmTeq, &cont, true);
+ case IrOpcode::kWord32Sar:
+ return VisitShift(this, value, TryMatchASR, &cont);
+ case IrOpcode::kWord32Shl:
+ return VisitShift(this, value, TryMatchLSL, &cont);
+ case IrOpcode::kWord32Shr:
+ return VisitShift(this, value, TryMatchLSR, &cont);
+ case IrOpcode::kWord32Ror:
+ return VisitShift(this, value, TryMatchROR, &cont);
+ default:
+ break;
+ }
+ return VisitWordTest(this, value, &cont);
+ }
}
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kArmSub, kArmRsb, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kArmSub, kArmRsb, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
}
} // namespace compiler
=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64.cc Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64.cc Tue Oct 14 07:51:07 2014 UTC
@@ -813,15 +813,73 @@
}
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
- FlagsContinuation* cont) {
- VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
-}
+void InstructionSelector::VisitCall(Node* node) {
+ Arm64OperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ }
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
- FlagsContinuation* cont) {
- VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ // Push the arguments to the stack.
+ bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
+ int aligned_push_count = buffer.pushed_nodes.size();
+ // TODO(dcarney): claim and poke probably take small immediates,
+ // loop here or whatever.
+ // Bump the stack pointer(s).
+ if (aligned_push_count > 0) {
+ // TODO(dcarney): it would be better to bump the csp here only
+ // and emit paired stores with increment for non c frames.
+ Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+ }
+ // Move arguments to the stack.
+ {
+ int slot = buffer.pushed_nodes.size() - 1;
+ // Emit the uneven pushes.
+ if (pushed_count_uneven) {
+ Node* input = buffer.pushed_nodes[slot];
+ Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
+ slot--;
+ }
+ // Now all pushes can be done in pairs.
+ for (; slot >= 0; slot -= 2) {
+ Emit(kArm64PokePair | MiscField::encode(slot), NULL,
+ g.UseRegister(buffer.pushed_nodes[slot]),
+ g.UseRegister(buffer.pushed_nodes[slot - 1]));
+ }
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
+ call_instr->MarkAsCall();
}
@@ -864,132 +922,287 @@
}
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
- switch (node->opcode()) {
- case IrOpcode::kInt32Add:
- return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
- case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
- case IrOpcode::kWord32And:
- return VisitWordCompare(this, node, kArm64Tst32, cont, true);
- default:
- break;
- }
-
- Arm64OperandGenerator g(this);
- VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
- cont);
+static void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kArm64Cmp32, cont, false);
}
-void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
- switch (node->opcode()) {
- case IrOpcode::kWord64And:
- return VisitWordCompare(this, node, kArm64Tst, cont, true);
- default:
- break;
- }
-
- Arm64OperandGenerator g(this);
- VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
+static void VisitWordTest(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont)
{
+ Arm64OperandGenerator g(selector);
+ VisitCompare(selector, opcode, g.UseRegister(node), g.UseRegister(node),
+ cont);
}
-void InstructionSelector::VisitWord32Compare(Node* node,
- FlagsContinuation* cont) {
- VisitWordCompare(this, node, kArm64Cmp32, cont, false);
+static void VisitWord32Test(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordTest(selector, node, kArm64Tst32, cont);
}
-void InstructionSelector::VisitWord64Compare(Node* node,
- FlagsContinuation* cont) {
- VisitWordCompare(this, node, kArm64Cmp, cont, false);
+static void VisitWord64Test(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordTest(selector, node, kArm64Tst, cont);
}
-void InstructionSelector::VisitFloat64Compare(Node* node,
- FlagsContinuation* cont) {
- Arm64OperandGenerator g(this);
+// Shared routine for multiple float compare operations.
+static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
+ VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(left),
g.UseRegister(right), cont);
}
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
- BasicBlock* deoptimization) {
- Arm64OperandGenerator g(this);
- CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ OperandGenerator g(this);
+ Node* user = branch;
+ Node* value = branch->InputAt(0);
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+ // If we can fall through to the true block, invert the branch.
+ if (IsNextInAssemblyOrder(tbranch)) {
+ cont.Negate();
+ cont.SwapBlocks();
}
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
- // Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
- // register if there are multiple uses of it. Improve constant pool and the
- // heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(call, &buffer, true, false);
-
- // Push the arguments to the stack.
- bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
- int aligned_push_count = buffer.pushed_nodes.size();
- // TODO(dcarney): claim and poke probably take small immediates,
- // loop here or whatever.
- // Bump the stack pointer(s).
- if (aligned_push_count > 0) {
- // TODO(dcarney): it would be better to bump the csp here only
- // and emit paired stores with increment for non c frames.
- Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+ // Try to combine the branch with a comparison.
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(this, value, &cont);
+ case IrOpcode::kInt32LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(this, value, &cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, &cont);
+ case IrOpcode::kUint32LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(this, value, &cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, &cont);
+ case IrOpcode::kWord64Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false);
+ case IrOpcode::kInt64LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false);
+ case IrOpcode::kUint64LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(this, value, kArm64Cmp, &cont, false);
+ case IrOpcode::kFloat64Equal:
+ cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kFloat64LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (OpParameter<size_t>(value) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation> is either NULL, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch).
+ Node* node = value->InputAt(0);
+ Node* result = node->FindProjection(0);
+ if (result == NULL || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
+ kArithmeticImm, &cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
+ kArithmeticImm, &cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Add:
+ return VisitWordCompare(this, value, kArm64Cmn32, &cont, true);
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, value, kArm64Cmp32, &cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, value, kArm64Tst32, &cont, true);
+ default:
+ break;
+ }
}
- // Move arguments to the stack.
- {
- int slot = buffer.pushed_nodes.size() - 1;
- // Emit the uneven pushes.
- if (pushed_count_uneven) {
- Node* input = buffer.pushed_nodes[slot];
- Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
- slot--;
- }
- // Now all pushes can be done in pairs.
- for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair | MiscField::encode(slot), NULL,
- g.UseRegister(buffer.pushed_nodes[slot]),
- g.UseRegister(buffer.pushed_nodes[slot - 1]));
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ VisitWord32Test(this, value, &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ Node* const user = node;
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(user);
+ if (m.right().Is(0)) {
+ Node* const value = m.left().node();
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt32Add:
+ return VisitWordCompare(this, value, kArm64Cmn32, &cont, true);
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, value, kArm64Cmp32, &cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, value, kArm64Tst32, &cont, true);
+ default:
+ break;
+ }
+ return VisitWord32Test(this, value, &cont);
}
}
+ VisitWord32Compare(this, node, &cont);
+}
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ Node* const user = node;
+ FlagsContinuation cont(kEqual, node);
+ Int64BinopMatcher m(user);
+ if (m.right().Is(0)) {
+ Node* const value = m.left().node();
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kArm64Tst, &cont, true);
+ default:
+ break;
+ }
+ return VisitWord64Test(this, value, &cont);
}
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
}
- opcode |= MiscField::encode(descriptor->flags());
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false);
+}
- // Emit the call instruction.
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
- buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
- if (deoptimization != NULL) {
- DCHECK(continuation != NULL);
- call_instr->MarkAsControl();
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32,
+ kArithmeticImm, &cont);
}
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
+ kArithmeticImm, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWordCompare(this, node, kArm64Cmp, &cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
}
} // namespace compiler
=======================================
--- /trunk/src/compiler/code-generator.h Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/compiler/code-generator.h Tue Oct 14 07:51:07 2014 UTC
@@ -27,7 +27,6 @@
InstructionSequence* code() const { return code_; }
Frame* frame() const { return code()->frame(); }
- Graph* graph() const { return code()->graph(); }
Isolate* isolate() const { return zone()->isolate(); }
Linkage* linkage() const { return code()->linkage(); }
Schedule* schedule() const { return code()->schedule(); }
=======================================
--- /trunk/src/compiler/common-node-cache.h Tue Sep 9 00:05:04 2014 UTC
+++ /trunk/src/compiler/common-node-cache.h Tue Oct 14 07:51:07 2014 UTC
@@ -20,6 +20,10 @@
Node** FindInt32Constant(int32_t value) {
return int32_constants_.Find(zone_, value);
}
+
+ Node** FindInt64Constant(int64_t value) {
+ return int64_constants_.Find(zone_, value);
+ }
Node** FindFloat64Constant(double value) {
// We canonicalize double constants at the bit representation level.
@@ -39,13 +43,15 @@
private:
Int32NodeCache int32_constants_;
+ Int64NodeCache int64_constants_;
Int64NodeCache float64_constants_;
PtrNodeCache external_constants_;
Int64NodeCache number_constants_;
Zone* zone_;
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_COMMON_NODE_CACHE_H_
=======================================
--- /trunk/src/compiler/ia32/code-generator-ia32.cc Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/compiler/ia32/code-generator-ia32.cc Tue Oct 14 07:51:07 2014 UTC
@@ -351,10 +351,10 @@
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSECvtss2sd:
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSECvtsd2ss:
- __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
=======================================
--- /trunk/src/compiler/ia32/instruction-selector-ia32.cc Fri Oct 3 00:04:58 2014 UTC
+++ /trunk/src/compiler/ia32/instruction-selector-ia32.cc Tue Oct 14 07:51:07 2014 UTC
@@ -499,8 +499,7 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
IA32OperandGenerator g(this);
- // TODO(turbofan): IA32 SSE conversions should take an operand.
- Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -530,8 +529,7 @@
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
IA32OperandGenerator g(this);
- // TODO(turbofan): IA32 SSE conversions should take an operand.
- Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -578,15 +576,51 @@
}
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
- FlagsContinuation* cont) {
- VisitBinop(this, node, kIA32Add, cont);
-}
+void InstructionSelector::VisitCall(Node* node) {
+ IA32OperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+ FrameStateDescriptor* frame_state_descriptor = NULL;
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
- FlagsContinuation* cont) {
- VisitBinop(this, node, kIA32Sub, cont);
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Push any stack arguments.
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
+ // TODO(titzer): handle pushing double parameters.
+ Emit(kIA32Push, NULL,
+ g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
+ call_instr->MarkAsCall();
}
@@ -630,87 +664,197 @@
}
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
- switch (node->opcode()) {
- case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, node, kIA32Cmp, cont, false);
- case IrOpcode::kWord32And:
- return VisitWordCompare(this, node, kIA32Test, cont, true);
- default:
+static void VisitWordTest(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ IA32OperandGenerator g(selector);
+ VisitCompare(selector, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+// Shared routine for multiple float compare operations.
+static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ IA32OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right),
+ cont);
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ OperandGenerator g(this);
+ Node* user = branch;
+ Node* value = branch->InputAt(0);
+
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+ // If we can fall through to the true block, invert the branch.
+ if (IsNextInAssemblyOrder(tbranch)) {
+ cont.Negate();
+ cont.SwapBlocks();
+ }
+
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
break;
+ }
}
- IA32OperandGenerator g(this);
- VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
+ // Try to combine the branch with a comparison.
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(this, value, kIA32Cmp, &cont, false);
+ case IrOpcode::kInt32LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(this, value, kIA32Cmp, &cont, false);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kIA32Cmp, &cont, false);
+ case IrOpcode::kUint32LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(this, value, kIA32Cmp, &cont, false);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kIA32Cmp, &cont, false);
+ case IrOpcode::kFloat64Equal:
+ cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kFloat64LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (OpParameter<size_t>(value) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation> is either NULL, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch).
+ Node* node = value->InputAt(0);
+ Node* result = node->FindProjection(0);
+ if (result == NULL || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kIA32Add, &cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kIA32Sub, &cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, value, kIA32Cmp, &cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, value, kIA32Test, &cont, true);
+ default:
+ break;
+ }
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ VisitWordTest(this, value, &cont);
}
-void InstructionSelector::VisitWord32Compare(Node* node,
- FlagsContinuation* cont) {
- VisitWordCompare(this, node, kIA32Cmp, cont, false);
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ Node* const user = node;
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(user);
+ if (m.right().Is(0)) {
+ Node* const value = m.left().node();
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, value, kIA32Cmp, &cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, value, kIA32Test, &cont, true);
+ default:
+ break;
+ }
+ return VisitWordTest(this, value, &cont);
+ }
+ }
+ return VisitWordCompare(this, node, kIA32Cmp, &cont, false);
}
-void InstructionSelector::VisitFloat64Compare(Node* node,
- FlagsContinuation* cont) {
- IA32OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ return VisitWordCompare(this, node, kIA32Cmp, &cont, false);
}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ return VisitWordCompare(this, node, kIA32Cmp, &cont, false);
+}
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
- BasicBlock* deoptimization) {
- IA32OperandGenerator g(this);
- CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- FrameStateDescriptor* frame_state_descriptor = NULL;
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ return VisitWordCompare(this, node, kIA32Cmp, &cont, false);
+}
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor =
- GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ return VisitWordCompare(this, node, kIA32Cmp, &cont, false);
+}
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(call, &buffer, true, true);
- // Push any stack arguments.
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- // TODO(titzer): handle pushing double parameters.
- Emit(kIA32Push, NULL,
-         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kIA32Add, &cont);
}
+ FlagsContinuation cont;
+ VisitBinop(this, node, kIA32Add, &cont);
+}
+
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kIA32Sub, &cont);
}
- opcode |= MiscField::encode(descriptor->flags());
+ FlagsContinuation cont;
+ VisitBinop(this, node, kIA32Sub, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
- // Emit the call instruction.
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
-           buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
- if (deoptimization != NULL) {
- DCHECK(continuation != NULL);
- call_instr->MarkAsControl();
- }
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
}
} // namespace compiler
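The rewritten ia32 VisitBranch folds a comparison node directly into the branch instead of materializing a boolean and testing it against zero, and it peels off (x == 0) wrappers by negating the continuation. The equivalence this relies on can be checked in isolation; the snippet below is only an illustrative stand-alone sketch with invented variable names, not selector code:

    // Stand-alone illustration (not V8 code): branching on (a < b) == 0 is the
    // same as branching on the negated comparison, so the selector can emit a
    // single cmp plus an inverted conditional jump.
    #include <cassert>

    int main() {
      int a = 3, b = 7;
      bool branch_on_materialized_bool = ((a < b) == 0);  // compare, setcc, test, jump
      bool branch_on_inverted_flags = !(a < b);            // compare, inverted jump
      assert(branch_on_materialized_bool == branch_on_inverted_flags);
      return 0;
    }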
=======================================
--- /trunk/src/compiler/instruction-selector-impl.h Wed Oct 8 00:05:11 2014 UTC
+++ /trunk/src/compiler/instruction-selector-impl.h Tue Oct 14 07:51:07 2014 UTC
@@ -134,7 +134,6 @@
}
protected:
- Graph* graph() const { return selector()->graph(); }
InstructionSelector* selector() const { return selector_; }
InstructionSequence* sequence() const { return selector()->sequence(); }
Isolate* isolate() const { return zone()->isolate(); }
=======================================
--- /trunk/src/compiler/instruction-selector.cc Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/compiler/instruction-selector.cc Tue Oct 14 07:51:07 2014 UTC
@@ -22,8 +22,8 @@
features_(features),
current_block_(NULL),
instructions_(zone()),
- defined_(graph()->NodeCount(), false, zone()),
- used_(graph()->NodeCount(), false, zone()) {}
+ defined_(sequence->node_count(), false, zone()),
+ used_(sequence->node_count(), false, zone()) {}
void InstructionSelector::SelectInstructions() {
@@ -607,7 +607,7 @@
// TODO(turbofan): only mark non-smis as references.
return MarkAsReference(node), VisitConstant(node);
case IrOpcode::kCall:
- return VisitCall(node, NULL, NULL);
+ return VisitCall(node);
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
return;
@@ -745,111 +745,11 @@
#if V8_TURBOFAN_BACKEND
-void InstructionSelector::VisitWord32Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWord32Test(m.left().node(), &cont);
- }
- VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
- VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
- VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
- VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
- VisitWord32Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitWord64Equal(Node* node) {
- FlagsContinuation cont(kEqual, node);
- Int64BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWord64Test(m.left().node(), &cont);
- }
- VisitWord64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
- FlagsContinuation cont(kOverflow, ovf);
- return VisitInt32AddWithOverflow(node, &cont);
- }
- FlagsContinuation cont;
- VisitInt32AddWithOverflow(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = node->FindProjection(1)) {
- FlagsContinuation cont(kOverflow, ovf);
- return VisitInt32SubWithOverflow(node, &cont);
- }
- FlagsContinuation cont;
- VisitInt32SubWithOverflow(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt64LessThan(Node* node) {
- FlagsContinuation cont(kSignedLessThan, node);
- VisitWord64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kSignedLessThanOrEqual, node);
- VisitWord64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitUint64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
- VisitWord64Compare(node, &cont);
-}
-
-
void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
OperandGenerator g(this);
Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
-
-
-void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont(kUnorderedEqual, node);
- VisitFloat64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnorderedLessThan, node);
- VisitFloat64Compare(node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
- VisitFloat64Compare(node, &cont);
-}
void InstructionSelector::VisitLoadStackPointer(Node* node) {
@@ -881,6 +781,9 @@
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
@@ -893,12 +796,23 @@
void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
@@ -921,23 +835,6 @@
#endif // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
-// 32-bit targets and unsupported architectures need dummy implementations of
-// selected 64-bit ops.
-#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
-
-void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
- UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitWord64Compare(Node* node,
- FlagsContinuation* cont) {
- UNIMPLEMENTED();
-}
-
-#endif // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
-
-
void InstructionSelector::VisitFinish(Node* node) {
OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -999,119 +896,6 @@
Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
}
}
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- OperandGenerator g(this);
- Node* user = branch;
- Node* value = branch->InputAt(0);
-
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
- // If we can fall through to the true block, invert the branch.
- if (IsNextInAssemblyOrder(tbranch)) {
- cont.Negate();
- cont.SwapBlocks();
- }
-
-  // Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value)) {
- if (value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- } else if (value->opcode() == IrOpcode::kWord64Equal) {
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- } else {
- break;
- }
- }
-
- // Try to combine the branch with a comparison.
- if (CanCover(user, value)) {
- switch (value->opcode()) {
- case IrOpcode::kWord32Equal:
- cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitWord32Compare(value, &cont);
- case IrOpcode::kInt32LessThan:
- cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord32Compare(value, &cont);
- case IrOpcode::kInt32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord32Compare(value, &cont);
- case IrOpcode::kUint32LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord32Compare(value, &cont);
- case IrOpcode::kUint32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWord32Compare(value, &cont);
- case IrOpcode::kWord64Equal:
- cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitWord64Compare(value, &cont);
- case IrOpcode::kInt64LessThan:
- cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWord64Compare(value, &cont);
- case IrOpcode::kInt64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWord64Compare(value, &cont);
- case IrOpcode::kUint64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWord64Compare(value, &cont);
- case IrOpcode::kFloat64Equal:
- cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat64Compare(value, &cont);
- case IrOpcode::kFloat64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
- return VisitFloat64Compare(value, &cont);
- case IrOpcode::kFloat64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
- return VisitFloat64Compare(value, &cont);
- case IrOpcode::kProjection:
- // Check if this is the overflow output projection of an
- // <Operation>WithOverflow node.
- if (OpParameter<size_t>(value) == 1u) {
- // We cannot combine the <Operation>WithOverflow with this branch
- // unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
-          // actual value, or was already defined, which means it is scheduled
- // *AFTER* this branch).
- Node* node = value->InputAt(0);
- Node* result = node->FindProjection(0);
- if (result == NULL || IsDefined(result)) {
- switch (node->opcode()) {
- case IrOpcode::kInt32AddWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitInt32AddWithOverflow(node, &cont);
- case IrOpcode::kInt32SubWithOverflow:
- cont.OverwriteAndNegateIfEqual(kOverflow);
- return VisitInt32SubWithOverflow(node, &cont);
- default:
- break;
- }
- }
- }
- break;
- default:
- break;
- }
- }
-
- // Branch could not be combined with a compare, emit compare against 0.
- VisitWord32Test(value, &cont);
-}
void InstructionSelector::VisitReturn(Node* value) {
@@ -1234,37 +1018,7 @@
#undef DECLARE_UNIMPLEMENTED_SELECTOR
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
- UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
- UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
- UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitWord32Compare(Node* node,
- FlagsContinuation* cont) {
- UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitFloat64Compare(Node* node,
- FlagsContinuation* cont) {
- UNIMPLEMENTED();
-}
-
-
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
- BasicBlock* deoptimization) {}
+void InstructionSelector::VisitCall(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TURBOFAN_BACKEND
=======================================
--- /trunk/src/compiler/instruction-selector.h Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/compiler/instruction-selector.h Tue Oct 14 07:51:07 2014 UTC
@@ -169,22 +169,12 @@
MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
- void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
- void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);
-
- void VisitWord32Test(Node* node, FlagsContinuation* cont);
- void VisitWord64Test(Node* node, FlagsContinuation* cont);
- void VisitWord32Compare(Node* node, FlagsContinuation* cont);
- void VisitWord64Compare(Node* node, FlagsContinuation* cont);
- void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
-
void VisitFinish(Node* node);
void VisitParameter(Node* node);
void VisitPhi(Node* node);
void VisitProjection(Node* node);
void VisitConstant(Node* node);
- void VisitCall(Node* call, BasicBlock* continuation,
- BasicBlock* deoptimization);
+ void VisitCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitReturn(Node* value);
@@ -193,7 +183,6 @@
  // ===========================================================================
- Graph* graph() const { return sequence()->graph(); }
Linkage* linkage() const { return sequence()->linkage(); }
Schedule* schedule() const { return sequence()->schedule(); }
InstructionSequence* sequence() const { return sequence_; }
=======================================
--- /trunk/src/compiler/instruction.cc Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/compiler/instruction.cc Tue Oct 14 07:51:07 2014 UTC
@@ -318,8 +318,9 @@
InstructionSequence::InstructionSequence(Linkage* linkage, Graph* graph,
Schedule* schedule)
- : graph_(graph),
- node_map_(zone()->NewArray<int>(graph->NodeCount())),
+ : zone_(schedule->zone()),
+ node_count_(graph->NodeCount()),
+ node_map_(zone()->NewArray<int>(node_count_)),
linkage_(linkage),
schedule_(schedule),
constants_(ConstantMap::key_compare(),
@@ -331,7 +332,7 @@
doubles_(std::less<int>(),
VirtualRegisterSet::allocator_type(zone())),
references_(std::less<int>(),
VirtualRegisterSet::allocator_type(zone())),
deoptimization_entries_(zone()) {
- for (int i = 0; i < graph->NodeCount(); ++i) {
+ for (int i = 0; i < node_count_; ++i) {
node_map_[i] = -1;
}
}
=======================================
--- /trunk/src/compiler/instruction.h Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/compiler/instruction.h Tue Oct 14 07:51:07 2014 UTC
@@ -767,7 +767,7 @@
int NextVirtualRegister() { return next_virtual_register_++; }
int VirtualRegisterCount() const { return next_virtual_register_; }
- int ValueCount() const { return graph_->NodeCount(); }
+ int node_count() const { return node_count_; }
int BasicBlockCount() const {
return static_cast<int>(schedule_->rpo_order()->size());
@@ -815,12 +815,11 @@
}
Frame* frame() { return &frame_; }
- Graph* graph() const { return graph_; }
Isolate* isolate() const { return zone()->isolate(); }
Linkage* linkage() const { return linkage_; }
Schedule* schedule() const { return schedule_; }
const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
- Zone* zone() const { return graph_->zone(); }
+ Zone* zone() const { return zone_; }
// Used by the code generator while adding instructions.
int AddInstruction(Instruction* instr, BasicBlock* block);
@@ -874,7 +873,8 @@
  typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
- Graph* graph_;
+ Zone* zone_;
+ int node_count_;
int* node_map_;
Linkage* linkage_;
Schedule* schedule_;
=======================================
--- /trunk/src/compiler/js-graph.cc Wed Oct 8 00:05:11 2014 UTC
+++ /trunk/src/compiler/js-graph.cc Tue Oct 14 07:51:07 2014 UTC
@@ -155,6 +155,15 @@
}
return *loc;
}
+
+
+Node* JSGraph::Int64Constant(int64_t value) {
+ Node** loc = cache_.FindInt64Constant(value);
+ if (*loc == NULL) {
+ *loc = NewNode(common()->Int64Constant(value));
+ }
+ return *loc;
+}
Node* JSGraph::NumberConstant(double value) {
@@ -188,6 +197,7 @@
}
return *loc;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
=======================================
--- /trunk/src/compiler/js-graph.h Wed Oct 8 00:05:11 2014 UTC
+++ /trunk/src/compiler/js-graph.h Tue Oct 14 07:51:07 2014 UTC
@@ -68,6 +68,21 @@
Node* Uint32Constant(uint32_t value) {
return Int32Constant(bit_cast<int32_t>(value));
}
+
+ // Creates a Int64Constant node, usually canonicalized.
+ Node* Int64Constant(int64_t value);
+ Node* Uint64Constant(uint64_t value) {
+ return Int64Constant(bit_cast<int64_t>(value));
+ }
+
+  // Creates a Int32Constant/Int64Constant node, depending on the word size of
+ // the target machine.
+  // TODO(turbofan): Code using Int32Constant/Int64Constant to store pointer
+ // constants is probably not serializable.
+ Node* IntPtrConstant(intptr_t value) {
+ return machine()->Is32() ? Int32Constant(static_cast<int32_t>(value))
+ : Int64Constant(static_cast<int64_t>(value));
+ }
// Creates a Float32Constant node, usually canonicalized.
Node* Float32Constant(float value);
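IntPtrConstant just dispatches on the word size of the target: 32-bit targets get an Int32Constant, 64-bit targets an Int64Constant. A stand-alone sketch of that dispatch (plain C++, no TurboFan graph; the printed names are only labels for the example):

    #include <cstdint>
    #include <cstdio>

    // Sketch only: chooses the constant width the way IntPtrConstant chooses
    // between Int32Constant and Int64Constant.
    int main() {
      intptr_t value = 0x1234;
      if (sizeof(intptr_t) == 4) {
        printf("Int32Constant(%d)\n", static_cast<int>(value));
      } else {
        printf("Int64Constant(%lld)\n", static_cast<long long>(value));
      }
      return 0;
    }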
=======================================
--- /trunk/src/compiler/js-typed-lowering.cc Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/compiler/js-typed-lowering.cc Tue Oct 14 07:51:07 2014 UTC
@@ -543,12 +543,10 @@
ExternalArrayType type = array->type();
uint32_t byte_length;
if (array->byte_length()->ToUint32(&byte_length)) {
- Node* elements = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
- graph()->start());
- Node* pointer = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
- elements, elements);
+ Handle<ExternalArray> elements =
+ Handle<ExternalArray>::cast(handle(array->elements()));
+ Node* pointer = jsgraph()->IntPtrConstant(
+ bit_cast<intptr_t>(elements->external_pointer()));
Node* length = jsgraph()->Uint32Constant(
static_cast<uint32_t>(byte_length / array->element_size()));
Node* effect = NodeProperties::GetEffectInput(node);
@@ -582,12 +580,10 @@
ExternalArrayType type = array->type();
uint32_t byte_length;
if (array->byte_length()->ToUint32(&byte_length)) {
- Node* elements = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
- graph()->start());
- Node* pointer = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
- elements, elements);
+ Handle<ExternalArray> elements =
+ Handle<ExternalArray>::cast(handle(array->elements()));
+ Node* pointer = jsgraph()->IntPtrConstant(
+ bit_cast<intptr_t>(elements->external_pointer()));
Node* length = jsgraph()->Uint32Constant(
static_cast<uint32_t>(byte_length / array->element_size()));
Node* effect = NodeProperties::GetEffectInput(node);
=======================================
--- /trunk/src/compiler/machine-operator-reducer.cc Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.cc Tue Oct 14 07:51:07 2014 UTC
@@ -262,19 +262,8 @@
}
break;
}
- case IrOpcode::kInt32Mod: {
- Int32BinopMatcher m(node);
- if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
- if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0
- // TODO(turbofan): if (m.left().Is(0))
- // TODO(turbofan): if (m.right().IsPowerOf2())
- // TODO(turbofan): if (m.right().Is(0))
- // TODO(turbofan): if (m.LeftEqualsRight())
- if (m.IsFoldable() && !m.right().Is(0)) { // K % K => K
- return ReplaceInt32(m.left().Value() % m.right().Value());
- }
- break;
- }
+ case IrOpcode::kInt32Mod:
+ return ReduceInt32Mod(node);
case IrOpcode::kUint32Mod: {
Uint32BinopMatcher m(node);
if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
@@ -508,6 +497,44 @@
}
return NoChange();
}
+
+
+Reduction MachineOperatorReducer::ReduceInt32Mod(Node* const node) {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(1)) return ReplaceInt32(0); // x % 1 => 0
+ if (m.right().Is(-1)) return ReplaceInt32(0); // x % -1 => 0
+ // TODO(turbofan): if (m.left().Is(0))
+ // TODO(turbofan): if (m.right().Is(0))
+ // TODO(turbofan): if (m.LeftEqualsRight())
+ if (m.IsFoldable() && !m.right().Is(0)) { // K % K => K
+ return ReplaceInt32(m.left().Value() % m.right().Value());
+ }
+ if (m.right().IsPowerOf2()) {
+ int32_t const divisor = m.right().Value();
+ Node* zero = Int32Constant(0);
+ Node* mask = Int32Constant(divisor - 1);
+ Node* dividend = m.left().node();
+
+    Node* check = graph()->NewNode(machine()->Int32LessThan(), dividend, zero);
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, graph()->start());
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* neg = graph()->NewNode(
+ machine()->Int32Sub(), zero,
+ graph()->NewNode(
+ machine()->Word32And(),
+            graph()->NewNode(machine()->Int32Sub(), zero, dividend), mask));
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* pos = graph()->NewNode(machine()->Word32And(), dividend, mask);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 2), neg, pos, merge);
+ return Replace(phi);
+ }
+ return NoChange();
+}
Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
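The new ReduceInt32Mod handles power-of-two divisors by building a branch-and-phi diamond: non-negative dividends are masked directly, negative dividends are negated, masked, and negated again so the result keeps truncated-division semantics. The arithmetic the diamond computes can be checked stand-alone (ordinary C++, not reducer code; names are invented for the example):

    #include <cassert>
    #include <cstdint>

    // Mirrors the diamond built for a power-of-two divisor: negative dividends
    // take the neg/and/neg path, the rest take a plain mask.
    int32_t ModPowerOfTwo(int32_t dividend, int32_t divisor) {
      int32_t mask = divisor - 1;
      if (dividend < 0) return -(-dividend & mask);
      return dividend & mask;
    }

    int main() {
      for (int32_t d : {-17, -8, -1, 0, 1, 7, 8, 23}) {
        assert(ModPowerOfTwo(d, 8) == d % 8);
      }
      return 0;
    }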
=======================================
--- /trunk/src/compiler/machine-operator-reducer.h Mon Sep 29 00:04:53 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.h Tue Oct 14 07:51:07 2014 UTC
@@ -49,6 +49,7 @@
return Replace(Int64Constant(value));
}
+ Reduction ReduceInt32Mod(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
Graph* graph() const;
=======================================
--- /trunk/src/compiler/mips/instruction-selector-mips.cc Fri Oct 3 00:04:58 2014 UTC
+++ /trunk/src/compiler/mips/instruction-selector-mips.cc Tue Oct 14 07:51:07 2014 UTC
@@ -401,21 +401,21 @@
  Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
- BasicBlock* deoptimization) {
+
+void InstructionSelector::VisitCall(Node* node) {
MipsOperandGenerator g(this);
- CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
FrameStateDescriptor* frame_state_descriptor = NULL;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
- GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
}
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(call, &buffer, true, false);
+ InitializeCallBuffer(node, &buffer, true, false);
// TODO(dcarney): might be possible to use claim/poke instead
// Push any stack arguments.
@@ -447,26 +447,11 @@
Instruction* call_instr =
Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
            buffer.instruction_args.size(), &buffer.instruction_args.front());
-
call_instr->MarkAsCall();
- if (deoptimization != NULL) {
- DCHECK(continuation != NULL);
- call_instr->MarkAsControl();
- }
}
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
- VisitBinop(this, node, kMipsAddOvf, cont);
-}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
- VisitBinop(this, node, kMipsSubOvf, cont);
-}
-
+namespace {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
@@ -483,12 +468,23 @@
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
}
}
+
+
+// Shared routine for multiple float compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ MipsOperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+  VisitCompare(selector, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
+ cont);
+}
// Shared routine for multiple word compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
-                             InstructionCode opcode, FlagsContinuation* cont,
- bool commutative) {
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
MipsOperandGenerator g(selector);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
@@ -508,35 +504,138 @@
}
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
- switch (node->opcode()) {
- case IrOpcode::kWord32And:
- // TODO(plind): understand the significance of 'IR and' special case.
- return VisitWordCompare(this, node, kMipsTst, cont, true);
- default:
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kMipsCmp, cont, false);
+}
+
+
+void VisitWordTest(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ MipsOperandGenerator g(selector);
+  // kMipsTst is a pseudo-instruction to do logical 'and' and leave the result
+ // in a dedicated tmp register.
+  VisitCompare(selector, kMipsTst, g.UseRegister(node), g.UseRegister(node),
+ cont);
+}
+
+} // namespace
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ MipsOperandGenerator g(this);
+ Node* user = branch;
+ Node* value = branch->InputAt(0);
+
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+ // If we can fall through to the true block, invert the branch.
+ if (IsNextInAssemblyOrder(tbranch)) {
+ cont.Negate();
+ cont.SwapBlocks();
+ }
+
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
break;
+ }
}
- MipsOperandGenerator g(this);
-  // kMipsTst is a pseudo-instruction to do logical 'and' and leave the result
- // in a dedicated tmp register.
-  VisitCompare(this, kMipsTst, g.UseRegister(node), g.UseRegister(node), cont);
+ // Try to combine the branch with a comparison.
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32And:
+        // TODO(plind): understand the significance of 'IR and' special case.
+ return VisitWordCompare(this, value, kMipsTst, &cont, true);
+ default:
+ break;
+ }
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ return VisitWordTest(this, value, &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ Node* const user = node;
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(user);
+ if (m.right().Is(0)) {
+ Node* const value = m.left().node();
+ return VisitWordTest(this, value, &cont);
+ }
+
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMipsAddOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMipsAddOvf, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMipsSubOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMipsSubOvf, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(this, node, &cont);
}
-void InstructionSelector::VisitWord32Compare(Node* node,
- FlagsContinuation* cont) {
- VisitWordCompare(this, node, kMipsCmp, cont, false);
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
}
-void InstructionSelector::VisitFloat64Compare(Node* node,
- FlagsContinuation* cont) {
- MipsOperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- VisitCompare(this, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
- cont);
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
}
} // namespace compiler
=======================================
--- /trunk/src/compiler/scheduler.cc Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/compiler/scheduler.cc Tue Oct 14 07:51:07 2014 UTC
@@ -63,7 +63,7 @@
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
- SchedulerData def = {0, -1, false, false, kUnknown};
+ SchedulerData def = {NULL, 0, false, false, kUnknown};
return def;
}
@@ -315,7 +315,7 @@
void Scheduler::BuildCFG() {
- Trace("---------------- CREATING CFG ------------------\n");
+ Trace("--- CREATING CFG -------------------------------------------\n");
CFGBuilder cfg_builder(zone_, this);
cfg_builder.Run();
// Initialize per-block data.
@@ -326,7 +326,7 @@
void Scheduler::GenerateImmediateDominatorTree() {
  // Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
// if this becomes really slow.
- Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
+ Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
BasicBlock* current_rpo = schedule_->rpo_order_[i];
if (current_rpo != schedule_->start()) {
@@ -405,7 +405,7 @@
void Scheduler::PrepareUses() {
- Trace("------------------- PREPARE USES ------------------\n");
+ Trace("--- PREPARE USES -------------------------------------------\n");
// Count the uses of every node, it will be used to ensure that all of a
// node's uses are scheduled before the node itself.
@@ -424,59 +424,55 @@
: scheduler_(scheduler), schedule_(scheduler->schedule_) {}
GenericGraphVisit::Control Pre(Node* node) {
+ Scheduler::SchedulerData* data = scheduler_->GetData(node);
if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
// Fixed nodes already know their schedule early position.
- Scheduler::SchedulerData* data = scheduler_->GetData(node);
- BasicBlock* block = schedule_->block(node);
- DCHECK(block != NULL);
- if (data->minimum_rpo_ < 0) {
- data->minimum_rpo_ = block->rpo_number();
+ if (data->minimum_block_ == NULL) {
+ data->minimum_block_ = schedule_->block(node);
Trace("Preschedule #%d:%s minimum_rpo = %d (fixed)\n", node->id(),
- node->op()->mnemonic(), data->minimum_rpo_);
+ node->op()->mnemonic(), data->minimum_block_->rpo_number());
}
} else {
// For unfixed nodes the minimum RPO is the max of all of the inputs.
- Scheduler::SchedulerData* data = scheduler_->GetData(node);
- if (data->minimum_rpo_ < 0) {
- data->minimum_rpo_ = ComputeMaximumInputRPO(node);
- if (data->minimum_rpo_ < 0) return GenericGraphVisit::REENTER;
+ if (data->minimum_block_ == NULL) {
+ data->minimum_block_ = ComputeMaximumInputRPO(node);
+        if (data->minimum_block_ == NULL) return GenericGraphVisit::REENTER;
Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
- node->op()->mnemonic(), data->minimum_rpo_);
+ node->op()->mnemonic(), data->minimum_block_->rpo_number());
}
- DCHECK_GE(data->minimum_rpo_, 0);
}
+ DCHECK_NE(data->minimum_block_, NULL);
return GenericGraphVisit::CONTINUE;
}
GenericGraphVisit::Control Post(Node* node) {
+ Scheduler::SchedulerData* data = scheduler_->GetData(node);
if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
- Scheduler::SchedulerData* data = scheduler_->GetData(node);
// For unfixed nodes the minimum RPO is the max of all of the inputs.
- if (data->minimum_rpo_ < 0) {
- data->minimum_rpo_ = ComputeMaximumInputRPO(node);
+ if (data->minimum_block_ == NULL) {
+ data->minimum_block_ = ComputeMaximumInputRPO(node);
Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
- node->op()->mnemonic(), data->minimum_rpo_);
+ node->op()->mnemonic(), data->minimum_block_->rpo_number());
}
- DCHECK_GE(data->minimum_rpo_, 0);
}
+ DCHECK_NE(data->minimum_block_, NULL);
return GenericGraphVisit::CONTINUE;
}
  // Computes the maximum of the minimum RPOs for all inputs. If the maximum
-  // cannot be determined (i.e. minimum RPO for at least one input not known),
- // then a negative number is returned.
- int ComputeMaximumInputRPO(Node* node) {
- int max_rpo = 0;
+  // cannot be determined (i.e. minimum RPO for at least one input is {NULL}),
+ // then {NULL} is returned.
+ BasicBlock* ComputeMaximumInputRPO(Node* node) {
+ BasicBlock* max_block = schedule_->start();
for (InputIter i = node->inputs().begin(); i != node->inputs().end();
++i) {
DCHECK_NE(node, *i); // Loops only exist for fixed nodes.
- int control_rpo = scheduler_->GetData(*i)->minimum_rpo_;
- if (control_rpo > max_rpo) {
- max_rpo = control_rpo;
- } else if (control_rpo < 0) {
- return control_rpo;
+ BasicBlock* block = scheduler_->GetData(*i)->minimum_block_;
+ if (block == NULL) return NULL;
+ if (block->rpo_number() > max_block->rpo_number()) {
+ max_block = block;
}
}
- return max_rpo;
+ return max_block;
}
private:
@@ -486,7 +482,7 @@
void Scheduler::ScheduleEarly() {
- Trace("------------------- SCHEDULE EARLY ----------------\n");
+ Trace("--- SCHEDULE EARLY -----------------------------------------\n");
// Compute the minimum RPO for each node thereby determining the earliest
// position each node could be placed within a valid schedule.
@@ -532,7 +528,7 @@
}
DCHECK(block != NULL);
- int min_rpo = data->minimum_rpo_;
+ int min_rpo = data->minimum_block_->rpo_number();
Trace(
"Schedule late conservative for #%d:%s is B%d at loop depth %d, "
"minimum_rpo = %d\n",
@@ -619,7 +615,7 @@
void Scheduler::ScheduleLate() {
- Trace("------------------- SCHEDULE LATE -----------------\n");
+ Trace("--- SCHEDULE LATE ------------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
Trace("roots: ");
for (NodeVectorIter i = schedule_root_nodes_.begin();
@@ -965,7 +961,7 @@
BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
Zone tmp_zone(schedule->zone()->isolate());
Zone* zone = &tmp_zone;
- Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
+ Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
// RPO should not have been computed for this schedule yet.
CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number());
CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
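The scheduler now records the earliest legal placement as a BasicBlock* instead of a raw RPO number, with NULL meaning the placement is not known yet. The max-of-inputs rule itself is unchanged; a simplified stand-in (not the real Scheduler types) looks like this:

    #include <vector>

    struct Block { int rpo_number; };

    // Earliest legal block for a node: the input block with the largest RPO
    // number, or nullptr if any input's placement is still unknown.
    Block* ComputeMaximumInputRPO(const std::vector<Block*>& input_blocks,
                                  Block* start) {
      Block* max_block = start;
      for (Block* block : input_blocks) {
        if (block == nullptr) return nullptr;
        if (block->rpo_number > max_block->rpo_number) max_block = block;
      }
      return max_block;
    }

    int main() {
      Block start{0}, b1{3}, b2{5};
      std::vector<Block*> inputs = {&b1, &b2};
      return ComputeMaximumInputRPO(inputs, &start)->rpo_number == 5 ? 0 : 1;
    }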
=======================================
--- /trunk/src/compiler/scheduler.h Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/compiler/scheduler.h Tue Oct 14 07:51:07 2014 UTC
@@ -31,8 +31,8 @@
// Per-node data tracked during scheduling.
struct SchedulerData {
+ BasicBlock* minimum_block_; // Minimum legal RPO placement.
  int unscheduled_count_;      // Number of unscheduled uses of this node.
- int minimum_rpo_; // Minimum legal RPO placement.
  bool is_connected_control_;  // {true} if control-connected to the end node.
  bool is_floating_control_;   // {true} if control, but not control-connected
// to the end node.
=======================================
--- /trunk/src/compiler/x64/code-generator-x64.cc Wed Oct 8 00:05:11 2014 UTC
+++ /trunk/src/compiler/x64/code-generator-x64.cc Tue Oct 14 07:51:07 2014 UTC
@@ -405,10 +405,18 @@
}
break;
case kSSECvtss2sd:
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
break;
case kSSECvtsd2ss:
- __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
break;
case kSSEFloat64ToInt32:
if (instr->InputAt(0)->IsDoubleRegister()) {
=======================================
--- /trunk/src/compiler/x64/instruction-selector-x64.cc Wed Oct 8 00:05:11 2014 UTC
+++ /trunk/src/compiler/x64/instruction-selector-x64.cc Tue Oct 14 07:51:07 2014 UTC
@@ -608,8 +608,7 @@
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
X64OperandGenerator g(this);
- // TODO(turbofan): X64 SSE conversions should take an operand.
-  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -651,8 +650,7 @@
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
X64OperandGenerator g(this);
- // TODO(turbofan): X64 SSE conversions should take an operand.
-  Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -705,15 +703,50 @@
}
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
- VisitBinop(this, node, kX64Add32, cont);
-}
+void InstructionSelector::VisitCall(Node* node) {
+ X64OperandGenerator g(this);
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor = GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())));
+ }
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
-                                                    FlagsContinuation* cont) {
- VisitBinop(this, node, kX64Sub32, cont);
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Push any stack arguments.
+ for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+ input != buffer.pushed_nodes.rend(); input++) {
+ // TODO(titzer): handle pushing double parameters.
+ Emit(kX64Push, NULL,
+         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ Instruction* call_instr =
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+ call_instr->MarkAsCall();
}
@@ -753,107 +786,261 @@
}
-void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
- switch (node->opcode()) {
- case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, node, kX64Cmp32, cont, false);
- case IrOpcode::kWord32And:
- return VisitWordCompare(this, node, kX64Test32, cont, true);
- default:
- break;
- }
+static void VisitWordTest(InstructionSelector* selector, Node* node,
+                          InstructionCode opcode, FlagsContinuation* cont) {
+ X64OperandGenerator g(selector);
+ VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(-1), cont);
+}
- X64OperandGenerator g(this);
- VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
+
+static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ VisitCompare(selector, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right),
+ cont);
}
-void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
- switch (node->opcode()) {
- case IrOpcode::kInt64Sub:
- return VisitWordCompare(this, node, kX64Cmp, cont, false);
- case IrOpcode::kWord64And:
- return VisitWordCompare(this, node, kX64Test, cont, true);
- default:
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+ BasicBlock* fbranch) {
+ OperandGenerator g(this);
+ Node* user = branch;
+ Node* value = branch->InputAt(0);
+
+ FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+ // If we can fall through to the true block, invert the branch.
+ if (IsNextInAssemblyOrder(tbranch)) {
+ cont.Negate();
+ cont.SwapBlocks();
+ }
+
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
+ } else {
+ break;
+ }
+ } else {
break;
+ }
}
- X64OperandGenerator g(this);
- VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
+ // Try to combine the branch with a comparison.
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(this, value, kX64Cmp32, &cont, false);
+ case IrOpcode::kInt32LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(this, value, kX64Cmp32, &cont, false);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kX64Cmp32, &cont, false);
+ case IrOpcode::kUint32LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(this, value, kX64Cmp32, &cont, false);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kX64Cmp32, &cont, false);
+ case IrOpcode::kWord64Equal:
+ cont.OverwriteAndNegateIfEqual(kEqual);
+ return VisitWordCompare(this, value, kX64Cmp, &cont, false);
+ case IrOpcode::kInt64LessThan:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWordCompare(this, value, kX64Cmp, &cont, false);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWordCompare(this, value, kX64Cmp, &cont, false);
+ case IrOpcode::kUint64LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWordCompare(this, value, kX64Cmp, &cont, false);
+ case IrOpcode::kFloat64Equal:
+ cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kFloat64LessThan:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, &cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (OpParameter<size_t>(value) == 1u) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation> is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is scheduled
+ // *AFTER* this branch).
+ Node* node = value->InputAt(0);
+ Node* result = node->FindProjection(0);
+ if (result == NULL || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kX64Add32, &cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kX64Sub32, &cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, value, kX64Cmp32, &cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, value, kX64Test32, &cont, true);
+ default:
+ break;
+ }
+ }
+
+ // Branch could not be combined with a compare, emit compare against 0.
+ VisitWordTest(this, value, kX64Test32, &cont);
}
-void InstructionSelector::VisitWord32Compare(Node* node,
- FlagsContinuation* cont) {
- VisitWordCompare(this, node, kX64Cmp32, cont, false);
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ Node* const user = node;
+ FlagsContinuation cont(kEqual, node);
+ Int32BinopMatcher m(user);
+ if (m.right().Is(0)) {
+ Node* const value = m.left().node();
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, value, kX64Cmp32, &cont, false);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(this, value, kX64Test32, &cont, true);
+ default:
+ break;
+ }
+ return VisitWordTest(this, value, kX64Test32, &cont);
+ }
+ }
+ VisitWordCompare(this, node, kX64Cmp32, &cont, false);
}
-void InstructionSelector::VisitWord64Compare(Node* node,
- FlagsContinuation* cont) {
- VisitWordCompare(this, node, kX64Cmp, cont, false);
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWordCompare(this, node, kX64Cmp32, &cont, false);
}
-void InstructionSelector::VisitFloat64Compare(Node* node,
- FlagsContinuation* cont) {
- X64OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, kX64Cmp32, &cont, false);
}
-void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
- BasicBlock* deoptimization) {
- X64OperandGenerator g(this);
- CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWordCompare(this, node, kX64Cmp32, &cont, false);
+}
- FrameStateDescriptor* frame_state_descriptor = NULL;
- if (descriptor->NeedsFrameState()) {
- frame_state_descriptor = GetFrameStateDescriptor(
- call->InputAt(static_cast<int>(descriptor->InputCount())));
- }
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, kX64Cmp32, &cont, false);
+}
- // Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(call, &buffer, true, true);
- // Push any stack arguments.
- for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
- input != buffer.pushed_nodes.rend(); input++) {
- // TODO(titzer): handle pushing double parameters.
- Emit(kX64Push, NULL,
-         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ Node* const user = node;
+ FlagsContinuation cont(kEqual, node);
+ Int64BinopMatcher m(user);
+ if (m.right().Is(0)) {
+ Node* const value = m.left().node();
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt64Sub:
+ return VisitWordCompare(this, value, kX64Cmp, &cont, false);
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kX64Test, &cont, true);
+ default:
+ break;
+ }
+ return VisitWordTest(this, value, kX64Test, &cont);
+ }
}
+ VisitWordCompare(this, node, kX64Cmp, &cont, false);
+}
+
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
- break;
- }
- case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ VisitBinop(this, node, kX64Add32, &cont);
}
- opcode |= MiscField::encode(descriptor->flags());
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Add32, &cont);
+}
- // Emit the call instruction.
- Instruction* call_instr =
- Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
-           buffer.instruction_args.size(), &buffer.instruction_args.front());
- call_instr->MarkAsCall();
- if (deoptimization != NULL) {
- DCHECK(continuation != NULL);
- call_instr->MarkAsControl();
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Sub32, &cont);
}
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Sub32, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont(kSignedLessThan, node);
+ VisitWordCompare(this, node, kX64Cmp, &cont, false);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kSignedLessThanOrEqual, node);
+ VisitWordCompare(this, node, kX64Cmp, &cont, false);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont(kUnsignedLessThan, node);
+ VisitWordCompare(this, node, kX64Cmp, &cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont(kUnorderedEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
}
} // namespace compiler
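The new x64 VisitWord32Equal and VisitWord64Equal recognize comparisons against zero whose input is a subtraction or a bitwise and, and emit cmp or test directly instead of materializing the intermediate value. The identities being exploited, checked stand-alone (illustrative only, not selector code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t a = 0xdeadbeefcafe, b = a, c = 0x1234;
      // (a - b) == 0 holds exactly when a == b, so a cmp answers the Equal node.
      assert(((a - b) == 0) == (a == b));
      assert(((a - c) == 0) == (a == c));
      // (a & mask) == 0 is what a test instruction's zero flag reports.
      uint64_t mask = 0xff;
      assert(((a & mask) == 0) == (a % 256 == 0));
      return 0;
    }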
=======================================
--- /trunk/src/execution.cc Tue Sep 2 12:59:15 2014 UTC
+++ /trunk/src/execution.cc Tue Oct 14 07:51:07 2014 UTC
@@ -32,6 +32,21 @@
thread_local_.climit_ = thread_local_.real_climit_;
isolate_->heap()->SetStackLimits();
}
+
+
+static void PrintDeserializedCodeInfo(Handle<JSFunction> function) {
+ if (function->code() == function->shared()->code() &&
+ function->shared()->deserialized()) {
+ PrintF("Running deserialized script: ");
+ Object* script = function->shared()->script();
+ if (script->IsScript()) {
+ Script::cast(script)->name()->ShortPrint();
+ } else {
+ function->shared()->script()->ShortPrint();
+ }
+ PrintF("\n");
+ }
+}
MUST_USE_RESULT static MaybeHandle<Object> Invoke(
@@ -87,6 +102,7 @@
JSFunction* func = *function;
Object* recv = *receiver;
Object*** argv = reinterpret_cast<Object***>(args);
+ if (FLAG_profile_deserialization) PrintDeserializedCodeInfo(function);
value =
        CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
}
=======================================
--- /trunk/src/flag-definitions.h Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/flag-definitions.h Tue Oct 14 07:51:07 2014 UTC
@@ -901,7 +901,7 @@
#undef FLAG
#define FLAG FLAG_READONLY
-// assembler-arm.h
+// assembler.h
DEFINE_BOOL(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
"enable use of out-of-line constant pools (ARM only)")
=======================================
--- /trunk/src/heap/gc-idle-time-handler.cc Fri Oct 3 00:04:58 2014 UTC
+++ /trunk/src/heap/gc-idle-time-handler.cc Tue Oct 14 07:51:07 2014 UTC
@@ -63,6 +63,9 @@
size_t GCIdleTimeHandler::EstimateMarkCompactTime(
size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
+ // TODO(hpayer): Be more precise about the type of mark-compact event. It
+ // makes a huge difference if it is incremental or non-incremental and if
+ // compaction is happening.
if (mark_compact_speed_in_bytes_per_ms == 0) {
    mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
}
@@ -71,7 +74,7 @@
}
-bool GCIdleTimeHandler::DoScavenge(
+bool GCIdleTimeHandler::ShouldDoScavenge(
    size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
size_t scavenge_speed_in_bytes_per_ms,
size_t new_space_allocation_throughput_in_bytes_per_ms) {
@@ -108,6 +111,15 @@
}
return false;
}
+
+
+bool GCIdleTimeHandler::ShouldDoMarkCompact(
+ size_t idle_time_in_ms, size_t size_of_objects,
+ size_t mark_compact_speed_in_bytes_per_ms) {
+ return idle_time_in_ms >=
+ EstimateMarkCompactTime(size_of_objects,
+ mark_compact_speed_in_bytes_per_ms);
+}
// The following logic is implemented by the controller:
@@ -128,10 +140,11 @@
// that this currently may trigger a full garbage collection.
GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
HeapState heap_state) {
- if (DoScavenge(idle_time_in_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size,
- heap_state.scavenge_speed_in_bytes_per_ms,
-                 heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
+ if (ShouldDoScavenge(
+ idle_time_in_ms, heap_state.new_space_capacity,
+ heap_state.used_new_space_size,
+ heap_state.scavenge_speed_in_bytes_per_ms,
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
return GCIdleTimeAction::Scavenge();
}
@@ -148,10 +161,8 @@
}
if (heap_state.incremental_marking_stopped) {
- size_t estimated_time_in_ms =
- EstimateMarkCompactTime(heap_state.size_of_objects,
-                                heap_state.mark_compact_speed_in_bytes_per_ms);
- if (idle_time_in_ms >= estimated_time_in_ms ||
+ if (ShouldDoMarkCompact(idle_time_in_ms, heap_state.size_of_objects,
+                            heap_state.mark_compact_speed_in_bytes_per_ms) ||
(heap_state.size_of_objects < kSmallHeapSize &&
heap_state.contexts_disposed > 0)) {
      // If there are no more than two GCs left in this idle round and we are
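ShouldDoMarkCompact is the old inline check factored out: a full collection is only worthwhile if the estimated mark-compact time fits into the idle budget. A stand-alone sketch of the heuristic follows; the fallback speed constant is an assumption for the example, not necessarily the real kInitialConservativeMarkCompactSpeed value:

    #include <cstddef>
    #include <cstdio>

    // Assumed fallback when no mark-compact speed has been measured yet.
    static const size_t kAssumedConservativeSpeed = 2 * 1024 * 1024;  // bytes per ms

    size_t EstimateMarkCompactTime(size_t size_of_objects,
                                   size_t speed_in_bytes_per_ms) {
      if (speed_in_bytes_per_ms == 0) speed_in_bytes_per_ms = kAssumedConservativeSpeed;
      return size_of_objects / speed_in_bytes_per_ms;
    }

    bool ShouldDoMarkCompact(size_t idle_time_in_ms, size_t size_of_objects,
                             size_t speed_in_bytes_per_ms) {
      return idle_time_in_ms >=
             EstimateMarkCompactTime(size_of_objects, speed_in_bytes_per_ms);
    }

    int main() {
      printf("%d\n", ShouldDoMarkCompact(16, 64 * 1024 * 1024, 0) ? 1 : 0);  // 0: does not fit
      printf("%d\n", ShouldDoMarkCompact(40, 64 * 1024 * 1024, 0) ? 1 : 0);  // 1: fits
      return 0;
    }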
=======================================
--- /trunk/src/heap/gc-idle-time-handler.h Fri Oct 3 00:04:58 2014 UTC
+++ /trunk/src/heap/gc-idle-time-handler.h Tue Oct 14 07:51:07 2014 UTC
@@ -155,7 +155,11 @@
static size_t EstimateMarkCompactTime(
size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
- static bool DoScavenge(
+ static bool ShouldDoMarkCompact(size_t idle_time_in_ms,
+ size_t size_of_objects,
+                                  size_t mark_compact_speed_in_bytes_per_ms);
+
+ static bool ShouldDoScavenge(
      size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
size_t scavenger_speed_in_bytes_per_ms,
size_t new_space_allocation_throughput_in_bytes_per_ms);
=======================================
--- /trunk/src/heap/heap.cc Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/heap/heap.cc Tue Oct 14 07:51:07 2014 UTC
@@ -4268,11 +4268,14 @@
}
-void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
- incremental_marking()->Step(step_size,
-                              IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
-
- if (incremental_marking()->IsComplete()) {
+void Heap::TryFinalizeIdleIncrementalMarking(
+ size_t idle_time_in_ms, size_t size_of_objects,
+ size_t mark_compact_speed_in_bytes_per_ms) {
+ if (incremental_marking()->IsComplete() ||
+ (mark_compact_collector()->IsMarkingDequeEmpty() &&
+ gc_idle_time_handler_.ShouldDoMarkCompact(
+ idle_time_in_ms, size_of_objects,
+ mark_compact_speed_in_bytes_per_ms))) {
bool uncommit = false;
if (gc_count_at_last_idle_gc_ == gc_count_) {
// No GC since the last full GC, the mutator is probably not active.
@@ -4332,16 +4335,28 @@
gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
bool result = false;
+ int actual_time_in_ms = 0;
switch (action.type) {
case DONE:
result = true;
break;
- case DO_INCREMENTAL_MARKING:
+ case DO_INCREMENTAL_MARKING: {
if (incremental_marking()->IsStopped()) {
incremental_marking()->Start();
}
- AdvanceIdleIncrementalMarking(action.parameter);
+ incremental_marking()->Step(action.parameter,
+                                  IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_MARKING,
+                                  IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+      actual_time_in_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+ int remaining_idle_time_in_ms = idle_time_in_ms - actual_time_in_ms;
+ if (remaining_idle_time_in_ms > 0) {
+ TryFinalizeIdleIncrementalMarking(
+ remaining_idle_time_in_ms, heap_state.size_of_objects,
+ heap_state.mark_compact_speed_in_bytes_per_ms);
+ }
break;
+ }
case DO_FULL_GC: {
HistogramTimerScope scope(isolate_->counters()->gc_context());
const char* message = contexts_disposed_
@@ -4361,20 +4376,20 @@
break;
}
- int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
- if (actual_time_ms <= idle_time_in_ms) {
+ actual_time_in_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+ if (actual_time_in_ms <= idle_time_in_ms) {
if (action.type != DONE && action.type != DO_NOTHING) {
isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
- idle_time_in_ms - actual_time_ms);
+ idle_time_in_ms - actual_time_in_ms);
}
} else {
isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
- actual_time_ms - idle_time_in_ms);
+ actual_time_in_ms - idle_time_in_ms);
}
if (FLAG_trace_idle_notification) {
    PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
- idle_time_in_ms, actual_time_ms);
+ idle_time_in_ms, actual_time_in_ms);
action.Print();
PrintF("]\n");
}
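
The DO_INCREMENTAL_MARKING branch above now measures how long the marking step took and hands any unused idle budget to TryFinalizeIdleIncrementalMarking. A minimal sketch of that leftover-budget pattern, using std::chrono in place of V8's ElapsedTimer (an assumed simplification, not the Heap code itself):

  #include <chrono>
  #include <cstdio>

  static void DoIncrementalStep() { /* a bounded marking step would run here */ }

  static void TryFinalize(int remaining_ms) {
    std::printf("try to finalize marking with %d ms of idle time left\n",
                remaining_ms);
  }

  static void IdleNotification(int idle_time_in_ms) {
    auto start = std::chrono::steady_clock::now();
    DoIncrementalStep();
    int actual_time_in_ms = static_cast<int>(
        std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - start)
            .count());
    int remaining_idle_time_in_ms = idle_time_in_ms - actual_time_in_ms;
    // Only attempt finalization when the step left part of the budget unused.
    if (remaining_idle_time_in_ms > 0) TryFinalize(remaining_idle_time_in_ms);
  }

  int main() {
    IdleNotification(10);  // pretend the embedder granted 10 ms of idle time
    return 0;
  }
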
=======================================
--- /trunk/src/heap/heap.h Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/heap/heap.h Tue Oct 14 07:51:07 2014 UTC
@@ -1949,7 +1949,9 @@
void SelectScavengingVisitorsTable();
- void AdvanceIdleIncrementalMarking(intptr_t step_size);
+ void TryFinalizeIdleIncrementalMarking(
+ size_t idle_time_in_ms, size_t size_of_objects,
+ size_t mark_compact_speed_in_bytes_per_ms);
bool WorthActivatingIncrementalMarking();
=======================================
--- /trunk/src/heap/incremental-marking.cc Thu Oct 9 00:05:16 2014 UTC
+++ /trunk/src/heap/incremental-marking.cc Tue Oct 14 07:51:07 2014 UTC
@@ -27,6 +27,7 @@
should_hurry_(false),
marking_speed_(0),
allocated_(0),
+ idle_marking_delay_counter_(0),
no_marking_scope_depth_(0),
unscanned_bytes_of_large_object_(0) {}
@@ -891,24 +892,27 @@
}
-void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
- bool force_marking) {
+intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
+ CompletionAction action,
+ ForceMarkingAction marking,
+ ForceCompletionAction completion) {
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
!FLAG_incremental_marking_steps ||
(state_ != SWEEPING && state_ != MARKING)) {
- return;
+ return 0;
}
allocated_ += allocated_bytes;
- if (!force_marking && allocated_ < kAllocatedThreshold &&
+  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
write_barriers_invoked_since_last_step_ <
kWriteBarriersInvokedThreshold) {
- return;
+ return 0;
}
- if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
+ if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;
+ intptr_t bytes_processed = 0;
{
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
@@ -929,7 +933,6 @@
write_barriers_invoked_since_last_step_ = 0;
bytes_scanned_ += bytes_to_process;
- intptr_t bytes_processed = 0;
if (state_ == SWEEPING) {
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
@@ -942,7 +945,14 @@
}
} else if (state_ == MARKING) {
bytes_processed = ProcessMarkingDeque(bytes_to_process);
- if (marking_deque_.IsEmpty()) MarkingComplete(action);
+ if (marking_deque_.IsEmpty()) {
+ if (completion == FORCE_COMPLETION ||
+ IsIdleMarkingDelayCounterLimitReached()) {
+ MarkingComplete(action);
+ } else {
+ IncrementIdleMarkingDelayCounter();
+ }
+ }
}
steps_count_++;
@@ -958,6 +968,7 @@
// process the marking deque.
heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
}
+ return bytes_processed;
}
@@ -977,5 +988,20 @@
int64_t IncrementalMarking::SpaceLeftInOldSpace() {
return heap_->MaxOldGenerationSize() -
heap_->PromotedSpaceSizeOfObjects();
}
+
+
+bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
+ return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
+}
+
+
+void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
+ idle_marking_delay_counter_++;
+}
+
+
+void IncrementalMarking::ClearIdleMarkingDelayCounter() {
+ idle_marking_delay_counter_ = 0;
+}
}
} // namespace v8::internal
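
Step now reports the number of bytes it processed and, when called from the idle path with DO_NOT_FORCE_COMPLETION, postpones finalization until the marking deque has been seen empty more than kMaxIdleMarkingDelayCounter times. A standalone sketch of that back-off gate (hypothetical class, not V8's IncrementalMarking):

  #include <cstddef>
  #include <cstdio>

  class DelayedFinalizer {
   public:
    static const std::size_t kMaxIdleMarkingDelayCounter = 3;

    // Returns true when marking should actually be finalized.
    bool OnMarkingDequeEmpty(bool force_completion) {
      if (force_completion || counter_ > kMaxIdleMarkingDelayCounter) return true;
      ++counter_;  // postpone; a later step or a full GC may finish the job
      return false;
    }

    void Clear() { counter_ = 0; }  // analogous to ClearIdleMarkingDelayCounter

   private:
    std::size_t counter_ = 0;
  };

  int main() {
    DelayedFinalizer f;
    for (int i = 0; i < 6; ++i) {
      // Finalization is postponed four times, then allowed.
      std::printf("empty deque #%d -> finalize=%d\n", i,
                  f.OnMarkingDequeEmpty(false));
    }
    return 0;
  }
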
=======================================
--- /trunk/src/heap/incremental-marking.h Wed Sep 17 00:05:08 2014 UTC
+++ /trunk/src/heap/incremental-marking.h Tue Oct 14 07:51:07 2014 UTC
@@ -20,6 +20,10 @@
enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
+ enum ForceMarkingAction { FORCE_MARKING, DO_NOT_FORCE_MARKING };
+
+ enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
+
explicit IncrementalMarking(Heap* heap);
static void Initialize();
@@ -83,10 +87,15 @@
static const intptr_t kMarkingSpeedAccelleration = 2;
static const intptr_t kMaxMarkingSpeed = 1000;
+ // This is the upper bound for how many times we allow finalization of
+ // incremental marking to be postponed.
+ static const size_t kMaxIdleMarkingDelayCounter = 3;
+
void OldSpaceStep(intptr_t allocated);
- void Step(intptr_t allocated, CompletionAction action,
- bool force_marking = false);
+ intptr_t Step(intptr_t allocated, CompletionAction action,
+ ForceMarkingAction marking = DO_NOT_FORCE_MARKING,
+ ForceCompletionAction completion = FORCE_COMPLETION);
inline void RestartIfNotMarking() {
if (state_ == COMPLETE) {
@@ -164,6 +173,10 @@
void NotifyIncompleteScanOfObject(int unscanned_bytes) {
unscanned_bytes_of_large_object_ = unscanned_bytes;
}
+
+ void ClearIdleMarkingDelayCounter();
+
+ bool IsIdleMarkingDelayCounterLimitReached();
private:
int64_t SpaceLeftInOldSpace();
@@ -195,6 +208,8 @@
INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
+ void IncrementIdleMarkingDelayCounter();
+
Heap* heap_;
State state_;
@@ -213,6 +228,7 @@
intptr_t bytes_scanned_;
intptr_t allocated_;
intptr_t write_barriers_invoked_since_last_step_;
+ size_t idle_marking_delay_counter_;
int no_marking_scope_depth_;
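
Because the new Step parameters default to DO_NOT_FORCE_MARKING and FORCE_COMPLETION, existing callers keep the old behavior and only the idle-notification path opts into the new combination. A small sketch of the enum-plus-default-argument pattern (stand-in class with a simplified signature, not the V8 declaration):

  #include <cstdio>

  class Marker {
   public:
    enum ForceMarkingAction { FORCE_MARKING, DO_NOT_FORCE_MARKING };
    enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };

    // Existing callers pass only the first argument and keep the old behavior;
    // the idle path opts into forced marking without forced completion.
    long Step(long allocated_bytes,
              ForceMarkingAction marking = DO_NOT_FORCE_MARKING,
              ForceCompletionAction completion = FORCE_COMPLETION) {
      std::printf("allocated=%ld force_marking=%d force_completion=%d\n",
                  allocated_bytes, marking == FORCE_MARKING,
                  completion == FORCE_COMPLETION);
      return allocated_bytes;  // a real Step would return bytes processed
    }
  };

  int main() {
    Marker m;
    m.Step(4096);                                                          // old-style caller
    m.Step(1024, Marker::FORCE_MARKING, Marker::DO_NOT_FORCE_COMPLETION);  // idle path
    return 0;
  }
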
=======================================
--- /trunk/src/heap/mark-compact.cc Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/heap/mark-compact.cc Tue Oct 14 07:51:07 2014 UTC
@@ -862,6 +862,8 @@
Deoptimizer::DeoptimizeMarkedCode(isolate());
have_code_to_deoptimize_ = false;
}
+
+ heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
}
@@ -1935,6 +1937,11 @@
MarkBit mark_bit = Marking::MarkBitFrom(site);
SetMark(site, mark_bit);
}
+
+
+bool MarkCompactCollector::IsMarkingDequeEmpty() {
+ return marking_deque_.IsEmpty();
+}
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
=======================================
--- /trunk/src/heap/mark-compact.h Fri Oct 3 00:04:58 2014 UTC
+++ /trunk/src/heap/mark-compact.h Tue Oct 14 07:51:07 2014 UTC
@@ -657,6 +657,8 @@
// to artificially keep AllocationSites alive for a time.
void MarkAllocationSite(AllocationSite* site);
+ bool IsMarkingDequeEmpty();
+
private:
class SweeperTask;
=======================================
--- /trunk/src/ia32/assembler-ia32.cc Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/ia32/assembler-ia32.cc Tue Oct 14 07:51:07 2014 UTC
@@ -1951,7 +1951,7 @@
}
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
+void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
@@ -1960,7 +1960,7 @@
}
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
+void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
=======================================
--- /trunk/src/ia32/assembler-ia32.h Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/ia32/assembler-ia32.h Tue Oct 14 07:51:07 2014 UTC
@@ -958,9 +958,14 @@
  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
-
+ void cvtss2sd(XMMRegister dst, const Operand& src);
+ void cvtss2sd(XMMRegister dst, XMMRegister src) {
+ cvtss2sd(dst, Operand(src));
+ }
+ void cvtsd2ss(XMMRegister dst, const Operand& src);
+ void cvtsd2ss(XMMRegister dst, XMMRegister src) {
+ cvtsd2ss(dst, Operand(src));
+ }
void addsd(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
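
The header now keeps the Operand form of cvtss2sd/cvtsd2ss as the primitive and turns the register-register form into an inline overload that wraps the register in an Operand. A toy sketch of that forwarding-overload pattern, with made-up XMMRegister/Operand stand-ins (not the V8 types):

  #include <cstdio>

  struct XMMRegister { int code; };

  struct Operand {
    explicit Operand(XMMRegister r) : is_reg(true), reg(r) {}
    bool is_reg;
    XMMRegister reg;
  };

  struct Assembler {
    // The Operand form is the primitive; it covers memory and register operands.
    void cvtss2sd(XMMRegister dst, const Operand& src) {
      std::printf("cvtss2sd xmm%d, %s\n", dst.code, src.is_reg ? "reg" : "mem");
    }
    // The register-register form simply wraps the register and forwards.
    void cvtss2sd(XMMRegister dst, XMMRegister src) {
      cvtss2sd(dst, Operand(src));
    }
  };

  int main() {
    Assembler a;
    a.cvtss2sd(XMMRegister{1}, XMMRegister{2});  // dispatched via the Operand overload
    return 0;
  }
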
=======================================
--- /trunk/src/ia32/full-codegen-ia32.cc Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/ia32/full-codegen-ia32.cc Tue Oct 14 07:51:07 2014 UTC
@@ -2430,16 +2430,8 @@
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
-  // Left-hand side can only be a property, a global or a (parameter or local)
- // slot.
- enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
- LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
- if (prop != NULL) {
- assign_type = (prop->key()->IsPropertyName())
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
- }
+ LhsKind assign_type = GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
@@ -2458,6 +2450,42 @@
CallStoreIC();
break;
}
+ case NAMED_SUPER_PROPERTY: {
+ __ push(eax);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ // stack: value, this; eax: home_object
+ Register scratch = ecx;
+ Register scratch2 = edx;
+ __ mov(scratch, result_register()); // home_object
+ __ mov(eax, MemOperand(esp, kPointerSize)); // value
+ __ mov(scratch2, MemOperand(esp, 0)); // this
+ __ mov(MemOperand(esp, kPointerSize), scratch2); // this
+ __ mov(MemOperand(esp, 0), scratch); // home_object
+ // stack: this, home_object. eax: value
+ EmitNamedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ __ push(eax);
+ VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+ EmitLoadHomeObject(prop->obj()->AsSuperReference());
+ __ push(result_register());
+ VisitForAccumulatorValue(prop->key());
+ Register scratch = ecx;
+ Register scratch2 = edx;
+ __ mov(scratch2, MemOperand(esp, 2 * kPointerSize)); // value
+ // stack: value, this, home_object; eax: key, edx: value
+ __ mov(scratch, MemOperand(esp, kPointerSize)); // this
+ __ mov(MemOperand(esp, 2 * kPointerSize), scratch);
+ __ mov(scratch, MemOperand(esp, 0)); // home_object
+ __ mov(MemOperand(esp, kPointerSize), scratch);
+ __ mov(MemOperand(esp, 0), eax);
+ __ mov(eax, scratch2);
+ // stack: this, home_object, key; eax: value.
+ EmitKeyedSuperPropertyStore(prop);
+ break;
+ }
case KEYED_PROPERTY: {
__ push(eax); // Preserve value.
VisitForStackValue(prop->obj());
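
The NAMED_SUPER_PROPERTY case above rotates the value, the receiver, and the home object so the stack ends up as (this, home_object) with the value back in eax before EmitNamedSuperPropertyStore runs. A sketch that models the same rotation on a plain vector, purely to make the slot bookkeeping easy to follow (assumed simplification; the vector's back element corresponds to MemOperand(esp, 0)):

  #include <cassert>
  #include <string>
  #include <vector>

  int main() {
    std::vector<std::string> stack = {"value", "this"};  // value was pushed first
    std::string accumulator = "home_object";             // eax after EmitLoadHomeObject

    std::string scratch = accumulator;   // scratch  = home_object
    accumulator = stack[0];              // eax      = value   (esp + kPointerSize)
    std::string scratch2 = stack[1];     // scratch2 = this    (esp + 0)
    stack[0] = scratch2;                 // deeper slot now holds this
    stack[1] = scratch;                  // top slot now holds home_object

    // Final layout expected by EmitNamedSuperPropertyStore:
    // stack: this, home_object; accumulator: value.
    assert((stack == std::vector<std::string>{"this", "home_object"}));
    assert(accumulator == "value");
    return 0;
  }
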
=======================================
--- /trunk/src/ia32/macro-assembler-ia32.cc Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/ia32/macro-assembler-ia32.cc Tue Oct 14 07:51:07 2014 UTC
@@ -894,6 +894,13 @@
push(edi); // Callee's JS function.
}
}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on ia32.
+ UNREACHABLE();
+}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
=======================================
--- /trunk/src/ia32/macro-assembler-ia32.h Fri Oct 10 00:05:16 2014 UTC
+++ /trunk/src/ia32/macro-assembler-ia32.h Tue Oct 14 07:51:07 2014 UTC
@@ -941,6 +941,7 @@
// Activation support.
void EnterFrame(StackFrame::Type type);
+  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
// Expects object in eax and returns map with validated enum cache
=======================================
***Additional files exist in this changeset.***