Revision: 25089
Author:   [email protected]
Date:     Tue Nov  4 01:04:58 2014 UTC
Log: Version 3.30.30 (based on ce06f447d6c49dbee94a889a996bdee3b50f8e52)

Performance and stability improvements on all platforms.
https://code.google.com/p/v8/source/detail?r=25089

Added:
 /trunk/src/compiler/select-lowering.cc
 /trunk/src/compiler/select-lowering.h
 /trunk/test/cctest/compiler/test-graph-visualizer.cc
 /trunk/test/mjsunit/asm/uint32div.js
 /trunk/test/mjsunit/asm/uint32mod.js
 /trunk/test/unittests/compiler/register-allocator-unittest.cc
 /trunk/test/unittests/compiler/select-lowering-unittest.cc
Modified:
 /trunk/BUILD.gn
 /trunk/ChangeLog
 /trunk/include/v8.h
 /trunk/src/api.cc
 /trunk/src/arm64/macro-assembler-arm64-inl.h
 /trunk/src/arm64/macro-assembler-arm64.h
 /trunk/src/ast.h
 /trunk/src/builtins.cc
 /trunk/src/compiler/arm/code-generator-arm.cc
 /trunk/src/compiler/arm/instruction-codes-arm.h
 /trunk/src/compiler/arm/instruction-selector-arm.cc
 /trunk/src/compiler/arm/linkage-arm.cc
 /trunk/src/compiler/arm64/code-generator-arm64.cc
 /trunk/src/compiler/arm64/instruction-codes-arm64.h
 /trunk/src/compiler/arm64/instruction-selector-arm64.cc
 /trunk/src/compiler/arm64/linkage-arm64.cc
 /trunk/src/compiler/change-lowering.cc
 /trunk/src/compiler/generic-algorithm.h
 /trunk/src/compiler/generic-graph.h
 /trunk/src/compiler/graph-reducer.cc
 /trunk/src/compiler/graph-replay.cc
 /trunk/src/compiler/graph-replay.h
 /trunk/src/compiler/graph-visualizer.cc
 /trunk/src/compiler/ia32/code-generator-ia32.cc
 /trunk/src/compiler/ia32/instruction-codes-ia32.h
 /trunk/src/compiler/ia32/instruction-selector-ia32.cc
 /trunk/src/compiler/ia32/linkage-ia32.cc
 /trunk/src/compiler/instruction-selector.cc
 /trunk/src/compiler/instruction.cc
 /trunk/src/compiler/instruction.h
 /trunk/src/compiler/js-context-specialization.cc
 /trunk/src/compiler/js-generic-lowering.cc
 /trunk/src/compiler/js-generic-lowering.h
 /trunk/src/compiler/js-inlining.cc
 /trunk/src/compiler/linkage-impl.h
 /trunk/src/compiler/linkage.cc
 /trunk/src/compiler/linkage.h
 /trunk/src/compiler/machine-operator-reducer.cc
 /trunk/src/compiler/machine-operator-reducer.h
 /trunk/src/compiler/machine-operator.cc
 /trunk/src/compiler/machine-operator.h
 /trunk/src/compiler/mips/code-generator-mips.cc
 /trunk/src/compiler/mips/instruction-codes-mips.h
 /trunk/src/compiler/mips/instruction-selector-mips.cc
 /trunk/src/compiler/mips/linkage-mips.cc
 /trunk/src/compiler/opcodes.h
 /trunk/src/compiler/pipeline.cc
 /trunk/src/compiler/raw-machine-assembler.cc
 /trunk/src/compiler/raw-machine-assembler.h
 /trunk/src/compiler/register-allocator.h
 /trunk/src/compiler/scheduler.cc
 /trunk/src/compiler/simplified-lowering.cc
 /trunk/src/compiler/typer.cc
 /trunk/src/compiler/typer.h
 /trunk/src/compiler/verifier.cc
 /trunk/src/compiler/x64/code-generator-x64.cc
 /trunk/src/compiler/x64/instruction-codes-x64.h
 /trunk/src/compiler/x64/instruction-selector-x64.cc
 /trunk/src/compiler/x64/linkage-x64.cc
 /trunk/src/compiler.cc
 /trunk/src/debug.cc
 /trunk/src/debug.h
 /trunk/src/global-handles.cc
 /trunk/src/global-handles.h
 /trunk/src/globals.h
 /trunk/src/heap/mark-compact.cc
 /trunk/src/mips/macro-assembler-mips.cc
 /trunk/src/mips/macro-assembler-mips.h
 /trunk/src/objects-inl.h
 /trunk/src/objects.cc
 /trunk/src/objects.h
 /trunk/src/transitions-inl.h
 /trunk/src/transitions.cc
 /trunk/src/transitions.h
 /trunk/src/version.cc
 /trunk/src/x64/assembler-x64.cc
 /trunk/src/x64/assembler-x64.h
 /trunk/test/cctest/cctest.gyp
 /trunk/test/cctest/compiler/test-codegen-deopt.cc
 /trunk/test/cctest/compiler/test-instruction.cc
 /trunk/test/cctest/compiler/test-linkage.cc
 /trunk/test/cctest/compiler/test-node-algorithm.cc
 /trunk/test/cctest/compiler/test-run-machops.cc
 /trunk/test/cctest/test-api.cc
 /trunk/test/cctest/test-disasm-x64.cc
 /trunk/test/cctest/test-heap.cc
 /trunk/test/mjsunit/array-natives-elements.js
 /trunk/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
 /trunk/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
 /trunk/test/unittests/compiler/change-lowering-unittest.cc
 /trunk/test/unittests/compiler/instruction-selector-unittest.cc
 /trunk/test/unittests/compiler/machine-operator-reducer-unittest.cc
 /trunk/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
 /trunk/test/unittests/unittests.gyp
 /trunk/tools/gyp/v8.gyp
 /trunk/tools/push-to-trunk/auto_roll.py
 /trunk/tools/push-to-trunk/common_includes.py
 /trunk/tools/push-to-trunk/test_scripts.py

=======================================
--- /dev/null
+++ /trunk/src/compiler/select-lowering.cc      Tue Nov  4 01:04:58 2014 UTC
@@ -0,0 +1,54 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/select-lowering.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SelectLowering::SelectLowering(Graph* graph, CommonOperatorBuilder* common)
+    : common_(common),
+      graph_(graph),
+      merges_(Merges::key_compare(), Merges::allocator_type(graph->zone())) {}
+
+
+SelectLowering::~SelectLowering() {}
+
+
+Reduction SelectLowering::Reduce(Node* node) {
+  if (node->opcode() != IrOpcode::kSelect) return NoChange();
+  SelectParameters const p = SelectParametersOf(node->op());
+
+  Node* const cond = node->InputAt(0);
+  Node* const control = graph()->start();
+
+  // Check if we already have a diamond for this condition.
+  auto i = merges_.find(cond);
+  if (i == merges_.end()) {
+    // Create a new diamond for this condition and remember its merge node.
+    Node* branch = graph()->NewNode(common()->Branch(p.hint()), cond, control);
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    i = merges_.insert(std::make_pair(cond, merge)).first;
+  }
+
+  DCHECK_EQ(cond, i->first);
+
+  // Create a Phi hanging off the previously determined merge.
+  node->set_op(common()->Phi(p.type(), 2));
+  node->ReplaceInput(0, node->InputAt(1));
+  node->ReplaceInput(1, node->InputAt(2));
+  node->ReplaceInput(2, i->second);
+  return Changed(node);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
=======================================
--- /dev/null
+++ /trunk/src/compiler/select-lowering.h       Tue Nov  4 01:04:58 2014 UTC
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SELECT_LOWERING_H_
+#define V8_COMPILER_SELECT_LOWERING_H_
+
+#include <map>
+
+#include "src/compiler/graph-reducer.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+
+
+// Lowers Select nodes to diamonds.
+class SelectLowering FINAL : public Reducer {
+ public:
+  SelectLowering(Graph* graph, CommonOperatorBuilder* common);
+  ~SelectLowering();
+
+  Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  typedef std::map<Node*, Node*, std::less<Node*>,
+                   zone_allocator<std::pair<Node* const, Node*>>> Merges;
+
+  CommonOperatorBuilder* common() const { return common_; }
+  Graph* graph() const { return graph_; }
+
+  CommonOperatorBuilder* common_;
+  Graph* graph_;
+  Merges merges_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SELECT_LOWERING_H_
=======================================
--- /dev/null
+++ /trunk/test/cctest/compiler/test-graph-visualizer.cc Tue Nov 4 01:04:58 2014 UTC
@@ -0,0 +1,92 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/verifier.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(NodeWithNullInputReachableFromEnd) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+
+  Node* start = graph.NewNode(common.Start(0));
+  graph.SetStart(start);
+  Node* k = graph.NewNode(common.Int32Constant(0));
+  Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 1), k, start);
+  phi->ReplaceInput(0, NULL);
+  graph.SetEnd(phi);
+
+  OFStream os(stdout);
+  os << AsDOT(graph);
+  os << AsJSON(graph);
+}
+
+
+TEST(NodeWithNullControlReachableFromEnd) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+
+  Node* start = graph.NewNode(common.Start(0));
+  graph.SetStart(start);
+  Node* k = graph.NewNode(common.Int32Constant(0));
+  Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 1), k, start);
+  phi->ReplaceInput(1, NULL);
+  graph.SetEnd(phi);
+
+  OFStream os(stdout);
+  os << AsDOT(graph);
+  os << AsJSON(graph);
+}
+
+
+TEST(NodeWithNullInputReachableFromStart) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+
+  Node* start = graph.NewNode(common.Start(0));
+  graph.SetStart(start);
+  Node* k = graph.NewNode(common.Int32Constant(0));
+  Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 1), k, start);
+  phi->ReplaceInput(0, NULL);
+  graph.SetEnd(start);
+
+  OFStream os(stdout);
+  os << AsDOT(graph);
+  os << AsJSON(graph);
+}
+
+
+TEST(NodeWithNullControlReachableFromStart) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+
+  Node* start = graph.NewNode(common.Start(0));
+  graph.SetStart(start);
+  Node* merge = graph.NewNode(common.Merge(2), start, start);
+  merge->ReplaceInput(1, NULL);
+  graph.SetEnd(merge);
+
+  OFStream os(stdout);
+  os << AsDOT(graph);
+  os << AsJSON(graph);
+}
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/asm/uint32div.js        Tue Nov  4 01:04:58 2014 UTC
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+function Uint32Div(divisor) {
+  var name = "div_";
+  name += divisor;
+  var m = eval("function Module(stdlib, foreign, heap) {\n"
+      + " \"use asm\";\n"
+      + " function " + name + "(dividend) {\n"
+      + "  return ((dividend >>> 0) / " + divisor + ") >>> 0;\n"
+      + " }\n"
+      + " return { f: " + name + "}\n"
+      + "}; Module");
+  return m(stdlib, foreign, heap).f;
+}
+
+var divisors = [0, 1, 3, 4, 10, 42, 64, 100, 1024, 2147483647, 4294967295];
+for (var i in divisors) {
+  var divisor = divisors[i];
+  var mod = Uint32Div(divisor);
+  for (var dividend = 0; dividend < 4294967296; dividend += 3999773) {
+    assertEquals((dividend / divisor) >>> 0, mod(dividend));
+  }
+}
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/asm/uint32mod.js        Tue Nov  4 01:04:58 2014 UTC
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+function Uint32Mod(divisor) {
+  var name = "mod_";
+  name += divisor;
+  var m = eval("function Module(stdlib, foreign, heap) {\n"
+      + " \"use asm\";\n"
+      + " function " + name + "(dividend) {\n"
+      + "  return ((dividend >>> 0) % " + divisor + ") >>> 0;\n"
+      + " }\n"
+      + " return { f: " + name + "}\n"
+      + "}; Module");
+  return m(stdlib, foreign, heap).f;
+}
+
+var divisors = [0, 1, 3, 4, 10, 42, 64, 100, 1024, 2147483647, 4294967295];
+for (var i in divisors) {
+  var divisor = divisors[i];
+  var mod = Uint32Mod(divisor);
+  for (var dividend = 0; dividend < 4294967296; dividend += 3999773) {
+    assertEquals((dividend % divisor) >>> 0, mod(dividend));
+  }
+}
=======================================
--- /dev/null
+++ /trunk/test/unittests/compiler/register-allocator-unittest.cc Tue Nov 4 01:04:58 2014 UTC
@@ -0,0 +1,196 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/register-allocator.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef BasicBlock::RpoNumber Rpo;
+
+namespace {
+
+static const char* general_register_names_[kMaxGeneralRegisters];
+static const char* double_register_names_[kMaxDoubleRegisters];
+static char register_names_[10 * (kMaxGeneralRegisters + kMaxDoubleRegisters)];
+
+
+static const char* GeneralRegisterName(int allocation_index) {
+  return general_register_names_[allocation_index];
+}
+
+
+static const char* DoubleRegisterName(int allocation_index) {
+  return double_register_names_[allocation_index];
+}
+
+
+static void InitializeRegisterNames() {
+  char* loc = register_names_;
+  for (int i = 0; i < kMaxGeneralRegisters; ++i) {
+    general_register_names_[i] = loc;
+    loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
+    *loc++ = 0;
+  }
+  for (int i = 0; i < kMaxDoubleRegisters; ++i) {
+    double_register_names_[i] = loc;
+    loc += base::OS::SNPrintF(loc, 100, "fp_%d", i) + 1;
+    *loc++ = 0;
+  }
+}
+
+}  // namespace
+
+
+// TODO(dcarney): fake opcodes.
+// TODO(dcarney): fix printing of sequence w.r.t fake opcodes and registers.
+class RegisterAllocatorTest : public TestWithZone {
+ public:
+  static const int kDefaultNRegs = 4;
+
+  RegisterAllocatorTest()
+      : basic_blocks_(zone()),
+        instruction_blocks_(zone()),
+        current_block_(NULL) {
+    InitializeRegisterNames();
+    config_.num_general_registers_ = kDefaultNRegs;
+    config_.num_double_registers_ = kDefaultNRegs;
+    config_.num_aliased_double_registers_ = kDefaultNRegs;
+    config_.GeneralRegisterName = GeneralRegisterName;
+    config_.DoubleRegisterName = DoubleRegisterName;
+  }
+
+  Frame* frame() {
+    if (frame_.is_empty()) {
+      frame_.Reset(new Frame());
+    }
+    return frame_.get();
+  }
+
+  InstructionSequence* sequence() {
+    if (sequence_.is_empty()) {
+      sequence_.Reset(new InstructionSequence(zone(), &instruction_blocks_));
+    }
+    return sequence_.get();
+  }
+
+  RegisterAllocator* allocator() {
+    if (allocator_.is_empty()) {
+      allocator_.Reset(
+          new RegisterAllocator(config_, zone(), frame(), sequence()));
+    }
+    return allocator_.get();
+  }
+
+  InstructionBlock* StartBlock(Rpo loop_header = Rpo::Invalid(),
+                               Rpo loop_end = Rpo::Invalid()) {
+    CHECK(current_block_ == NULL);
+    BasicBlock::Id block_id =
+        BasicBlock::Id::FromSize(instruction_blocks_.size());
+    BasicBlock* basic_block = new (zone()) BasicBlock(zone(), block_id);
+    basic_block->set_rpo_number(block_id.ToInt());
+    basic_block->set_ao_number(block_id.ToInt());
+    if (loop_header.IsValid()) {
+      basic_block->set_loop_depth(1);
+      basic_block->set_loop_header(basic_blocks_[loop_header.ToSize()]);
+      basic_block->set_loop_end(loop_end.ToInt());
+    }
+    InstructionBlock* instruction_block =
+        new (zone()) InstructionBlock(zone(), basic_block);
+    basic_blocks_.push_back(basic_block);
+    instruction_blocks_.push_back(instruction_block);
+    current_block_ = instruction_block;
+    sequence()->StartBlock(basic_block);
+    return instruction_block;
+  }
+
+  void EndBlock() {
+    CHECK(current_block_ != NULL);
+    sequence()->EndBlock(basic_blocks_[current_block_->rpo_number().ToSize()]);
+    current_block_ = NULL;
+  }
+
+  void Allocate() {
+    if (FLAG_trace_alloc) {
+      OFStream os(stdout);
+      os << "Before: " << std::endl << *sequence() << std::endl;
+    }
+    allocator()->Allocate();
+    if (FLAG_trace_alloc) {
+      OFStream os(stdout);
+      os << "After: " << std::endl << *sequence() << std::endl;
+    }
+  }
+
+  int NewReg() { return sequence()->NextVirtualRegister(); }
+
+  int Parameter() {
+    // TODO(dcarney): assert parameters before other instructions.
+    int vreg = NewReg();
+    InstructionOperand* outputs[1]{
+        Unallocated(UnallocatedOperand::MUST_HAVE_REGISTER, vreg)};
+    sequence()->AddInstruction(
+        Instruction::New(zone(), kArchNop, 1, outputs, 0, NULL, 0, NULL));
+    return vreg;
+  }
+
+  void Return(int vreg) {
+    InstructionOperand* inputs[1]{
+        Unallocated(UnallocatedOperand::MUST_HAVE_REGISTER, vreg)};
+    sequence()->AddInstruction(
+        Instruction::New(zone(), kArchNop, 0, NULL, 1, inputs, 0, NULL));
+  }
+
+  Instruction* Emit(int output_vreg, int input_vreg_0, int input_vreg_1) {
+    InstructionOperand* outputs[1]{
+        Unallocated(UnallocatedOperand::MUST_HAVE_REGISTER, output_vreg)};
+    InstructionOperand* inputs[2]{
+        Unallocated(UnallocatedOperand::MUST_HAVE_REGISTER, input_vreg_0),
+        Unallocated(UnallocatedOperand::MUST_HAVE_REGISTER, input_vreg_1)};
+    Instruction* instruction =
+        Instruction::New(zone(), kArchNop, 1, outputs, 2, inputs, 0, NULL);
+    sequence()->AddInstruction(instruction);
+    return instruction;
+  }
+
+ private:
+  InstructionOperand* Unallocated(UnallocatedOperand::ExtendedPolicy policy,
+                                  int vreg) {
+    UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
+    op->set_virtual_register(vreg);
+    return op;
+  }
+
+  RegisterAllocator::Config config_;
+  ZoneVector<BasicBlock*> basic_blocks_;
+  InstructionBlocks instruction_blocks_;
+  InstructionBlock* current_block_;
+  SmartPointer<Frame> frame_;
+  SmartPointer<RegisterAllocator> allocator_;
+  SmartPointer<InstructionSequence> sequence_;
+};
+
+
+TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
+  StartBlock();
+  int a_reg = Parameter();
+  int b_reg = Parameter();
+  int c_reg = NewReg();
+  Instruction* res = Emit(c_reg, a_reg, b_reg);
+  Return(c_reg);
+  EndBlock();
+
+  Allocate();
+
+  ASSERT_TRUE(res->OutputAt(0)->IsRegister());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
=======================================
--- /dev/null
+++ /trunk/test/unittests/compiler/select-lowering-unittest.cc Tue Nov 4 01:04:58 2014 UTC
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/select-lowering.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SelectLoweringTest : public GraphTest {
+ public:
+  SelectLoweringTest() : GraphTest(5), lowering_(graph(), common()) {}
+
+ protected:
+  Reduction Reduce(Node* node) { return lowering_.Reduce(node); }
+
+ private:
+  SelectLowering lowering_;
+};
+
+
+TEST_F(SelectLoweringTest, SelectWithSameConditions) {
+  Node* const p0 = Parameter(0);
+  Node* const p1 = Parameter(1);
+  Node* const p2 = Parameter(2);
+  Node* const p3 = Parameter(3);
+  Node* const p4 = Parameter(4);
+
+  Capture<Node*> branch;
+  Capture<Node*> merge;
+  {
+    Reduction const r =
+        Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, p1, p2));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(
+        r.replacement(),
+        IsPhi(
+            kMachInt32, p1, p2,
+            AllOf(CaptureEq(&merge),
+                  IsMerge(IsIfTrue(CaptureEq(&branch)),
+                          IsIfFalse(AllOf(CaptureEq(&branch),
+ IsBranch(p0, graph()->start())))))));
+  }
+  {
+    Reduction const r =
+        Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, p3, p4));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsPhi(kMachInt32, p3, p4, CaptureEq(&merge)));
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
=======================================
--- /trunk/BUILD.gn     Sat Nov  1 22:28:42 2014 UTC
+++ /trunk/BUILD.gn     Tue Nov  4 01:04:58 2014 UTC
@@ -576,6 +576,8 @@
     "src/compiler/schedule.h",
     "src/compiler/scheduler.cc",
     "src/compiler/scheduler.h",
+    "src/compiler/select-lowering.cc",
+    "src/compiler/select-lowering.h",
     "src/compiler/simplified-lowering.cc",
     "src/compiler/simplified-lowering.h",
     "src/compiler/simplified-operator-reducer.cc",
=======================================
--- /trunk/ChangeLog    Sun Nov  2 20:02:29 2014 UTC
+++ /trunk/ChangeLog    Tue Nov  4 01:04:58 2014 UTC
@@ -1,3 +1,8 @@
+2014-11-04: Version 3.30.30
+
+        Performance and stability improvements on all platforms.
+
+
 2014-11-02: Version 3.30.27

         Performance and stability improvements on all platforms.
=======================================
--- /trunk/include/v8.h Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/include/v8.h Tue Nov  4 01:04:58 2014 UTC
@@ -516,6 +516,18 @@
       P* parameter,
       typename WeakCallbackData<S, P>::Callback callback);

+  // Phantom persistents work like weak persistents, except that the pointer to
+  // the object being collected is not available in the finalization callback.
+  // This enables the garbage collector to collect the object and any objects
+  // it references transitively in one GC cycle.
+  template <typename P>
+  V8_INLINE void SetPhantom(P* parameter,
+ typename WeakCallbackData<T, P>::Callback callback);
+
+  template <typename S, typename P>
+  V8_INLINE void SetPhantom(P* parameter,
+ typename WeakCallbackData<S, P>::Callback callback);
+
   template<typename P>
   V8_INLINE P* ClearWeak();

@@ -5477,14 +5489,15 @@
  private:
   V8();

+  enum WeakHandleType { PhantomHandle, NonphantomHandle };
+
   static internal::Object** GlobalizeReference(internal::Isolate* isolate,
                                                internal::Object** handle);
   static internal::Object** CopyPersistent(internal::Object** handle);
   static void DisposeGlobal(internal::Object** global_handle);
   typedef WeakCallbackData<Value, void>::Callback WeakCallback;
-  static void MakeWeak(internal::Object** global_handle,
-                       void* data,
-                       WeakCallback weak_callback);
+  static void MakeWeak(internal::Object** global_handle, void* data,
+                       WeakCallback weak_callback, WeakHandleType phantom);
   static void* ClearWeak(internal::Object** global_handle);
   static void Eternalize(Isolate* isolate,
                          Value* handle,
@@ -6355,9 +6368,8 @@
     typename WeakCallbackData<S, P>::Callback callback) {
   TYPE_CHECK(S, T);
   typedef typename WeakCallbackData<Value, void>::Callback Callback;
-  V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_),
-               parameter,
-               reinterpret_cast<Callback>(callback));
+  V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
+               reinterpret_cast<Callback>(callback), V8::NonphantomHandle);
 }


@@ -6371,7 +6383,26 @@


 template <class T>
-template<typename P>
+template <typename S, typename P>
+void PersistentBase<T>::SetPhantom(
+    P* parameter, typename WeakCallbackData<S, P>::Callback callback) {
+  TYPE_CHECK(S, T);
+  typedef typename WeakCallbackData<Value, void>::Callback Callback;
+  V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
+               reinterpret_cast<Callback>(callback), V8::PhantomHandle);
+}
+
+
+template <class T>
+template <typename P>
+void PersistentBase<T>::SetPhantom(
+    P* parameter, typename WeakCallbackData<T, P>::Callback callback) {
+  SetPhantom<T, P>(parameter, callback);
+}
+
+
+template <class T>
+template <typename P>
 P* PersistentBase<T>::ClearWeak() {
   return reinterpret_cast<P*>(
     V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_)));
=======================================
--- /trunk/src/api.cc   Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/api.cc   Tue Nov  4 01:04:58 2014 UTC
@@ -490,10 +490,12 @@
 }


-void V8::MakeWeak(i::Object** object,
-                  void* parameters,
-                  WeakCallback weak_callback) {
-  i::GlobalHandles::MakeWeak(object, parameters, weak_callback);
+void V8::MakeWeak(i::Object** object, void* parameters,
+ WeakCallback weak_callback, V8::WeakHandleType weak_type) {
+  i::GlobalHandles::PhantomState phantom;
+  phantom = weak_type == V8::PhantomHandle ? i::GlobalHandles::Phantom
+                                           : i::GlobalHandles::Nonphantom;
+  i::GlobalHandles::MakeWeak(object, parameters, weak_callback, phantom);
 }


=======================================
--- /trunk/src/arm64/macro-assembler-arm64-inl.h Thu Oct 23 08:44:45 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64-inl.h Tue Nov  4 01:04:58 2014 UTC
@@ -1124,6 +1124,14 @@
   DCHECK(!rd.IsZero());
   smulh(rd, rn, rm);
 }
+
+
+void MacroAssembler::Umull(const Register& rd, const Register& rn,
+                           const Register& rm) {
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  umaddl(rd, rn, rm, xzr);
+}


 void MacroAssembler::Stnp(const CPURegister& rt,
=======================================
--- /trunk/src/arm64/macro-assembler-arm64.h    Thu Oct 23 08:44:45 2014 UTC
+++ /trunk/src/arm64/macro-assembler-arm64.h    Tue Nov  4 01:04:58 2014 UTC
@@ -490,6 +490,7 @@
   inline void Smulh(const Register& rd,
                     const Register& rn,
                     const Register& rm);
+  inline void Umull(const Register& rd, const Register& rn, const Register& rm);
   inline void Stnp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst);
=======================================
--- /trunk/src/ast.h    Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/ast.h    Tue Nov  4 01:04:58 2014 UTC
@@ -2094,6 +2094,8 @@
                   Expression* right, int pos)
       : Expression(zone, pos),
         op_(static_cast<byte>(op)),
+        has_fixed_right_arg_(false),
+        fixed_right_arg_value_(0),
         left_(left),
         right_(right) {
     DCHECK(Token::IsBinaryOp(op));
=======================================
--- /trunk/src/builtins.cc      Wed Oct 29 14:24:00 2014 UTC
+++ /trunk/src/builtins.cc      Tue Nov  4 01:04:58 2014 UTC
@@ -200,6 +200,19 @@
   iter.Advance();
   return iter.IsAtEnd();
 }
+
+
+static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
+                                                     JSArray* receiver) {
+  if (!FLAG_clever_optimizations) return false;
+  DisallowHeapAllocation no_gc;
+  Context* native_context = heap->isolate()->context()->native_context();
+  JSObject* array_proto =
+      JSObject::cast(native_context->array_function()->prototype());
+  PrototypeIterator iter(heap->isolate(), receiver);
+  return iter.GetCurrent() == array_proto &&
+         ArrayPrototypeHasNoElements(heap, native_context, array_proto);
+}


 // Returns empty handle if not applicable.
@@ -213,13 +226,13 @@
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
// If there may be elements accessors in the prototype chain, the fast path
   // cannot be used if there arguments to add to the array.
- if (args != NULL && array->map()->DictionaryElementsInPrototypeChainOnly()) {
+  Heap* heap = isolate->heap();
+  if (args != NULL && !IsJSArrayFastElementMovingAllowed(heap, *array)) {
     return MaybeHandle<FixedArrayBase>();
   }
   if (array->map()->is_observed()) return MaybeHandle<FixedArrayBase>();
   if (!array->map()->is_extensible()) return MaybeHandle<FixedArrayBase>();
   Handle<FixedArrayBase> elms(array->elements(), isolate);
-  Heap* heap = isolate->heap();
   Map* map = elms->map();
   if (map == heap->fixed_array_map()) {
     if (args == NULL || array->HasFastObjectElements()) return elms;
@@ -262,19 +275,6 @@
   }
   return elms;
 }
-
-
-static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
-                                                     JSArray* receiver) {
-  if (!FLAG_clever_optimizations) return false;
-  DisallowHeapAllocation no_gc;
-  Context* native_context = heap->isolate()->context()->native_context();
-  JSObject* array_proto =
-      JSObject::cast(native_context->array_function()->prototype());
-  PrototypeIterator iter(heap->isolate(), receiver);
-  return iter.GetCurrent() == array_proto &&
-         ArrayPrototypeHasNoElements(heap, native_context, array_proto);
-}


 MUST_USE_RESULT static Object* CallJsBuiltin(
@@ -453,8 +453,7 @@
       EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
   Handle<FixedArrayBase> elms_obj;
   if (!maybe_elms_obj.ToHandle(&elms_obj) ||
-      !IsJSArrayFastElementMovingAllowed(heap,
- *Handle<JSArray>::cast(receiver))) {
+      !IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(*receiver))) {
     return CallJsBuiltin(isolate, "ArrayShift", args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -499,11 +498,9 @@
   Heap* heap = isolate->heap();
   Handle<Object> receiver = args.receiver();
   MaybeHandle<FixedArrayBase> maybe_elms_obj =
-      EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
+      EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
   Handle<FixedArrayBase> elms_obj;
-  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
-      !IsJSArrayFastElementMovingAllowed(heap,
- *Handle<JSArray>::cast(receiver))) {
+  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
     return CallJsBuiltin(isolate, "ArrayUnshift", args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -524,9 +521,6 @@

   Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);

-  JSObject::EnsureCanContainElements(array, &args, 1, to_add,
-                                     DONT_ALLOW_DOUBLE_ELEMENTS);
-
   if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
@@ -708,9 +702,7 @@
   MaybeHandle<FixedArrayBase> maybe_elms_obj =
       EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
   Handle<FixedArrayBase> elms_obj;
-  if (!maybe_elms_obj.ToHandle(&elms_obj) ||
-      !IsJSArrayFastElementMovingAllowed(heap,
- *Handle<JSArray>::cast(receiver))) {
+  if (!maybe_elms_obj.ToHandle(&elms_obj)) {
     return CallJsBuiltin(isolate, "ArraySplice", args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
=======================================
--- /trunk/src/compiler/arm/code-generator-arm.cc Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/arm/code-generator-arm.cc Tue Nov  4 01:04:58 2014 UTC
@@ -248,6 +248,10 @@
                i.InputRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
+    case kArmUmull:
+ __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+               i.InputRegister(1), i.OutputSBit());
+      break;
     case kArmSdiv: {
       CpuFeatureScope scope(masm(), SUDIV);
       __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
=======================================
--- /trunk/src/compiler/arm/instruction-codes-arm.h Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/arm/instruction-codes-arm.h Tue Nov  4 01:04:58 2014 UTC
@@ -28,6 +28,7 @@
   V(ArmMls)                        \
   V(ArmSmmul)                      \
   V(ArmSmmla)                      \
+  V(ArmUmull)                      \
   V(ArmSdiv)                       \
   V(ArmUdiv)                       \
   V(ArmMov)                        \
=======================================
--- /trunk/src/compiler/arm/instruction-selector-arm.cc Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/arm/instruction-selector-arm.cc Tue Nov  4 01:04:58 2014 UTC
@@ -86,6 +86,7 @@
       case kArmMls:
       case kArmSmmul:
       case kArmSmmla:
+      case kArmUmull:
       case kArmSdiv:
       case kArmUdiv:
       case kArmBfc:
@@ -656,6 +657,15 @@
Emit(kArmSmmul, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
        g.UseRegister(node->InputAt(1)));
 }
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  ArmOperandGenerator g(this);
+ InstructionOperand* outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
+  InstructionOperand* inputs[] = {g.UseRegister(node->InputAt(0)),
+                                  g.UseRegister(node->InputAt(1))};
+  Emit(kArmUmull, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}


 static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
=======================================
--- /trunk/src/compiler/arm/linkage-arm.cc      Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/arm/linkage-arm.cc      Tue Nov  4 01:04:58 2014 UTC
@@ -35,8 +35,9 @@

 typedef LinkageHelper<ArmLinkageHelperTraits> LH;

-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
-  return LH::GetJSCallDescriptor(zone, parameter_count);
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
 }


=======================================
--- /trunk/src/compiler/arm64/code-generator-arm64.cc Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/arm64/code-generator-arm64.cc Tue Nov  4 01:04:58 2014 UTC
@@ -170,7 +170,16 @@
     int64_t imm = i.InputOperand##width(1).immediate().value();             \
     __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
   }                                                                         \
-  } while (0);
+  } while (0)
+
+
+#define ASSEMBLE_TEST_AND_BRANCH(asm_instr, width)             \
+  do {                                                         \
+    bool fallthrough = IsNextInAssemblyOrder(i.InputRpo(3));   \
+    __ asm_instr(i.InputRegister##width(0), i.InputInt6(1),    \
+                 code_->GetLabel(i.InputRpo(2)));              \
+    if (!fallthrough) __ B(code_->GetLabel(i.InputRpo(3)));    \
+  } while (0)


// Assembles an instruction after register allocation, producing machine code.
@@ -267,6 +276,9 @@
     case kArm64Smull:
__ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
       break;
+    case kArm64Umull:
+ __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
     case kArm64Madd:
       __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputRegister(2));
@@ -418,6 +430,18 @@
       __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
               i.InputInt8(2));
       break;
+    case kArm64Tbz:
+      ASSEMBLE_TEST_AND_BRANCH(Tbz, 64);
+      break;
+    case kArm64Tbz32:
+      ASSEMBLE_TEST_AND_BRANCH(Tbz, 32);
+      break;
+    case kArm64Tbnz:
+      ASSEMBLE_TEST_AND_BRANCH(Tbnz, 64);
+      break;
+    case kArm64Tbnz32:
+      ASSEMBLE_TEST_AND_BRANCH(Tbnz, 32);
+      break;
     case kArm64Claim: {
       int words = MiscField::decode(instr->opcode());
       __ Claim(words);
=======================================
--- /trunk/src/compiler/arm64/instruction-codes-arm64.h Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-codes-arm64.h Tue Nov  4 01:04:58 2014 UTC
@@ -37,6 +37,7 @@
   V(Arm64Mul)                      \
   V(Arm64Mul32)                    \
   V(Arm64Smull)                    \
+  V(Arm64Umull)                    \
   V(Arm64Madd)                     \
   V(Arm64Madd32)                   \
   V(Arm64Msub)                     \
@@ -67,6 +68,10 @@
   V(Arm64Sxtw)                     \
   V(Arm64Ubfx)                     \
   V(Arm64Ubfx32)                   \
+  V(Arm64Tbz)                      \
+  V(Arm64Tbz32)                    \
+  V(Arm64Tbnz)                     \
+  V(Arm64Tbnz32)                   \
   V(Arm64Claim)                    \
   V(Arm64Poke)                     \
   V(Arm64PokePairZero)             \
=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64.cc Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64.cc Tue Nov  4 01:04:58 2014 UTC
@@ -776,6 +776,16 @@
        g.UseRegister(node->InputAt(1)));
Emit(kArm64Asr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
 }
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  // TODO(arm64): Can we do better here?
+  Arm64OperandGenerator g(this);
+  InstructionOperand* const smull_operand = g.TempRegister();
+  Emit(kArm64Umull, smull_operand, g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)));
+ Emit(kArm64Lsr, g.DefineAsRegister(node), smull_operand, g.TempImmediate(32));
+}


 void InstructionSelector::VisitInt32Div(Node* node) {
@@ -1194,12 +1204,44 @@
       case IrOpcode::kInt32Sub:
         return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
                                 kArithmeticImm);
-      case IrOpcode::kWord32And:
+      case IrOpcode::kWord32And: {
+        Int32BinopMatcher m(value);
+        if (m.right().HasValue() &&
+            (base::bits::CountPopulation32(m.right().Value()) == 1)) {
+          // If the mask has only one bit set, we can use tbz/tbnz.
+          DCHECK((cont.condition() == kEqual) ||
+                 (cont.condition() == kNotEqual));
+          ArchOpcode opcode =
+              (cont.condition() == kEqual) ? kArm64Tbz32 : kArm64Tbnz32;
+          Emit(opcode, NULL, g.UseRegister(m.left().node()),
+               g.TempImmediate(
+                   base::bits::CountTrailingZeros32(m.right().Value())),
+               g.Label(cont.true_block()),
+               g.Label(cont.false_block()))->MarkAsControl();
+          return;
+        }
         return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
                                 kLogical32Imm);
-      case IrOpcode::kWord64And:
+      }
+      case IrOpcode::kWord64And: {
+        Int64BinopMatcher m(value);
+        if (m.right().HasValue() &&
+            (base::bits::CountPopulation64(m.right().Value()) == 1)) {
+          // If the mask has only one bit set, we can use tbz/tbnz.
+          DCHECK((cont.condition() == kEqual) ||
+                 (cont.condition() == kNotEqual));
+          ArchOpcode opcode =
+              (cont.condition() == kEqual) ? kArm64Tbz : kArm64Tbnz;
+          Emit(opcode, NULL, g.UseRegister(m.left().node()),
+               g.TempImmediate(
+                   base::bits::CountTrailingZeros64(m.right().Value())),
+               g.Label(cont.true_block()),
+               g.Label(cont.false_block()))->MarkAsControl();
+          return;
+        }
         return VisitWordCompare(this, value, kArm64Tst, &cont, true,
                                 kLogical64Imm);
+      }
       default:
         break;
     }
=======================================
--- /trunk/src/compiler/arm64/linkage-arm64.cc  Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/arm64/linkage-arm64.cc  Tue Nov  4 01:04:58 2014 UTC
@@ -35,8 +35,9 @@

 typedef LinkageHelper<Arm64LinkageHelperTraits> LH;

-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
-  return LH::GetJSCallDescriptor(zone, parameter_count);
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
 }


=======================================
--- /trunk/src/compiler/change-lowering.cc      Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/change-lowering.cc      Tue Nov  4 01:04:58 2014 UTC
@@ -53,7 +53,7 @@
 Node* ChangeLowering::SmiMaxValueConstant() {
const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize() : SmiTagging<8>::SmiValueSize();
-  return jsgraph()->IntPtrConstant(
+  return jsgraph()->Int32Constant(
       -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
 }

=======================================
--- /trunk/src/compiler/generic-algorithm.h     Thu Aug 28 09:24:16 2014 UTC
+++ /trunk/src/compiler/generic-algorithm.h     Tue Nov  4 01:04:58 2014 UTC
@@ -23,16 +23,9 @@
 // by specifying custom traits.
 class GenericGraphVisit {
  public:
-  enum Control {
-    CONTINUE = 0x0,  // Continue depth-first normally
-    SKIP = 0x1,      // Skip this node and its successors
-    REENTER = 0x2,   // Allow reentering this node
-    DEFER = SKIP | REENTER
-  };
-
   // struct Visitor {
-  //   Control Pre(Traits::Node* current);
-  //   Control Post(Traits::Node* current);
+  //   void Pre(Traits::Node* current);
+  //   void Post(Traits::Node* current);
   //   void PreEdge(Traits::Node* from, int index, Traits::Node* to);
   //   void PostEdge(Traits::Node* from, int index, Traits::Node* to);
   // }
@@ -54,9 +47,8 @@
       DCHECK(id < Traits::max_id(graph));  // Must be a valid id.
       bool visit = !GetVisited(&visited, id);
       if (visit) {
-        Control control = visitor->Pre(current);
-        visit = !IsSkip(control);
-        if (!IsReenter(control)) SetVisited(&visited, id, true);
+        visitor->Pre(current);
+        SetVisited(&visited, id);
       }
Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
       Iterator end(Traits::end(current));
@@ -66,9 +58,8 @@
         NodeState top = stack.top();
         if (top.first == top.second) {
           if (visit) {
-            Control control = visitor->Post(post_order_node);
-            DCHECK(!IsSkip(control));
- SetVisited(&visited, post_order_node->id(), !IsReenter(control));
+            visitor->Post(post_order_node);
+            SetVisited(&visited, post_order_node->id());
           }
           stack.pop();
           if (stack.empty()) {
@@ -101,23 +92,19 @@

   template <class B, class S>
   struct NullNodeVisitor {
-    Control Pre(GenericNode<B, S>* node) { return CONTINUE; }
-    Control Post(GenericNode<B, S>* node) { return CONTINUE; }
+    void Pre(GenericNode<B, S>* node) {}
+    void Post(GenericNode<B, S>* node) {}
     void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
     void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
   };

  private:
-  static bool IsSkip(Control c) { return c & SKIP; }
-  static bool IsReenter(Control c) { return c & REENTER; }
-
-  // TODO(turbofan): resizing could be optionally templatized away.
-  static void SetVisited(BoolVector* visited, int id, bool value) {
+  static void SetVisited(BoolVector* visited, int id) {
     if (id >= static_cast<int>(visited->size())) {
       // Resize and set all values to unvisited.
       visited->resize((3 * id) / 2, false);
     }
-    visited->at(id) = value;
+    visited->at(id) = true;
   }

   static bool GetVisited(BoolVector* visited, int id) {
=======================================
--- /trunk/src/compiler/generic-graph.h Mon Sep 15 00:05:18 2014 UTC
+++ /trunk/src/compiler/generic-graph.h Tue Nov  4 01:04:58 2014 UTC
@@ -34,8 +34,8 @@
   explicit GenericGraph(Zone* zone)
       : GenericGraphBase(zone), start_(NULL), end_(NULL) {}

-  V* start() { return start_; }
-  V* end() { return end_; }
+  V* start() const { return start_; }
+  V* end() const { return end_; }

   void SetStart(V* start) { start_ = start; }
   void SetEnd(V* end) { end_ = end; }
=======================================
--- /trunk/src/compiler/graph-reducer.cc        Mon Sep 29 00:04:53 2014 UTC
+++ /trunk/src/compiler/graph-reducer.cc        Tue Nov  4 01:04:58 2014 UTC
@@ -72,10 +72,7 @@
 // A helper class to reuse the node traversal algorithm.
 struct GraphReducerVisitor FINAL : public NullNodeVisitor {
explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
-  GenericGraphVisit::Control Post(Node* node) {
-    reducer_->ReduceNode(node);
-    return GenericGraphVisit::CONTINUE;
-  }
+  void Post(Node* node) { reducer_->ReduceNode(node); }
   GraphReducer* reducer_;
 };

=======================================
--- /trunk/src/compiler/graph-replay.cc Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/graph-replay.cc Tue Nov  4 01:04:58 2014 UTC
@@ -24,14 +24,13 @@
 }


-GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) {
+void GraphReplayPrinter::Pre(Node* node) {
   PrintReplayOpCreator(node->op());
   PrintF("  Node* n%d = graph.NewNode(op", node->id());
   for (int i = 0; i < node->InputCount(); ++i) {
     PrintF(", nil");
   }
   PrintF("); USE(n%d);\n", node->id());
-  return GenericGraphVisit::CONTINUE;
 }


=======================================
--- /trunk/src/compiler/graph-replay.h  Fri Sep  5 00:05:05 2014 UTC
+++ /trunk/src/compiler/graph-replay.h  Tue Nov  4 01:04:58 2014 UTC
@@ -25,7 +25,7 @@
   static void PrintReplay(Graph* graph) {}
 #endif

-  GenericGraphVisit::Control Pre(Node* node);
+  void Pre(Node* node);
   void PostEdge(Node* from, int index, Node* to);

  private:
=======================================
--- /trunk/src/compiler/graph-visualizer.cc     Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/graph-visualizer.cc     Tue Nov  4 01:04:58 2014 UTC
@@ -8,7 +8,6 @@
 #include <string>

 #include "src/code-stubs.h"
-#include "src/compiler/generic-algorithm.h"
 #include "src/compiler/generic-node.h"
 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
@@ -26,9 +25,62 @@
 namespace v8 {
 namespace internal {
 namespace compiler {
+
+static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }

 #define DEAD_COLOR "#999999"

+class AllNodes {
+ public:
+  enum State { kDead, kGray, kLive };
+
+  AllNodes(Zone* local_zone, const Graph* graph)
+      : state(graph->NodeCount(), kDead, local_zone),
+        live(local_zone),
+        gray(local_zone) {
+    Node* end = graph->end();
+    state[end->id()] = kLive;
+    live.push_back(end);
+    // Find all live nodes reachable from end.
+    for (size_t i = 0; i < live.size(); i++) {
+      for (Node* const input : live[i]->inputs()) {
+        if (input == NULL) {
+          // TODO(titzer): print a warning.
+          continue;
+        }
+        if (input->id() >= graph->NodeCount()) {
+          // TODO(titzer): print a warning.
+          continue;
+        }
+        if (state[input->id()] != kLive) {
+          live.push_back(input);
+          state[input->id()] = kLive;
+        }
+      }
+    }
+
+    // Find all nodes that are not reachable from end that use live nodes.
+    for (size_t i = 0; i < live.size(); i++) {
+      for (Node* const use : live[i]->uses()) {
+        if (state[use->id()] == kDead) {
+          gray.push_back(use);
+          state[use->id()] = kGray;
+        }
+      }
+    }
+  }
+
+  bool IsLive(Node* node) {
+    return node != NULL && node->id() < static_cast<int>(state.size()) &&
+           state[node->id()] == kLive;
+  }
+
+  ZoneVector<State> state;
+  NodeVector live;
+  NodeVector gray;
+};
+
+
 class Escaped {
  public:
   explicit Escaped(const std::ostringstream& os,
@@ -56,105 +108,104 @@
   const char* const escaped_chars_;
 };

-class JSONGraphNodeWriter : public NullNodeVisitor {
+class JSONGraphNodeWriter {
  public:
-  JSONGraphNodeWriter(std::ostream& os, Zone* zone,
-                      const Graph* graph)  // NOLINT
-      : os_(os),
-        graph_(graph),
-        first_node_(true) {}
+  JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph)
+      : os_(os), all_(zone, graph), first_node_(true) {}

- void Print() { const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this); }
+  void Print() {
+    for (Node* const node : all_.live) PrintNode(node);
+  }

-  GenericGraphVisit::Control Pre(Node* node);
+  void PrintNode(Node* node) {
+    if (first_node_) {
+      first_node_ = false;
+    } else {
+      os_ << ",";
+    }
+    std::ostringstream label;
+    label << *node->op();
+ os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << Escaped(label, "\"")
+        << "\"";
+    IrOpcode::Value opcode = node->opcode();
+    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+ os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
+          << "]";
+ os_ << ",\"rankWithInput\":[" << NodeProperties::FirstControlIndex(node)
+          << "]";
+ } else if (opcode == IrOpcode::kIfTrue || opcode == IrOpcode::kIfFalse ||
+               opcode == IrOpcode::kLoop) {
+      os_ << ",\"rankInputs\":[" << NodeProperties::FirstControlIndex(node)
+          << "]";
+    }
+    if (opcode == IrOpcode::kBranch) {
+      os_ << ",\"rankInputs\":[0]";
+    }
+    os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
+    os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
+                                                               : "false");
+    os_ << "}";
+  }

  private:
   std::ostream& os_;
-  const Graph* const graph_;
+  AllNodes all_;
   bool first_node_;

   DISALLOW_COPY_AND_ASSIGN(JSONGraphNodeWriter);
 };


-GenericGraphVisit::Control JSONGraphNodeWriter::Pre(Node* node) {
-  if (first_node_) {
-    first_node_ = false;
-  } else {
-    os_ << ",";
+class JSONGraphEdgeWriter {
+ public:
+  JSONGraphEdgeWriter(std::ostream& os, Zone* zone, const Graph* graph)
+      : os_(os), all_(zone, graph), first_edge_(true) {}
+
+  void Print() {
+    for (Node* const node : all_.live) PrintEdges(node);
   }
-  std::ostringstream label;
-  label << *node->op();
- os_ << "{\"id\":" << node->id() << ",\"label\":\"" << Escaped(label, "\"")
-      << "\"";
-  IrOpcode::Value opcode = node->opcode();
-  if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
-    os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
-        << "]";
- os_ << ",\"rankWithInput\":[" << NodeProperties::FirstControlIndex(node)
-        << "]";
-  } else if (opcode == IrOpcode::kIfTrue || opcode == IrOpcode::kIfFalse ||
-             opcode == IrOpcode::kLoop) {
-    os_ << ",\"rankInputs\":[" << NodeProperties::FirstControlIndex(node)
-        << "]";
+
+  void PrintEdges(Node* node) {
+    for (int i = 0; i < node->InputCount(); i++) {
+      Node* input = node->InputAt(i);
+      if (input == NULL) continue;
+      PrintEdge(node, i, input);
+    }
   }
-  if (opcode == IrOpcode::kBranch) {
-    os_ << ",\"rankInputs\":[0]";
+
+  void PrintEdge(Node* from, int index, Node* to) {
+    if (first_edge_) {
+      first_edge_ = false;
+    } else {
+      os_ << ",";
+    }
+    const char* edge_type = NULL;
+    if (index < NodeProperties::FirstValueIndex(from)) {
+      edge_type = "unknown";
+    } else if (index < NodeProperties::FirstContextIndex(from)) {
+      edge_type = "value";
+    } else if (index < NodeProperties::FirstFrameStateIndex(from)) {
+      edge_type = "context";
+    } else if (index < NodeProperties::FirstEffectIndex(from)) {
+      edge_type = "frame-state";
+    } else if (index < NodeProperties::FirstControlIndex(from)) {
+      edge_type = "effect";
+    } else {
+      edge_type = "control";
+    }
+    os_ << "{\"source\":" << SafeId(to) << ",\"target\":" << SafeId(from)
+        << ",\"index\":" << index << ",\"type\":\"" << edge_type << "\"}";
   }
-  os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
-  os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
-                                                             : "false");
-  os_ << "}";
-  return GenericGraphVisit::CONTINUE;
-}
-
-
-class JSONGraphEdgeWriter : public NullNodeVisitor {
- public:
-  JSONGraphEdgeWriter(std::ostream& os, Zone* zone,
-                      const Graph* graph)  // NOLINT
-      : os_(os),
-        graph_(graph),
-        first_edge_(true) {}
-
- void Print() { const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this); }
-
-  void PreEdge(Node* from, int index, Node* to);

  private:
   std::ostream& os_;
-  const Graph* const graph_;
+  AllNodes all_;
   bool first_edge_;

   DISALLOW_COPY_AND_ASSIGN(JSONGraphEdgeWriter);
 };


-void JSONGraphEdgeWriter::PreEdge(Node* from, int index, Node* to) {
-  if (first_edge_) {
-    first_edge_ = false;
-  } else {
-    os_ << ",";
-  }
-  const char* edge_type = NULL;
-  if (index < NodeProperties::FirstValueIndex(from)) {
-    edge_type = "unknown";
-  } else if (index < NodeProperties::FirstContextIndex(from)) {
-    edge_type = "value";
-  } else if (index < NodeProperties::FirstFrameStateIndex(from)) {
-    edge_type = "context";
-  } else if (index < NodeProperties::FirstEffectIndex(from)) {
-    edge_type = "frame-state";
-  } else if (index < NodeProperties::FirstControlIndex(from)) {
-    edge_type = "effect";
-  } else {
-    edge_type = "control";
-  }
-  os_ << "{\"source\":" << to->id() << ",\"target\":" << from->id()
-      << ",\"index\":" << index << ",\"type\":\"" << edge_type << "\"}";
-}
-
-
 std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
   Zone tmp_zone(ad.graph.zone()->isolate());
   os << "{\"nodes\":[";
@@ -166,24 +217,20 @@
 }


-class GraphVisualizer : public NullNodeVisitor {
+class GraphVisualizer {
  public:
- GraphVisualizer(std::ostream& os, Zone* zone, const Graph* graph); // NOLINT
+  GraphVisualizer(std::ostream& os, Zone* zone, const Graph* graph)
+      : all_(zone, graph), os_(os) {}

   void Print();

-  GenericGraphVisit::Control Pre(Node* node);
+  void PrintNode(Node* node, bool gray);

  private:
-  void AnnotateNode(Node* node);
   void PrintEdge(Node::Edge edge);

-  Zone* zone_;
-  NodeSet all_nodes_;
-  NodeSet white_nodes_;
-  bool use_to_def_;
+  AllNodes all_;
   std::ostream& os_;
-  const Graph* const graph_;

   DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
 };
@@ -194,49 +241,22 @@
     return node;
   } else if (node->op()->ControlInputCount() == 1) {
     Node* control = NodeProperties::GetControlInput(node, 0);
-    return OperatorProperties::IsBasicBlockBegin(control->op()) ? control
-                                                                : NULL;
+    return control != NULL &&
+                   OperatorProperties::IsBasicBlockBegin(control->op())
+               ? control
+               : NULL;
   } else {
     return NULL;
   }
 }


-GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) {
-  if (all_nodes_.count(node) == 0) {
-    Node* control_cluster = GetControlCluster(node);
-    if (control_cluster != NULL) {
- os_ << " subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
-    }
-    os_ << "  ID" << node->id() << " [\n";
-    AnnotateNode(node);
-    os_ << "  ]\n";
-    if (control_cluster != NULL) os_ << "  }\n";
-    all_nodes_.insert(node);
-    if (use_to_def_) white_nodes_.insert(node);
-  }
-  return GenericGraphVisit::CONTINUE;
-}
-
-
-static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
-  if (from->opcode() == IrOpcode::kPhi ||
-      from->opcode() == IrOpcode::kEffectPhi) {
-    Node* control = NodeProperties::GetControlInput(from, 0);
- return control->opcode() != IrOpcode::kMerge && control != to && index != 0;
-  } else if (from->opcode() == IrOpcode::kLoop) {
-    return index != 0;
-  } else {
-    return false;
-  }
-}
-
-
-void GraphVisualizer::AnnotateNode(Node* node) {
-  if (!use_to_def_) {
-    os_ << "    style=\"filled\"\n"
-        << "    fillcolor=\"" DEAD_COLOR "\"\n";
+void GraphVisualizer::PrintNode(Node* node, bool gray) {
+  Node* control_cluster = GetControlCluster(node);
+  if (control_cluster != NULL) {
+ os_ << " subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
   }
+  os_ << "  ID" << SafeId(node) << " [\n";

   os_ << "    shape=\"record\"\n";
   switch (node->opcode()) {
@@ -254,31 +274,36 @@
     default:
       break;
   }
+
+  if (gray) {
+    os_ << "    style=\"filled\"\n"
+        << "    fillcolor=\"" DEAD_COLOR "\"\n";
+  }

   std::ostringstream label;
   label << *node->op();
-  os_ << "    label=\"{{#" << node->id() << ":" << Escaped(label);
+  os_ << "    label=\"{{#" << SafeId(node) << ":" << Escaped(label);

   InputIter i = node->inputs().begin();
   for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
-    os_ << "|<I" << i.index() << ">#" << (*i)->id();
+    os_ << "|<I" << i.index() << ">#" << SafeId(*i);
   }
   for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
        ++i, j--) {
-    os_ << "|<I" << i.index() << ">X #" << (*i)->id();
+    os_ << "|<I" << i.index() << ">X #" << SafeId(*i);
   }
   for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
        ++i, j--) {
-    os_ << "|<I" << i.index() << ">F #" << (*i)->id();
+    os_ << "|<I" << i.index() << ">F #" << SafeId(*i);
   }
   for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
-    os_ << "|<I" << i.index() << ">E #" << (*i)->id();
+    os_ << "|<I" << i.index() << ">E #" << SafeId(*i);
   }

-  if (!use_to_def_ || OperatorProperties::IsBasicBlockBegin(node->op()) ||
+  if (OperatorProperties::IsBasicBlockBegin(node->op()) ||
       GetControlCluster(node) == NULL) {
     for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
-      os_ << "|<I" << i.index() << ">C #" << (*i)->id();
+      os_ << "|<I" << i.index() << ">C #" << SafeId(*i);
     }
   }
   os_ << "}";
@@ -292,6 +317,23 @@
     os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
   }
   os_ << "}\"\n";
+
+  os_ << "  ]\n";
+  if (control_cluster != NULL) os_ << "  }\n";
+}
+
+
+static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
+  if (from->opcode() == IrOpcode::kPhi ||
+      from->opcode() == IrOpcode::kEffectPhi) {
+    Node* control = NodeProperties::GetControlInput(from, 0);
+    return control != NULL && control->opcode() != IrOpcode::kMerge &&
+           control != to && index != 0;
+  } else if (from->opcode() == IrOpcode::kLoop) {
+    return index != 0;
+  } else {
+    return false;
+  }
 }


@@ -299,21 +341,23 @@
   Node* from = edge.from();
   int index = edge.index();
   Node* to = edge.to();
+
+  if (!all_.IsLive(to)) return;  // skip inputs that point to dead or NULL.
+
   bool unconstrained = IsLikelyBackEdge(from, index, to);
-  os_ << "  ID" << from->id();
-  if (all_nodes_.count(to) == 0) {
-    os_ << ":I" << index << ":n -> DEAD_INPUT";
-  } else if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
-             GetControlCluster(from) == NULL ||
-             (from->op()->ControlInputCount() > 0 &&
-              NodeProperties::GetControlInput(from) != to)) {
-    os_ << ":I" << index << ":n -> ID" << to->id() << ":s"
+  os_ << "  ID" << SafeId(from);
+
+  if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
+      GetControlCluster(from) == NULL ||
+      (from->op()->ControlInputCount() > 0 &&
+       NodeProperties::GetControlInput(from) != to)) {
+    os_ << ":I" << index << ":n -> ID" << SafeId(to) << ":s"
         << "[" << (unconstrained ? "constraint=false, " : "")
         << (NodeProperties::IsControlEdge(edge) ? "style=bold, " : "")
         << (NodeProperties::IsEffectEdge(edge) ? "style=dotted, " : "")
<< (NodeProperties::IsContextEdge(edge) ? "style=dashed, " : "") << "]";
   } else {
-    os_ << " -> ID" << to->id() << ":s [color=transparent, "
+    os_ << " -> ID" << SafeId(to) << ":s [color=transparent, "
         << (unconstrained ? "constraint=false, " : "")
<< (NodeProperties::IsControlEdge(edge) ? "style=dashed, " : "") << "]";
   }
@@ -332,43 +376,17 @@
       << "  \n";

   // Make sure all nodes have been output before writing out the edges.
-  use_to_def_ = true;
-  // TODO(svenpanne) Remove the need for the const_casts.
-  const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this);
-  white_nodes_.insert(const_cast<Graph*>(graph_)->start());
-
-  // Visit all uses of white nodes.
-  use_to_def_ = false;
-  GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
-      const_cast<Graph*>(graph_), zone_, white_nodes_.begin(),
-      white_nodes_.end(), this);
-
-  os_ << "  DEAD_INPUT [\n"
-      << "    style=\"filled\" \n"
-      << "    fillcolor=\"" DEAD_COLOR "\"\n"
-      << "  ]\n"
-      << "\n";
+  for (Node* const node : all_.live) PrintNode(node, false);
+  for (Node* const node : all_.gray) PrintNode(node, true);

   // With all the nodes written, add the edges.
-  for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) {
-    Node::Inputs inputs = (*i)->inputs();
-    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-         ++iter) {
-      PrintEdge(iter.edge());
+  for (Node* const node : all_.live) {
+    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+      PrintEdge(i.edge());
     }
   }
   os_ << "}\n";
 }
-
-
-GraphVisualizer::GraphVisualizer(std::ostream& os, Zone* zone,
-                                 const Graph* graph)  // NOLINT
-    : zone_(zone),
-      all_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
-      white_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
-      use_to_def_(true),
-      os_(os),
-      graph_(graph) {}


 std::ostream& operator<<(std::ostream& os, const AsDOT& ad) {
@@ -487,7 +505,7 @@
 }


-void GraphC1Visualizer::PrintNodeId(Node* n) { os_ << "n" << n->id(); }
+void GraphC1Visualizer::PrintNodeId(Node* n) { os_ << "n" << SafeId(n); }


 void GraphC1Visualizer::PrintNode(Node* n) {
=======================================
--- /trunk/src/compiler/ia32/code-generator-ia32.cc Mon Nov  3 09:39:57 2014 UTC
+++ /trunk/src/compiler/ia32/code-generator-ia32.cc Tue Nov  4 01:04:58 2014 UTC
@@ -247,6 +247,9 @@
     case kIA32ImulHigh:
       __ imul(i.InputRegister(1));
       break;
+    case kIA32UmulHigh:
+      __ mul(i.InputRegister(1));
+      break;
     case kIA32Idiv:
       __ cdq();
       __ idiv(i.InputOperand(1));
=======================================
--- /trunk/src/compiler/ia32/instruction-codes-ia32.h Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/ia32/instruction-codes-ia32.h Tue Nov  4 01:04:58 2014 UTC
@@ -21,6 +21,7 @@
   V(IA32Sub)                       \
   V(IA32Imul)                      \
   V(IA32ImulHigh)                  \
+  V(IA32UmulHigh)                  \
   V(IA32Idiv)                      \
   V(IA32Udiv)                      \
   V(IA32Not)                       \
=======================================
--- /trunk/src/compiler/ia32/instruction-selector-ia32.cc Mon Nov  3 14:02:45 2014 UTC
+++ /trunk/src/compiler/ia32/instruction-selector-ia32.cc Tue Nov  4 01:04:58 2014 UTC
@@ -646,22 +646,43 @@
 }


-void InstructionSelector::VisitInt32MulHigh(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32ImulHigh, g.DefineAsFixed(node, edx),
-       g.UseFixed(node->InputAt(0), eax),
-       g.UseUniqueRegister(node->InputAt(1)));
+namespace {
+
+void VisitMulHigh(InstructionSelector* selector, Node* node,
+                  ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUniqueRegister(node->InputAt(1)));
 }


-static inline void VisitDiv(InstructionSelector* selector, Node* node,
-                            ArchOpcode opcode) {
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   IA32OperandGenerator g(selector);
   InstructionOperand* temps[] = {g.TempRegister(edx)};
-  size_t temp_count = arraysize(temps);
   selector->Emit(opcode, g.DefineAsFixed(node, eax),
                  g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)), temp_count, temps);
+                 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)));
+}
+
+}  // namespace
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  VisitMulHigh(this, node, kIA32ImulHigh);
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  VisitMulHigh(this, node, kIA32UmulHigh);
 }


@@ -673,15 +694,6 @@
 void InstructionSelector::VisitUint32Div(Node* node) {
   VisitDiv(this, node, kIA32Udiv);
 }
-
-
-static inline void VisitMod(InstructionSelector* selector, Node* node,
-                            ArchOpcode opcode) {
-  IA32OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, edx),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)));
-}


 void InstructionSelector::VisitInt32Mod(Node* node) {
=======================================
--- /trunk/src/compiler/ia32/linkage-ia32.cc    Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/ia32/linkage-ia32.cc    Tue Nov  4 01:04:58 2014 UTC
@@ -30,8 +30,9 @@

 typedef LinkageHelper<IA32LinkageHelperTraits> LH;

-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
-  return LH::GetJSCallDescriptor(zone, parameter_count);
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
 }


=======================================
--- /trunk/src/compiler/instruction-selector.cc Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/instruction-selector.cc Tue Nov  4 01:04:58 2014 UTC
@@ -738,6 +738,8 @@
       return VisitUint32LessThanOrEqual(node);
     case IrOpcode::kUint32Mod:
       return VisitUint32Mod(node);
+    case IrOpcode::kUint32MulHigh:
+      return VisitUint32MulHigh(node);
     case IrOpcode::kInt64Add:
       return VisitInt64Add(node);
     case IrOpcode::kInt64Sub:
=======================================
--- /trunk/src/compiler/instruction.cc  Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/instruction.cc  Tue Nov  4 01:04:58 2014 UTC
@@ -373,9 +373,11 @@
 }


-static void InitializeInstructionBlocks(Zone* zone, const Schedule* schedule,
-                                        InstructionBlocks* blocks) {
-  DCHECK(blocks->size() == schedule->rpo_order()->size());
+InstructionBlocks* InstructionSequence::InstructionBlocksFor(
+    Zone* zone, const Schedule* schedule) {
+  InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
+  new (blocks) InstructionBlocks(
+      static_cast<int>(schedule->rpo_order()->size()), NULL, zone);
   size_t rpo_number = 0;
for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
        it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
@@ -383,14 +385,14 @@
     DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number);
     (*blocks)[rpo_number] = new (zone) InstructionBlock(zone, *it);
   }
+  return blocks;
 }


 InstructionSequence::InstructionSequence(Zone* instruction_zone,
-                                         const Schedule* schedule)
+ InstructionBlocks* instruction_blocks)
     : zone_(instruction_zone),
- instruction_blocks_(static_cast<int>(schedule->rpo_order()->size()), NULL,
-                          zone()),
+      instruction_blocks_(instruction_blocks),
       constants_(ConstantMap::key_compare(),
                  ConstantMap::allocator_type(zone())),
       immediates_(zone()),
@@ -399,9 +401,7 @@
       pointer_maps_(zone()),
       doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
       references_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
-      deoptimization_entries_(zone()) {
-  InitializeInstructionBlocks(zone(), schedule, &instruction_blocks_);
-}
+      deoptimization_entries_(zone()) {}


 Label* InstructionSequence::GetLabel(BasicBlock::RpoNumber rpo) {
@@ -461,8 +461,8 @@
     DCHECK_LE(0, instruction_index);
     Instruction* instruction = InstructionAt(instruction_index--);
     if (instruction->IsBlockStart()) {
-      return instruction_blocks_
- [BlockStartInstruction::cast(instruction)->rpo_number().ToSize()];
+      return instruction_blocks_->at(
+          BlockStartInstruction::cast(instruction)->rpo_number().ToSize());
     }
   }
 }
=======================================
--- /trunk/src/compiler/instruction.h   Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/instruction.h   Tue Nov  4 01:04:58 2014 UTC
@@ -809,11 +809,13 @@
   inline bool IsLoopHeader() const { return loop_end_.IsValid(); }

   typedef ZoneVector<BasicBlock::RpoNumber> Predecessors;
+  Predecessors& predecessors() { return predecessors_; }
   const Predecessors& predecessors() const { return predecessors_; }
   size_t PredecessorCount() const { return predecessors_.size(); }
   size_t PredecessorIndexOf(BasicBlock::RpoNumber rpo_number) const;

   typedef ZoneVector<BasicBlock::RpoNumber> Successors;
+  Successors& successors() { return successors_; }
   const Successors& successors() const { return successors_; }
   size_t SuccessorCount() const { return successors_.size(); }

@@ -825,12 +827,12 @@
   Successors successors_;
   Predecessors predecessors_;
   PhiInstructions phis_;
-  BasicBlock::Id id_;
-  BasicBlock::RpoNumber ao_number_;  // Assembly order number.
+  const BasicBlock::Id id_;
+  const BasicBlock::RpoNumber ao_number_;  // Assembly order number.
   // TODO(dcarney): probably dont't need this.
-  BasicBlock::RpoNumber rpo_number_;
-  BasicBlock::RpoNumber loop_header_;
-  BasicBlock::RpoNumber loop_end_;
+  const BasicBlock::RpoNumber rpo_number_;
+  const BasicBlock::RpoNumber loop_header_;
+  const BasicBlock::RpoNumber loop_end_;
   int32_t code_start_;   // start index of arch-specific code.
   int32_t code_end_;     // end index of arch-specific code.
   const bool deferred_;  // Block contains deferred code.
@@ -850,31 +852,34 @@
 // TODO(titzer): s/IsDouble/IsFloat64/
 class InstructionSequence FINAL {
  public:
-  InstructionSequence(Zone* zone, const Schedule* schedule);
+  static InstructionBlocks* InstructionBlocksFor(Zone* zone,
+                                                 const Schedule* schedule);
+
+  InstructionSequence(Zone* zone, InstructionBlocks* instruction_blocks);

   int NextVirtualRegister() { return next_virtual_register_++; }
   int VirtualRegisterCount() const { return next_virtual_register_; }

   const InstructionBlocks& instruction_blocks() const {
-    return instruction_blocks_;
+    return *instruction_blocks_;
   }

   int InstructionBlockCount() const {
-    return static_cast<int>(instruction_blocks_.size());
+    return static_cast<int>(instruction_blocks_->size());
   }

   InstructionBlock* InstructionBlockAt(BasicBlock::RpoNumber rpo_number) {
-    return instruction_blocks_[rpo_number.ToSize()];
+    return instruction_blocks_->at(rpo_number.ToSize());
   }

   int LastLoopInstructionIndex(const InstructionBlock* block) {
-    return instruction_blocks_[block->loop_end().ToSize() - 1]
+    return instruction_blocks_->at(block->loop_end().ToSize() - 1)
         ->last_instruction_index();
   }

   const InstructionBlock* InstructionBlockAt(
       BasicBlock::RpoNumber rpo_number) const {
-    return instruction_blocks_[rpo_number.ToSize()];
+    return instruction_blocks_->at(rpo_number.ToSize());
   }

   const InstructionBlock* GetInstructionBlock(int instruction_index) const;
@@ -961,7 +966,7 @@
typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;

   Zone* const zone_;
-  InstructionBlocks instruction_blocks_;
+  InstructionBlocks* const instruction_blocks_;
   ConstantMap constants_;
   ConstantDeque immediates_;
   InstructionDeque instructions_;
=======================================
--- /trunk/src/compiler/js-context-specialization.cc    Wed Oct  1 00:05:35 2014 UTC
+++ /trunk/src/compiler/js-context-specialization.cc    Tue Nov  4 01:04:58 2014 UTC
@@ -20,7 +20,7 @@
   explicit ContextSpecializationVisitor(JSContextSpecializer* spec)
       : spec_(spec) {}

-  GenericGraphVisit::Control Post(Node* node) {
+  void Post(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kJSLoadContext: {
         Reduction r = spec_->ReduceJSLoadContext(node);
@@ -41,7 +41,6 @@
       default:
         break;
     }
-    return GenericGraphVisit::CONTINUE;
   }

  private:
=======================================
--- /trunk/src/compiler/js-generic-lowering.cc  Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/js-generic-lowering.cc  Tue Nov  4 01:04:58 2014 UTC
@@ -9,6 +9,7 @@
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/unique.h"

@@ -64,7 +65,6 @@
     Lower##x(node);     \
     break;
     DECLARE_CASE(Branch)
-    DECLARE_CASE(Select)
     JS_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
     default:
@@ -240,23 +240,6 @@
     node->ReplaceInput(0, test);
   }
 }
-
-
-void JSGenericLowering::LowerSelect(Node* node) {
-  // TODO(bmeurer): This should probably be moved into a separate file.
-  SelectParameters const& p = SelectParametersOf(node->op());
- Node* branch = graph()->NewNode(common()->Branch(p.hint()), node->InputAt(0),
-                                  graph()->start());
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = node->InputAt(1);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = node->InputAt(2);
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  node->set_op(common()->Phi(p.type(), 2));
-  node->ReplaceInput(0, vtrue);
-  node->ReplaceInput(1, vfalse);
-  node->ReplaceInput(2, merge);
-}


 void JSGenericLowering::LowerJSUnaryNot(Node* node) {
@@ -403,11 +386,53 @@
   PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
   PatchOperator(node, common()->Call(desc));
 }
+
+
+bool JSGenericLowering::TryLowerDirectJSCall(Node* node) {
+  // Lower to a direct call to a constant JSFunction if legal.
+  const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+  int arg_count = static_cast<int>(p.arity() - 2);
+
+  // Check the function is a constant and is really a JSFunction.
+  HeapObjectMatcher<Object> function_const(node->InputAt(0));
+  if (!function_const.HasValue()) return false;  // not a constant.
+  Handle<Object> func = function_const.Value().handle();
+  if (!func->IsJSFunction()) return false;  // not a function.
+  Handle<JSFunction> function = Handle<JSFunction>::cast(func);
+ if (arg_count != function->shared()->formal_parameter_count()) return false;
+
+  // Check the receiver doesn't need to be wrapped.
+  Node* receiver = node->InputAt(1);
+  if (!NodeProperties::IsTyped(receiver)) return false;
+  Type* ok_receiver = Type::Union(Type::Undefined(), Type::Receiver(), zone());
+  if (!NodeProperties::GetBounds(receiver).upper->Is(ok_receiver)) return false;
+
+  int index = NodeProperties::FirstContextIndex(node);
+
+  // TODO(titzer): total hack to share function context constants.
+  // Remove this when the JSGraph canonicalizes heap constants.
+  Node* context = node->InputAt(index);
+  HeapObjectMatcher<Context> context_const(context);
+  if (!context_const.HasValue() ||
+      *(context_const.Value().handle()) != function->context()) {
+ context = jsgraph()->HeapConstant(Handle<Context>(function->context()));
+  }
+  node->ReplaceInput(index, context);
+  CallDescriptor* desc = linkage()->GetJSCallDescriptor(
+      1 + arg_count, jsgraph()->zone(), FlagsForNode(node));
+  PatchOperator(node, common()->Call(desc));
+  return true;
+}


 void JSGenericLowering::LowerJSCallFunction(Node* node) {
+  // Fast case: call function directly.
+  if (TryLowerDirectJSCall(node)) return;
+
+  // General case: CallFunctionStub.
   const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
- CallFunctionStub stub(isolate(), static_cast<int>(p.arity() - 2), p.flags());
+  int arg_count = static_cast<int>(p.arity() - 2);
+  CallFunctionStub stub(isolate(), arg_count, p.flags());
   CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
   CallDescriptor* desc = linkage()->GetStubCallDescriptor(
       d, static_cast<int>(p.arity() - 1), FlagsForNode(node));
=======================================
--- /trunk/src/compiler/js-generic-lowering.h   Thu Oct  2 00:05:29 2014 UTC
+++ /trunk/src/compiler/js-generic-lowering.h   Tue Nov  4 01:04:58 2014 UTC
@@ -66,6 +66,8 @@
   CompilationInfo* info_;
   JSGraph* jsgraph_;
   Linkage* linkage_;
+
+  bool TryLowerDirectJSCall(Node* node);
 };

 }  // namespace compiler
=======================================
--- /trunk/src/compiler/js-inlining.cc  Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/js-inlining.cc  Tue Nov  4 01:04:58 2014 UTC
@@ -32,7 +32,7 @@
  public:
   explicit InlinerVisitor(JSInliner* inliner) : inliner_(inliner) {}

-  GenericGraphVisit::Control Post(Node* node) {
+  void Post(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kJSCallFunction:
         inliner_->TryInlineJSCall(node);
@@ -45,7 +45,6 @@
       default:
         break;
     }
-    return GenericGraphVisit::CONTINUE;
   }

  private:
@@ -167,7 +166,7 @@
sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "sentinel", 0, 0,
                      0, 0, 0, 0) {}

-  GenericGraphVisit::Control Post(Node* original) {
+  void Post(Node* original) {
     NodeVector inputs(temp_zone_);
     for (InputIter it = original->inputs().begin();
          it != original->inputs().end(); ++it) {
@@ -180,7 +179,6 @@
target_graph_->NewNode(original->op(), static_cast<int>(inputs.size()),
                                (inputs.empty() ? NULL : &inputs.front()));
     copies_[original->id()] = copy;
-    return GenericGraphVisit::CONTINUE;
   }

   Node* GetCopy(Node* original) {
=======================================
--- /trunk/src/compiler/linkage-impl.h  Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/linkage-impl.h  Tue Nov  4 01:04:58 2014 UTC
@@ -28,8 +28,8 @@
   }

   // TODO(turbofan): cache call descriptors for JSFunction calls.
-  static CallDescriptor* GetJSCallDescriptor(Zone* zone,
-                                             int js_parameter_count) {
+ static CallDescriptor* GetJSCallDescriptor(Zone* zone, int js_parameter_count,
+                                             CallDescriptor::Flags flags) {
     const size_t return_count = 1;
     const size_t context_count = 1;
     const size_t parameter_count = js_parameter_count + context_count;
@@ -56,16 +56,17 @@
     // The target for JS function calls is the JSFunction object.
     MachineType target_type = kMachAnyTagged;
     LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
-    return new (zone) CallDescriptor(CallDescriptor::kCallJSFunction,  // kind
-                                     target_type,         // target MachineType
-                                     target_loc,          // target location
-                                     types.Build(),       // machine_sig
-                                     locations.Build(),   // location_sig
-                                     js_parameter_count,  // js_parameter_count
-                                     Operator::kNoProperties,  // properties
-                                     kNoCalleeSaved,      // callee-saved
-                                     CallDescriptor::kNeedsFrameState,  // flags
-                                     "js-call");
+    return new (zone) CallDescriptor(     // --
+        CallDescriptor::kCallJSFunction,  // kind
+        target_type,                      // target MachineType
+        target_loc,                       // target location
+        types.Build(),                    // machine_sig
+        locations.Build(),                // location_sig
+        js_parameter_count,               // js_parameter_count
+        Operator::kNoProperties,          // properties
+        kNoCalleeSaved,                   // callee-saved
+        flags,                            // flags
+        "js-call");
   }


@@ -116,16 +117,17 @@
     // The target for runtime calls is a code object.
     MachineType target_type = kMachAnyTagged;
     LinkageLocation target_loc = LinkageLocation::AnyRegister();
-    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
-                                     target_type,         // target MachineType
-                                     target_loc,          // target location
-                                     types.Build(),       // machine_sig
-                                     locations.Build(),   // location_sig
-                                     js_parameter_count,  // js_parameter_count
-                                     properties,          // properties
-                                     kNoCalleeSaved,      // callee-saved
-                                     flags,               // flags
-                                     function->name);     // debug name
+    return new (zone) CallDescriptor(     // --
+        CallDescriptor::kCallCodeObject,  // kind
+        target_type,                      // target MachineType
+        target_loc,                       // target location
+        types.Build(),                    // machine_sig
+        locations.Build(),                // location_sig
+        js_parameter_count,               // js_parameter_count
+        properties,                       // properties
+        kNoCalleeSaved,                   // callee-saved
+        flags,                            // flags
+        function->name);                  // debug name
   }


@@ -169,16 +171,17 @@
     // The target for stub calls is a code object.
     MachineType target_type = kMachAnyTagged;
     LinkageLocation target_loc = LinkageLocation::AnyRegister();
-    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
-                                     target_type,         // target MachineType
-                                     target_loc,          // target location
-                                     types.Build(),       // machine_sig
-                                     locations.Build(),   // location_sig
-                                     js_parameter_count,  // js_parameter_count
-                                     Operator::kNoProperties,  // properties
-                                     kNoCalleeSaved,  // callee-saved registers
-                                     flags,           // flags
- descriptor.DebugName(zone->isolate()));
+    return new (zone) CallDescriptor(     // --
+        CallDescriptor::kCallCodeObject,  // kind
+        target_type,                      // target MachineType
+        target_loc,                       // target location
+        types.Build(),                    // machine_sig
+        locations.Build(),                // location_sig
+        js_parameter_count,               // js_parameter_count
+        Operator::kNoProperties,          // properties
+        kNoCalleeSaved,                   // callee-saved registers
+        flags,                            // flags
+        descriptor.DebugName(zone->isolate()));
   }

   static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
@@ -201,15 +204,16 @@
     // The target for C calls is always an address (i.e. machine pointer).
     MachineType target_type = kMachPtr;
     LinkageLocation target_loc = LinkageLocation::AnyRegister();
-    return new (zone) CallDescriptor(CallDescriptor::kCallAddress,  // kind
-                                     target_type,        // target MachineType
-                                     target_loc,         // target location
-                                     msig,               // machine_sig
-                                     locations.Build(),  // location_sig
-                                     0,                  // js_parameter_count
-                                     Operator::kNoProperties,  // properties
-                                     LinkageTraits::CCalleeSaveRegisters(),
-                                     CallDescriptor::kNoFlags, "c-call");
+    return new (zone) CallDescriptor(  // --
+        CallDescriptor::kCallAddress,  // kind
+        target_type,                   // target MachineType
+        target_loc,                    // target location
+        msig,                          // machine_sig
+        locations.Build(),             // location_sig
+        0,                             // js_parameter_count
+        Operator::kNoProperties,       // properties
+        LinkageTraits::CCalleeSaveRegisters(), CallDescriptor::kNoFlags,
+        "c-call");
   }

   static LinkageLocation regloc(Register reg) {
=======================================
--- /trunk/src/compiler/linkage.cc      Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/linkage.cc      Tue Nov  4 01:04:58 2014 UTC
@@ -42,13 +42,15 @@
   if (info->function() != NULL) {
// If we already have the function literal, use the number of parameters
     // plus the receiver.
-    return GetJSCallDescriptor(1 + info->function()->parameter_count(), zone);
+    return GetJSCallDescriptor(1 + info->function()->parameter_count(), zone,
+                               CallDescriptor::kNoFlags);
   }
   if (!info->closure().is_null()) {
     // If we are compiling a JS function, use a JS call descriptor,
     // plus the receiver.
     SharedFunctionInfo* shared = info->closure()->shared();
-    return GetJSCallDescriptor(1 + shared->formal_parameter_count(), zone);
+    return GetJSCallDescriptor(1 + shared->formal_parameter_count(), zone,
+                               CallDescriptor::kNoFlags);
   }
   if (info->code_stub() != NULL) {
     // Use the code stub interface descriptor.
@@ -88,8 +90,9 @@
 }


-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) const {
-  return GetJSCallDescriptor(parameter_count, zone_);
+CallDescriptor* Linkage::GetJSCallDescriptor(
+    int parameter_count, CallDescriptor::Flags flags) const {
+  return GetJSCallDescriptor(parameter_count, zone_, flags);
 }


@@ -217,7 +220,8 @@
 // Provide unimplemented methods on unsupported architectures, to at least link.
 //==============================================================================
 #if !V8_TURBOFAN_BACKEND
-CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
   UNIMPLEMENTED();
   return NULL;
 }
=======================================
--- /trunk/src/compiler/linkage.h       Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/linkage.h       Tue Nov  4 01:04:58 2014 UTC
@@ -174,8 +174,10 @@
   // The call descriptor for this compilation unit describes the locations
   // of incoming parameters and the outgoing return value(s).
   CallDescriptor* GetIncomingDescriptor() const { return incoming_; }
-  CallDescriptor* GetJSCallDescriptor(int parameter_count) const;
- static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
+  CallDescriptor* GetJSCallDescriptor(int parameter_count,
+                                      CallDescriptor::Flags flags) const;
+ static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags);
   CallDescriptor* GetRuntimeCallDescriptor(
       Runtime::FunctionId function, int parameter_count,
       Operator::Properties properties) const;
=======================================
--- /trunk/src/compiler/machine-operator-reducer.cc     Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.cc     Tue Nov  4 01:04:58 2014 UTC
@@ -49,11 +49,13 @@


 Node* MachineOperatorReducer::Word32Sar(Node* lhs, uint32_t rhs) {
+  if (rhs == 0) return lhs;
return graph()->NewNode(machine()->Word32Sar(), lhs, Uint32Constant(rhs));
 }


 Node* MachineOperatorReducer::Word32Shr(Node* lhs, uint32_t rhs) {
+  if (rhs == 0) return lhs;
return graph()->NewNode(machine()->Word32Shr(), lhs, Uint32Constant(rhs));
 }

@@ -78,7 +80,8 @@
 }


-Node* MachineOperatorReducer::TruncatingDiv(Node* dividend, int32_t divisor) {
+Node* MachineOperatorReducer::Int32Div(Node* dividend, int32_t divisor) {
+  DCHECK_NE(0, divisor);
   DCHECK_NE(std::numeric_limits<int32_t>::min(), divisor);
   base::MagicNumbersForDivision<uint32_t> const mag =
       base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
@@ -89,10 +92,25 @@
   } else if (divisor < 0 && bit_cast<int32_t>(mag.multiplier) > 0) {
     quotient = Int32Sub(quotient, dividend);
   }
-  if (mag.shift) {
-    quotient = Word32Sar(quotient, mag.shift);
+  return Int32Add(Word32Sar(quotient, mag.shift), Word32Shr(dividend, 31));
+}
+
+
+Node* MachineOperatorReducer::Uint32Div(Node* dividend, uint32_t divisor) {
+  DCHECK_LT(0, divisor);
+  base::MagicNumbersForDivision<uint32_t> const mag =
+      base::UnsignedDivisionByConstant(bit_cast<uint32_t>(divisor));
+  Node* quotient = graph()->NewNode(machine()->Uint32MulHigh(), dividend,
+                                    Uint32Constant(mag.multiplier));
+  if (mag.add) {
+    DCHECK_LE(1, mag.shift);
+    quotient = Word32Shr(
+        Int32Add(Word32Shr(Int32Sub(dividend, quotient), 1), quotient),
+        mag.shift - 1);
+  } else {
+    quotient = Word32Shr(quotient, mag.shift);
   }
-  return Int32Add(quotient, Word32Shr(dividend, 31));
+  return quotient;
 }


@@ -572,7 +590,7 @@
       quotient = Int32Add(Word32Shr(quotient, 32u - shift), dividend);
       quotient = Word32Sar(quotient, shift);
     } else {
-      quotient = TruncatingDiv(quotient, Abs(divisor));
+      quotient = Int32Div(quotient, Abs(divisor));
     }
     if (divisor < 0) {
       node->set_op(machine()->Int32Sub());
@@ -600,11 +618,17 @@
     Node* const zero = Int32Constant(0);
     return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
   }
-  if (m.right().IsPowerOf2()) {  // x / 2^n => x >> n
-    node->TrimInputCount(2);
-    node->set_op(machine()->Word32Shr());
- node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
-    return Changed(node);
+  if (m.right().HasValue()) {
+    Node* const dividend = m.left().node();
+    uint32_t const divisor = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(divisor)) {  // x / 2^n => x >> n
+      node->set_op(machine()->Word32Shr());
+ node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
+      node->TrimInputCount(2);
+      return Changed(node);
+    } else {
+      return Replace(Uint32Div(dividend, divisor));
+    }
   }
   return NoChange();
 }
@@ -630,8 +654,8 @@

       Node* check =
           graph()->NewNode(machine()->Int32LessThan(), dividend, zero);
-      Node* branch =
-          graph()->NewNode(common()->Branch(), check, graph()->start());
+      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                      check, graph()->start());

       Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* neg = Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask));
@@ -640,17 +664,20 @@
       Node* pos = Word32And(dividend, mask);

Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-      Node* phi =
-          graph()->NewNode(common()->Phi(kMachInt32, 2), neg, pos, merge);
-      return Replace(phi);
+
+      DCHECK_EQ(3, node->InputCount());
+      node->set_op(common()->Phi(kMachInt32, 2));
+      node->ReplaceInput(0, neg);
+      node->ReplaceInput(1, pos);
+      node->ReplaceInput(2, merge);
     } else {
-      Node* quotient = TruncatingDiv(dividend, divisor);
+      Node* quotient = Int32Div(dividend, divisor);
       node->set_op(machine()->Int32Sub());
       DCHECK_EQ(dividend, node->InputAt(0));
       node->ReplaceInput(1, Int32Mul(quotient, Int32Constant(divisor)));
       node->TrimInputCount(2);
-      return Changed(node);
     }
+    return Changed(node);
   }
   return NoChange();
 }
@@ -666,10 +693,19 @@
     return ReplaceUint32(
         base::bits::UnsignedMod32(m.left().Value(), m.right().Value()));
   }
-  if (m.right().IsPowerOf2()) {  // x % 2^n => x & 2^n-1
+  if (m.right().HasValue()) {
+    Node* const dividend = m.left().node();
+    uint32_t const divisor = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(divisor)) {  // x % 2^n => x & 2^n-1
+      node->set_op(machine()->Word32And());
+      node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
+    } else {
+      Node* quotient = Uint32Div(dividend, divisor);
+      node->set_op(machine()->Int32Sub());
+      DCHECK_EQ(dividend, node->InputAt(0));
+      node->ReplaceInput(1, Int32Mul(quotient, Uint32Constant(divisor)));
+    }
     node->TrimInputCount(2);
-    node->set_op(machine()->Word32And());
-    node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
     return Changed(node);
   }
   return NoChange();
=======================================
--- /trunk/src/compiler/machine-operator-reducer.h      Mon Oct 27 07:54:22 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.h      Tue Nov  4 01:04:58 2014 UTC
@@ -41,8 +41,8 @@
   Node* Int32Add(Node* lhs, Node* rhs);
   Node* Int32Sub(Node* lhs, Node* rhs);
   Node* Int32Mul(Node* lhs, Node* rhs);
-
-  Node* TruncatingDiv(Node* dividend, int32_t divisor);
+  Node* Int32Div(Node* dividend, int32_t divisor);
+  Node* Uint32Div(Node* dividend, uint32_t divisor);

   Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
   Reduction ReplaceFloat32(volatile float value) {
=======================================
--- /trunk/src/compiler/machine-operator.cc     Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/machine-operator.cc     Tue Nov  4 01:04:58 2014 UTC
@@ -84,6 +84,7 @@
   V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
   V(Uint32Mod, Operator::kNoProperties, 2, 1, 1)                       \
+  V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Int64Sub, Operator::kNoProperties, 2, 0, 1)                        \
   V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
=======================================
--- /trunk/src/compiler/machine-operator.h      Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/compiler/machine-operator.h      Tue Nov  4 01:04:58 2014 UTC
@@ -108,6 +108,7 @@
   const Operator* Uint32LessThan();
   const Operator* Uint32LessThanOrEqual();
   const Operator* Uint32Mod();
+  const Operator* Uint32MulHigh();
   bool Int32DivIsSafe() const { return flags_ & kInt32DivIsSafe; }
   bool Int32ModIsSafe() const { return flags_ & kInt32ModIsSafe; }
   bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
=======================================
--- /trunk/src/compiler/mips/code-generator-mips.cc     Tue Oct 28 09:48:49 2014 UTC
+++ /trunk/src/compiler/mips/code-generator-mips.cc     Tue Nov  4 01:04:58 2014 UTC
@@ -188,6 +188,9 @@
     case kMipsMulHigh:
       __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
+    case kMipsMulHighU:
+      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
     case kMipsDiv:
       __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
=======================================
--- /trunk/src/compiler/mips/instruction-codes-mips.h   Wed Oct 15 13:35:30 2014 UTC
+++ /trunk/src/compiler/mips/instruction-codes-mips.h   Tue Nov  4 01:04:58 2014 UTC
@@ -18,6 +18,7 @@
   V(MipsSubOvf)                    \
   V(MipsMul)                       \
   V(MipsMulHigh)                   \
+  V(MipsMulHighU)                  \
   V(MipsDiv)                       \
   V(MipsDivU)                      \
   V(MipsMod)                       \
=======================================
***Additional files exist in this changeset.***

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to