Revision: 25122
Author:   [email protected]
Date:     Wed Nov  5 01:04:48 2014 UTC
Log: Version 3.30.33 (based on 6bee6dcebc3033d4665a8069020302ce5018522d)

`1..isPrototypeOf.call(null)` should return false, not throw TypeError (issue 3483).

Refactor ObjectGetOwnPropertyKeys to accept bitmask rather than boolean (issue 3549).

Add debug mirror support for ES6 Map/Set iterators (Chromium issue 427868).

Performance and stability improvements on all platforms.
https://code.google.com/p/v8/source/detail?r=25122

Added:
 /trunk/src/compiler/diamond.h
 /trunk/src/compiler/register-configuration.cc
 /trunk/src/compiler/register-configuration.h
 /trunk/test/mjsunit/es6/mirror-iterators.js
 /trunk/test/mjsunit/regress/regress-3483.js
 /trunk/test/unittests/compiler/diamond-unittest.cc
Modified:
 /trunk/BUILD.gn
 /trunk/ChangeLog
 /trunk/include/v8.h
 /trunk/src/api.cc
 /trunk/src/arm/interface-descriptors-arm.cc
 /trunk/src/arm64/interface-descriptors-arm64.cc
 /trunk/src/bootstrapper.cc
 /trunk/src/code-factory.cc
 /trunk/src/code-factory.h
 /trunk/src/code-stubs-hydrogen.cc
 /trunk/src/code-stubs.cc
 /trunk/src/code-stubs.h
 /trunk/src/compiler/change-lowering.cc
 /trunk/src/compiler/graph-visualizer.cc
 /trunk/src/compiler/instruction.cc
 /trunk/src/compiler/instruction.h
 /trunk/src/compiler/js-builtin-reducer.cc
 /trunk/src/compiler/js-intrinsic-builder.cc
 /trunk/src/compiler/machine-operator-reducer.cc
 /trunk/src/compiler/mips/code-generator-mips.cc
 /trunk/src/compiler/mips/instruction-selector-mips.cc
 /trunk/src/compiler/pipeline.cc
 /trunk/src/compiler/register-allocator.cc
 /trunk/src/compiler/register-allocator.h
 /trunk/src/compiler/select-lowering.cc
 /trunk/src/compiler/simplified-lowering.cc
 /trunk/src/factory.cc
 /trunk/src/factory.h
 /trunk/src/flag-definitions.h
 /trunk/src/heap/mark-compact.cc
 /trunk/src/heap/spaces.cc
 /trunk/src/ia32/interface-descriptors-ia32.cc
 /trunk/src/ic/ic.cc
 /trunk/src/interface-descriptors.h
 /trunk/src/isolate.cc
 /trunk/src/isolate.h
 /trunk/src/mips/interface-descriptors-mips.cc
 /trunk/src/mips/macro-assembler-mips.cc
 /trunk/src/mips64/interface-descriptors-mips64.cc
 /trunk/src/mirror-debugger.js
 /trunk/src/objects-inl.h
 /trunk/src/objects.h
 /trunk/src/runtime/runtime-collections.cc
 /trunk/src/runtime/runtime.h
 /trunk/src/symbol.js
 /trunk/src/transitions-inl.h
 /trunk/src/transitions.cc
 /trunk/src/transitions.h
 /trunk/src/v8natives.js
 /trunk/src/version.cc
 /trunk/src/x64/interface-descriptors-x64.cc
 /trunk/test/cctest/compiler/test-codegen-deopt.cc
 /trunk/test/cctest/compiler/test-control-reducer.cc
 /trunk/test/cctest/compiler/test-gap-resolver.cc
 /trunk/test/cctest/test-api.cc
 /trunk/test/mjsunit/asm/uint32div.js
 /trunk/test/mjsunit/function-call.js
 /trunk/test/mjsunit/harmony/classes.js
 /trunk/test/mjsunit/harmony/regress/regress-343928.js
 /trunk/test/mjsunit/mjsunit.status
 /trunk/test/mjsunit/polymorph-arrays.js
 /trunk/test/test262-es6/test262-es6.status
 /trunk/test/unittests/compiler/change-lowering-unittest.cc
 /trunk/test/unittests/compiler/instruction-selector-unittest.cc
 /trunk/test/unittests/compiler/node-test-utils.cc
 /trunk/test/unittests/compiler/node-test-utils.h
 /trunk/test/unittests/compiler/register-allocator-unittest.cc
 /trunk/test/unittests/unittests.gyp
 /trunk/tools/gyp/v8.gyp

=======================================
--- /dev/null
+++ /trunk/src/compiler/diamond.h       Wed Nov  5 01:04:48 2014 UTC
@@ -0,0 +1,85 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DIAMOND_H_
+#define V8_COMPILER_DIAMOND_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper to make it easier to build diamond-shaped control patterns.
+struct Diamond {
+  Graph* graph;
+  CommonOperatorBuilder* common;
+  Node* branch;
+  Node* if_true;
+  Node* if_false;
+  Node* merge;
+
+  Diamond(Graph* g, CommonOperatorBuilder* b, Node* cond,
+          BranchHint hint = BranchHint::kNone) {
+    graph = g;
+    common = b;
+    branch = graph->NewNode(common->Branch(hint), cond, graph->start());
+    if_true = graph->NewNode(common->IfTrue(), branch);
+    if_false = graph->NewNode(common->IfFalse(), branch);
+    merge = graph->NewNode(common->Merge(2), if_true, if_false);
+  }
+
+  // Place {this} after {that} in control flow order.
+  void Chain(Diamond& that) { branch->ReplaceInput(1, that.merge); }
+
+  // Place {this} after {that} in control flow order.
+  void Chain(Node* that) { branch->ReplaceInput(1, that); }
+
+  // Nest {this} into either the if_true or if_false branch of {that}.
+  void Nest(Diamond& that, bool if_true) {
+    if (if_true) {
+      branch->ReplaceInput(1, that.if_true);
+      that.merge->ReplaceInput(0, merge);
+    } else {
+      branch->ReplaceInput(1, that.if_false);
+      that.merge->ReplaceInput(1, merge);
+    }
+  }
+
+  Node* Phi(MachineType machine_type, Node* tv, Node* fv) {
+    return graph->NewNode(common->Phi(machine_type, 2), tv, fv, merge);
+  }
+
+  Node* EffectPhi(Node* tv, Node* fv) {
+    return graph->NewNode(common->EffectPhi(2), tv, fv, merge);
+  }
+
+  void OverwriteWithPhi(Node* node, MachineType machine_type, Node* tv,
+                        Node* fv) {
+    DCHECK(node->InputCount() >= 3);
+    node->set_op(common->Phi(machine_type, 2));
+    node->ReplaceInput(0, tv);
+    node->ReplaceInput(1, fv);
+    node->ReplaceInput(2, merge);
+    node->TrimInputCount(3);
+  }
+
+  void OverwriteWithEffectPhi(Node* node, Node* te, Node* fe) {
+    DCHECK(node->InputCount() >= 3);
+    node->set_op(common->EffectPhi(2));
+    node->ReplaceInput(0, te);
+    node->ReplaceInput(1, fe);
+    node->ReplaceInput(2, merge);
+    node->TrimInputCount(3);
+  }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_DIAMOND_H_
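
For orientation, the intended usage of this helper matches the rewrites in change-lowering.cc and js-builtin-reducer.cc further down in this patch. A minimal sketch (the function name is hypothetical; the graph()/common()/jsgraph() accessors are assumed to exist on the surrounding lowering class, as they do in ChangeLowering):

    // Sketch: lower a bit value to true/false by building a two-way diamond.
    Node* LowerBitToBool(Node* value, Node* control) {
      Diamond d(graph(), common(), value);   // branch + if_true/if_false + merge
      d.Chain(control);                      // place the diamond after {control}
      // Select TrueConstant on the true path, FalseConstant on the false path.
      return d.Phi(static_cast<MachineType>(kTypeBool | kRepTagged),
                   jsgraph()->TrueConstant(), jsgraph()->FalseConstant());
    }
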
=======================================
--- /dev/null
+++ /trunk/src/compiler/register-configuration.cc Wed Nov 5 01:04:48 2014 UTC
@@ -0,0 +1,68 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/register-configuration.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
+              Register::kNumRegisters);
+STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
+              DoubleRegister::kMaxNumRegisters);
+
+class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
+ public:
+  ArchDefaultRegisterConfiguration()
+      : RegisterConfiguration(Register::kMaxNumAllocatableRegisters,
+                              DoubleRegister::kMaxNumAllocatableRegisters,
+ DoubleRegister::NumAllocatableAliasedRegisters(),
+                              general_register_name_table_,
+                              double_register_name_table_) {
+    DCHECK_EQ(Register::kMaxNumAllocatableRegisters,
+              Register::NumAllocatableRegisters());
+    for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+ general_register_name_table_[i] = Register::AllocationIndexToString(i);
+    }
+    for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+      double_register_name_table_[i] =
+          DoubleRegister::AllocationIndexToString(i);
+    }
+  }
+
+  const char*
+      general_register_name_table_[Register::kMaxNumAllocatableRegisters];
+  const char*
+ double_register_name_table_[DoubleRegister::kMaxNumAllocatableRegisters];
+};
+
+
+static base::LazyInstance<ArchDefaultRegisterConfiguration>::type
+    kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+
+const RegisterConfiguration* RegisterConfiguration::ArchDefault() {
+  return &kDefaultRegisterConfiguration.Get();
+}
+
+RegisterConfiguration::RegisterConfiguration(
+    int num_general_registers, int num_double_registers,
+ int num_aliased_double_registers, const char* const* general_register_names,
+    const char* const* double_register_names)
+    : num_general_registers_(num_general_registers),
+      num_double_registers_(num_double_registers),
+      num_aliased_double_registers_(num_aliased_double_registers),
+      general_register_names_(general_register_names),
+      double_register_names_(double_register_names) {}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
=======================================
--- /dev/null
+++ /trunk/src/compiler/register-configuration.h Wed Nov 5 01:04:48 2014 UTC
@@ -0,0 +1,56 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REGISTER_CONFIGURATION_H_
+#define V8_COMPILER_REGISTER_CONFIGURATION_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An architecture independent representation of the sets of registers available
+// for instruction creation.
+class RegisterConfiguration {
+ public:
+  // Architecture independent maxes.
+  static const int kMaxGeneralRegisters = 32;
+  static const int kMaxDoubleRegisters = 32;
+
+  static const RegisterConfiguration* ArchDefault();
+
+ RegisterConfiguration(int num_general_registers, int num_double_registers,
+                        int num_aliased_double_registers,
+                        const char* const* general_register_name,
+                        const char* const* double_register_name);
+
+  int num_general_registers() const { return num_general_registers_; }
+  int num_double_registers() const { return num_double_registers_; }
+  int num_aliased_double_registers() const {
+    return num_aliased_double_registers_;
+  }
+
+  const char* general_register_name(int offset) const {
+    DCHECK(offset >= 0 && offset < kMaxGeneralRegisters);
+    return general_register_names_[offset];
+  }
+  const char* double_register_name(int offset) const {
+    DCHECK(offset >= 0 && offset < kMaxDoubleRegisters);
+    return double_register_names_[offset];
+  }
+
+ private:
+  const int num_general_registers_;
+  const int num_double_registers_;
+  const int num_aliased_double_registers_;
+  const char* const* general_register_names_;
+  const char* const* double_register_names_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_REGISTER_CONFIGURATION_H_
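
A short sketch of how the configuration is meant to be queried by printing code (compare the PrintableInstructionOperand changes in instruction.cc below); the function name and the use of std::printf are illustrative only:

    #include <cstdio>

    // Sketch: enumerate allocator-visible registers by name without depending
    // on the macro-assembler's Register/DoubleRegister types at the call site.
    void DumpRegisterNames() {
      const RegisterConfiguration* config = RegisterConfiguration::ArchDefault();
      for (int i = 0; i < config->num_general_registers(); ++i) {
        std::printf("gp%d = %s\n", i, config->general_register_name(i));
      }
      for (int i = 0; i < config->num_double_registers(); ++i) {
        std::printf("fp%d = %s\n", i, config->double_register_name(i));
      }
    }
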
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/es6/mirror-iterators.js Wed Nov  5 01:04:48 2014 UTC
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+// Test the mirror object for collection iterators.
+
+function testIteratorMirror(iter, offset, expected) {
+  while (offset-- > 0) iter.next();
+
+  var mirror = debug.MakeMirror(iter);
+  assertTrue(mirror.isIterator());
+
+  var preview = mirror.preview();
+  assertArrayEquals(expected, preview);
+
+  // Check that iterator has not changed after taking preview.
+  var values = [];
+  for (var i of iter) values.push(i);
+  assertArrayEquals(expected, values);
+}
+
+var o1 = { foo: 1 };
+var o2 = { foo: 2 };
+
+var map = new Map();
+map.set(41, 42);
+map.set(o1, o2);
+
+testIteratorMirror(map.keys(), 0, [41, o1]);
+testIteratorMirror(map.values(), 0, [42, o2]);
+testIteratorMirror(map.entries(), 0, [[41, 42], [o1, o2]]);
+
+testIteratorMirror(map.keys(), 1, [o1]);
+testIteratorMirror(map.values(), 1, [o2]);
+testIteratorMirror(map.entries(), 1, [[o1, o2]]);
+
+testIteratorMirror(map.keys(), 2, []);
+testIteratorMirror(map.values(), 2, []);
+testIteratorMirror(map.entries(), 2, []);
+
+var set = new Set();
+set.add(41);
+set.add(42);
+set.add(o1);
+set.add(o2);
+
+testIteratorMirror(set.keys(), 0, [41, 42, o1, o2]);
+testIteratorMirror(set.values(), 0, [41, 42, o1, o2]);
+testIteratorMirror(set.entries(), 0, [[41, 41], [42, 42], [o1, o1], [o2, o2]]);
+
+testIteratorMirror(set.keys(), 1, [42, o1, o2]);
+testIteratorMirror(set.values(), 1, [42, o1, o2]);
+testIteratorMirror(set.entries(), 1, [[42, 42], [o1, o1], [o2, o2]]);
+
+testIteratorMirror(set.keys(), 3, [o2]);
+testIteratorMirror(set.values(), 3, [o2]);
+testIteratorMirror(set.entries(), 3, [[o2, o2]]);
+
+testIteratorMirror(set.keys(), 5, []);
+testIteratorMirror(set.values(), 5, []);
+testIteratorMirror(set.entries(), 5, []);
=======================================
--- /dev/null
+++ /trunk/test/mjsunit/regress/regress-3483.js Wed Nov  5 01:04:48 2014 UTC
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertFalse(Object.prototype.isPrototypeOf.call());
+assertFalse(Object.prototype.isPrototypeOf.call(null, 1));
+assertFalse(Object.prototype.isPrototypeOf.call(undefined, 1));
=======================================
--- /dev/null
+++ /trunk/test/unittests/compiler/diamond-unittest.cc Wed Nov 5 01:04:48 2014 UTC
@@ -0,0 +1,161 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class DiamondTest : public GraphTest {
+ public:
+  DiamondTest() : GraphTest(5) {}
+};
+
+
+TEST_F(DiamondTest, SimpleDiamond) {
+  Node* p = Parameter(0);
+  Diamond d(graph(), common(), p);
+  EXPECT_THAT(d.branch, IsBranch(p, graph()->start()));
+  EXPECT_THAT(d.if_true, IsIfTrue(d.branch));
+  EXPECT_THAT(d.if_false, IsIfFalse(d.branch));
+  EXPECT_THAT(d.merge, IsMerge(d.if_true, d.if_false));
+}
+
+
+TEST_F(DiamondTest, DiamondChainDiamond) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Diamond d0(graph(), common(), p0);
+  Diamond d1(graph(), common(), p1);
+  d1.Chain(d0);
+  EXPECT_THAT(d1.branch, IsBranch(p1, d0.merge));
+  EXPECT_THAT(d0.branch, IsBranch(p0, graph()->start()));
+}
+
+
+TEST_F(DiamondTest, DiamondChainNode) {
+  Node* p1 = Parameter(1);
+  Diamond d1(graph(), common(), p1);
+  Node* other = graph()->NewNode(common()->Merge(0));
+  d1.Chain(other);
+  EXPECT_THAT(d1.branch, IsBranch(p1, other));
+}
+
+
+TEST_F(DiamondTest, DiamondChainN) {
+ Node* params[5] = {Parameter(0), Parameter(1), Parameter(2), Parameter(3),
+                     Parameter(4)};
+  Diamond d[5] = {Diamond(graph(), common(), params[0]),
+                  Diamond(graph(), common(), params[1]),
+                  Diamond(graph(), common(), params[2]),
+                  Diamond(graph(), common(), params[3]),
+                  Diamond(graph(), common(), params[4])};
+
+  for (int i = 1; i < 5; i++) {
+    d[i].Chain(d[i - 1]);
+    EXPECT_THAT(d[i].branch, IsBranch(params[i], d[i - 1].merge));
+  }
+}
+
+
+TEST_F(DiamondTest, DiamondNested_true) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Diamond d0(graph(), common(), p0);
+  Diamond d1(graph(), common(), p1);
+
+  d1.Nest(d0, true);
+
+  EXPECT_THAT(d0.branch, IsBranch(p0, graph()->start()));
+  EXPECT_THAT(d0.if_true, IsIfTrue(d0.branch));
+  EXPECT_THAT(d0.if_false, IsIfFalse(d0.branch));
+  EXPECT_THAT(d0.merge, IsMerge(d1.merge, d0.if_false));
+
+  EXPECT_THAT(d1.branch, IsBranch(p1, d0.if_true));
+  EXPECT_THAT(d1.if_true, IsIfTrue(d1.branch));
+  EXPECT_THAT(d1.if_false, IsIfFalse(d1.branch));
+  EXPECT_THAT(d1.merge, IsMerge(d1.if_true, d1.if_false));
+}
+
+
+TEST_F(DiamondTest, DiamondNested_false) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Diamond d0(graph(), common(), p0);
+  Diamond d1(graph(), common(), p1);
+
+  d1.Nest(d0, false);
+
+  EXPECT_THAT(d0.branch, IsBranch(p0, graph()->start()));
+  EXPECT_THAT(d0.if_true, IsIfTrue(d0.branch));
+  EXPECT_THAT(d0.if_false, IsIfFalse(d0.branch));
+  EXPECT_THAT(d0.merge, IsMerge(d0.if_true, d1.merge));
+
+  EXPECT_THAT(d1.branch, IsBranch(p1, d0.if_false));
+  EXPECT_THAT(d1.if_true, IsIfTrue(d1.branch));
+  EXPECT_THAT(d1.if_false, IsIfFalse(d1.branch));
+  EXPECT_THAT(d1.merge, IsMerge(d1.if_true, d1.if_false));
+}
+
+
+TEST_F(DiamondTest, DiamondPhis) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Node* p2 = Parameter(2);
+  Diamond d(graph(), common(), p0);
+
+  MachineType types[] = {kMachAnyTagged, kMachUint32, kMachInt32};
+
+  for (size_t i = 0; i < arraysize(types); i++) {
+    Node* phi = d.Phi(types[i], p1, p2);
+
+    EXPECT_THAT(d.branch, IsBranch(p0, graph()->start()));
+    EXPECT_THAT(d.if_true, IsIfTrue(d.branch));
+    EXPECT_THAT(d.if_false, IsIfFalse(d.branch));
+    EXPECT_THAT(d.merge, IsMerge(d.if_true, d.if_false));
+    EXPECT_THAT(phi, IsPhi(types[i], p1, p2, d.merge));
+  }
+}
+
+
+TEST_F(DiamondTest, DiamondEffectPhis) {
+  Node* p0 = Parameter(0);
+  Node* p1 = Parameter(1);
+  Node* p2 = Parameter(2);
+  Diamond d(graph(), common(), p0);
+
+  Node* phi = d.EffectPhi(p1, p2);
+
+  EXPECT_THAT(d.branch, IsBranch(p0, graph()->start()));
+  EXPECT_THAT(d.if_true, IsIfTrue(d.branch));
+  EXPECT_THAT(d.if_false, IsIfFalse(d.branch));
+  EXPECT_THAT(d.merge, IsMerge(d.if_true, d.if_false));
+  EXPECT_THAT(phi, IsEffectPhi(p1, p2, d.merge));
+}
+
+
+TEST_F(DiamondTest, BranchHint) {
+  Diamond dn(graph(), common(), Parameter(0));
+  CHECK(BranchHint::kNone == BranchHintOf(dn.branch->op()));
+
+  Diamond dt(graph(), common(), Parameter(0), BranchHint::kTrue);
+  CHECK(BranchHint::kTrue == BranchHintOf(dt.branch->op()));
+
+  Diamond df(graph(), common(), Parameter(0), BranchHint::kFalse);
+  CHECK(BranchHint::kFalse == BranchHintOf(df.branch->op()));
+}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
=======================================
--- /trunk/BUILD.gn     Tue Nov  4 14:02:29 2014 UTC
+++ /trunk/BUILD.gn     Wed Nov  5 01:04:48 2014 UTC
@@ -498,6 +498,7 @@
     "src/compiler/control-builders.h",
     "src/compiler/control-reducer.cc",
     "src/compiler/control-reducer.h",
+    "src/compiler/diamond.h",
     "src/compiler/frame.h",
     "src/compiler/gap-resolver.cc",
     "src/compiler/gap-resolver.h",
@@ -571,6 +572,8 @@
     "src/compiler/raw-machine-assembler.h",
     "src/compiler/register-allocator.cc",
     "src/compiler/register-allocator.h",
+    "src/compiler/register-configuration.cc",
+    "src/compiler/register-configuration.h",
     "src/compiler/representation-change.h",
     "src/compiler/schedule.cc",
     "src/compiler/schedule.h",
=======================================
--- /trunk/ChangeLog    Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/ChangeLog    Wed Nov  5 01:04:48 2014 UTC
@@ -1,3 +1,17 @@
+2014-11-05: Version 3.30.33
+
+ `1..isPrototypeOf.call(null)` should return false, not throw TypeError
+        (issue 3483).
+
+ Refactor ObjectGetOwnPropertyKeys to accept bitmask rather than boolean
+        (issue 3549).
+
+        Add debug mirror support for ES6 Map/Set iterators (Chromium issue
+        427868).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-11-04: Version 3.30.30

         Performance and stability improvements on all platforms.
=======================================
--- /trunk/include/v8.h Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/include/v8.h Wed Nov  5 01:04:48 2014 UTC
@@ -1625,6 +1625,18 @@
    */
   bool IsSet() const;

+  /**
+   * Returns true if this value is a Map Iterator.
+   * This is an experimental feature.
+   */
+  bool IsMapIterator() const;
+
+  /**
+   * Returns true if this value is a Set Iterator.
+   * This is an experimental feature.
+   */
+  bool IsSetIterator() const;
+
   /**
    * Returns true if this value is a WeakMap.
    * This is an experimental feature.
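
A hedged embedder-side example of the two new predicates (the surrounding isolate/value setup is assumed and is not part of this patch; the function name is hypothetical):

    // Sketch: classify a value handed back from script.
    void DescribeCollectionIterator(v8::Local<v8::Value> value) {
      if (value->IsMapIterator()) {
        // e.g. the result of map.keys(), map.values() or map.entries()
      } else if (value->IsSetIterator()) {
        // e.g. the result of set.values() or set.entries()
      }
    }
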
=======================================
--- /trunk/src/api.cc   Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/api.cc   Wed Nov  5 01:04:48 2014 UTC
@@ -2569,6 +2569,16 @@
 bool Value::IsGeneratorObject() const {
   return Utils::OpenHandle(this)->IsJSGeneratorObject();
 }
+
+
+bool Value::IsMapIterator() const {
+  return Utils::OpenHandle(this)->IsJSMapIterator();
+}
+
+
+bool Value::IsSetIterator() const {
+  return Utils::OpenHandle(this)->IsJSSetIterator();
+}


 Local<String> Value::ToString(Isolate* v8_isolate) const {
=======================================
--- /trunk/src/arm/interface-descriptors-arm.cc Wed Oct  1 00:05:35 2014 UTC
+++ /trunk/src/arm/interface-descriptors-arm.cc Wed Nov  5 01:04:48 2014 UTC
@@ -150,6 +150,15 @@
   Register registers[] = {cp, r0, r1};
   data->Initialize(arraysize(registers), registers, NULL);
 }
+
+
+void AllocateHeapNumberDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  Register registers[] = {cp};
+  data->Initialize(arraysize(registers), registers, nullptr);
+}


 void ArrayConstructorConstantArgCountDescriptor::Initialize(
=======================================
--- /trunk/src/arm64/interface-descriptors-arm64.cc Wed Oct  1 00:05:35 2014 UTC
+++ /trunk/src/arm64/interface-descriptors-arm64.cc Wed Nov  5 01:04:48 2014 UTC
@@ -183,6 +183,14 @@
   Register registers[] = {cp, x0, x1};
   data->Initialize(arraysize(registers), registers, NULL);
 }
+
+
+void AllocateHeapNumberDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  Register registers[] = {cp};
+  data->Initialize(arraysize(registers), registers, nullptr);
+}


 void ArrayConstructorConstantArgCountDescriptor::Initialize(
=======================================
--- /trunk/src/bootstrapper.cc  Wed Oct 22 07:27:53 2014 UTC
+++ /trunk/src/bootstrapper.cc  Wed Nov  5 01:04:48 2014 UTC
@@ -215,11 +215,9 @@
   void InstallNativeFunctions_##id();             \
   void InitializeGlobal_##id();

-  SHIPPING_FEATURES(DECLARE_FEATURE_INITIALIZATION)
-  HARMONY_FEATURES(DECLARE_FEATURE_INITIALIZATION)
-  STAGED_FEATURES(DECLARE_FEATURE_INITIALIZATION)
-
-  DECLARE_FEATURE_INITIALIZATION(harmony_proxies, "")
+  HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
+  HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
+  HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
 #undef DECLARE_FEATURE_INITIALIZATION

Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
@@ -1344,7 +1342,7 @@

 #define FEATURE_INITIALIZE_GLOBAL(id, descr) InitializeGlobal_##id();

-  SHIPPING_FEATURES(FEATURE_INITIALIZE_GLOBAL)
+  HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
 #undef FEATURE_INITIALIZE_GLOBAL

   // Initialize the embedder data slot.
@@ -1379,8 +1377,8 @@
 void Genesis::InitializeExperimentalGlobal() {
 #define FEATURE_INITIALIZE_GLOBAL(id, descr) InitializeGlobal_##id();

-  HARMONY_FEATURES(FEATURE_INITIALIZE_GLOBAL)
-  STAGED_FEATURES(FEATURE_INITIALIZE_GLOBAL)
+  HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
+  HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
 #undef FEATURE_INITIALIZE_GLOBAL
 }

@@ -1565,8 +1563,7 @@
   INSTALL_NATIVE(JSFunction, "ArrayValues", array_values_iterator);

#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
-
-  SHIPPING_FEATURES(INSTALL_NATIVE_FUNCTIONS_FOR)
+  HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
 #undef INSTALL_NATIVE_FUNCTIONS_FOR
 }

@@ -1580,10 +1577,8 @@
   }

#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
-
-  HARMONY_FEATURES(INSTALL_NATIVE_FUNCTIONS_FOR)
-  STAGED_FEATURES(INSTALL_NATIVE_FUNCTIONS_FOR)
-  INSTALL_NATIVE_FUNCTIONS_FOR(harmony_proxies, "")
+  HARMONY_INPROGRESS(INSTALL_NATIVE_FUNCTIONS_FOR)
+  HARMONY_STAGED(INSTALL_NATIVE_FUNCTIONS_FOR)
 #undef INSTALL_NATIVE_FUNCTIONS_FOR
 }

@@ -1626,6 +1621,7 @@
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_literals)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)

 void Genesis::InitializeGlobal_harmony_regexps() {
   Handle<JSObject> builtins(native_context()->builtins());
@@ -2180,16 +2176,14 @@
       }                                                              \
     }                                                                \
   }
-    INSTALL_EXPERIMENTAL_NATIVES(harmony_proxies, "");
     // Iterate over flags that are not enabled by default.
-    HARMONY_FEATURES(INSTALL_EXPERIMENTAL_NATIVES);
-    STAGED_FEATURES(INSTALL_EXPERIMENTAL_NATIVES);
+    HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
+    HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
 #undef INSTALL_EXPERIMENTAL_NATIVES
   }

 #define USE_NATIVES_FOR_FEATURE(id, descr) USE(id##_natives);
-
-  SHIPPING_FEATURES(USE_NATIVES_FOR_FEATURE)
+  HARMONY_SHIPPING(USE_NATIVES_FOR_FEATURE)
 #undef USE_NATIVES_FOR_FEATURE

   InstallExperimentalNativeFunctions();
=======================================
--- /trunk/src/code-factory.cc  Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/code-factory.cc  Wed Nov  5 01:04:48 2014 UTC
@@ -101,6 +101,13 @@
   StringAddStub stub(isolate, flags, pretenure_flag);
   return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
 }
+
+
+// static
+Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
+  AllocateHeapNumberStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}


 // static
=======================================
--- /trunk/src/code-factory.h   Mon Oct 13 00:05:20 2014 UTC
+++ /trunk/src/code-factory.h   Wed Nov  5 01:04:48 2014 UTC
@@ -55,9 +55,13 @@
   static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
                             PretenureFlag pretenure_flag);

+  static Callable AllocateHeapNumber(Isolate* isolate);
+
   static Callable CallFunction(Isolate* isolate, int argc,
                                CallFunctionFlags flags);
 };
-}
-}
+
+}  // namespace internal
+}  // namespace v8
+
 #endif  // V8_CODE_FACTORY_H_
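
The expected call pattern for the new entry point is the one introduced in change-lowering.cc below; roughly (a sketch, with the isolate()/linkage()/jsgraph() accessors assumed as in ChangeLowering):

    // Sketch: call AllocateHeapNumberStub through a stub call descriptor
    // instead of going through the runtime.
    Callable callable = CodeFactory::AllocateHeapNumber(isolate());
    CallDescriptor* descriptor = linkage()->GetStubCallDescriptor(
        callable.descriptor(), 0, CallDescriptor::kNoFlags);
    Node* target = jsgraph()->HeapConstant(callable.code());
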
=======================================
--- /trunk/src/code-stubs-hydrogen.cc   Wed Oct 15 13:35:30 2014 UTC
+++ /trunk/src/code-stubs-hydrogen.cc   Wed Nov  5 01:04:48 2014 UTC
@@ -882,6 +882,22 @@
 Handle<Code> TransitionElementsKindStub::GenerateCode() {
   return DoGenerateCode(this);
 }
+
+
+template <>
+HValue* CodeStubGraphBuilder<AllocateHeapNumberStub>::BuildCodeStub() {
+  HValue* result =
+ Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapNumber(),
+                     NOT_TENURED, HEAP_NUMBER_TYPE);
+  AddStoreMapConstant(result, isolate()->factory()->heap_number_map());
+  return result;
+}
+
+
+Handle<Code> AllocateHeapNumberStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+

 HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
     ElementsKind kind,
=======================================
--- /trunk/src/code-stubs.cc    Tue Oct 14 07:51:07 2014 UTC
+++ /trunk/src/code-stubs.cc    Wed Nov  5 01:04:48 2014 UTC
@@ -695,6 +695,13 @@
   descriptor->Initialize(
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
 }
+
+
+void AllocateHeapNumberStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(
+      Runtime::FunctionForId(Runtime::kAllocateHeapNumber)->entry);
+}


void CompareNilICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
=======================================
--- /trunk/src/code-stubs.h     Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/code-stubs.h     Wed Nov  5 01:04:48 2014 UTC
@@ -51,6 +51,7 @@
   V(StubFailureTrampoline)                  \
   V(SubString)                              \
   /* HydrogenCodeStubs */                   \
+  V(AllocateHeapNumber)                     \
   V(ArrayNArgumentsConstructor)             \
   V(ArrayNoArgumentConstructor)             \
   V(ArraySingleArgumentConstructor)         \
@@ -2113,6 +2114,17 @@
 };


+class AllocateHeapNumberStub FINAL : public HydrogenCodeStub {
+ public:
+  explicit AllocateHeapNumberStub(Isolate* isolate)
+      : HydrogenCodeStub(isolate) {}
+
+ private:
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
+  DEFINE_HYDROGEN_CODE_STUB(AllocateHeapNumber, HydrogenCodeStub);
+};
+
+
 class ArrayConstructorStubBase : public HydrogenCodeStub {
  public:
   ArrayConstructorStubBase(Isolate* isolate,
=======================================
--- /trunk/src/compiler/change-lowering.cc      Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/change-lowering.cc      Wed Nov  5 01:04:48 2014 UTC
@@ -4,6 +4,8 @@

 #include "src/compiler/change-lowering.h"

+#include "src/code-factory.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/machine-operator.h"
@@ -66,19 +68,16 @@


 Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
-  // The AllocateHeapNumber() runtime function does not use the context, so we
-  // can safely pass in Smi zero here.
+  // The AllocateHeapNumberStub does not use the context, so we can safely pass
+  // in Smi zero here.
+  Callable callable = CodeFactory::AllocateHeapNumber(isolate());
+  CallDescriptor* descriptor = linkage()->GetStubCallDescriptor(
+      callable.descriptor(), 0, CallDescriptor::kNoFlags);
+  Node* target = jsgraph()->HeapConstant(callable.code());
   Node* context = jsgraph()->ZeroConstant();
   Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
-  const Runtime::Function* function =
-      Runtime::FunctionForId(Runtime::kAllocateHeapNumber);
-  DCHECK_EQ(0, function->nargs);
-  CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
-      function->function_id, 0, Operator::kNoProperties);
-  Node* heap_number = graph()->NewNode(
-      common()->Call(desc), jsgraph()->CEntryStubConstant(),
-      jsgraph()->ExternalConstant(ExternalReference(function, isolate())),
-      jsgraph()->Int32Constant(function->nargs), context, effect, control);
+  Node* heap_number = graph()->NewNode(common()->Call(descriptor), target,
+                                       context, effect, control);
   Node* store = graph()->NewNode(
       machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
@@ -103,20 +102,11 @@


 Reduction ChangeLowering::ChangeBitToBool(Node* val, Node* control) {
-  Node* branch = graph()->NewNode(common()->Branch(), val, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* true_value = jsgraph()->TrueConstant();
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* false_value = jsgraph()->FalseConstant();
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(
-      common()->Phi(static_cast<MachineType>(kTypeBool | kRepTagged), 2),
-      true_value, false_value, merge);
-
-  return Replace(phi);
+  Diamond d(graph(), common(), val);
+  d.Chain(control);
+ MachineType machine_type = static_cast<MachineType>(kTypeBool | kRepTagged);
+  return Replace(d.Phi(machine_type, jsgraph()->TrueConstant(),
+                       jsgraph()->FalseConstant()));
 }


@@ -142,21 +132,12 @@
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val);
   Node* ovf = graph()->NewNode(common()->Projection(1), add);

-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), ovf, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Diamond d(graph(), common(), ovf, BranchHint::kFalse);
+  d.Chain(control);
   Node* heap_number = AllocateHeapNumberWithValue(
-      graph()->NewNode(machine()->ChangeInt32ToFloat64(), val), if_true);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), val), d.if_true);
   Node* smi = graph()->NewNode(common()->Projection(0), add);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), heap_number,
-                               smi, merge);
-
-  return Replace(phi);
+  return Replace(d.Phi(kMachAnyTagged, heap_number, smi));
 }


@@ -167,23 +148,17 @@

   Node* tag = graph()->NewNode(machine()->WordAnd(), val,
                                jsgraph()->IntPtrConstant(kSmiTagMask));
-  Node* branch = graph()->NewNode(common()->Branch(), tag, control);

-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Diamond d(graph(), common(), tag, BranchHint::kFalse);
+  d.Chain(control);
   const Operator* op = (signedness == kSigned)
                            ? machine()->ChangeFloat64ToInt32()
                            : machine()->ChangeFloat64ToUint32();
-  Node* change = graph()->NewNode(op, LoadHeapNumberValue(val, if_true));
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* load = graph()->NewNode(op, LoadHeapNumberValue(val, d.if_true));
   Node* number = ChangeSmiToInt32(val);

-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(
-      common()->Phi((signedness == kSigned) ? kMachInt32 : kMachUint32, 2),
-      change, number, merge);
-
-  return Replace(phi);
+  return Replace(
+ d.Phi((signedness == kSigned) ? kMachInt32 : kMachUint32, load, number));
 }


@@ -193,20 +168,13 @@

   Node* tag = graph()->NewNode(machine()->WordAnd(), val,
                                jsgraph()->IntPtrConstant(kSmiTagMask));
-  Node* branch = graph()->NewNode(common()->Branch(), tag, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* load = LoadHeapNumberValue(val, if_true);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Diamond d(graph(), common(), tag, BranchHint::kFalse);
+  d.Chain(control);
+  Node* load = LoadHeapNumberValue(val, d.if_true);
   Node* number = graph()->NewNode(machine()->ChangeInt32ToFloat64(),
                                   ChangeSmiToInt32(val));

-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi =
- graph()->NewNode(common()->Phi(kMachFloat64, 2), load, number, merge);
-
-  return Replace(phi);
+  return Replace(d.Phi(kMachFloat64, load, number));
 }


@@ -216,10 +184,8 @@

   Node* cmp = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
                                SmiMaxValueConstant());
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), cmp, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Diamond d(graph(), common(), cmp, BranchHint::kTrue);
+  d.Chain(control);
   Node* smi = graph()->NewNode(
       machine()->WordShl(),
       machine()->Is64()
@@ -227,15 +193,10 @@
           : val,
       SmiShiftBitsConstant());

-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
   Node* heap_number = AllocateHeapNumberWithValue(
-      graph()->NewNode(machine()->ChangeUint32ToFloat64(), val), if_false);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), smi,
-                               heap_number, merge);
+ graph()->NewNode(machine()->ChangeUint32ToFloat64(), val), d.if_false);

-  return Replace(phi);
+  return Replace(d.Phi(kMachAnyTagged, smi, heap_number));
 }


=======================================
--- /trunk/src/compiler/graph-visualizer.cc     Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/graph-visualizer.cc     Wed Nov  5 01:04:48 2014 UTC
@@ -680,7 +680,9 @@
       for (int j = instruction_block->first_instruction_index();
            j <= instruction_block->last_instruction_index(); j++) {
         PrintIndent();
-        os_ << j << " " << *instructions->InstructionAt(j) << " <|@\n";
+ PrintableInstruction printable = {RegisterConfiguration::ArchDefault(),
+                                          instructions->InstructionAt(j)};
+        os_ << j << " " << printable << " <|@\n";
       }
     }
   }
=======================================
--- /trunk/src/compiler/instruction.cc  Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/instruction.cc  Wed Nov  5 01:04:48 2014 UTC
@@ -6,17 +6,15 @@
 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/instruction.h"
-#include "src/macro-assembler.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

-STATIC_ASSERT(kMaxGeneralRegisters >= Register::kNumRegisters);
-STATIC_ASSERT(kMaxDoubleRegisters >= DoubleRegister::kMaxNumRegisters);
-
-
-std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionOperand& printable) {
+  const InstructionOperand& op = *printable.op_;
+  const RegisterConfiguration* conf = printable.register_configuration_;
   switch (op.kind()) {
     case InstructionOperand::INVALID:
       return os << "(0)";
@@ -30,10 +28,10 @@
         case UnallocatedOperand::NONE:
           return os;
         case UnallocatedOperand::FIXED_REGISTER:
-          return os << "(=" << Register::AllocationIndexToString(
+          return os << "(=" << conf->general_register_name(
                                    unalloc->fixed_register_index()) << ")";
         case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
-          return os << "(=" << DoubleRegister::AllocationIndexToString(
+          return os << "(=" << conf->double_register_name(
                                    unalloc->fixed_register_index()) << ")";
         case UnallocatedOperand::MUST_HAVE_REGISTER:
           return os << "(R)";
@@ -52,11 +50,9 @@
     case InstructionOperand::DOUBLE_STACK_SLOT:
       return os << "[double_stack:" << op.index() << "]";
     case InstructionOperand::REGISTER:
-      return os << "[" << Register::AllocationIndexToString(op.index())
-                << "|R]";
+      return os << "[" << conf->general_register_name(op.index()) << "|R]";
     case InstructionOperand::DOUBLE_REGISTER:
- return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
-                << "|R]";
+      return os << "[" << conf->double_register_name(op.index()) << "|R]";
   }
   UNREACHABLE();
   return os;
@@ -100,10 +96,18 @@
 #undef INSTRUCTION_OPERAND_TEARDOWN
 }

+
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableMoveOperands& printable) {
+  const MoveOperands& mo = *printable.move_operands_;
+ PrintableInstructionOperand printable_op = {printable.register_configuration_,
+                                              mo.destination()};

-std::ostream& operator<<(std::ostream& os, const MoveOperands& mo) {
-  os << *mo.destination();
-  if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
+  os << printable_op;
+  if (!mo.source()->Equals(mo.destination())) {
+    printable_op.op_ = mo.source();
+    os << " = " << printable_op;
+  }
   return os << ";";
 }

@@ -116,14 +120,17 @@
 }


-std::ostream& operator<<(std::ostream& os, const ParallelMove& pm) {
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableParallelMove& printable) {
+  const ParallelMove& pm = *printable.parallel_move_;
   bool first = true;
   for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
        move != pm.move_operands()->end(); ++move) {
     if (move->IsEliminated()) continue;
     if (!first) os << " ";
     first = false;
-    os << *move;
+    PrintableMoveOperands pmo = {printable.register_configuration_, move};
+    os << pmo;
   }
   return os;
 }
@@ -256,11 +263,16 @@
 }


-std::ostream& operator<<(std::ostream& os, const Instruction& instr) {
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstruction& printable) {
+  const Instruction& instr = *printable.instr_;
+ PrintableInstructionOperand printable_op = {printable.register_configuration_,
+                                              NULL};
   if (instr.OutputCount() > 1) os << "(";
   for (size_t i = 0; i < instr.OutputCount(); i++) {
     if (i > 0) os << ", ";
-    os << *instr.OutputAt(i);
+    printable_op.op_ = instr.OutputAt(i);
+    os << printable_op;
   }

   if (instr.OutputCount() > 1) os << ") = ";
@@ -272,7 +284,11 @@
     for (int i = GapInstruction::FIRST_INNER_POSITION;
          i <= GapInstruction::LAST_INNER_POSITION; i++) {
       os << "(";
-      if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
+      if (gap->parallel_moves_[i] != NULL) {
+        PrintableParallelMove ppm = {printable.register_configuration_,
+                                     gap->parallel_moves_[i]};
+        os << ppm;
+      }
       os << ") ";
     }
   } else if (instr.IsSourcePosition()) {
@@ -293,7 +309,8 @@
   }
   if (instr.InputCount() > 0) {
     for (size_t i = 0; i < instr.InputCount(); i++) {
-      os << " " << *instr.InputAt(i);
+      printable_op.op_ = instr.InputAt(i);
+      os << " " << printable_op;
     }
   }
   return os;
@@ -585,7 +602,9 @@
 }


-std::ostream& operator<<(std::ostream& os, const InstructionSequence& code) {
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionSequence& printable) {
+  const InstructionSequence& code = *printable.sequence_;
   for (size_t i = 0; i < code.immediates_.size(); ++i) {
     Constant constant = code.immediates_[i];
     os << "IMM#" << i << ": " << constant << "\n";
@@ -626,19 +645,15 @@
     }

     ScopedVector<char> buf(32);
+    PrintableInstruction printable_instr;
+ printable_instr.register_configuration_ = printable.register_configuration_;
     for (int j = block->first_instruction_index();
          j <= block->last_instruction_index(); j++) {
       // TODO(svenpanne) Add some basic formatting to our streams.
       SNPrintF(buf, "%5d", j);
-      os << "   " << buf.start() << ": " << *code.InstructionAt(j) << "\n";
+      printable_instr.instr_ = code.InstructionAt(j);
+      os << "   " << buf.start() << ": " << printable_instr << "\n";
     }
-
-    // TODO(dcarney): add this back somehow?
-    // os << "  " << block->control();
-
-    // if (block->control_input() != NULL) {
-    //   os << " v" << block->control_input()->id();
-    // }

     for (auto succ : block->successors()) {
       const InstructionBlock* succ_block = code.InstructionBlockAt(succ);
=======================================
--- /trunk/src/compiler/instruction.h   Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/instruction.h   Wed Nov  5 01:04:48 2014 UTC
@@ -14,6 +14,7 @@
 #include "src/compiler/frame.h"
 #include "src/compiler/instruction-codes.h"
 #include "src/compiler/opcodes.h"
+#include "src/compiler/register-configuration.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/source-position.h"
 #include "src/zone-allocator.h"
@@ -27,18 +28,13 @@
 const InstructionCode kBlockStartInstruction = -2;
 const InstructionCode kSourcePositionInstruction = -3;

-// Platform independent maxes.
-static const int kMaxGeneralRegisters = 32;
-static const int kMaxDoubleRegisters = 32;
-
-
-#define INSTRUCTION_OPERAND_LIST(V)           \
-  V(Constant, CONSTANT, 0)                    \
-  V(Immediate, IMMEDIATE, 0)                  \
-  V(StackSlot, STACK_SLOT, 128)               \
-  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)  \
-  V(Register, REGISTER, kMaxGeneralRegisters) \
-  V(DoubleRegister, DOUBLE_REGISTER, kMaxDoubleRegisters)
+#define INSTRUCTION_OPERAND_LIST(V)                                  \
+  V(Constant, CONSTANT, 0)                                           \
+  V(Immediate, IMMEDIATE, 0)                                         \
+  V(StackSlot, STACK_SLOT, 128)                                      \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)                         \
+  V(Register, REGISTER, RegisterConfiguration::kMaxGeneralRegisters) \
+ V(DoubleRegister, DOUBLE_REGISTER, RegisterConfiguration::kMaxDoubleRegisters)

 class InstructionOperand : public ZoneObject {
  public:
@@ -87,7 +83,13 @@

 typedef ZoneVector<InstructionOperand*> InstructionOperandVector;

-std::ostream& operator<<(std::ostream& os, const InstructionOperand& op);
+struct PrintableInstructionOperand {
+  const RegisterConfiguration* register_configuration_;
+  const InstructionOperand* op_;
+};
+
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionOperand& op);

 class UnallocatedOperand : public InstructionOperand {
  public:
@@ -305,8 +307,16 @@
   InstructionOperand* source_;
   InstructionOperand* destination_;
 };
+
+
+struct PrintableMoveOperands {
+  const RegisterConfiguration* register_configuration_;
+  const MoveOperands* move_operands_;
+};

-std::ostream& operator<<(std::ostream& os, const MoveOperands& mo);
+
+std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
+

 template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
 class SubKindOperand FINAL : public InstructionOperand {
@@ -359,7 +369,15 @@
   ZoneList<MoveOperands> move_operands_;
 };

-std::ostream& operator<<(std::ostream& os, const ParallelMove& pm);
+
+struct PrintableParallelMove {
+  const RegisterConfiguration* register_configuration_;
+  const ParallelMove* parallel_move_;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PrintableParallelMove& pm);
+

 class PointerMap FINAL : public ZoneObject {
  public:
@@ -533,8 +551,14 @@
   PointerMap* pointer_map_;
   InstructionOperand* operands_[1];
 };
+
+
+struct PrintableInstruction {
+  const RegisterConfiguration* register_configuration_;
+  const Instruction* instr_;
+};
+std::ostream& operator<<(std::ostream& os, const PrintableInstruction& instr);

-std::ostream& operator<<(std::ostream& os, const Instruction& instr);

 // Represents moves inserted before an instruction due to register allocation.
 // TODO(titzer): squash GapInstruction back into Instruction, since essentially
@@ -585,7 +609,8 @@
   }

  private:
- friend std::ostream& operator<<(std::ostream& os, const Instruction& instr);
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const PrintableInstruction& instr);
   ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
 };

@@ -847,6 +872,9 @@
 typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
 typedef ZoneVector<InstructionBlock*> InstructionBlocks;

+struct PrintableInstructionSequence;
+
+
// Represents architecture-specific generated code before, during, and after
 // register allocation.
 // TODO(titzer): s/IsDouble/IsFloat64/
@@ -961,7 +989,7 @@

  private:
   friend std::ostream& operator<<(std::ostream& os,
-                                  const InstructionSequence& code);
+ const PrintableInstructionSequence& code);

typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;

@@ -977,7 +1005,15 @@
   DeoptimizationVector deoptimization_entries_;
 };

-std::ostream& operator<<(std::ostream& os, const InstructionSequence& code);
+
+struct PrintableInstructionSequence {
+  const RegisterConfiguration* register_configuration_;
+  const InstructionSequence* sequence_;
+};
+
+
+std::ostream& operator<<(std::ostream& os,
+                         const PrintableInstructionSequence& code);

 }  // namespace compiler
 }  // namespace internal
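
Because operator<< now takes the Printable* wrapper structs instead of the raw instruction types, callers pair the object being printed with a RegisterConfiguration, e.g. (sketch; os and sequence are placeholders for an existing stream and InstructionSequence):

    // Sketch: print an InstructionSequence with architecture register names.
    PrintableInstructionSequence printable = {
        RegisterConfiguration::ArchDefault(), &sequence};
    os << printable;
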
=======================================
--- /trunk/src/compiler/js-builtin-reducer.cc   Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/js-builtin-reducer.cc   Wed Nov  5 01:04:48 2014 UTC
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

+#include "src/compiler/diamond.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/js-builtin-reducer.h"
 #include "src/compiler/node-matchers.h"
@@ -106,17 +107,10 @@
     // Math.abs(a:number) -> (a > 0 ? a : 0 - a)
     Node* value = r.left();
     Node* zero = jsgraph()->ZeroConstant();
-    Node* control = graph()->start();
- Node* tag = graph()->NewNode(simplified()->NumberLessThan(), zero, value);
-
-    Node* branch = graph()->NewNode(common()->Branch(), tag, control);
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-
+ Node* cmp = graph()->NewNode(simplified()->NumberLessThan(), zero, value);
+    Diamond d(graph(), common(), cmp);
     Node* neg = graph()->NewNode(simplified()->NumberSubtract(), zero, value);
-    value = graph()->NewNode(common()->Phi(kMachNone, 2), value, neg, merge);
-    return Replace(value);
+    return Replace(d.Phi(kMachNone, value, neg));
   }
   return NoChange();
 }
@@ -150,15 +144,9 @@
     Node* value = r.GetJSCallInput(0);
     for (int i = 1; i < r.GetJSCallArity(); i++) {
       Node* p = r.GetJSCallInput(i);
-      Node* control = graph()->start();
- Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p);
-
-      Node* branch = graph()->NewNode(common()->Branch(), tag, control);
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-
-      value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge);
+      Node* cmp = graph()->NewNode(simplified()->NumberLessThan(), value, p);
+      Diamond d(graph(), common(), cmp);
+      value = d.Phi(kMachNone, p, value);
     }
     return Replace(value);
   }
=======================================
--- /trunk/src/compiler/js-intrinsic-builder.cc Tue Oct 21 12:48:28 2014 UTC
+++ /trunk/src/compiler/js-intrinsic-builder.cc Wed Nov  5 01:04:48 2014 UTC
@@ -4,6 +4,7 @@

 #include "src/compiler/access-builder.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/js-intrinsic-builder.h"
 #include "src/compiler/js-operator.h"
@@ -68,29 +69,23 @@
   SimplifiedOperatorBuilder simplified(jsgraph_->zone());

   Node* is_smi = graph()->NewNode(simplified.ObjectIsSmi(), object);
- Node* branch = graph()->NewNode(common()->Branch(), is_smi, graph()->start());
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Diamond d(graph(), common(), is_smi);

Node* map = graph()->NewNode(simplified.LoadField(AccessBuilder::ForMap()),
-                               object, effect, if_false);
+                               object, effect, d.if_false);

   Node* instance_type = graph()->NewNode(
       simplified.LoadField(AccessBuilder::ForMapInstanceType()), map, map,
-      if_false);
+      d.if_false);

   Node* has_map_type =
       graph()->NewNode(jsgraph_->machine()->Word32Equal(), instance_type,
                        jsgraph_->Int32Constant(map_type));

-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = d.Phi(static_cast<MachineType>(kTypeBool | kRepTagged),
+                    jsgraph_->FalseConstant(), has_map_type);

-  Node* phi =
- graph()->NewNode(common()->Phi((MachineType)(kTypeBool | kRepTagged), 2),
-                       jsgraph_->FalseConstant(), has_map_type, merge);
-
-  Node* ephi =
- graph()->NewNode(common()->EffectPhi(2), effect, instance_type, merge);
+  Node* ephi = d.EffectPhi(effect, instance_type);

   return ResultAndEffect(phi, ephi);
 }
@@ -112,44 +107,32 @@
   SimplifiedOperatorBuilder simplified(jsgraph_->zone());

   Node* is_smi = graph()->NewNode(simplified.ObjectIsSmi(), object);
- Node* branch = graph()->NewNode(common()->Branch(), is_smi, graph()->start());
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+  Diamond if_is_smi(graph(), common(), is_smi);

Node* map = graph()->NewNode(simplified.LoadField(AccessBuilder::ForMap()),
-                               object, effect, if_false);
+                               object, effect, if_is_smi.if_false);

   Node* instance_type = graph()->NewNode(
       simplified.LoadField(AccessBuilder::ForMapInstanceType()), map, map,
-      if_false);
+      if_is_smi.if_false);

   Node* is_value =
       graph()->NewNode(jsgraph_->machine()->Word32Equal(), instance_type,
                        jsgraph_->Constant(JS_VALUE_TYPE));

-  Node* branch_is_value =
-      graph()->NewNode(common()->Branch(), is_value, if_false);
-  Node* is_value_true = graph()->NewNode(common()->IfTrue(), branch_is_value);
-  Node* is_value_false = graph()->NewNode(common()->IfFalse(), branch_is_value);
+  Diamond if_is_value(graph(), common(), is_value);
+  if_is_value.Nest(if_is_smi, false);

   Node* value =
graph()->NewNode(simplified.LoadField(AccessBuilder::ForValue()), object,
-                       instance_type, is_value_true);
-
-  Node* merge_is_value =
-      graph()->NewNode(common()->Merge(2), is_value_true, is_value_false);
-
- Node* phi_is_value = graph()->NewNode(common()->Phi((MachineType)kTypeAny, 2),
-                                        value, object, merge_is_value);
-
+                       instance_type, if_is_value.if_true);

- Node* merge = graph()->NewNode(common()->Merge(2), if_true, merge_is_value);
+  Node* phi_is_value = if_is_value.Phi(kTypeAny, value, object);

- Node* phi = graph()->NewNode(common()->Phi((MachineType)kTypeAny, 2), object,
-                               phi_is_value, merge);
+  Node* phi = if_is_smi.Phi(kTypeAny, object, phi_is_value);

-  Node* ephi =
- graph()->NewNode(common()->EffectPhi(2), effect, instance_type, merge);
+  Node* ephi = if_is_smi.EffectPhi(effect, instance_type);

   return ResultAndEffect(phi, ephi);
 }
=======================================
--- /trunk/src/compiler/machine-operator-reducer.cc Tue Nov  4 14:02:29 2014 UTC
+++ /trunk/src/compiler/machine-operator-reducer.cc Wed Nov  5 01:04:48 2014 UTC
@@ -7,6 +7,7 @@
 #include "src/base/bits.h"
 #include "src/base/division-by-constant.h"
 #include "src/codegen.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/js-graph.h"
@@ -640,22 +641,10 @@

       Node* check =
           graph()->NewNode(machine()->Int32LessThan(), dividend, zero);
-      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                      check, graph()->start());
-
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+      Diamond d(graph(), common(), check, BranchHint::kFalse);
Node* neg = Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask));
-
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
       Node* pos = Word32And(dividend, mask);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-
-      DCHECK_EQ(3, node->InputCount());
-      node->set_op(common()->Phi(kMachInt32, 2));
-      node->ReplaceInput(0, neg);
-      node->ReplaceInput(1, pos);
-      node->ReplaceInput(2, merge);
+      d.OverwriteWithPhi(node, kMachInt32, neg, pos);
     } else {
       Node* quotient = Int32Div(dividend, divisor);
       node->set_op(machine()->Int32Sub());
=======================================
--- /trunk/src/compiler/mips/code-generator-mips.cc Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/mips/code-generator-mips.cc Wed Nov  5 01:04:48 2014 UTC
@@ -240,11 +240,10 @@
       __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
       break;
     case kMipsTst:
-      // Psuedo-instruction used for tst/branch.
-      __ And(kCompareReg, i.InputRegister(0), i.InputOperand(1));
+      // Pseudo-instruction used for tst/branch. No opcode emitted here.
       break;
     case kMipsCmp:
-      // Psuedo-instruction used for cmp/branch. No opcode emitted here.
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
       break;
     case kMipsMov:
// TODO(plind): Should we combine mov/li like this, or use separate instr?
@@ -418,7 +417,6 @@
   //    not separated by other instructions.

   if (instr->arch_opcode() == kMipsTst) {
-    // The kMipsTst psuedo-instruction emits And to 'kCompareReg' register.
     switch (condition) {
       case kNotEqual:
         cc = ne;
@@ -430,7 +428,8 @@
         UNSUPPORTED_COND(kMipsTst, condition);
         break;
     }
-    __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Branch(tlabel, cc, at, Operand(zero_reg));

   } else if (instr->arch_opcode() == kMipsAddOvf ||
              instr->arch_opcode() == kMipsSubOvf) {
@@ -557,7 +556,6 @@
   // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
   //    not separated by other instructions.
   if (instr->arch_opcode() == kMipsTst) {
-    // The kMipsTst psuedo-instruction emits And to 'kCompareReg' register.
     switch (condition) {
       case kNotEqual:
         cc = ne;
@@ -569,7 +567,8 @@
         UNSUPPORTED_COND(kMipsTst, condition);
         break;
     }
-    __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
     __ li(result, Operand(1));  // In delay slot.

   } else if (instr->arch_opcode() == kMipsAddOvf ||
=======================================
--- /trunk/src/compiler/mips/instruction-selector-mips.cc Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/mips/instruction-selector-mips.cc Wed Nov  5 01:04:48 2014 UTC
@@ -541,70 +541,114 @@
   VisitWordCompare(selector, node, kMipsCmp, cont, false);
 }

+}  // namespace

-void VisitWordTest(InstructionSelector* selector, Node* node,
-                   FlagsContinuation* cont) {
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  while (selector->CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWordCompare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWordCompare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation> is either NULL, which means there's no use of the
+ // actual value, or was already defined, which means it is scheduled
+          // *AFTER* this branch).
+          Node* const node = value->InputAt(0);
+          Node* const result = node->FindProjection(0);
+          if (!result || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMipsAddOvf, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMipsSubOvf, cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(selector, value, kMipsTst, cont, true);
+      default:
+        break;
+    }
+    break;
+  }
+
+ // Continuation could not be combined with a compare, emit compare against 0.
   MipsOperandGenerator g(selector);
- // kMipsTst is a pseudo-instruction to do logical 'and' and leave the result
-  // in a dedicated tmp register.
- VisitCompare(selector, kMipsTst, g.UseRegister(node), g.UseRegister(node),
-               cont);
+  InstructionCode const opcode = cont->Encode(kMipsCmp);
+  InstructionOperand* const value_operand = g.UseRegister(value);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+                   g.TempImmediate(0));
+  }
 }

-}  // namespace
-

 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
-  MipsOperandGenerator g(this);
-  Node* user = branch;
-  Node* value = branch->InputAt(0);
-
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-
   // If we can fall through to the true block, invert the branch.
   if (IsNextInAssemblyOrder(tbranch)) {
     cont.Negate();
     cont.SwapBlocks();
   }
-
- // Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
-    Int32BinopMatcher m(value);
-    if (m.right().Is(0)) {
-      user = value;
-      value = m.left().node();
-      cont.Negate();
-    } else {
-      break;
-    }
-  }
-
-  // Try to combine the branch with a comparison.
-  if (CanCover(user, value)) {
-    switch (value->opcode()) {
-      case IrOpcode::kWord32And:
- // TODO(plind): understand the significance of 'IR and' special case.
-        return VisitWordCompare(this, value, kMipsTst, &cont, true);
-      default:
-        break;
-    }
-  }
-
-  // Branch could not be combined with a compare, emit compare against 0.
-  return VisitWordTest(this, value, &cont);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }


 void InstructionSelector::VisitWord32Equal(Node* const node) {
-  Node* const user = node;
   FlagsContinuation cont(kEqual, node);
-  Int32BinopMatcher m(user);
+  Int32BinopMatcher m(node);
   if (m.right().Is(0)) {
-    Node* const value = m.left().node();
-    return VisitWordTest(this, value, &cont);
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
   }
-
   VisitWordCompare(this, node, &cont);
 }

=======================================
--- /trunk/src/compiler/pipeline.cc     Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/pipeline.cc     Wed Nov  5 01:04:48 2014 UTC
@@ -558,8 +558,10 @@

   if (FLAG_trace_turbo) {
     OFStream os(stdout);
+    PrintableInstructionSequence printable = {
+        RegisterConfiguration::ArchDefault(), &sequence};
     os << "----- Instruction sequence before register allocation -----\n"
-       << sequence;
+       << printable;
     TurboCfgFile tcf(isolate());
     tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
                  &sequence);
@@ -587,7 +589,7 @@
 #endif


-    RegisterAllocator allocator(RegisterAllocator::PlatformConfig(),
+    RegisterAllocator allocator(RegisterConfiguration::ArchDefault(),
                                 zone_scope.zone(), &frame, &sequence,
                                 debug_name.get());
     if (!allocator.Allocate(data->pipeline_statistics())) {
@@ -602,8 +604,10 @@

   if (FLAG_trace_turbo) {
     OFStream os(stdout);
+    PrintableInstructionSequence printable = {
+        RegisterConfiguration::ArchDefault(), &sequence};
     os << "----- Instruction sequence after register allocation -----\n"
-       << sequence;
+       << printable;
   }

   if (data->pipeline_statistics() != NULL) {
=======================================
--- /trunk/src/compiler/register-allocator.cc   Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/compiler/register-allocator.cc   Wed Nov  5 01:04:48 2014 UTC
@@ -5,7 +5,6 @@
 #include "src/compiler/linkage.h"
 #include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/register-allocator.h"
-#include "src/macro-assembler.h"  // TODO(dcarney): remove this.
 #include "src/string-stream.h"

 namespace v8 {
@@ -507,22 +506,9 @@
 }


-RegisterAllocator::Config RegisterAllocator::PlatformConfig() {
-  DCHECK_EQ(Register::kMaxNumAllocatableRegisters,
-            Register::NumAllocatableRegisters());
-  Config config;
-  config.num_general_registers_ = Register::kMaxNumAllocatableRegisters;
- config.num_double_registers_ = DoubleRegister::kMaxNumAllocatableRegisters;
-  config.num_aliased_double_registers_ =
-      DoubleRegister::NumAllocatableAliasedRegisters();
-  config.GeneralRegisterName = Register::AllocationIndexToString;
-  config.DoubleRegisterName = DoubleRegister::AllocationIndexToString;
-  return config;
-}
-
-
-RegisterAllocator::RegisterAllocator(const Config& config, Zone* local_zone,
- Frame* frame, InstructionSequence* code,
+RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
+                                     Zone* local_zone, Frame* frame,
+                                     InstructionSequence* code,
                                      const char* debug_name)
     : zone_(local_zone),
       frame_(frame),
@@ -531,8 +517,8 @@
       config_(config),
       live_in_sets_(code->InstructionBlockCount(), zone()),
       live_ranges_(code->VirtualRegisterCount() * 2, zone()),
- fixed_live_ranges_(this->config().num_general_registers_, NULL, zone()),
-      fixed_double_live_ranges_(this->config().num_double_registers_, NULL,
+ fixed_live_ranges_(this->config()->num_general_registers(), NULL, zone()),
+ fixed_double_live_ranges_(this->config()->num_double_registers(), NULL,
                                 zone()),
       unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
       active_live_ranges_(8, zone()),
@@ -541,12 +527,14 @@
       mode_(UNALLOCATED_REGISTERS),
       num_registers_(-1),
       allocation_ok_(true) {
-  DCHECK(this->config().num_general_registers_ <= kMaxGeneralRegisters);
-  DCHECK(this->config().num_double_registers_ <= kMaxDoubleRegisters);
+  DCHECK(this->config()->num_general_registers() <=
+         RegisterConfiguration::kMaxGeneralRegisters);
+  DCHECK(this->config()->num_double_registers() <=
+         RegisterConfiguration::kMaxDoubleRegisters);
   // TryAllocateFreeReg and AllocateBlockedReg assume this
   // when allocating local arrays.
-  DCHECK(this->config().num_double_registers_ >=
-         this->config().num_general_registers_);
+  DCHECK(this->config()->num_double_registers() >=
+         this->config()->num_general_registers());
 }


@@ -603,7 +591,7 @@


 int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
-  return -index - 1 - config().num_general_registers_;
+  return -index - 1 - config()->num_general_registers();
 }


@@ -635,7 +623,7 @@


 LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
-  DCHECK(index < config().num_general_registers_);
+  DCHECK(index < config()->num_general_registers());
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
     // TODO(titzer): add a utility method to allocate a new LiveRange:
@@ -653,7 +641,7 @@


 LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
-  DCHECK(index < config().num_aliased_double_registers_);
+  DCHECK(index < config()->num_aliased_double_registers());
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
@@ -1031,7 +1019,7 @@
       }

       if (instr->ClobbersRegisters()) {
-        for (int i = 0; i < config().num_general_registers_; ++i) {
+        for (int i = 0; i < config()->num_general_registers(); ++i) {
           if (!IsOutputRegisterOf(instr, i)) {
             LiveRange* range = FixedLiveRangeFor(i);
range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
@@ -1041,7 +1029,7 @@
       }

       if (instr->ClobbersDoubleRegisters()) {
-        for (int i = 0; i < config().num_aliased_double_registers_; ++i) {
+ for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
           if (!IsOutputDoubleRegisterOf(instr, i)) {
             LiveRange* range = FixedDoubleLiveRangeFor(i);
range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
@@ -1126,10 +1114,10 @@


 bool RegisterAllocator::Allocate(PipelineStatistics* stats) {
-  assigned_registers_ =
- new (code_zone()) BitVector(config().num_general_registers_, code_zone());
+  assigned_registers_ = new (code_zone())
+      BitVector(config()->num_general_registers(), code_zone());
   assigned_double_registers_ = new (code_zone())
-      BitVector(config().num_aliased_double_registers_, code_zone());
+      BitVector(config()->num_aliased_double_registers(), code_zone());
   {
     PhaseScope phase_scope(stats, "meet register constraints");
     MeetRegisterConstraints();
@@ -1535,14 +1523,14 @@


 void RegisterAllocator::AllocateGeneralRegisters() {
-  num_registers_ = config().num_general_registers_;
+  num_registers_ = config()->num_general_registers();
   mode_ = GENERAL_REGISTERS;
   AllocateRegisters();
 }


 void RegisterAllocator::AllocateDoubleRegisters() {
-  num_registers_ = config().num_aliased_double_registers_;
+  num_registers_ = config()->num_aliased_double_registers();
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
 }
@@ -1566,7 +1554,7 @@
   DCHECK(inactive_live_ranges_.is_empty());

   if (mode_ == DOUBLE_REGISTERS) {
-    for (int i = 0; i < config().num_aliased_double_registers_; ++i) {
+    for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
       LiveRange* current = fixed_double_live_ranges_.at(i);
       if (current != NULL) {
         AddToInactive(current);
@@ -1658,9 +1646,9 @@

 const char* RegisterAllocator::RegisterName(int allocation_index) {
   if (mode_ == GENERAL_REGISTERS) {
-    return config().GeneralRegisterName(allocation_index);
+    return config()->general_register_name(allocation_index);
   } else {
-    return config().DoubleRegisterName(allocation_index);
+    return config()->double_register_name(allocation_index);
   }
 }

@@ -1805,7 +1793,7 @@


 bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
-  LifetimePosition free_until_pos[kMaxDoubleRegisters];
+ LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];

   for (int i = 0; i < num_registers_; i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
@@ -1888,8 +1876,8 @@
     return;
   }

-  LifetimePosition use_pos[kMaxGeneralRegisters];
-  LifetimePosition block_pos[kMaxDoubleRegisters];
+  LifetimePosition use_pos[RegisterConfiguration::kMaxGeneralRegisters];
+  LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];

   for (int i = 0; i < num_registers_; i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
=======================================
--- /trunk/src/compiler/register-allocator.h    Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/register-allocator.h    Wed Nov  5 01:04:48 2014 UTC
@@ -319,19 +319,9 @@

 class RegisterAllocator FINAL {
  public:
-  class Config {
-   public:
-    int num_general_registers_;
-    int num_double_registers_;
-    int num_aliased_double_registers_;
-    const char* (*GeneralRegisterName)(int allocation_index);
-    const char* (*DoubleRegisterName)(int allocation_index);
-  };
-
-  static Config PlatformConfig();
-
-  explicit RegisterAllocator(const Config& config, Zone* local_zone,
-                             Frame* frame, InstructionSequence* code,
+  explicit RegisterAllocator(const RegisterConfiguration* config,
+                             Zone* local_zone, Frame* frame,
+                             InstructionSequence* code,
                              const char* debug_name = nullptr);

   bool Allocate(PipelineStatistics* stats = NULL);
@@ -502,14 +492,14 @@

   Frame* frame() const { return frame_; }
   const char* debug_name() const { return debug_name_; }
-  const Config& config() const { return config_; }
+  const RegisterConfiguration* config() const { return config_; }

   Zone* const zone_;
   Frame* const frame_;
   InstructionSequence* const code_;
   const char* const debug_name_;

-  const Config config_;
+  const RegisterConfiguration* config_;

   // During liveness analysis keep a mapping from block id to live_in sets
   // for blocks already analyzed.
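
The register-allocator.h change above replaces the copied, per-platform
RegisterAllocator::Config struct (raw counts plus name-callback pointers)
with a pointer to a shared RegisterConfiguration, obtained via
RegisterConfiguration::ArchDefault() in pipeline.cc. The following is a
standalone sketch of that design change only, using hypothetical stand-in
names rather than V8's real RegisterConfiguration/RegisterAllocator.

// Standalone sketch: a shared, immutable configuration object replaces a
// per-instance copied config struct (hypothetical stand-in classes).
#include <cstdio>

class ArchConfig {
 public:
  static const ArchConfig* ArchDefault() {
    static const ArchConfig config(8, 16);  // e.g. 8 GP and 16 FP registers
    return &config;
  }
  int num_general_registers() const { return num_general_; }
  int num_double_registers() const { return num_double_; }

 private:
  ArchConfig(int num_general, int num_double)
      : num_general_(num_general), num_double_(num_double) {}
  int num_general_;
  int num_double_;
};

class Allocator {
 public:
  explicit Allocator(const ArchConfig* config) : config_(config) {}
  void Report() const {
    std::printf("%d general / %d double registers\n",
                config_->num_general_registers(),
                config_->num_double_registers());
  }

 private:
  const ArchConfig* config_;  // shared; nothing is copied per allocator
};

int main() {
  Allocator allocator(ArchConfig::ArchDefault());
  allocator.Report();
  return 0;
}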
=======================================
--- /trunk/src/compiler/select-lowering.cc      Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/select-lowering.cc      Wed Nov  5 01:04:48 2014 UTC
@@ -5,6 +5,7 @@
 #include "src/compiler/select-lowering.h"

 #include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"

@@ -26,17 +27,13 @@
   SelectParameters const p = SelectParametersOf(node->op());

   Node* const cond = node->InputAt(0);
-  Node* const control = graph()->start();

   // Check if we already have a diamond for this condition.
   auto i = merges_.find(cond);
   if (i == merges_.end()) {
     // Create a new diamond for this condition and remember its merge node.
- Node* branch = graph()->NewNode(common()->Branch(p.hint()), cond, control);
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    i = merges_.insert(std::make_pair(cond, merge)).first;
+    Diamond d(graph(), common(), cond, p.hint());
+    i = merges_.insert(std::make_pair(cond, d.merge)).first;
   }

   DCHECK_EQ(cond, i->first);
=======================================
--- /trunk/src/compiler/simplified-lowering.cc  Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/compiler/simplified-lowering.cc  Wed Nov  5 01:04:48 2014 UTC
@@ -9,6 +9,7 @@
 #include "src/base/bits.h"
 #include "src/code-factory.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
@@ -632,7 +633,7 @@
           if (lower()) DeferReplacement(node, lowering->Int32Div(node));
           break;
         }
-        if (CanLowerToUint32Binop(node, use)) {
+ if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
           // => unsigned Uint32Div
           VisitUint32Binop(node);
           if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
@@ -1172,11 +1173,9 @@
       node->ReplaceInput(2, effect);
       node->ReplaceInput(3, graph()->start());
     } else {
-      Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                      check, graph()->start());
+      Diamond d(graph(), common(), check, BranchHint::kTrue);

-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-      Node* load = graph()->NewNode(op, base, index, effect, if_true);
+      Node* load = graph()->NewNode(op, base, index, effect, d.if_true);
       Node* result = load;
       if (output_type & kRepTagged) {
         // TODO(turbofan): This is ugly as hell!
@@ -1187,7 +1186,6 @@
changer.GetTaggedRepresentationFor(result, access.machine_type);
       }

-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
       Node* undefined;
       if (output_type & kRepTagged) {
         DCHECK_EQ(0, access.machine_type & kRepTagged);
@@ -1201,24 +1199,11 @@
       } else {
         undefined = jsgraph()->Int32Constant(0);
       }
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->EffectPhi(2), load, effect, merge);

       // Replace effect uses of node with the effect phi.
-      for (UseIter i = node->uses().begin(); i != node->uses().end();) {
-        if (NodeProperties::IsEffectEdge(i.edge())) {
-          i = i.UpdateToAndIncrement(phi);
-        } else {
-          ++i;
-        }
-      }
+ NodeProperties::ReplaceWithValue(node, node, d.EffectPhi(load, effect));

-      node->set_op(common()->Phi(output_type, 2));
-      node->ReplaceInput(0, result);
-      node->ReplaceInput(1, undefined);
-      node->ReplaceInput(2, merge);
-      node->TrimInputCount(3);
+      d.OverwriteWithPhi(node, output_type, result, undefined);
     }
   }
 }
@@ -1257,21 +1242,10 @@
       node->ReplaceInput(1, select);
       node->RemoveInput(2);
     } else {
-      Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* store = graph()->NewNode(op, base, index, value, effect, if_true);
-
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
-
-      node->set_op(common()->EffectPhi(2));
-      node->ReplaceInput(0, store);
-      node->ReplaceInput(1, effect);
-      node->ReplaceInput(2, merge);
-      node->TrimInputCount(3);
+      Diamond d(graph(), common(), check, BranchHint::kTrue);
+      d.Chain(control);
+ Node* store = graph()->NewNode(op, base, index, value, effect, d.if_true);
+      d.OverwriteWithEffectPhi(node, store, effect);
     }
   }
 }
@@ -1325,34 +1299,20 @@
return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
   }

-  Node* check0 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse), check0,
-                                   graph()->start());
+  Diamond if_zero(graph(), common(),
+                  graph()->NewNode(machine()->Word32Equal(), rhs, zero),
+                  BranchHint::kFalse);

-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* true0 = zero;
+  Diamond if_minus_one(graph(), common(),
+                       graph()->NewNode(machine()->Word32Equal(), rhs,
+                                        jsgraph()->Int32Constant(-1)),
+                       BranchHint::kFalse);
+  if_minus_one.Nest(if_zero, false);
+  Node* sub = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
+  Node* div =
+ graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_minus_one.if_false);

-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* false0 = nullptr;
-  {
-    Node* check1 = graph()->NewNode(machine()->Word32Equal(), rhs,
-                                    jsgraph()->Int32Constant(-1));
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* true1 = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* false1 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false1);
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    false0 = graph()->NewNode(common()->Phi(kMachInt32, 2), true1, false1,
-                              if_false0);
-  }
-
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- return graph()->NewNode(common()->Phi(kMachInt32, 2), true0, false0, merge0);
+ return if_zero.Phi(kMachInt32, zero, if_minus_one.Phi(kMachInt32, sub, div));
 }


@@ -1368,34 +1328,19 @@
return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
   }

-  Node* check0 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse), check0,
-                                   graph()->start());
+  Diamond if_zero(graph(), common(),
+                  graph()->NewNode(machine()->Word32Equal(), rhs, zero),
+                  BranchHint::kFalse);

-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* true0 = zero;
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* false0 = nullptr;
-  {
-    Node* check1 = graph()->NewNode(machine()->Word32Equal(), rhs,
-                                    jsgraph()->Int32Constant(-1));
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* true1 = zero;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* false1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false1);
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    false0 = graph()->NewNode(common()->Phi(kMachInt32, 2), true1, false1,
-                              if_false0);
-  }
+  Diamond if_minus_one(graph(), common(),
+                       graph()->NewNode(machine()->Word32Equal(), rhs,
+                                        jsgraph()->Int32Constant(-1)),
+                       BranchHint::kFalse);
+  if_minus_one.Nest(if_zero, false);
+  Node* mod =
+ graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_minus_one.if_false);

-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- return graph()->NewNode(common()->Phi(kMachInt32, 2), true0, false0, merge0);
+ return if_zero.Phi(kMachInt32, zero, if_minus_one.Phi(kMachInt32, zero, mod));
 }


@@ -1412,17 +1357,9 @@
   }

   Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse), check,
-                                  graph()->start());
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = zero;
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, if_false);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- return graph()->NewNode(common()->Phi(kMachUint32, 2), vtrue, vfalse, merge);
+  Diamond d(graph(), common(), check, BranchHint::kFalse);
+ Node* div = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, d.if_false);
+  return d.Phi(kMachUint32, zero, div);
 }


@@ -1439,17 +1376,9 @@
   }

   Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse), check,
-                                  graph()->start());
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = zero;
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_false);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- return graph()->NewNode(common()->Phi(kMachUint32, 2), vtrue, vfalse, merge);
+  Diamond d(graph(), common(), check, BranchHint::kFalse);
+ Node* mod = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, d.if_false);
+  return d.Phi(kMachUint32, zero, mod);
 }
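
The nested diamonds built above for Int32Div and Int32Mod guard the
machine-level operation against a zero divisor and against -1 (which would
trap for kMinInt / -1 on most targets). At the value level the Int32Div
lowering selects the results sketched below; this is only an illustration of
the selected values, not of the graph construction, and Int32DivLowered is a
made-up name.

// Value-level illustration: rhs == 0 -> 0, rhs == -1 -> wrapping negation
// of lhs, otherwise an ordinary 32-bit division.
#include <cstdint>
#include <cstdio>

int32_t Int32DivLowered(int32_t lhs, int32_t rhs) {
  if (rhs == 0) return 0;  // if_zero diamond, true branch
  if (rhs == -1) {
    // if_minus_one diamond, true branch: Int32Sub(0, lhs) with wraparound,
    // which also sidesteps the INT32_MIN / -1 case.
    return static_cast<int32_t>(0u - static_cast<uint32_t>(lhs));
  }
  return lhs / rhs;  // if_minus_one.if_false: the real Int32Div
}

int main() {
  std::printf("%d %d %d\n", Int32DivLowered(7, 2), Int32DivLowered(7, 0),
              Int32DivLowered(INT32_MIN, -1));
  return 0;
}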


=======================================
--- /trunk/src/factory.cc       Tue Oct 21 12:48:28 2014 UTC
+++ /trunk/src/factory.cc       Wed Nov  5 01:04:48 2014 UTC
@@ -1720,6 +1720,22 @@
       isolate()->heap()->AllocateJSObject(*data_view_fun),
       JSDataView);
 }
+
+
+Handle<JSMapIterator> Factory::NewJSMapIterator() {
+  Handle<Map> map(isolate()->native_context()->map_iterator_map());
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateJSObjectFromMap(*map),
+                     JSMapIterator);
+}
+
+
+Handle<JSSetIterator> Factory::NewJSSetIterator() {
+  Handle<Map> map(isolate()->native_context()->set_iterator_map());
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateJSObjectFromMap(*map),
+                     JSSetIterator);
+}


 namespace {
=======================================
--- /trunk/src/factory.h        Tue Oct 21 12:48:28 2014 UTC
+++ /trunk/src/factory.h        Wed Nov  5 01:04:48 2014 UTC
@@ -445,6 +445,10 @@
   Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
                                    size_t byte_offset, size_t byte_length);

+ // TODO(aandrey): Maybe these should take table, index and kind arguments.
+  Handle<JSMapIterator> NewJSMapIterator();
+  Handle<JSSetIterator> NewJSSetIterator();
+
   // Allocates a Harmony proxy.
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);

=======================================
--- /trunk/src/flag-definitions.h       Wed Oct 29 14:24:00 2014 UTC
+++ /trunk/src/flag-definitions.h       Wed Nov  5 01:04:48 2014 UTC
@@ -155,54 +155,65 @@
 // Flags for language modes and experimental language features.
 DEFINE_BOOL(use_strict, false, "enforce strict mode")

-DEFINE_BOOL(es_staging, false, "enable upcoming ES6+ features")
-DEFINE_BOOL(harmony, false, "enable all harmony features (except proxies)")
+DEFINE_BOOL(es_staging, false, "enable all completed harmony features")
+DEFINE_BOOL(harmony, false, "enable all completed harmony features")
 DEFINE_IMPLICATION(harmony, es_staging)
+// TODO(rossberg): activate once we have staged scoping:
+// DEFINE_IMPLICATION(es_staging, harmony)

-#define HARMONY_FEATURES(V)                                       \
+// Features that are still work in progress (behind individual flags).
+#define HARMONY_INPROGRESS(V)                                     \
   V(harmony_scoping, "harmony block scoping")                     \
   V(harmony_modules, "harmony modules (implies block scoping)")   \
-  V(harmony_arrays, "harmony arrays")                             \
-  V(harmony_classes, "harmony classes")                           \
+  V(harmony_arrays, "harmony array methods")                      \
+  V(harmony_classes,                                              \
+    "harmony classes (implies block scoping & object literal extension)") \
   V(harmony_object_literals, "harmony object literal extensions") \
-  V(harmony_regexps, "reg-exp related harmony features")          \
+  V(harmony_regexps, "harmony regular expression extensions")     \
   V(harmony_arrow_functions, "harmony arrow functions")           \
-  V(harmony_tostring, "harmony Symbol.toStringTag")
+  V(harmony_tostring, "harmony toString")                         \
+  V(harmony_proxies, "harmony proxies")

-#define STAGED_FEATURES(V)              \
-  V(harmony_strings, "harmony strings") \
-  V(harmony_numeric_literals, "harmony numeric literals (0o77, 0b11)")
+// Features that are complete (but still behind --harmony/es-staging flag).
+#define HARMONY_STAGED(V)                      \
+  V(harmony_strings, "harmony string methods") \
+  V(harmony_numeric_literals, "harmony numeric literals")

-#define SHIPPING_FEATURES(V)
+// Features that are shipping (turned on by default, but internal flag remains).
+#define HARMONY_SHIPPING(V)
+
+// Once a shipping feature has proved stable in the wild, it will be dropped
+// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
+// and associated tests are moved from the harmony directory to the appropriate
+// esN directory.

-#define FLAG_FEATURES(id, description)           \
-  DEFINE_BOOL(id, false, "enable " #description) \
-  DEFINE_IMPLICATION(harmony, id)

-HARMONY_FEATURES(FLAG_FEATURES)
-STAGED_FEATURES(FLAG_FEATURES)
-#undef FLAG_FEATURES
+#define FLAG_INPROGRESS_FEATURES(id, description) \
+  DEFINE_BOOL(id, false, "enable " #description " (in progress)")
+HARMONY_INPROGRESS(FLAG_INPROGRESS_FEATURES)
+#undef FLAG_INPROGRESS_FEATURES

-#define FLAG_STAGED_FEATURES(id, description) DEFINE_IMPLICATION(es_staging, id)
+// TODO(rossberg): temporary, remove once we have staged scoping.
+// After that, --harmony will be synonymous to --es-staging.
+DEFINE_IMPLICATION(harmony, harmony_scoping)

-STAGED_FEATURES(FLAG_STAGED_FEATURES)
+#define FLAG_STAGED_FEATURES(id, description) \
+  DEFINE_BOOL(id, false, "enable " #description) \
+  DEFINE_IMPLICATION(es_staging, id)
+HARMONY_STAGED(FLAG_STAGED_FEATURES)
 #undef FLAG_STAGED_FEATURES

 #define FLAG_SHIPPING_FEATURES(id, description) \
   DEFINE_BOOL_READONLY(id, true, "enable " #description)
+HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
+#undef FLAG_SHIPPING_FEATURES

-SHIPPING_FEATURES(FLAG_SHIPPING_FEATURES)
-#undef FLAG_SHIPPING_FEATURES

 // Feature dependencies.
 DEFINE_IMPLICATION(harmony_modules, harmony_scoping)
 DEFINE_IMPLICATION(harmony_classes, harmony_scoping)
 DEFINE_IMPLICATION(harmony_classes, harmony_object_literals)

-DEFINE_BOOL(harmony_proxies, false, "enable harmony proxies")
-// TODO(rossberg): Reenable when problems are sorted out.
-// DEFINE_IMPLICATION(harmony, harmony_proxies)
-

 // Flags for experimental implementation features.
 DEFINE_BOOL(compiled_keyed_generic_loads, false,
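
The reorganised flag definitions above tier the harmony features into
HARMONY_INPROGRESS / HARMONY_STAGED / HARMONY_SHIPPING lists and expand each
list with a different definition macro. Below is a toy, self-contained
illustration of that X-macro layering; DEFINE_BOOL here is a trivial
stand-in for V8's flag machinery and the DEFINE_IMPLICATION handling is
omitted.

// Toy illustration of the X-macro tiering: one feature list, expanded with
// a per-tier definition macro. Not V8's actual flag machinery.
#include <cstdio>

#define HARMONY_STAGED(V)                      \
  V(harmony_strings, "harmony string methods") \
  V(harmony_numeric_literals, "harmony numeric literals")

// Stand-in for V8's DEFINE_BOOL: just declares a mutable bool flag.
#define DEFINE_BOOL(id, default_value, comment) bool FLAG_##id = default_value;

#define FLAG_STAGED_FEATURES(id, description) \
  DEFINE_BOOL(id, false, "enable " description)
HARMONY_STAGED(FLAG_STAGED_FEATURES)  // defines FLAG_harmony_strings, ...
#undef FLAG_STAGED_FEATURES

int main() {
  // --es-staging would flip these via DEFINE_IMPLICATION in the real code.
  std::printf("harmony_strings=%d harmony_numeric_literals=%d\n",
              FLAG_harmony_strings, FLAG_harmony_numeric_literals);
  return 0;
}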
=======================================
--- /trunk/src/heap/mark-compact.cc     Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/heap/mark-compact.cc     Wed Nov  5 01:04:48 2014 UTC
@@ -456,7 +456,6 @@
 void MarkCompactCollector::StartSweeperThreads() {
   DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
   DCHECK(free_list_old_data_space_.get()->IsEmpty());
-  sweeping_in_progress_ = true;
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
       new SweeperTask(heap(), heap()->old_data_space()),
       v8::Platform::kShortRunningTask);
@@ -471,13 +470,15 @@

// If sweeping is not completed or not running at all, we try to complete it
   // here.
-  if (!IsSweepingCompleted()) {
+  if (FLAG_predictable || !IsSweepingCompleted()) {
     SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
     SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
   }
   // Wait twice for both jobs.
-  pending_sweeper_jobs_semaphore_.Wait();
-  pending_sweeper_jobs_semaphore_.Wait();
+  if (!FLAG_predictable) {
+    pending_sweeper_jobs_semaphore_.Wait();
+    pending_sweeper_jobs_semaphore_.Wait();
+  }
   ParallelSweepSpacesComplete();
   sweeping_in_progress_ = false;
   RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
@@ -4185,7 +4186,7 @@
       SweepSpace(heap()->old_pointer_space(), CONCURRENT_SWEEPING);
       SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
     }
-
+    sweeping_in_progress_ = true;
     if (!FLAG_predictable) {
       StartSweeperThreads();
     }
=======================================
--- /trunk/src/heap/spaces.cc   Sat Nov  1 22:28:42 2014 UTC
+++ /trunk/src/heap/spaces.cc   Wed Nov  5 01:04:48 2014 UTC
@@ -2569,7 +2569,8 @@


 intptr_t PagedSpace::SizeOfObjects() {
-  DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
+  DCHECK(FLAG_predictable ||
+         heap()->mark_compact_collector()->sweeping_in_progress() ||
          (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
 }
=======================================
--- /trunk/src/ia32/interface-descriptors-ia32.cc Wed Oct  1 00:05:35 2014 UTC
+++ /trunk/src/ia32/interface-descriptors-ia32.cc Wed Nov  5 01:04:48 2014 UTC
@@ -153,6 +153,15 @@
   Register registers[] = {esi, eax, ebx};
   data->Initialize(arraysize(registers), registers, NULL);
 }
+
+
+void AllocateHeapNumberDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // esi -- context
+  Register registers[] = {esi};
+  data->Initialize(arraysize(registers), registers, nullptr);
+}


 void ArrayConstructorConstantArgCountDescriptor::Initialize(
=======================================
--- /trunk/src/ic/ic.cc Wed Oct 29 14:24:00 2014 UTC
+++ /trunk/src/ic/ic.cc Wed Nov  5 01:04:48 2014 UTC
@@ -2688,7 +2688,7 @@
   PrototypeIterator iter(isolate, receiver,
                          PrototypeIterator::START_AT_RECEIVER);
   bool found = false;
-  while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ for (; !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
     Handle<Object> current = PrototypeIterator::GetCurrent(iter);
     if (current->IsJSObject() &&
         Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
=======================================
--- /trunk/src/interface-descriptors.h  Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/interface-descriptors.h  Wed Nov  5 01:04:48 2014 UTC
@@ -33,6 +33,7 @@
   V(CallConstruct)                            \
   V(RegExpConstructResult)                    \
   V(TransitionElementsKind)                   \
+  V(AllocateHeapNumber)                       \
   V(ArrayConstructorConstantArgCount)         \
   V(ArrayConstructor)                         \
   V(InternalArrayConstructorConstantArgCount) \
@@ -346,6 +347,12 @@
 };


+class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
+};
+
+
 class ArrayConstructorConstantArgCountDescriptor
     : public CallInterfaceDescriptor {
  public:
=======================================
--- /trunk/src/isolate.cc       Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/isolate.cc       Wed Nov  5 01:04:48 2014 UTC
@@ -1047,15 +1047,15 @@
 }


-void Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
+bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
                                             Handle<Object> exception) {
   *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);

-  if (!exception->IsJSObject()) return;
+  if (!exception->IsJSObject()) return false;
   Handle<Name> key = factory()->stack_trace_symbol();
   Handle<Object> property =
       JSObject::GetDataProperty(Handle<JSObject>::cast(exception), key);
-  if (!property->IsJSArray()) return;
+  if (!property->IsJSArray()) return false;
   Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);

Handle<FixedArray> elements(FixedArray::cast(simple_stack_trace->elements()));
@@ -1075,9 +1075,10 @@
       int pos = code->SourcePosition(pc);
       Handle<Script> casted_script(Script::cast(script));
       *target = MessageLocation(casted_script, pos, pos + 1);
-      break;
+      return true;
     }
   }
+  return false;
 }


@@ -1149,10 +1150,6 @@
       // at this throw site.
       stack_trace_object =
           GetDetailedStackTrace(Handle<JSObject>::cast(exception));
-      if (!location) {
- ComputeLocationFromStackTrace(&potential_computed_location, exception);
-        location = &potential_computed_location;
-      }
     }
     if (stack_trace_object.is_null()) {
       // Not an error object, we capture stack and location at throw site.
@@ -1162,7 +1159,10 @@
     }
   }
   if (!location) {
-    ComputeLocation(&potential_computed_location);
+    if (!ComputeLocationFromStackTrace(&potential_computed_location,
+                                       exception)) {
+      ComputeLocation(&potential_computed_location);
+    }
     location = &potential_computed_location;
   }

=======================================
--- /trunk/src/isolate.h        Fri Oct 31 13:19:44 2014 UTC
+++ /trunk/src/isolate.h        Wed Nov  5 01:04:48 2014 UTC
@@ -801,7 +801,7 @@
   // Attempts to compute the current source location, storing the
   // result in the target out parameter.
   void ComputeLocation(MessageLocation* target);
-  void ComputeLocationFromStackTrace(MessageLocation* target,
+  bool ComputeLocationFromStackTrace(MessageLocation* target,
                                      Handle<Object> exception);

   Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
=======================================
--- /trunk/src/mips/interface-descriptors-mips.cc Wed Oct  1 00:05:35 2014 UTC
+++ /trunk/src/mips/interface-descriptors-mips.cc Wed Nov  5 01:04:48 2014 UTC
@@ -150,6 +150,15 @@
   Register registers[] = {cp, a0, a1};
   data->Initialize(arraysize(registers), registers, NULL);
 }
+
+
+void AllocateHeapNumberDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  Register registers[] = {cp};
+  data->Initialize(arraysize(registers), registers, nullptr);
+}


 void ArrayConstructorConstantArgCountDescriptor::Initialize(
=======================================
--- /trunk/src/mips/macro-assembler-mips.cc     Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/mips/macro-assembler-mips.cc     Wed Nov  5 01:04:48 2014 UTC
@@ -2037,18 +2037,26 @@
         b(offset);
         break;
       case eq:
-        // We don't want any other register but scratch clobbered.
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        beq(rs, r2, offset);
+        if (rt.imm32_ == 0) {
+          beq(rs, zero_reg, offset);
+        } else {
+          // We don't want any other register but scratch clobbered.
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          beq(rs, r2, offset);
+        }
         break;
       case ne:
-        // We don't want any other register but scratch clobbered.
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        bne(rs, r2, offset);
+        if (rt.imm32_ == 0) {
+          bne(rs, zero_reg, offset);
+        } else {
+          // We don't want any other register but scratch clobbered.
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          bne(rs, r2, offset);
+        }
         break;
       // Signed comparison.
       case greater:
@@ -2290,18 +2298,28 @@
         b(offset);
         break;
       case eq:
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        offset = shifted_branch_offset(L, false);
-        beq(rs, r2, offset);
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          beq(rs, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          offset = shifted_branch_offset(L, false);
+          beq(rs, r2, offset);
+        }
         break;
       case ne:
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        offset = shifted_branch_offset(L, false);
-        bne(rs, r2, offset);
+        if (rt.imm32_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bne(rs, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          offset = shifted_branch_offset(L, false);
+          bne(rs, r2, offset);
+        }
         break;
       // Signed comparison.
       case greater:
=======================================
--- /trunk/src/mips64/interface-descriptors-mips64.cc Wed Oct  1 00:05:35 2014 UTC
+++ /trunk/src/mips64/interface-descriptors-mips64.cc Wed Nov  5 01:04:48 2014 UTC
@@ -150,6 +150,15 @@
   Register registers[] = {cp, a0, a1};
   data->Initialize(arraysize(registers), registers, NULL);
 }
+
+
+void AllocateHeapNumberDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  Register registers[] = {cp};
+  data->Initialize(arraysize(registers), registers, nullptr);
+}


 void ArrayConstructorConstantArgCountDescriptor::Initialize(
=======================================
--- /trunk/src/mirror-debugger.js       Tue Sep 23 08:38:19 2014 UTC
+++ /trunk/src/mirror-debugger.js       Wed Nov  5 01:04:48 2014 UTC
@@ -85,6 +85,8 @@
     mirror = new MapMirror(value);
   } else if (IS_SET(value) || IS_WEAKSET(value)) {
     mirror = new SetMirror(value);
+  } else if (IS_MAP_ITERATOR(value) || IS_SET_ITERATOR(value)) {
+    mirror = new IteratorMirror(value);
   } else if (ObjectIsPromise(value)) {
     mirror = new PromiseMirror(value);
   } else if (IS_GENERATOR(value)) {
@@ -163,6 +165,7 @@
 var PROMISE_TYPE = 'promise';
 var MAP_TYPE = 'map';
 var SET_TYPE = 'set';
+var ITERATOR_TYPE = 'iterator';
 var GENERATOR_TYPE = 'generator';

 // Maximum length when sending strings through the JSON protocol.
@@ -217,6 +220,7 @@
 //         - PromiseMirror
 //         - MapMirror
 //         - SetMirror
+//         - IteratorMirror
 //         - GeneratorMirror
 //     - PropertyMirror
 //     - InternalPropertyMirror
@@ -455,6 +459,15 @@
 };


+/**
+ * Check whether the mirror reflects an iterator.
+ * @returns {boolean} True if the mirror reflects an iterator
+ */
+Mirror.prototype.isIterator = function() {
+  return this instanceof IteratorMirror;
+};
+
+
 /**
  * Allocate a handle id for this object.
  */
@@ -1343,6 +1356,16 @@
 inherits(SetMirror, ObjectMirror);


+function IteratorGetValues_(iter, next_function) {
+  var result = [];
+  var next;
+  while (!(next = %_CallFunction(iter, next_function)).done) {
+    result.push(next.value);
+  }
+  return result;
+}
+
+
 /**
  * Returns an array of elements of a set.
  * This will keep elements alive for WeakSets.
@@ -1354,13 +1377,31 @@
     return %GetWeakSetValues(this.value_);
   }

-  var result = [];
   var iter = %_CallFunction(this.value_, builtins.SetValues);
-  var next;
-  while (!(next = iter.next()).done) {
-    result.push(next.value);
+  return IteratorGetValues_(iter, builtins.SetIteratorNextJS);
+};
+
+
+function IteratorMirror(value) {
+  %_CallFunction(this, value, ITERATOR_TYPE, ObjectMirror);
+}
+inherits(IteratorMirror, ObjectMirror);
+
+
+/**
+ * Returns a preview of elements of an iterator.
+ * Does not change the backing iterator state.
+ *
+ * @returns {Array.<Object>} Array of elements of an iterator.
+ */
+IteratorMirror.prototype.preview = function() {
+  if (IS_MAP_ITERATOR(this.value_)) {
+    return IteratorGetValues_(%MapIteratorClone(this.value_),
+                              builtins.MapIteratorNextJS);
+  } else if (IS_SET_ITERATOR(this.value_)) {
+    return IteratorGetValues_(%SetIteratorClone(this.value_),
+                              builtins.SetIteratorNextJS);
   }
-  return result;
 };


=======================================
--- /trunk/src/objects-inl.h    Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/objects-inl.h    Wed Nov  5 01:04:48 2014 UTC
@@ -2801,8 +2801,10 @@
// Perform a binary search in a fixed array. Low and high are entry indices. If
 // there are three entries in this array it should be called with low=0 and
 // high=2.
-template<SearchMode search_mode, typename T>
-int BinarySearch(T* array, Name* name, int low, int high, int valid_entries) {
+template <SearchMode search_mode, typename T>
+int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
+                 int* out_insertion_index) {
+  DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == NULL);
   uint32_t hash = name->Hash();
   int limit = high;

@@ -2823,7 +2825,13 @@
   for (; low <= limit; ++low) {
     int sort_index = array->GetSortedKeyIndex(low);
     Name* entry = array->GetKey(sort_index);
-    if (entry->Hash() != hash) break;
+    uint32_t current_hash = entry->Hash();
+    if (current_hash != hash) {
+      if (out_insertion_index != NULL) {
+        *out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
+      }
+      return T::kNotFound;
+    }
     if (entry->Equals(name)) {
       if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
         return sort_index;
@@ -2832,37 +2840,45 @@
     }
   }

+  if (out_insertion_index != NULL) *out_insertion_index = limit + 1;
   return T::kNotFound;
 }


 // Perform a linear search in this fixed array. len is the number of entry
 // indices that are valid.
-template<SearchMode search_mode, typename T>
-int LinearSearch(T* array, Name* name, int len, int valid_entries) {
+template <SearchMode search_mode, typename T>
+int LinearSearch(T* array, Name* name, int len, int valid_entries,
+                 int* out_insertion_index) {
   uint32_t hash = name->Hash();
   if (search_mode == ALL_ENTRIES) {
     for (int number = 0; number < len; number++) {
       int sorted_index = array->GetSortedKeyIndex(number);
       Name* entry = array->GetKey(sorted_index);
       uint32_t current_hash = entry->Hash();
-      if (current_hash > hash) break;
+      if (current_hash > hash) {
+ if (out_insertion_index != NULL) *out_insertion_index = sorted_index;
+        return T::kNotFound;
+      }
       if (current_hash == hash && entry->Equals(name)) return sorted_index;
     }
+    if (out_insertion_index != NULL) *out_insertion_index = len;
+    return T::kNotFound;
   } else {
     DCHECK(len >= valid_entries);
+    DCHECK_EQ(NULL, out_insertion_index);  // Not supported here.
     for (int number = 0; number < valid_entries; number++) {
       Name* entry = array->GetKey(number);
       uint32_t current_hash = entry->Hash();
       if (current_hash == hash && entry->Equals(name)) return number;
     }
+    return T::kNotFound;
   }
-  return T::kNotFound;
 }


-template<SearchMode search_mode, typename T>
-int Search(T* array, Name* name, int valid_entries) {
+template <SearchMode search_mode, typename T>
+int Search(T* array, Name* name, int valid_entries, int* out_insertion_index) {
   if (search_mode == VALID_ENTRIES) {
     SLOW_DCHECK(array->IsSortedNoDuplicates(valid_entries));
   } else {
@@ -2870,7 +2886,10 @@
   }

   int nof = array->number_of_entries();
-  if (nof == 0) return T::kNotFound;
+  if (nof == 0) {
+    if (out_insertion_index != NULL) *out_insertion_index = 0;
+    return T::kNotFound;
+  }

   // Fast case: do linear search for small arrays.
   const int kMaxElementsForLinearSearch = 8;
@@ -2878,16 +2897,18 @@
        nof <= kMaxElementsForLinearSearch) ||
       (search_mode == VALID_ENTRIES &&
        valid_entries <= (kMaxElementsForLinearSearch * 3))) {
-    return LinearSearch<search_mode>(array, name, nof, valid_entries);
+    return LinearSearch<search_mode>(array, name, nof, valid_entries,
+                                     out_insertion_index);
   }

   // Slow case: perform binary search.
-  return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries);
+  return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries,
+                                   out_insertion_index);
 }


 int DescriptorArray::Search(Name* name, int valid_descriptors) {
-  return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors);
+ return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, NULL);
 }
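
The Search/LinearSearch/BinarySearch templates above gain an optional
out_insertion_index: when the name is not found, they report where it would
have to be inserted to keep the array sorted by hash (used by the
transitions.* changes elsewhere in this changeset). Below is a standalone
sketch of the same idea on a plain sorted vector of hashes, not on V8's
DescriptorArray or TransitionArray.

// Standalone sketch of "search, or report the insertion index" on a sorted
// vector. Returns the index of key, or -1 (analogous to T::kNotFound).
#include <cstdio>
#include <vector>

int Search(const std::vector<int>& sorted, int key, int* out_insertion_index) {
  int low = 0;
  int high = static_cast<int>(sorted.size());
  while (low < high) {  // lower-bound style binary search
    int mid = low + (high - low) / 2;
    if (sorted[mid] < key) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  if (low < static_cast<int>(sorted.size()) && sorted[low] == key) return low;
  if (out_insertion_index != nullptr) *out_insertion_index = low;
  return -1;
}

int main() {
  std::vector<int> hashes = {3, 7, 11, 19};
  int insert_at = -1;
  std::printf("found=%d\n", Search(hashes, 11, nullptr));  // 2
  int missing = Search(hashes, 13, &insert_at);
  std::printf("missing=%d insert_at=%d\n", missing, insert_at);  // -1, 3
  return 0;
}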


=======================================
--- /trunk/src/objects.h        Tue Nov  4 01:04:58 2014 UTC
+++ /trunk/src/objects.h        Wed Nov  5 01:04:48 2014 UTC
@@ -3145,12 +3145,9 @@

 enum SearchMode { ALL_ENTRIES, VALID_ENTRIES };

-template<SearchMode search_mode, typename T>
-inline int LinearSearch(T* array, Name* name, int len, int valid_entries);
-
-
-template<SearchMode search_mode, typename T>
-inline int Search(T* array, Name* name, int valid_entries = 0);
+template <SearchMode search_mode, typename T>
+inline int Search(T* array, Name* name, int valid_entries = 0,
+                  int* out_insertion_index = NULL);


 // HashTable is a subclass of FixedArray that implements a hash table
=======================================
--- /trunk/src/runtime/runtime-collections.cc   Sat Nov  1 22:28:42 2014 UTC
+++ /trunk/src/runtime/runtime-collections.cc   Wed Nov  5 01:04:48 2014 UTC
@@ -90,6 +90,20 @@
   holder->set_kind(Smi::FromInt(kind));
   return isolate->heap()->undefined_value();
 }
+
+
+RUNTIME_FUNCTION(Runtime_SetIteratorClone) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
+
+  Handle<JSSetIterator> result = isolate->factory()->NewJSSetIterator();
+  result->set_table(holder->table());
+  result->set_index(Smi::FromInt(Smi::cast(holder->index())->value()));
+  result->set_kind(Smi::FromInt(Smi::cast(holder->kind())->value()));
+
+  return *result;
+}


 RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
@@ -195,6 +209,20 @@
   holder->set_kind(Smi::FromInt(kind));
   return isolate->heap()->undefined_value();
 }
+
+
+RUNTIME_FUNCTION(Runtime_MapIteratorClone) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
+
+  Handle<JSMapIterator> result = isolate->factory()->NewJSMapIterator();
+  result->set_table(holder->table());
+  result->set_index(Smi::FromInt(Smi::cast(holder->index())->value()));
+  result->set_kind(Smi::FromInt(Smi::cast(holder->kind())->value()));
+
+  return *result;
+}


 RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
=======================================
--- /trunk/src/runtime/runtime.h        Thu Oct 30 10:14:23 2014 UTC
+++ /trunk/src/runtime/runtime.h        Wed Nov  5 01:04:48 2014 UTC
@@ -310,6 +310,7 @@
   F(SetGetSize, 1, 1)                                  \
                                                        \
   F(SetIteratorInitialize, 3, 1)                       \
+  F(SetIteratorClone, 1, 1)                            \
   F(SetIteratorNext, 2, 1)                             \
                                                        \
   /* Harmony maps */                                   \
@@ -322,6 +323,7 @@
   F(MapGetSize, 1, 1)                                  \
                                                        \
   F(MapIteratorInitialize, 3, 1)                       \
+  F(MapIteratorClone, 1, 1)                            \
   F(MapIteratorNext, 2, 1)                             \
                                                        \
   /* Harmony weak maps and sets */                     \
=======================================
***Additional files exist in this changeset.***
