Revision: 23036
Author:   [email protected]
Date:     Mon Aug 11 12:49:03 2014 UTC
Log:      Version 3.28.69 (based on bleeding_edge revision r23031)

Performance and stability improvements on all platforms.
http://code.google.com/p/v8/source/detail?r=23036

Added:
 /trunk/test/mjsunit/regress/regress-crbug-401915.js
Modified:
 /trunk/ChangeLog
 /trunk/src/compiler/lowering-builder.cc
 /trunk/src/compiler/node-properties.h
 /trunk/src/compiler/pipeline.cc
 /trunk/src/compiler/representation-change.h
 /trunk/src/compiler/simplified-lowering.cc
 /trunk/src/compiler/simplified-lowering.h
 /trunk/src/debug.cc
 /trunk/src/version.cc
 /trunk/test/cctest/compiler/call-tester.h
 /trunk/test/cctest/compiler/graph-builder-tester.h
 /trunk/test/cctest/compiler/test-changes-lowering.cc
 /trunk/test/cctest/compiler/test-simplified-lowering.cc
 /trunk/test/mozilla/mozilla.status
 /trunk/testing/gtest.gyp
 /trunk/tools/whitespace.txt

=======================================
--- /dev/null
+++ /trunk/test/mjsunit/regress/regress-crbug-401915.js Mon Aug 11 12:49:03 2014 UTC
@@ -0,0 +1,20 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug;
+Debug.setListener(function() {});
+Debug.setBreakOnException();
+
+try {
+  try {
+    %DebugPushPromise(new Promise(function() {}));
+  } catch (e) {
+  }
+  throw new Error();
+} catch (e) {
+}
+
+Debug.setListener(null);
=======================================
--- /trunk/ChangeLog    Sat Aug  9 11:07:11 2014 UTC
+++ /trunk/ChangeLog    Mon Aug 11 12:49:03 2014 UTC
@@ -1,3 +1,8 @@
+2014-08-11: Version 3.28.69
+
+        Performance and stability improvements on all platforms.
+
+
 2014-08-09: Version 3.28.65

         Performance and stability improvements on all platforms.
=======================================
--- /trunk/src/compiler/lowering-builder.cc     Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/lowering-builder.cc     Mon Aug 11 12:49:03 2014 UTC
@@ -16,8 +16,12 @@
   explicit NodeVisitor(LoweringBuilder* lowering) : lowering_(lowering) {}

   GenericGraphVisit::Control Post(Node* node) {
-    SourcePositionTable::Scope pos(lowering_->source_positions_, node);
-    lowering_->Lower(node);
+    if (lowering_->source_positions_ != NULL) {
+      SourcePositionTable::Scope pos(lowering_->source_positions_, node);
+      lowering_->Lower(node);
+    } else {
+      lowering_->Lower(node);
+    }
     return GenericGraphVisit::CONTINUE;
   }

=======================================
--- /trunk/src/compiler/node-properties.h       Fri Aug  8 15:46:17 2014 UTC
+++ /trunk/src/compiler/node-properties.h       Mon Aug 11 12:49:03 2014 UTC
@@ -40,7 +40,6 @@

   static inline int GetContextIndex(Node* node);

- private:
   static inline int FirstValueIndex(Node* node);
   static inline int FirstEffectIndex(Node* node);
   static inline int FirstControlIndex(Node* node);
=======================================
--- /trunk/src/compiler/pipeline.cc     Fri Aug  8 15:46:17 2014 UTC
+++ /trunk/src/compiler/pipeline.cc     Mon Aug 11 12:49:03 2014 UTC
@@ -231,7 +231,7 @@
 Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage,
                                                    Graph* graph,
                                                    Schedule* schedule) {
-  CHECK(SupportedTarget());
+  CHECK(SupportedBackend());
   if (schedule == NULL) {
     VerifyAndPrintGraph(graph, "Machine");
     schedule = ComputeSchedule(graph);
@@ -257,7 +257,7 @@
   DCHECK_NOT_NULL(graph);
   DCHECK_NOT_NULL(linkage);
   DCHECK_NOT_NULL(schedule);
-  DCHECK(SupportedTarget());
+  CHECK(SupportedBackend());

   InstructionSequence sequence(linkage, graph, schedule);

=======================================
--- /trunk/src/compiler/representation-change.h Tue Aug  5 00:05:55 2014 UTC
+++ /trunk/src/compiler/representation-change.h Mon Aug 11 12:49:03 2014 UTC
@@ -36,14 +36,28 @@
   tAny = 1 << 11
 };

+#define REP_TYPE_STRLEN 24
+
 typedef uint16_t RepTypeUnion;

+
+inline void RenderRepTypeUnion(char* buf, RepTypeUnion info) {
+  base::OS::SNPrintF(buf, REP_TYPE_STRLEN, "{%s%s%s%s%s %s%s%s%s%s%s%s}",
+                     (info & rBit) ? "k" : " ", (info & rWord32) ? "w" : " ",
+                     (info & rWord64) ? "q" : " ",
+                     (info & rFloat64) ? "f" : " ",
+                     (info & rTagged) ? "t" : " ", (info & tBool) ? "Z" : " ",
+                     (info & tInt32) ? "I" : " ", (info & tUint32) ? "U" : " ",
+                     (info & tInt64) ? "L" : " ", (info & tUint64) ? "J" : " ",
+                     (info & tNumber) ? "N" : " ", (info & tAny) ? "*" : " ");
+}
+
+
 const RepTypeUnion rMask = rBit | rWord32 | rWord64 | rFloat64 | rTagged;
 const RepTypeUnion tMask =
     tBool | tInt32 | tUint32 | tInt64 | tUint64 | tNumber | tAny;
 const RepType rPtr = kPointerSize == 4 ? rWord32 : rWord64;

-
// Contains logic related to changing the representation of values for constants
 // and other nodes, as well as lowering Simplified->Machine operators.
 // Eagerly folds any representation changes for constants.
@@ -344,10 +358,24 @@
     return static_cast<RepType>(tElement | rElement);
   }

-  RepType TypeForBasePointer(Node* node) {
-    Type* upper = NodeProperties::GetBounds(node).upper;
-    if (upper->Is(Type::UntaggedPtr())) return rPtr;
-    return static_cast<RepType>(tAny | rTagged);
+  RepType TypeForBasePointer(const FieldAccess& access) {
+    if (access.tag() != 0) return static_cast<RepType>(tAny | rTagged);
+    return kPointerSize == 8 ? rWord64 : rWord32;
+  }
+
+  RepType TypeForBasePointer(const ElementAccess& access) {
+    if (access.tag() != 0) return static_cast<RepType>(tAny | rTagged);
+    return kPointerSize == 8 ? rWord64 : rWord32;
+  }
+
+  RepType TypeFromUpperBound(Type* type) {
+    if (type->Is(Type::None()))
+      return tAny;  // TODO(titzer): should be an error
+    if (type->Is(Type::Signed32())) return tInt32;
+    if (type->Is(Type::Unsigned32())) return tUint32;
+    if (type->Is(Type::Number())) return tNumber;
+    if (type->Is(Type::Boolean())) return tBool;
+    return tAny;
   }

  private:
@@ -364,7 +392,14 @@
   Node* TypeError(Node* node, RepTypeUnion output_type, RepTypeUnion use) {
     type_error_ = true;
     if (!testing_type_errors_) {
-      UNREACHABLE();  // TODO(titzer): report nicer type error
+      char buf1[REP_TYPE_STRLEN];
+      char buf2[REP_TYPE_STRLEN];
+      RenderRepTypeUnion(buf1, output_type);
+      RenderRepTypeUnion(buf2, use);
+      V8_Fatal(__FILE__, __LINE__,
+               "RepresentationChangerError: node #%d:%s of rep"
+               "%s cannot be changed to rep%s",
+               node->id(), node->op()->mnemonic(), buf1, buf2);
     }
     return node;
   }
=======================================
--- /trunk/src/compiler/simplified-lowering.cc  Tue Aug  5 00:05:55 2014 UTC
+++ /trunk/src/compiler/simplified-lowering.cc  Mon Aug 11 12:49:03 2014 UTC
@@ -4,20 +4,723 @@

 #include "src/compiler/simplified-lowering.h"

+#include <deque>
+#include <queue>
+
+#include "src/compiler/common-operator.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node-properties-inl.h"
+#include "src/compiler/representation-change.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/objects.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

+// Macro for outputting trace information from representation inference.
+#define TRACE(x) \
+  if (FLAG_trace_representation) PrintF x
+
+// Representation selection and lowering of {Simplified} operators to machine
+// operators are interwined. We use a fixpoint calculation to compute both the
+// output representation and the best possible lowering for {Simplified} nodes.
+// Representation change insertion ensures that all values are in the correct
+// machine representation after this phase, as dictated by the machine
+// operators themselves.
+enum Phase {
+  // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information
+  //     backwards from uses to definitions, around cycles in phis, according
+  //     to local rules for each operator.
+  //     During this phase, the usage information for a node determines the best
+  //     possible lowering for each operator so far, and that in turn determines
+  //     the output representation.
+  //     Therefore, to be correct, this phase must iterate to a fixpoint before
+  //     the next phase can begin.
+  PROPAGATE,
+
+  // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some
+  //     operators for some nodes, expanding some nodes to multiple nodes, or
+  //     removing some (redundant) nodes.
+  //     During this phase, use the {RepresentationChanger} to insert
+  //     representation changes between uses that demand a particular
+  //     representation and nodes that produce a different representation.
+  LOWER
+};
+
+
+class RepresentationSelector {
+ public:
+  // Information for each node tracked during the fixpoint.
+  struct NodeInfo {
+    RepTypeUnion use : 14;     // Union of all usages for the node.
+    bool queued : 1;           // Bookkeeping for the traversal.
+    bool visited : 1;          // Bookkeeping for the traversal.
+    RepTypeUnion output : 14;  // Output type of the node.
+  };
+
+  RepresentationSelector(JSGraph* jsgraph, Zone* zone,
+                         RepresentationChanger* changer)
+      : jsgraph_(jsgraph),
+        count_(jsgraph->graph()->NodeCount()),
+        info_(zone->NewArray<NodeInfo>(count_)),
+        nodes_(NodeVector::allocator_type(zone)),
+        replacements_(NodeVector::allocator_type(zone)),
+        contains_js_nodes_(false),
+        phase_(PROPAGATE),
+        changer_(changer),
+        queue_(std::deque<Node*, NodePtrZoneAllocator>(
+            NodePtrZoneAllocator(zone))) {
+    memset(info_, 0, sizeof(NodeInfo) * count_);
+  }
+
+  void Run(SimplifiedLowering* lowering) {
+    // Run propagation phase to a fixpoint.
+    TRACE(("--{Propagation phase}--\n"));
+    phase_ = PROPAGATE;
+    Enqueue(jsgraph_->graph()->end());
+    // Process nodes from the queue until it is empty.
+    while (!queue_.empty()) {
+      Node* node = queue_.front();
+      NodeInfo* info = GetInfo(node);
+      queue_.pop();
+      info->queued = false;
+      TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+      VisitNode(node, info->use, NULL);
+      TRACE(("  ==> output "));
+      PrintInfo(info->output);
+      TRACE(("\n"));
+    }
+
+    // Run lowering and change insertion phase.
+    TRACE(("--{Simplified lowering phase}--\n"));
+    phase_ = LOWER;
+    // Process nodes from the collected {nodes_} vector.
+    for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
+      Node* node = *i;
+      TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+      // Reuse {VisitNode()} so the representation rules are in one place.
+      VisitNode(node, GetUseInfo(node), lowering);
+    }
+
+    // Perform the final replacements.
+    for (NodeVector::iterator i = replacements_.begin();
+         i != replacements_.end(); ++i) {
+      Node* node = *i;
+      Node* replacement = *(++i);
+      node->ReplaceUses(replacement);
+    }
+  }
+
+  // Enqueue {node} if the {use} contains new information for that node.
+  // Add {node} to {nodes_} if this is the first time it's been visited.
+  void Enqueue(Node* node, RepTypeUnion use = 0) {
+    if (phase_ != PROPAGATE) return;
+    NodeInfo* info = GetInfo(node);
+    if (!info->visited) {
+      // First visit of this node.
+      info->visited = true;
+      info->queued = true;
+      nodes_.push_back(node);
+      queue_.push(node);
+      TRACE(("  initial: "));
+      info->use |= use;
+      PrintUseInfo(node);
+      return;
+    }
+    TRACE(("   queue?: "));
+    PrintUseInfo(node);
+    if ((info->use & use) != use) {
+      // New usage information for the node is available.
+      if (!info->queued) {
+        queue_.push(node);
+        info->queued = true;
+        TRACE(("   added: "));
+      } else {
+        TRACE((" inqueue: "));
+      }
+      info->use |= use;
+      PrintUseInfo(node);
+    }
+  }
+
+  bool lower() { return phase_ == LOWER; }
+
+  void Enqueue(Node* node, RepType use) {
+    Enqueue(node, static_cast<RepTypeUnion>(use));
+  }
+
+  void SetOutput(Node* node, RepTypeUnion output) {
+    // Every node should have at most one output representation. Note that
+    // phis can have 0, if they have not been used in a representation-inducing
+    // instruction.
+    DCHECK((output & rMask) == 0 || IsPowerOf2(output & rMask));
+    GetInfo(node)->output = output;
+  }
+
+  bool BothInputsAre(Node* node, Type* type) {
+    DCHECK_EQ(2, node->InputCount());
+    return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) &&
+           NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
+  }
+
+  void ProcessInput(Node* node, int index, RepTypeUnion use) {
+    Node* input = node->InputAt(index);
+    if (phase_ == PROPAGATE) {
+      // In the propagate phase, propagate the usage information backward.
+      Enqueue(input, use);
+    } else {
+      // In the change phase, insert a change before the use if necessary.
+      if ((use & rMask) == 0) return;  // No input requirement on the use.
+      RepTypeUnion output = GetInfo(input)->output;
+      if ((output & rMask & use) == 0) {
+        // Output representation doesn't match usage.
+        TRACE(("  change: #%d:%s(@%d #%d:%s) ", node->id(),
+               node->op()->mnemonic(), index, input->id(),
+               input->op()->mnemonic()));
+        TRACE((" from "));
+        PrintInfo(output);
+        TRACE((" to "));
+        PrintInfo(use);
+        TRACE(("\n"));
+        Node* n = changer_->GetRepresentationFor(input, output, use);
+        node->ReplaceInput(index, n);
+      }
+    }
+  }
+
+  static const RepTypeUnion kFloat64 = rFloat64 | tNumber;
+  static const RepTypeUnion kInt32 = rWord32 | tInt32;
+  static const RepTypeUnion kUint32 = rWord32 | tUint32;
+  static const RepTypeUnion kInt64 = rWord64 | tInt64;
+  static const RepTypeUnion kUint64 = rWord64 | tUint64;
+  static const RepTypeUnion kAnyTagged = rTagged | tAny;
+
+  // The default, most general visitation case. For {node}, process all value,
+  // context, effect, and control inputs, assuming that value inputs should have
+  // {rTagged} representation and can observe all output values {tAny}.
+  void VisitInputs(Node* node) {
+    InputIter i = node->inputs().begin();
+    for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
+         ++i, j--) {
+      ProcessInput(node, i.index(), kAnyTagged);  // Value inputs
+    }
+    for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
+         ++i, j--) {
+      ProcessInput(node, i.index(), kAnyTagged);  // Context inputs
+    }
+    for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
+         ++i, j--) {
+      Enqueue(*i);  // Effect inputs: just visit
+    }
+    for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
+         ++i, j--) {
+      Enqueue(*i);  // Control inputs: just visit
+    }
+    SetOutput(node, kAnyTagged);
+  }
+
+  // Helper for binops of the I x I -> O variety.
+  void VisitBinop(Node* node, RepTypeUnion input_use, RepTypeUnion output) {
+    DCHECK_EQ(2, node->InputCount());
+    ProcessInput(node, 0, input_use);
+    ProcessInput(node, 1, input_use);
+    SetOutput(node, output);
+  }
+
+  // Helper for unops of the I -> O variety.
+  void VisitUnop(Node* node, RepTypeUnion input_use, RepTypeUnion output) {
+    DCHECK_EQ(1, node->InputCount());
+    ProcessInput(node, 0, input_use);
+    SetOutput(node, output);
+  }
+
+  // Helper for leaf nodes.
+  void VisitLeaf(Node* node, RepTypeUnion output) {
+    DCHECK_EQ(0, node->InputCount());
+    SetOutput(node, output);
+  }
+
+  // Helpers for specific types of binops.
+  void VisitFloat64Binop(Node* node) { VisitBinop(node, kFloat64, kFloat64); }
+  void VisitInt32Binop(Node* node) { VisitBinop(node, kInt32, kInt32); }
+  void VisitUint32Binop(Node* node) { VisitBinop(node, kUint32, kUint32); }
+  void VisitInt64Binop(Node* node) { VisitBinop(node, kInt64, kInt64); }
+  void VisitUint64Binop(Node* node) { VisitBinop(node, kUint64, kUint64); }
+  void VisitFloat64Cmp(Node* node) { VisitBinop(node, kFloat64, rBit); }
+  void VisitInt32Cmp(Node* node) { VisitBinop(node, kInt32, rBit); }
+  void VisitUint32Cmp(Node* node) { VisitBinop(node, kUint32, rBit); }
+  void VisitInt64Cmp(Node* node) { VisitBinop(node, kInt64, rBit); }
+  void VisitUint64Cmp(Node* node) { VisitBinop(node, kUint64, rBit); }
+
+  // Helper for handling phis.
+  void VisitPhi(Node* node, RepTypeUnion use) {
+    // First, propagate the usage information to inputs of the phi.
+    int values = OperatorProperties::GetValueInputCount(node->op());
+    Node::Inputs inputs = node->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter, --values) {
+      // Propagate {use} of the phi to value inputs, and 0 to control.
+      // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+      ProcessInput(node, iter.index(), values > 0 ? use : 0);
+    }
+    // Phis adapt to whatever output representation their uses demand,
+    // pushing representation changes to their inputs.
+    RepTypeUnion use_rep = GetUseInfo(node) & rMask;
+    RepTypeUnion use_type = GetUseInfo(node) & tMask;
+    RepTypeUnion rep = 0;
+    if (use_rep & rTagged) {
+      rep = rTagged;  // Tagged overrides everything.
+    } else if (use_rep & rFloat64) {
+      rep = rFloat64;
+    } else if (use_rep & rWord64) {
+      rep = rWord64;
+    } else if (use_rep & rWord32) {
+      rep = rWord32;
+    } else if (use_rep & rBit) {
+      rep = rBit;
+    } else {
+      // There was no representation associated with any of the uses.
+ // TODO(titzer): Select the best rep using phi's type, not the usage type?
+      if (use_type & tAny) {
+        rep = rTagged;
+      } else if (use_type & tNumber) {
+        rep = rFloat64;
+      } else if (use_type & tInt64 || use_type & tUint64) {
+        rep = rWord64;
+      } else if (use_type & tInt32 || use_type & tUint32) {
+        rep = rWord32;
+      } else if (use_type & tBool) {
+        rep = rBit;
+      } else {
+        UNREACHABLE();  // should have at least a usage type!
+      }
+    }
+    // Preserve the usage type, but set the representation.
+    Type* upper = NodeProperties::GetBounds(node).upper;
+    SetOutput(node, rep | changer_->TypeFromUpperBound(upper));
+  }
+
+  Operator* Int32Op(Node* node) {
+    return changer_->Int32OperatorFor(node->opcode());
+  }
+
+  Operator* Uint32Op(Node* node) {
+    return changer_->Uint32OperatorFor(node->opcode());
+  }
+
+  Operator* Float64Op(Node* node) {
+    return changer_->Float64OperatorFor(node->opcode());
+  }
+
+  // Dispatching routine for visiting the node {node} with the usage {use}.
+  // Depending on the operator, propagate new usage info to the inputs.
+  void VisitNode(Node* node, RepTypeUnion use, SimplifiedLowering* lowering) {
+    switch (node->opcode()) {
+      //------------------------------------------------------------------
+      // Common operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kStart:
+      case IrOpcode::kDead:
+        return VisitLeaf(node, 0);
+      case IrOpcode::kParameter: {
+        // TODO(titzer): use representation from linkage.
+        Type* upper = NodeProperties::GetBounds(node).upper;
+        ProcessInput(node, 0, 0);
+        SetOutput(node, rTagged | changer_->TypeFromUpperBound(upper));
+        return;
+      }
+      case IrOpcode::kInt32Constant:
+        return VisitLeaf(node, rWord32);
+      case IrOpcode::kInt64Constant:
+        return VisitLeaf(node, rWord64);
+      case IrOpcode::kFloat64Constant:
+        return VisitLeaf(node, rFloat64);
+      case IrOpcode::kExternalConstant:
+        return VisitLeaf(node, rPtr);
+      case IrOpcode::kNumberConstant:
+        return VisitLeaf(node, rTagged);
+      case IrOpcode::kHeapConstant:
+        return VisitLeaf(node, rTagged);
+
+      case IrOpcode::kEnd:
+      case IrOpcode::kIfTrue:
+      case IrOpcode::kIfFalse:
+      case IrOpcode::kReturn:
+      case IrOpcode::kMerge:
+      case IrOpcode::kThrow:
+        return VisitInputs(node);  // default visit for all node inputs.
+
+      case IrOpcode::kBranch:
+        ProcessInput(node, 0, rBit);
+        Enqueue(NodeProperties::GetControlInput(node, 0));
+        break;
+      case IrOpcode::kPhi:
+        return VisitPhi(node, use);
+
+//------------------------------------------------------------------
+// JavaScript operators.
+//------------------------------------------------------------------
+// For now, we assume that all JS operators were too complex to lower
+// to Simplified and that they will always require tagged value inputs
+// and produce tagged value outputs.
+// TODO(turbofan): it might be possible to lower some JSOperators here,
+// but that responsibility really lies in the typed lowering phase.
+#define DEFINE_JS_CASE(x) case IrOpcode::k##x:
+        JS_OP_LIST(DEFINE_JS_CASE)
+#undef DEFINE_JS_CASE
+        contains_js_nodes_ = true;
+        VisitInputs(node);
+        return SetOutput(node, rTagged);
+
+      //------------------------------------------------------------------
+      // Simplified operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kBooleanNot: {
+        if (lower()) {
+          RepTypeUnion input = GetInfo(node->InputAt(0))->output;
+          if (input & rBit) {
+            // BooleanNot(x: rBit) => WordEqual(x, #0)
+            node->set_op(lowering->machine()->WordEqual());
+            node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
+          } else {
+            // BooleanNot(x: rTagged) => WordEqual(x, #false)
+            node->set_op(lowering->machine()->WordEqual());
+            node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
+          }
+        } else {
+          // No input representation requirement; adapt during lowering.
+          ProcessInput(node, 0, tBool);
+          SetOutput(node, rBit);
+        }
+        break;
+      }
+      case IrOpcode::kNumberEqual:
+      case IrOpcode::kNumberLessThan:
+      case IrOpcode::kNumberLessThanOrEqual: {
+        // Number comparisons reduce to integer comparisons for integer inputs.
+        if (BothInputsAre(node, Type::Signed32())) {
+          // => signed Int32Cmp
+          VisitInt32Cmp(node);
+          if (lower()) node->set_op(Int32Op(node));
+        } else if (BothInputsAre(node, Type::Unsigned32())) {
+          // => unsigned Int32Cmp
+          VisitUint32Cmp(node);
+          if (lower()) node->set_op(Uint32Op(node));
+        } else {
+          // => Float64Cmp
+          VisitFloat64Cmp(node);
+          if (lower()) node->set_op(Float64Op(node));
+        }
+        break;
+      }
+      case IrOpcode::kNumberAdd:
+      case IrOpcode::kNumberSubtract: {
+        // Add and subtract reduce to Int32Add/Sub if the inputs
+        // are already integers and all uses are truncating.
+        if (BothInputsAre(node, Type::Signed32()) &&
+            (use & (tUint32 | tNumber | tAny)) == 0) {
+          // => signed Int32Add/Sub
+          VisitInt32Binop(node);
+          if (lower()) node->set_op(Int32Op(node));
+        } else if (BothInputsAre(node, Type::Unsigned32()) &&
+                   (use & (tInt32 | tNumber | tAny)) == 0) {
+          // => unsigned Int32Add/Sub
+          VisitUint32Binop(node);
+          if (lower()) node->set_op(Uint32Op(node));
+        } else {
+          // => Float64Add/Sub
+          VisitFloat64Binop(node);
+          if (lower()) node->set_op(Float64Op(node));
+        }
+        break;
+      }
+      case IrOpcode::kNumberMultiply:
+      case IrOpcode::kNumberDivide:
+      case IrOpcode::kNumberModulus: {
+        // Float64Mul/Div/Mod
+        VisitFloat64Binop(node);
+        if (lower()) node->set_op(Float64Op(node));
+        break;
+      }
+      case IrOpcode::kNumberToInt32: {
+        RepTypeUnion use_rep = use & rMask;
+        if (lower()) {
+          RepTypeUnion in = GetInfo(node->InputAt(0))->output;
+          if ((in & tMask) == tInt32 || (in & rMask) == rWord32) {
+            // If the input has type int32, or is already a word32, just change
+            // representation if necessary.
+            VisitUnop(node, tInt32 | use_rep, tInt32 | use_rep);
+            DeferReplacement(node, node->InputAt(0));
+          } else {
+            // Require the input in float64 format and perform truncation.
+            // TODO(turbofan): could also avoid the truncation with a tag check.
+            VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32);
+            // TODO(titzer): should be a truncation.
+            node->set_op(lowering->machine()->ChangeFloat64ToInt32());
+          }
+        } else {
+          // Propagate a type to the input, but pass through representation.
+          VisitUnop(node, tInt32, tInt32 | use_rep);
+        }
+        break;
+      }
+      case IrOpcode::kNumberToUint32: {
+        RepTypeUnion use_rep = use & rMask;
+        if (lower()) {
+          RepTypeUnion in = GetInfo(node->InputAt(0))->output;
+          if ((in & tMask) == tUint32 || (in & rMask) == rWord32) {
+            // The input has type int32, just change representation.
+            VisitUnop(node, tUint32 | use_rep, tUint32 | use_rep);
+            DeferReplacement(node, node->InputAt(0));
+          } else {
+            // Require the input in float64 format to perform truncation.
+            // TODO(turbofan): could also avoid the truncation with a tag check.
+            VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32);
+            // TODO(titzer): should be a truncation.
+            node->set_op(lowering->machine()->ChangeFloat64ToUint32());
+          }
+        } else {
+          // Propagate a type to the input, but pass through representation.
+          VisitUnop(node, tUint32, tUint32 | use_rep);
+        }
+        break;
+      }
+      case IrOpcode::kReferenceEqual: {
+        VisitBinop(node, kAnyTagged, rBit);
+        if (lower()) node->set_op(lowering->machine()->WordEqual());
+        break;
+      }
+      case IrOpcode::kStringEqual: {
+        VisitBinop(node, kAnyTagged, rBit);
+        // TODO(titzer): lower StringEqual to stub/runtime call.
+        break;
+      }
+      case IrOpcode::kStringLessThan: {
+        VisitBinop(node, kAnyTagged, rBit);
+        // TODO(titzer): lower StringLessThan to stub/runtime call.
+        break;
+      }
+      case IrOpcode::kStringLessThanOrEqual: {
+        VisitBinop(node, kAnyTagged, rBit);
+        // TODO(titzer): lower StringLessThanOrEqual to stub/runtime call.
+        break;
+      }
+      case IrOpcode::kStringAdd: {
+        VisitBinop(node, kAnyTagged, kAnyTagged);
+        // TODO(titzer): lower StringAdd to stub/runtime call.
+        break;
+      }
+      case IrOpcode::kLoadField: {
+        FieldAccess access = FieldAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        SetOutput(node, changer_->TypeForField(access));
+        if (lower()) lowering->DoLoadField(node);
+        break;
+      }
+      case IrOpcode::kStoreField: {
+        FieldAccess access = FieldAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, changer_->TypeForField(access));
+        SetOutput(node, 0);
+        if (lower()) lowering->DoStoreField(node);
+        break;
+      }
+      case IrOpcode::kLoadElement: {
+        ElementAccess access = ElementAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, kInt32);  // element index
+        SetOutput(node, changer_->TypeForElement(access));
+        if (lower()) lowering->DoLoadElement(node);
+        break;
+      }
+      case IrOpcode::kStoreElement: {
+        ElementAccess access = ElementAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, kInt32);  // element index
+        ProcessInput(node, 2, changer_->TypeForElement(access));
+        SetOutput(node, 0);
+        if (lower()) lowering->DoStoreElement(node);
+        break;
+      }
+
+      //------------------------------------------------------------------
+      // Machine-level operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kLoad: {
+        // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
+        RepType tBase = rTagged;
+        MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
+        ProcessInput(node, 0, tBase);   // pointer or object
+        ProcessInput(node, 1, kInt32);  // index
+        SetOutput(node, changer_->TypeForMachineRepresentation(rep));
+        break;
+      }
+      case IrOpcode::kStore: {
+        // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
+        RepType tBase = rTagged;
+        StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
+        ProcessInput(node, 0, tBase);   // pointer or object
+        ProcessInput(node, 1, kInt32);  // index
+        ProcessInput(node, 2, changer_->TypeForMachineRepresentation(rep.rep));
+        SetOutput(node, 0);
+        break;
+      }
+      case IrOpcode::kWord32Shr:
+        // We output unsigned int32 for shift right because JavaScript.
+        return VisitBinop(node, rWord32, rWord32 | tUint32);
+      case IrOpcode::kWord32And:
+      case IrOpcode::kWord32Or:
+      case IrOpcode::kWord32Xor:
+      case IrOpcode::kWord32Shl:
+      case IrOpcode::kWord32Sar:
+        // We use signed int32 as the output type for these word32 operations,
+        // though the machine bits are the same for either signed or unsigned,
+        // because JavaScript considers the result from these operations signed.
+        return VisitBinop(node, rWord32, rWord32 | tInt32);
+      case IrOpcode::kWord32Equal:
+        return VisitBinop(node, rWord32, rBit);
+
+      case IrOpcode::kInt32Add:
+      case IrOpcode::kInt32Sub:
+      case IrOpcode::kInt32Mul:
+      case IrOpcode::kInt32Div:
+      case IrOpcode::kInt32Mod:
+        return VisitInt32Binop(node);
+      case IrOpcode::kInt32UDiv:
+      case IrOpcode::kInt32UMod:
+        return VisitUint32Binop(node);
+      case IrOpcode::kInt32LessThan:
+      case IrOpcode::kInt32LessThanOrEqual:
+        return VisitInt32Cmp(node);
+
+      case IrOpcode::kUint32LessThan:
+      case IrOpcode::kUint32LessThanOrEqual:
+        return VisitUint32Cmp(node);
+
+      case IrOpcode::kInt64Add:
+      case IrOpcode::kInt64Sub:
+      case IrOpcode::kInt64Mul:
+      case IrOpcode::kInt64Div:
+      case IrOpcode::kInt64Mod:
+        return VisitInt64Binop(node);
+      case IrOpcode::kInt64LessThan:
+      case IrOpcode::kInt64LessThanOrEqual:
+        return VisitInt64Cmp(node);
+
+      case IrOpcode::kInt64UDiv:
+      case IrOpcode::kInt64UMod:
+        return VisitUint64Binop(node);
+
+      case IrOpcode::kWord64And:
+      case IrOpcode::kWord64Or:
+      case IrOpcode::kWord64Xor:
+      case IrOpcode::kWord64Shl:
+      case IrOpcode::kWord64Shr:
+      case IrOpcode::kWord64Sar:
+        return VisitBinop(node, rWord64, rWord64);
+      case IrOpcode::kWord64Equal:
+        return VisitBinop(node, rWord64, rBit);
+
+      case IrOpcode::kConvertInt32ToInt64:
+        return VisitUnop(node, tInt32 | rWord32, tInt32 | rWord64);
+      case IrOpcode::kConvertInt64ToInt32:
+        return VisitUnop(node, tInt64 | rWord64, tInt32 | rWord32);
+
+      case IrOpcode::kChangeInt32ToFloat64:
+        return VisitUnop(node, tInt32 | rWord32, tInt32 | rFloat64);
+      case IrOpcode::kChangeUint32ToFloat64:
+        return VisitUnop(node, tUint32 | rWord32, tUint32 | rFloat64);
+      case IrOpcode::kChangeFloat64ToInt32:
+        return VisitUnop(node, tInt32 | rFloat64, tInt32 | rWord32);
+      case IrOpcode::kChangeFloat64ToUint32:
+        return VisitUnop(node, tUint32 | rFloat64, tUint32 | rWord32);
+
+      case IrOpcode::kFloat64Add:
+      case IrOpcode::kFloat64Sub:
+      case IrOpcode::kFloat64Mul:
+      case IrOpcode::kFloat64Div:
+      case IrOpcode::kFloat64Mod:
+        return VisitFloat64Binop(node);
+      case IrOpcode::kFloat64Equal:
+      case IrOpcode::kFloat64LessThan:
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return VisitFloat64Cmp(node);
+      default:
+        VisitInputs(node);
+        break;
+    }
+  }
+
+  void DeferReplacement(Node* node, Node* replacement) {
+    if (replacement->id() < count_) {
+      // Replace with a previously existing node eagerly.
+      node->ReplaceUses(replacement);
+    } else {
+      // Otherwise, we are replacing a node with a representation change.
+      // Such a substitution must be done after all lowering is done, because
+      // new nodes do not have {NodeInfo} entries, and that would confuse
+      // the representation change insertion for uses of it.
+      replacements_.push_back(node);
+      replacements_.push_back(replacement);
+    }
+    // TODO(titzer) node->RemoveAllInputs();  // Node is now dead.
+  }
+
+  void PrintUseInfo(Node* node) {
+    TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic()));
+    PrintInfo(GetUseInfo(node));
+    TRACE(("\n"));
+  }
+
+  void PrintInfo(RepTypeUnion info) {
+    if (FLAG_trace_representation) {
+      char buf[REP_TYPE_STRLEN];
+      RenderRepTypeUnion(buf, info);
+      TRACE(("%s", buf));
+    }
+  }
+
+ private:
+  JSGraph* jsgraph_;
+  int count_;                       // number of nodes in the graph
+  NodeInfo* info_;                  // node id -> usage information
+  NodeVector nodes_;                // collected nodes
+  NodeVector replacements_;         // replacements to be done after lowering
+  bool contains_js_nodes_;          // {true} if a JS operator was seen
+  Phase phase_;                     // current phase of algorithm
+  RepresentationChanger* changer_;  // for inserting representation changes
+
+  std::queue<Node*, std::deque<Node*, NodePtrZoneAllocator> > queue_;
+
+  NodeInfo* GetInfo(Node* node) {
+    DCHECK(node->id() >= 0);
+    DCHECK(node->id() < count_);
+    return &info_[node->id()];
+  }
+
+  RepTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
+};
+
+
 Node* SimplifiedLowering::IsTagged(Node* node) {
   // TODO(titzer): factor this out to a TaggingScheme abstraction.
   STATIC_ASSERT(kSmiTagMask == 1);  // Only works if tag is the low bit.
   return graph()->NewNode(machine()->WordAnd(), node,
                           jsgraph()->Int32Constant(kSmiTagMask));
 }
+
+
+void SimplifiedLowering::LowerAllNodes() {
+  SimplifiedOperatorBuilder simplified(graph()->zone());
+  RepresentationChanger changer(jsgraph(), &simplified, machine(),
+                                graph()->zone()->isolate());
+  RepresentationSelector selector(jsgraph(), zone(), &changer);
+  selector.Run(this);
+
+  LoweringBuilder::LowerAllNodes();
+}


 Node* SimplifiedLowering::Untag(Node* node) {
@@ -165,10 +868,8 @@

 void SimplifiedLowering::DoChangeBoolToBit(Node* node, Node* effect,
                                            Node* control) {
-  Node* val = node->InputAt(0);
-  Operator* op =
- kPointerSize == 8 ? machine()->Word64Equal() : machine()->Word32Equal();
-  Node* cmp = graph()->NewNode(op, val, jsgraph()->TrueConstant());
+  Node* cmp = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+                               jsgraph()->TrueConstant());
   node->ReplaceUses(cmp);
 }

@@ -204,7 +905,7 @@
 }


-void SimplifiedLowering::DoLoadField(Node* node, Node* effect, Node* control) {
+void SimplifiedLowering::DoLoadField(Node* node) {
   const FieldAccess& access = FieldAccessOf(node->op());
   node->set_op(machine_.Load(access.representation));
   Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
@@ -212,7 +913,7 @@
 }


-void SimplifiedLowering::DoStoreField(Node* node, Node* effect, Node* control) {
+void SimplifiedLowering::DoStoreField(Node* node) {
   const FieldAccess& access = FieldAccessOf(node->op());
   WriteBarrierKind kind = ComputeWriteBarrierKind(
       access.base_is_tagged, access.representation, access.type);
@@ -252,21 +953,19 @@
   }
   int fixed_offset = access.header_size - access.tag();
   if (fixed_offset == 0) return index;
-  return graph()->NewNode(machine()->Int32Add(),
-                          jsgraph()->Int32Constant(fixed_offset), index);
+  return graph()->NewNode(machine()->Int32Add(), index,
+                          jsgraph()->Int32Constant(fixed_offset));
 }


-void SimplifiedLowering::DoLoadElement(Node* node, Node* effect,
-                                       Node* control) {
+void SimplifiedLowering::DoLoadElement(Node* node) {
   const ElementAccess& access = ElementAccessOf(node->op());
   node->set_op(machine_.Load(access.representation));
   node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
 }


-void SimplifiedLowering::DoStoreElement(Node* node, Node* effect,
-                                        Node* control) {
+void SimplifiedLowering::DoStoreElement(Node* node) {
   const ElementAccess& access = ElementAccessOf(node->op());
   WriteBarrierKind kind = ComputeWriteBarrierKind(
       access.base_is_tagged, access.representation, access.type);
@@ -275,63 +974,37 @@
 }


-void SimplifiedLowering::Lower(Node* node) {
-  Node* start = graph()->start();
+void SimplifiedLowering::Lower(Node* node) {}
+
+
+void SimplifiedLowering::LowerChange(Node* node, Node* effect, Node* control) {
   switch (node->opcode()) {
-    case IrOpcode::kBooleanNot:
-    case IrOpcode::kNumberEqual:
-    case IrOpcode::kNumberLessThan:
-    case IrOpcode::kNumberLessThanOrEqual:
-    case IrOpcode::kNumberAdd:
-    case IrOpcode::kNumberSubtract:
-    case IrOpcode::kNumberMultiply:
-    case IrOpcode::kNumberDivide:
-    case IrOpcode::kNumberModulus:
-    case IrOpcode::kNumberToInt32:
-    case IrOpcode::kNumberToUint32:
-    case IrOpcode::kReferenceEqual:
-    case IrOpcode::kStringEqual:
-    case IrOpcode::kStringLessThan:
-    case IrOpcode::kStringLessThanOrEqual:
-    case IrOpcode::kStringAdd:
-      break;
     case IrOpcode::kChangeTaggedToInt32:
-      DoChangeTaggedToUI32(node, start, start, true);
+      DoChangeTaggedToUI32(node, effect, control, true);
       break;
     case IrOpcode::kChangeTaggedToUint32:
-      DoChangeTaggedToUI32(node, start, start, false);
+      DoChangeTaggedToUI32(node, effect, control, false);
       break;
     case IrOpcode::kChangeTaggedToFloat64:
-      DoChangeTaggedToFloat64(node, start, start);
+      DoChangeTaggedToFloat64(node, effect, control);
       break;
     case IrOpcode::kChangeInt32ToTagged:
-      DoChangeUI32ToTagged(node, start, start, true);
+      DoChangeUI32ToTagged(node, effect, control, true);
       break;
     case IrOpcode::kChangeUint32ToTagged:
-      DoChangeUI32ToTagged(node, start, start, false);
+      DoChangeUI32ToTagged(node, effect, control, false);
       break;
     case IrOpcode::kChangeFloat64ToTagged:
-      DoChangeFloat64ToTagged(node, start, start);
+      DoChangeFloat64ToTagged(node, effect, control);
       break;
     case IrOpcode::kChangeBoolToBit:
-      DoChangeBoolToBit(node, start, start);
+      DoChangeBoolToBit(node, effect, control);
       break;
     case IrOpcode::kChangeBitToBool:
-      DoChangeBitToBool(node, start, start);
-      break;
-    case IrOpcode::kLoadField:
-      DoLoadField(node, start, start);
-      break;
-    case IrOpcode::kStoreField:
-      DoStoreField(node, start, start);
-      break;
-    case IrOpcode::kLoadElement:
-      DoLoadElement(node, start, start);
-      break;
-    case IrOpcode::kStoreElement:
-      DoStoreElement(node, start, start);
+      DoChangeBitToBool(node, effect, control);
       break;
     default:
+      UNREACHABLE();
       break;
   }
 }
=======================================
--- /trunk/src/compiler/simplified-lowering.h   Fri Aug  1 10:40:37 2014 UTC
+++ /trunk/src/compiler/simplified-lowering.h   Mon Aug 11 12:49:03 2014 UTC
@@ -24,22 +24,17 @@
         jsgraph_(jsgraph),
         machine_(jsgraph->zone()) {}
   virtual ~SimplifiedLowering() {}
+
+  void LowerAllNodes();

   virtual void Lower(Node* node);
+  void LowerChange(Node* node, Node* effect, Node* control);

// TODO(titzer): These are exposed for direct testing. Use a friend class.
-  void DoChangeTaggedToUI32(Node* node, Node* effect, Node* control,
-                            bool is_signed);
-  void DoChangeUI32ToTagged(Node* node, Node* effect, Node* control,
-                            bool is_signed);
-  void DoChangeTaggedToFloat64(Node* node, Node* effect, Node* control);
-  void DoChangeFloat64ToTagged(Node* node, Node* effect, Node* control);
-  void DoChangeBoolToBit(Node* node, Node* effect, Node* control);
-  void DoChangeBitToBool(Node* node, Node* effect, Node* control);
-  void DoLoadField(Node* node, Node* effect, Node* control);
-  void DoStoreField(Node* node, Node* effect, Node* control);
-  void DoLoadElement(Node* node, Node* effect, Node* control);
-  void DoStoreElement(Node* node, Node* effect, Node* control);
+  void DoLoadField(Node* node);
+  void DoStoreField(Node* node);
+  void DoLoadElement(Node* node);
+  void DoStoreElement(Node* node);

  private:
   JSGraph* jsgraph_;
@@ -49,8 +44,18 @@
   Node* IsTagged(Node* node);
   Node* Untag(Node* node);
   Node* OffsetMinusTagConstant(int32_t offset);
+  Node* ComputeIndex(const ElementAccess& access, Node* index);
+
+  void DoChangeTaggedToUI32(Node* node, Node* effect, Node* control,
+                            bool is_signed);
+  void DoChangeUI32ToTagged(Node* node, Node* effect, Node* control,
+                            bool is_signed);
+  void DoChangeTaggedToFloat64(Node* node, Node* effect, Node* control);
+  void DoChangeFloat64ToTagged(Node* node, Node* effect, Node* control);
+  void DoChangeBoolToBit(Node* node, Node* effect, Node* control);
+  void DoChangeBitToBool(Node* node, Node* effect, Node* control);

-  Node* ComputeIndex(const ElementAccess& access, Node* index);
+  friend class RepresentationSelector;

   Zone* zone() { return jsgraph_->zone(); }
   JSGraph* jsgraph() { return jsgraph_; }
=======================================
--- /trunk/src/debug.cc Thu Aug  7 08:39:21 2014 UTC
+++ /trunk/src/debug.cc Mon Aug 11 12:49:03 2014 UTC
@@ -1316,11 +1316,9 @@
       return thread_local_.promise_on_stack_->promise();
     }
     handler = handler->next();
-    // There must be a try-catch handler if a promise is on stack.
-    DCHECK_NE(NULL, handler);
// Throwing inside a Promise can be intercepted by an inner try-catch, so
     // we stop at the first try-catch handler.
-  } while (!handler->is_catch());
+  } while (handler != NULL && !handler->is_catch());
   return undefined;
 }

=======================================
--- /trunk/src/version.cc       Sat Aug  9 11:07:11 2014 UTC
+++ /trunk/src/version.cc       Mon Aug 11 12:49:03 2014 UTC
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     28
-#define BUILD_NUMBER      65
+#define BUILD_NUMBER      69
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
=======================================
--- /trunk/test/cctest/compiler/call-tester.h   Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/test/cctest/compiler/call-tester.h   Mon Aug 11 12:49:03 2014 UTC
@@ -106,6 +106,7 @@
     UNREACHABLE();
     return 0.0;
   }
+  static MachineRepresentation Representation() { return kMachineFloat64; }
 };


=======================================
--- /trunk/test/cctest/compiler/graph-builder-tester.h Wed Aug 6 00:06:29 2014 UTC +++ /trunk/test/cctest/compiler/graph-builder-tester.h Mon Aug 11 12:49:03 2014 UTC
@@ -40,6 +40,8 @@
   MachineCallHelper(Zone* zone, MachineCallDescriptorBuilder* builder);

   Node* Parameter(int offset);
+
+  void GenerateCode() { Generate(); }

  protected:
   virtual byte* Generate();
@@ -71,7 +73,7 @@

  protected:
   // Prefixed with main_ to avoid naiming conflicts.
-  Graph* const main_graph_;
+  Graph* main_graph_;
   CommonOperatorBuilder main_common_;
   MachineOperatorBuilder main_machine_;
   SimplifiedOperatorBuilder main_simplified_;
=======================================
--- /trunk/test/cctest/compiler/test-changes-lowering.cc Fri Aug 8 15:46:17 2014 UTC +++ /trunk/test/cctest/compiler/test-changes-lowering.cc Mon Aug 11 12:49:03 2014 UTC
@@ -108,7 +108,7 @@
                                        this->start(), this->start());
     Node* end = this->graph()->NewNode(this->common()->End(), ret);
     this->graph()->SetEnd(end);
-    this->lowering.Lower(change);
+    this->lowering.LowerChange(change, this->start(), this->start());
     Verifier::Run(this->graph());
   }

@@ -124,7 +124,7 @@
this->common()->Return(), this->Int32Constant(0), store, this->start());
     Node* end = this->graph()->NewNode(this->common()->End(), ret);
     this->graph()->SetEnd(end);
-    this->lowering.Lower(change);
+    this->lowering.LowerChange(change, this->start(), this->start());
     Verifier::Run(this->graph());
   }

@@ -139,7 +139,7 @@
                                        this->start(), this->start());
     Node* end = this->graph()->NewNode(this->common()->End(), ret);
     this->graph()->SetEnd(end);
-    this->lowering.Lower(change);
+    this->lowering.LowerChange(change, this->start(), this->start());
     Verifier::Run(this->graph());
   }

=======================================
--- /trunk/test/cctest/compiler/test-simplified-lowering.cc Tue Aug 5 00:05:55 2014 UTC +++ /trunk/test/cctest/compiler/test-simplified-lowering.cc Mon Aug 11 12:49:03 2014 UTC
@@ -6,8 +6,10 @@

 #include "src/compiler/control-builders.h"
 #include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-visualizer.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/pipeline.h"
+#include "src/compiler/representation-change.h"
 #include "src/compiler/simplified-lowering.h"
 #include "src/compiler/simplified-node-factory.h"
 #include "src/compiler/typer.h"
@@ -24,15 +26,14 @@
 using namespace v8::internal;
 using namespace v8::internal::compiler;

-// TODO(titzer): rename this to VMLoweringTester
 template <typename ReturnType>
-class SimplifiedGraphBuilderTester : public GraphBuilderTester<ReturnType> {
+class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
  public:
-  SimplifiedGraphBuilderTester(MachineRepresentation p0 = kMachineLast,
-                               MachineRepresentation p1 = kMachineLast,
-                               MachineRepresentation p2 = kMachineLast,
-                               MachineRepresentation p3 = kMachineLast,
-                               MachineRepresentation p4 = kMachineLast)
+  SimplifiedLoweringTester(MachineRepresentation p0 = kMachineLast,
+                           MachineRepresentation p1 = kMachineLast,
+                           MachineRepresentation p2 = kMachineLast,
+                           MachineRepresentation p3 = kMachineLast,
+                           MachineRepresentation p4 = kMachineLast)
       : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4),
         typer(this->zone()),
         source_positions(this->graph()),
@@ -44,37 +45,9 @@
   JSGraph jsgraph;
   SimplifiedLowering lowering;

-  // Close graph and lower one node.
-  void Lower(Node* node) {
+  void LowerAllNodes() {
     this->End();
-    if (node == NULL) {
-      lowering.LowerAllNodes();
-    } else {
-      lowering.Lower(node);
-    }
-  }
-
-  // Close graph and lower all nodes.
-  void LowerAllNodes() { Lower(NULL); }
-
-  void StoreFloat64(Node* node, double* ptr) {
-    Node* ptr_node = this->PointerConstant(ptr);
-    this->Store(kMachineFloat64, ptr_node, node);
-  }
-
-  Node* LoadInt32(int32_t* ptr) {
-    Node* ptr_node = this->PointerConstant(ptr);
-    return this->Load(kMachineWord32, ptr_node);
-  }
-
-  Node* LoadUint32(uint32_t* ptr) {
-    Node* ptr_node = this->PointerConstant(ptr);
-    return this->Load(kMachineWord32, ptr_node);
-  }
-
-  Node* LoadFloat64(double* ptr) {
-    Node* ptr_node = this->PointerConstant(ptr);
-    return this->Load(kMachineFloat64, ptr_node);
+    lowering.LowerAllNodes();
   }

   Factory* factory() { return this->isolate()->factory(); }
@@ -135,60 +108,63 @@


 TEST(RunLoadMap) {
-  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  SimplifiedLoweringTester<Object*> t(kMachineTagged);
   FieldAccess access = ForJSObjectMap();
   Node* load = t.LoadField(access, t.Parameter(0));
   t.Return(load);

   t.LowerAllNodes();

-  if (!Pipeline::SupportedTarget()) return;
-
-  Handle<JSObject> src = TestObject();
-  Handle<Map> src_map(src->map());
-  Object* result = t.Call(*src);
-  CHECK_EQ(*src_map, result);
+  if (Pipeline::SupportedTarget()) {
+    t.GenerateCode();
+    Handle<JSObject> src = TestObject();
+    Handle<Map> src_map(src->map());
+    Object* result = t.Call(*src);  // TODO(titzer): raw pointers in call
+    CHECK_EQ(*src_map, result);
+  }
 }


 TEST(RunStoreMap) {
-  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged, kMachineTagged);
+  SimplifiedLoweringTester<int32_t> t(kMachineTagged, kMachineTagged);
   FieldAccess access = ForJSObjectMap();
   t.StoreField(access, t.Parameter(1), t.Parameter(0));
-  t.Return(t.Int32Constant(0));
+  t.Return(t.jsgraph.TrueConstant());

   t.LowerAllNodes();

-  if (!Pipeline::SupportedTarget()) return;
-
-  Handle<JSObject> src = TestObject();
-  Handle<Map> src_map(src->map());
-  Handle<JSObject> dst = TestObject();
-  CHECK(src->map() != dst->map());
-  t.Call(*src_map, *dst);
-  CHECK(*src_map == dst->map());
+  if (Pipeline::SupportedTarget()) {
+    t.GenerateCode();
+    Handle<JSObject> src = TestObject();
+    Handle<Map> src_map(src->map());
+    Handle<JSObject> dst = TestObject();
+    CHECK(src->map() != dst->map());
+    t.Call(*src_map, *dst);  // TODO(titzer): raw pointers in call
+    CHECK(*src_map == dst->map());
+  }
 }


 TEST(RunLoadProperties) {
-  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  SimplifiedLoweringTester<Object*> t(kMachineTagged);
   FieldAccess access = ForJSObjectProperties();
   Node* load = t.LoadField(access, t.Parameter(0));
   t.Return(load);

   t.LowerAllNodes();

-  if (!Pipeline::SupportedTarget()) return;
-
-  Handle<JSObject> src = TestObject();
-  Handle<FixedArray> src_props(src->properties());
-  Object* result = t.Call(*src);
-  CHECK_EQ(*src_props, result);
+  if (Pipeline::SupportedTarget()) {
+    t.GenerateCode();
+    Handle<JSObject> src = TestObject();
+    Handle<FixedArray> src_props(src->properties());
+    Object* result = t.Call(*src);  // TODO(titzer): raw pointers in call
+    CHECK_EQ(*src_props, result);
+  }
 }


 TEST(RunLoadStoreMap) {
-  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged, kMachineTagged);
+  SimplifiedLoweringTester<Object*> t(kMachineTagged, kMachineTagged);
   FieldAccess access = ForJSObjectMap();
   Node* load = t.LoadField(access, t.Parameter(0));
   t.StoreField(access, t.Parameter(1), load);
@@ -196,21 +172,22 @@

   t.LowerAllNodes();

-  if (!Pipeline::SupportedTarget()) return;
-
-  Handle<JSObject> src = TestObject();
-  Handle<Map> src_map(src->map());
-  Handle<JSObject> dst = TestObject();
-  CHECK(src->map() != dst->map());
-  Object* result = t.Call(*src, *dst);
-  CHECK(result->IsMap());
-  CHECK_EQ(*src_map, result);
-  CHECK(*src_map == dst->map());
+  if (Pipeline::SupportedTarget()) {
+    t.GenerateCode();
+    Handle<JSObject> src = TestObject();
+    Handle<Map> src_map(src->map());
+    Handle<JSObject> dst = TestObject();
+    CHECK(src->map() != dst->map());
+ Object* result = t.Call(*src, *dst); // TODO(titzer): raw pointers in call
+    CHECK(result->IsMap());
+    CHECK_EQ(*src_map, result);
+    CHECK(*src_map == dst->map());
+  }
 }


 TEST(RunLoadStoreFixedArrayIndex) {
-  SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+  SimplifiedLoweringTester<Object*> t(kMachineTagged);
   ElementAccess access = ForFixedArrayElement();
   Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
   t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
@@ -218,101 +195,53 @@

   t.LowerAllNodes();

-  if (!Pipeline::SupportedTarget()) return;
-
-  Handle<FixedArray> array = t.factory()->NewFixedArray(2);
-  Handle<JSObject> src = TestObject();
-  Handle<JSObject> dst = TestObject();
-  array->set(0, *src);
-  array->set(1, *dst);
-  Object* result = t.Call(*array);
-  CHECK_EQ(*src, result);
-  CHECK_EQ(*src, array->get(0));
-  CHECK_EQ(*src, array->get(1));
+  if (Pipeline::SupportedTarget()) {
+    t.GenerateCode();
+    Handle<FixedArray> array = t.factory()->NewFixedArray(2);
+    Handle<JSObject> src = TestObject();
+    Handle<JSObject> dst = TestObject();
+    array->set(0, *src);
+    array->set(1, *dst);
+    Object* result = t.Call(*array);
+    CHECK_EQ(*src, result);
+    CHECK_EQ(*src, array->get(0));
+    CHECK_EQ(*src, array->get(1));
+  }
 }


 TEST(RunLoadStoreArrayBuffer) {
-  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged);
+  SimplifiedLoweringTester<Object*> t(kMachineTagged);
   const int index = 12;
-  FieldAccess access = ForArrayBufferBackingStore();
-  Node* backing_store = t.LoadField(access, t.Parameter(0));
   ElementAccess buffer_access = ForBackingStoreElement(kMachineWord8);
+  Node* backing_store =
+      t.LoadField(ForArrayBufferBackingStore(), t.Parameter(0));
   Node* load =
       t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
   t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
                  load);
-  t.Return(load);
+  t.Return(t.jsgraph.TrueConstant());

   t.LowerAllNodes();

-  if (!Pipeline::SupportedTarget()) return;
+  if (Pipeline::SupportedTarget()) {
+    t.GenerateCode();
+    Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
+    const int array_length = 2 * index;
+ Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
+    uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
+    for (int i = 0; i < array_length; i++) {
+      data[i] = i;
+    }

-  Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
-  const int array_length = 2 * index;
- Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
-  uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
-  for (int i = 0; i < array_length; i++) {
-    data[i] = i;
-  }
-  int32_t result = t.Call(*array);
-  CHECK_EQ(index, result);
-  for (int i = 0; i < array_length; i++) {
-    uint8_t expected = i;
-    if (i == (index + 1)) expected = result;
-    CHECK_EQ(data[i], expected);
-  }
-}
-
-
-TEST(RunCopyFixedArray) {
-  SimplifiedGraphBuilderTester<int32_t> t(kMachineTagged, kMachineTagged);
-
-  const int kArraySize = 15;
-  Node* one = t.Int32Constant(1);
-  Node* index = t.Int32Constant(0);
-  Node* limit = t.Int32Constant(kArraySize);
-  t.environment()->Push(index);
-  {
-    LoopBuilder loop(&t);
-    loop.BeginLoop();
-    // Loop exit condition.
-    index = t.environment()->Top();
-    Node* condition = t.Int32LessThan(index, limit);
-    loop.BreakUnless(condition);
-    // src[index] = dst[index].
-    index = t.environment()->Pop();
-    ElementAccess access = ForFixedArrayElement();
-    Node* src = t.Parameter(0);
-    Node* load = t.LoadElement(access, src, index);
-    Node* dst = t.Parameter(1);
-    t.StoreElement(access, dst, index, load);
-    // index++
-    index = t.Int32Add(index, one);
-    t.environment()->Push(index);
-    // continue.
-    loop.EndBody();
-    loop.EndLoop();
-  }
-  index = t.environment()->Pop();
-  t.Return(index);
-
-  t.LowerAllNodes();
-
-  if (!Pipeline::SupportedTarget()) return;
-
-  Handle<FixedArray> src = t.factory()->NewFixedArray(kArraySize);
-  Handle<FixedArray> src_copy = t.factory()->NewFixedArray(kArraySize);
-  Handle<FixedArray> dst = t.factory()->NewFixedArray(kArraySize);
-  for (int i = 0; i < kArraySize; i++) {
-    src->set(i, *TestObject());
-    src_copy->set(i, src->get(i));
-    dst->set(i, *TestObject());
-    CHECK_NE(src_copy->get(i), dst->get(i));
-  }
-  CHECK_EQ(kArraySize, t.Call(*src, *dst));
-  for (int i = 0; i < kArraySize; i++) {
-    CHECK_EQ(src_copy->get(i), dst->get(i));
+    // TODO(titzer): raw pointers in call
+    Object* result = t.Call(*array);
+    CHECK_EQ(t.isolate()->heap()->true_value(), result);
+    for (int i = 0; i < array_length; i++) {
+      uint8_t expected = i;
+      if (i == (index + 1)) expected = index;
+      CHECK_EQ(data[i], expected);
+    }
   }
 }

@@ -325,7 +254,7 @@
     FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
                           Type::Integral32(), kMachineTagged};

-    SimplifiedGraphBuilderTester<Object*> t;
+    SimplifiedLoweringTester<Object*> t;
     Node* load = t.LoadField(access, t.PointerConstant(smis));
     t.Return(load);
     t.LowerAllNodes();
@@ -349,7 +278,7 @@
     FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
                           Type::Integral32(), kMachineTagged};

-    SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+    SimplifiedLoweringTester<Object*> t(kMachineTagged);
     Node* p0 = t.Parameter(0);
     t.StoreField(access, t.PointerConstant(smis), p0);
     t.Return(p0);
@@ -377,7 +306,7 @@
       ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
                               kMachineTagged};

-      SimplifiedGraphBuilderTester<Object*> t;
+      SimplifiedLoweringTester<Object*> t;
       Node* load = t.LoadElement(access, t.PointerConstant(smis),
                                  t.Int32Constant(static_cast<int>(j)));
       t.Return(load);
@@ -405,7 +334,7 @@
       ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
                               kMachineTagged};

-      SimplifiedGraphBuilderTester<Object*> t(kMachineTagged);
+      SimplifiedLoweringTester<Object*> t(kMachineTagged);
       Node* p0 = t.Parameter(0);
       t.StoreElement(access, t.PointerConstant(smis),
                      t.Int32Constant(static_cast<int>(j)), p0);
@@ -425,3 +354,1020 @@
     }
   }
 }
+
+
+// A helper class for accessing fields and elements of various types, on both +// tagged and untagged base pointers. Contains both tagged and untagged buffers
+// for testing direct memory access from generated code.
+template <typename E>
+class AccessTester : public HandleAndZoneScope {
+ public:
+  bool tagged;
+  MachineRepresentation rep;
+  E* original_elements;
+  size_t num_elements;
+  E* untagged_array;
+ Handle<ByteArray> tagged_array; // TODO(titzer): use FixedArray for tagged.
+
+  AccessTester(bool t, MachineRepresentation r, E* orig, size_t num)
+      : tagged(t),
+        rep(r),
+        original_elements(orig),
+        num_elements(num),
+        untagged_array(static_cast<E*>(malloc(ByteSize()))),
+        tagged_array(main_isolate()->factory()->NewByteArray(
+            static_cast<int>(ByteSize()))) {
+    Reinitialize();
+  }
+
+  ~AccessTester() { free(untagged_array); }
+
+  size_t ByteSize() { return num_elements * sizeof(E); }
+
+ // Nuke both {untagged_array} and {tagged_array} with {original_elements}.
+  void Reinitialize() {
+    memcpy(untagged_array, original_elements, ByteSize());
+    CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
+    E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
+    memcpy(raw, original_elements, ByteSize());
+  }
+
+  // Create and run code that copies the element in either {untagged_array}
+  // or {tagged_array} at index {from_index} to index {to_index}.
+  void RunCopyElement(int from_index, int to_index) {
+    // TODO(titzer): test element and field accesses where the base is not
+    // a constant in the code.
+    BoundsCheck(from_index);
+    BoundsCheck(to_index);
+    ElementAccess access = GetElementAccess();
+
+    SimplifiedLoweringTester<Object*> t;
+    Node* ptr = GetBaseNode(&t);
+    Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
+    t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
+    t.Return(t.jsgraph.TrueConstant());
+    t.LowerAllNodes();
+    t.GenerateCode();
+
+    if (Pipeline::SupportedTarget()) {
+      Object* result = t.Call();
+      CHECK_EQ(t.isolate()->heap()->true_value(), result);
+    }
+  }
+
+  // Create and run code that copies the field in either {untagged_array}
+  // or {tagged_array} at index {from_index} to index {to_index}.
+  void RunCopyField(int from_index, int to_index) {
+    BoundsCheck(from_index);
+    BoundsCheck(to_index);
+    FieldAccess from_access = GetFieldAccess(from_index);
+    FieldAccess to_access = GetFieldAccess(to_index);
+
+    SimplifiedLoweringTester<Object*> t;
+    Node* ptr = GetBaseNode(&t);
+    Node* load = t.LoadField(from_access, ptr);
+    t.StoreField(to_access, ptr, load);
+    t.Return(t.jsgraph.TrueConstant());
+    t.LowerAllNodes();
+    t.GenerateCode();
+
+    if (Pipeline::SupportedTarget()) {
+      Object* result = t.Call();
+      CHECK_EQ(t.isolate()->heap()->true_value(), result);
+    }
+  }
+
+  // Create and run code that copies the elements from {this} to {that}.
+  void RunCopyElements(AccessTester<E>* that) {
+    SimplifiedLoweringTester<Object*> t;
+
+    Node* one = t.Int32Constant(1);
+    Node* index = t.Int32Constant(0);
+    Node* limit = t.Int32Constant(static_cast<int>(num_elements));
+    t.environment()->Push(index);
+    Node* src = this->GetBaseNode(&t);
+    Node* dst = that->GetBaseNode(&t);
+    {
+      LoopBuilder loop(&t);
+      loop.BeginLoop();
+      // Loop exit condition
+      index = t.environment()->Top();
+      Node* condition = t.Int32LessThan(index, limit);
+      loop.BreakUnless(condition);
+      // dst[index] = src[index]
+      index = t.environment()->Pop();
+      Node* load = t.LoadElement(this->GetElementAccess(), src, index);
+      t.StoreElement(that->GetElementAccess(), dst, index, load);
+      // index++
+      index = t.Int32Add(index, one);
+      t.environment()->Push(index);
+      // continue
+      loop.EndBody();
+      loop.EndLoop();
+    }
+    index = t.environment()->Pop();
+    t.Return(t.jsgraph.TrueConstant());
+    t.LowerAllNodes();
+    t.GenerateCode();
+
+    if (Pipeline::SupportedTarget()) {
+      Object* result = t.Call();
+      CHECK_EQ(t.isolate()->heap()->true_value(), result);
+    }
+  }
+
+  E GetElement(int index) {
+    BoundsCheck(index);
+    if (tagged) {
+      E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
+      return raw[index];
+    } else {
+      return untagged_array[index];
+    }
+  }
+
+ private:
+  ElementAccess GetElementAccess() {
+    ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
+                            tagged ? FixedArrayBase::kHeaderSize : 0,
+                            Type::Any(), rep};
+    return access;
+  }
+
+  FieldAccess GetFieldAccess(int field) {
+    int offset = field * sizeof(E);
+    FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
+ offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
+                          Handle<Name>(), Type::Any(), rep};
+    return access;
+  }
+
+  template <typename T>
+  Node* GetBaseNode(SimplifiedLoweringTester<T>* t) {
+    return tagged ? t->HeapConstant(tagged_array)
+                  : t->PointerConstant(untagged_array);
+  }
+
+  void BoundsCheck(int index) {
+    CHECK_GE(index, 0);
+    CHECK_LT(index, static_cast<int>(num_elements));
+    CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
+  }
+};
+
+
+template <typename E>
+static void RunAccessTest(MachineRepresentation rep, E* original_elements,
+                          size_t num) {
+  int num_elements = static_cast<int>(num);
+
+  for (int taggedness = 0; taggedness < 2; taggedness++) {
+    AccessTester<E> a(taggedness == 1, rep, original_elements, num);
+    for (int field = 0; field < 2; field++) {
+      for (int i = 0; i < num_elements - 1; i++) {
+        a.Reinitialize();
+        if (field == 0) {
+          a.RunCopyField(i, i + 1);  // Test field read/write.
+        } else {
+          a.RunCopyElement(i, i + 1);  // Test element read/write.
+        }
+        if (Pipeline::SupportedTarget()) {  // verify.
+          for (int j = 0; j < num_elements; j++) {
+            E expect =
+                j == (i + 1) ? original_elements[i] : original_elements[j];
+            CHECK_EQ(expect, a.GetElement(j));
+          }
+        }
+      }
+    }
+  }
+  // Test array copy.
+  for (int tf = 0; tf < 2; tf++) {
+    for (int tt = 0; tt < 2; tt++) {
+      AccessTester<E> a(tf == 1, rep, original_elements, num);
+      AccessTester<E> b(tt == 1, rep, original_elements, num);
+      a.RunCopyElements(&b);
+      if (Pipeline::SupportedTarget()) {  // verify.
+        for (int i = 0; i < num_elements; i++) {
+          CHECK_EQ(a.GetElement(i), b.GetElement(i));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunAccessTests_uint8) {
+  uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
+                    0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
+  RunAccessTest<uint8_t>(kMachineWord8, data, ARRAY_SIZE(data));
+}
+
+
+TEST(RunAccessTests_uint16) {
+  uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
+  RunAccessTest<uint16_t>(kMachineWord16, data, ARRAY_SIZE(data));
+}
+
+
+TEST(RunAccessTests_int32) {
+ int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
+  RunAccessTest<int32_t>(kMachineWord32, data, ARRAY_SIZE(data));
+}
+
+
+#define V8_2PART_INT64(a, b) (((static_cast<int64_t>(a) << 32) + 0x##b##u))
+
+
+TEST(RunAccessTests_int64) {
+  if (kPointerSize != 8) return;
+  int64_t data[] = {V8_2PART_INT64(0x10111213, 14151617),
+                    V8_2PART_INT64(0x20212223, 24252627),
+                    V8_2PART_INT64(0x30313233, 34353637),
+                    V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
+                    V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
+  RunAccessTest<int64_t>(kMachineWord64, data, ARRAY_SIZE(data));
+}
+
+
+TEST(RunAccessTests_float64) {
+  double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
+  RunAccessTest<double>(kMachineFloat64, data, ARRAY_SIZE(data));
+}
+
+
+TEST(RunAccessTests_Smi) {
+  Smi* data[] = {Smi::FromInt(-1),    Smi::FromInt(-9),
+                 Smi::FromInt(0),     Smi::FromInt(666),
+                 Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
+  RunAccessTest<Smi*>(kMachineTagged, data, ARRAY_SIZE(data));
+}
+
+
+// Fills in most of the nodes of the graph in order to make tests shorter.
+class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
+ public:
+  Typer typer;
+  JSGraph jsgraph;
+  Node* p0;
+  Node* p1;
+  Node* start;
+  Node* end;
+  Node* ret;
+
+  TestingGraph(Type* p0_type, Type* p1_type = Type::None())
+      : GraphAndBuilders(main_zone()),
+        typer(main_zone()),
+        jsgraph(graph(), common(), &typer) {
+    start = graph()->NewNode(common()->Start(2));
+    graph()->SetStart(start);
+    ret =
+        graph()->NewNode(common()->Return(), jsgraph.Constant(0), start, start);
+    end = graph()->NewNode(common()->End(), ret);
+    graph()->SetEnd(end);
+    p0 = graph()->NewNode(common()->Parameter(0), start);
+    p1 = graph()->NewNode(common()->Parameter(1), start);
+    NodeProperties::SetBounds(p0, Bounds(p0_type));
+    NodeProperties::SetBounds(p1, Bounds(p1_type));
+  }
+
+  void CheckLoweringBinop(IrOpcode::Value expected, Operator* op) {
+    Node* node = Return(graph()->NewNode(op, p0, p1));
+    Lower();
+    CHECK_EQ(expected, node->opcode());
+  }
+
+  void CheckLoweringTruncatedBinop(IrOpcode::Value expected, Operator* op,
+                                   Operator* trunc) {
+    Node* node = graph()->NewNode(op, p0, p1);
+    Return(graph()->NewNode(trunc, node));
+    Lower();
+    CHECK_EQ(expected, node->opcode());
+  }
+
+  void Lower() {
+    SimplifiedLowering lowering(&jsgraph, NULL);
+    lowering.LowerAllNodes();
+  }
+
+  // Inserts the node as the return value of the graph.
+  Node* Return(Node* node) {
+    ret->ReplaceInput(0, node);
+    return node;
+  }
+
+  // Inserts the node as the effect input to the return of the graph.
+  void Effect(Node* node) { ret->ReplaceInput(1, node); }
+
+  Node* ExampleWithOutput(RepType type) {
+    // TODO(titzer): use parameters with guaranteed representations.
+    if (type & tInt32) {
+      return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
+                              jsgraph.Int32Constant(1));
+    } else if (type & tUint32) {
+      return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
+                              jsgraph.Int32Constant(1));
+    } else if (type & rFloat64) {
+      return graph()->NewNode(machine()->Float64Add(),
+                              jsgraph.Float64Constant(1),
+                              jsgraph.Float64Constant(1));
+    } else if (type & rBit) {
+      return graph()->NewNode(machine()->Word32Equal(),
+                              jsgraph.Int32Constant(1),
+                              jsgraph.Int32Constant(1));
+    } else if (type & rWord64) {
+      return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
+                              Int64Constant(1));
+    } else {
+      CHECK(type & rTagged);
+      return p0;
+    }
+  }
+
+  Node* Use(Node* node, RepType type) {
+    if (type & tInt32) {
+      return graph()->NewNode(machine()->Int32LessThan(), node,
+                              jsgraph.Int32Constant(1));
+    } else if (type & tUint32) {
+      return graph()->NewNode(machine()->Uint32LessThan(), node,
+                              jsgraph.Int32Constant(1));
+    } else if (type & rFloat64) {
+      return graph()->NewNode(machine()->Float64Add(), node,
+                              jsgraph.Float64Constant(1));
+    } else if (type & rWord64) {
+      return graph()->NewNode(machine()->Int64LessThan(), node,
+                              Int64Constant(1));
+    } else {
+      return graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), node,
+                              jsgraph.TrueConstant());
+    }
+  }
+
+  Node* Branch(Node* cond) {
+    Node* br = graph()->NewNode(common()->Branch(), cond, start);
+    Node* tb = graph()->NewNode(common()->IfTrue(), br);
+    Node* fb = graph()->NewNode(common()->IfFalse(), br);
+    Node* m = graph()->NewNode(common()->Merge(2), tb, fb);
+    ret->ReplaceInput(NodeProperties::FirstControlIndex(ret), m);
+    return br;
+  }
+
+  Node* Int64Constant(int64_t v) {
+    return graph()->NewNode(common()->Int64Constant(v));
+  }
+
+  SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
+  MachineOperatorBuilder* machine() { return &main_machine_; }
+  CommonOperatorBuilder* common() { return &main_common_; }
+  Graph* graph() { return main_graph_; }
+};
+
+
+TEST(LowerBooleanNot_bit_bit) {
+  // BooleanNot(x: rBit) used as rBit
+  TestingGraph t(Type::Boolean());
+  Node* b = t.ExampleWithOutput(rBit);
+  Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
+  Node* use = t.Branch(inv);
+  t.Lower();
+  Node* cmp = use->InputAt(0);
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
+  CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
+  Node* f = t.jsgraph.Int32Constant(0);
+  CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
+}
+
+
+TEST(LowerBooleanNot_bit_tagged) {
+  // BooleanNot(x: rBit) used as rTagged
+  TestingGraph t(Type::Boolean());
+  Node* b = t.ExampleWithOutput(rBit);
+  Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
+  Node* use = t.Use(inv, rTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
+  Node* cmp = use->InputAt(0)->InputAt(0);
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
+  CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
+  Node* f = t.jsgraph.Int32Constant(0);
+  CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
+}
+
+
+TEST(LowerBooleanNot_tagged_bit) {
+  // BooleanNot(x: rTagged) used as rBit
+  TestingGraph t(Type::Boolean());
+  Node* b = t.p0;
+  Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
+  Node* use = t.Branch(inv);
+  t.Lower();
+  Node* cmp = use->InputAt(0);
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
+  CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
+  Node* f = t.jsgraph.FalseConstant();
+  CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
+}
+
+
+TEST(LowerBooleanNot_tagged_tagged) {
+  // BooleanNot(x: rTagged) used as rTagged
+  TestingGraph t(Type::Boolean());
+  Node* b = t.p0;
+  Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
+  Node* use = t.Use(inv, rTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
+  Node* cmp = use->InputAt(0)->InputAt(0);
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
+  CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
+  Node* f = t.jsgraph.FalseConstant();
+  CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
+}
+
+
+static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
+                             Type::Number(), Type::Any()};
+
+
+TEST(LowerNumberCmp_to_int32) {
+  TestingGraph t(Type::Signed32(), Type::Signed32());
+
+  t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
+  t.CheckLoweringBinop(IrOpcode::kInt32LessThan,
+                       t.simplified()->NumberLessThan());
+  t.CheckLoweringBinop(IrOpcode::kInt32LessThanOrEqual,
+                       t.simplified()->NumberLessThanOrEqual());
+}
+
+
+TEST(LowerNumberCmp_to_uint32) {
+  TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
+
+  t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
+  t.CheckLoweringBinop(IrOpcode::kUint32LessThan,
+                       t.simplified()->NumberLessThan());
+  t.CheckLoweringBinop(IrOpcode::kUint32LessThanOrEqual,
+                       t.simplified()->NumberLessThanOrEqual());
+}
+
+
+TEST(LowerNumberCmp_to_float64) {
+  static Type* types[] = {Type::Number(), Type::Any()};
+
+  for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
+    TestingGraph t(types[i], types[i]);
+
+    t.CheckLoweringBinop(IrOpcode::kFloat64Equal,
+                         t.simplified()->NumberEqual());
+    t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
+                         t.simplified()->NumberLessThan());
+    t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
+                         t.simplified()->NumberLessThanOrEqual());
+  }
+}
+
+
+TEST(LowerNumberAddSub_to_int32) {
+  TestingGraph t(Type::Signed32(), Type::Signed32());
+  t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
+                                t.simplified()->NumberAdd(),
+                                t.simplified()->NumberToInt32());
+  t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
+                                t.simplified()->NumberSubtract(),
+                                t.simplified()->NumberToInt32());
+}
+
+
+TEST(LowerNumberAddSub_to_uint32) {
+  TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
+  t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
+                                t.simplified()->NumberAdd(),
+                                t.simplified()->NumberToUint32());
+  t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
+                                t.simplified()->NumberSubtract(),
+                                t.simplified()->NumberToUint32());
+}
+
+
+TEST(LowerNumberAddSub_to_float64) {
+  for (size_t i = 0; i < ARRAY_SIZE(test_types); i++) {
+    TestingGraph t(test_types[i], test_types[i]);
+
+    t.CheckLoweringBinop(IrOpcode::kFloat64Add, t.simplified()->NumberAdd());
+    t.CheckLoweringBinop(IrOpcode::kFloat64Sub,
+                         t.simplified()->NumberSubtract());
+  }
+}
+
+
+TEST(LowerNumberDivMod_to_float64) {
+  for (size_t i = 0; i < ARRAY_SIZE(test_types); i++) {
+    TestingGraph t(test_types[i], test_types[i]);
+
+    t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
+    t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
+                         t.simplified()->NumberModulus());
+  }
+}
+
+
+static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
+  CHECK_EQ(change, node->opcode());
+  CHECK_EQ(of, node->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_nop) {
+  // NumberToInt32(x: rTagged | tInt32) used as rTagged
+  TestingGraph t(Type::Signed32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
+  Node* use = t.Use(trunc, rTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeTaggedToFloat64) {
+  // NumberToInt32(x: rTagged | tInt32) used as rFloat64
+  TestingGraph t(Type::Signed32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
+  Node* use = t.Use(trunc, rFloat64);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
+  // NumberToInt32(x: rTagged | tInt32) used as rWord32
+  TestingGraph t(Type::Signed32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
+  Node* use = t.Use(trunc, tInt32);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeFloat64ToTagged) {
+  // TODO(titzer): NumberToInt32(x: rFloat64 | tInt32) used as rTagged
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeFloat64ToInt32) {
+  // TODO(titzer): NumberToInt32(x: rFloat64 | tInt32) used as rWord32 | tInt32
+}
+
+
+TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
+  // TODO(titzer): NumberToInt32(x: rFloat64) used as rWord32 | tUint32
+}
+
+
+TEST(LowerNumberToUint32_to_nop) {
+  // NumberToUint32(x: rTagged | tUint32) used as rTagged
+  TestingGraph t(Type::Unsigned32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
+  Node* use = t.Use(trunc, rTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_ChangeTaggedToFloat64) {
+  // NumberToUint32(x: rTagged | tUint32) used as rWord32
+  TestingGraph t(Type::Unsigned32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
+  Node* use = t.Use(trunc, rFloat64);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
+  // NumberToUint32(x: rTagged | tUint32) used as rWord32
+  TestingGraph t(Type::Unsigned32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
+  Node* use = t.Use(trunc, tUint32);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_ChangeFloat64ToTagged) {
+  // TODO(titzer): NumberToUint32(x: rFloat64 | tUint32) used as rTagged
+}
+
+
+TEST(LowerNumberToUint32_to_ChangeFloat64ToUint32) {
+  // TODO(titzer): NumberToUint32(x: rFloat64 | tUint32) used as rWord32
+}
+
+
+TEST(LowerNumberToUint32_to_TruncateFloat64ToUint32) {
+  // TODO(titzer): NumberToUint32(x: rFloat64) used as rWord32
+}
+
+
+TEST(LowerReferenceEqual_to_wordeq) {
+  TestingGraph t(Type::Any(), Type::Any());
+  IrOpcode::Value opcode =
+      static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
+  t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual(Type::Any()));
+}
+
***The diff for this file has been truncated for email.***
=======================================
--- /trunk/test/mozilla/mozilla.status  Mon Aug  4 06:49:33 2014 UTC
+++ /trunk/test/mozilla/mozilla.status  Mon Aug 11 12:49:03 2014 UTC
@@ -859,7 +859,6 @@
   'js1_5/Regress/regress-404755': [SKIP],
   'js1_5/Regress/regress-451322': [SKIP],

-
   # BUG(1040): Allow this test to timeout.
   'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
 }],  # 'arch == arm or arch == arm64'
@@ -868,6 +867,9 @@
 ['arch ==  arm64', {
   # BUG(v8:3152): Runs out of stack in debug mode.
   'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
+
+  # BUG(v8:3503): Times out in debug mode.
+  'js1_5/Regress/regress-280769-2': [PASS, FAIL, ['mode == debug', SKIP]],
 }],  # 'arch ==  arm64'


=======================================
--- /trunk/testing/gtest.gyp    Thu Aug  7 08:39:21 2014 UTC
+++ /trunk/testing/gtest.gyp    Mon Aug 11 12:49:03 2014 UTC
@@ -79,7 +79,7 @@
             ],
           },
         }],
-        ['OS=="android" and android_app_abi=="x86"', {
+        ['OS=="android"', {
           'defines': [
             'GTEST_HAS_CLONE=0',
           ],
=======================================
--- /trunk/tools/whitespace.txt Fri Aug  8 15:46:17 2014 UTC
+++ /trunk/tools/whitespace.txt Mon Aug 11 12:49:03 2014 UTC
@@ -5,4 +5,4 @@
 A Smi walks into a bar and says:
 "I'm so deoptimized today!"
 The doubles heard this and started to unbox.
-The Smi looked at them and..................
+The Smi looked at them and...................

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to