Revision: 6452
Author: [email protected]
Date: Tue Jan 25 02:10:36 2011
Log: Port new version of ParallelMove's LGapResolver to X64.
Review URL: http://codereview.chromium.org/6366003
http://code.google.com/p/v8/source/detail?r=6452

Added:
 /branches/bleeding_edge/src/x64/lithium-gap-resolver-x64.cc
 /branches/bleeding_edge/src/x64/lithium-gap-resolver-x64.h
Modified:
 /branches/bleeding_edge/src/SConscript
 /branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.cc
 /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
 /branches/bleeding_edge/src/x64/lithium-codegen-x64.h
 /branches/bleeding_edge/tools/gyp/v8.gyp

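A quick summary before the diffs: the old x64 resolver (deleted from
lithium-codegen-x64.cc below) rewrote each parallel move into a serial move
list, routing cycles through a marker operand that the code generator then
lowered to the scratch registers. The new resolver, ported over from the ia32
version, emits code for each move directly: moves are marked pending to detect
cycles, cycles are broken with swaps, and kScratchRegister / xmm0 remain the
fixed scratch values for memory-to-memory traffic. As a rough worked example
(the operand assignment is invented for illustration, not taken from the
patch), the parallel move {rax -> rbx, rbx -> rax, 42 -> rcx} would come out
roughly as:

__ xchg(rax, rbx);            // EmitSwap breaks the two-register cycle
__ movl(rcx, Immediate(42));  // constant sources are performed in the last pass
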
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/x64/lithium-gap-resolver-x64.cc Tue Jan 25 02:10:36 2011
@@ -0,0 +1,314 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "x64/lithium-gap-resolver-x64.h"
+#include "x64/lithium-codegen-x64.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner), moves_(32) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  ASSERT(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      PerformMove(i);
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      ASSERT(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move);
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.  We use operand swaps to resolve cycles,
+  // which means that a call to PerformMove could change any source operand
+  // in the move graph.
+
+  ASSERT(!moves_[index].IsPending());
+  ASSERT(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack-allocated local.  Recursion may allow
+  // multiple moves to be pending.
+  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does
+      // not miss any).  Assume there is a non-blocking move with source A
+      // and this move is blocked on source B and there is a swap of A and
+      // B.  Then A and B must be involved in the same cycle (or they would
+      // not be swapped).  Since this move's destination is B and there is
+      // only a single incoming edge to an operand, this move must also be
+      // involved in the same cycle.  In that case, the blocking move will
+      // be created but will be "pending" when we return from PerformMove.
+      PerformMove(i);
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and
+  // so it may now be the last move in the cycle.  If so remove it.
+  if (moves_[index].source()->Equals(destination)) {
+    moves_[index].Eliminate();
+    return;
+  }
+
+  // The move may be blocked on a (at most one) pending move, in which case
+  // we have a cycle.  Search for such a blocking move and perform a swap to
+  // resolve it.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination)) {
+      ASSERT(other_move.IsPending());
+      EmitSwap(index);
+      return;
+    }
+  }
+
+  // This move is not blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    Register src = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      __ movq(dst, src);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      __ movq(dst, src);
+    }
+
+  } else if (source->IsStackSlot()) {
+    Operand src = cgen_->ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      __ movq(dst, src);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      __ movq(kScratchRegister, src);
+      __ movq(dst, kScratchRegister);
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsInteger32Constant(constant_source)) {
+        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+      } else {
+        __ Move(dst, cgen_->ToHandle(constant_source));
+      }
+    } else {
+      ASSERT(destination->IsStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      if (cgen_->IsInteger32Constant(constant_source)) {
+        // Allow top 32 bits of an untagged Integer32 to be arbitrary.
+        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+      } else {
+        __ Move(dst, cgen_->ToHandle(constant_source));
+      }
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ movsd(cgen_->ToDoubleRegister(destination), src);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      __ movsd(cgen_->ToOperand(destination), src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    Operand src = cgen_->ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ movsd(cgen_->ToDoubleRegister(destination), src);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      __ movsd(xmm0, src);
+      __ movsd(cgen_->ToOperand(destination), xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Swap two general-purpose registers.
+    Register src = cgen_->ToRegister(source);
+    Register dst = cgen_->ToRegister(destination);
+    __ xchg(dst, src);
+
+  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+             (source->IsStackSlot() && destination->IsRegister())) {
+    // Swap a general-purpose register and a stack slot.
+    Register reg =
+        cgen_->ToRegister(source->IsRegister() ? source : destination);
+    Operand mem =
+        cgen_->ToOperand(source->IsRegister() ? destination : source);
+    __ movq(kScratchRegister, mem);
+    __ movq(mem, reg);
+    __ movq(reg, kScratchRegister);
+
+  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+      (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
+    // Swap two stack slots or two double stack slots.
+    Operand src = cgen_->ToOperand(source);
+    Operand dst = cgen_->ToOperand(destination);
+    __ movsd(xmm0, src);
+    __ movq(kScratchRegister, dst);
+    __ movsd(dst, xmm0);
+    __ movq(src, kScratchRegister);
+
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // Swap two double registers.
+    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
+    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
+    __ movsd(xmm0, source_reg);
+    __ movsd(source_reg, destination_reg);
+    __ movsd(destination_reg, xmm0);
+
+  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+    // Swap a double register and a double stack slot.
+    ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
+           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
+    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
+                                                  ? source
+                                                  : destination);
+    LOperand* other = source->IsDoubleRegister() ? destination : source;
+    ASSERT(other->IsDoubleStackSlot());
+    Operand other_operand = cgen_->ToOperand(other);
+    __ movsd(xmm0, other_operand);
+    __ movsd(other_operand, reg);
+    __ movsd(reg, xmm0);
+
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+
+  // The swap of source and destination has executed a move from source to
+  // destination.
+  moves_[index].Eliminate();
+
+  // Any unperformed (including pending) move with a source of either
+  // this move's source or destination needs to have their source
+  // changed to reflect the state of affairs after the swap.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(source)) {
+      moves_[i].set_source(destination);
+    } else if (other_move.Blocks(destination)) {
+      moves_[i].set_source(source);
+    }
+  }
+}
+
+#undef __
+
+} }  // namespace v8::internal
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/x64/lithium-gap-resolver-x64.h Tue Jan 25 02:10:36 2011
@@ -0,0 +1,74 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Execute a move by emitting a swap of two operands.  The move from
+  // source to destination is removed from the move graph.
+  void EmitSwap(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
=======================================
--- /branches/bleeding_edge/src/SConscript      Thu Jan 20 00:38:01 2011
+++ /branches/bleeding_edge/src/SConscript      Tue Jan 25 02:10:36 2011
@@ -216,8 +216,9 @@
     x64/full-codegen-x64.cc
     x64/ic-x64.cc
     x64/jump-target-x64.cc
-    x64/lithium-x64.cc
     x64/lithium-codegen-x64.cc
+    x64/lithium-gap-resolver-x64.cc
+    x64/lithium-x64.cc
     x64/macro-assembler-x64.cc
     x64/regexp-macro-assembler-x64.cc
     x64/register-allocator-x64.cc
=======================================
--- /branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.cc Mon Jan 17 03:25:36 2011
+++ /branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.cc Tue Jan 25 02:10:36 2011
@@ -32,12 +32,11 @@
 namespace internal {

 LGapResolver::LGapResolver(LCodeGen* owner)
-    : cgen_(owner), moves_(32), spilled_register_(-1) {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
-    source_uses_[i] = 0;
-    destination_uses_[i] = 0;
-  }
-}
+    : cgen_(owner),
+      moves_(32),
+      source_uses_(),
+      destination_uses_(),
+      spilled_register_(-1) {}


 void LGapResolver::Resolve(LParallelMove* parallel_move) {
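
The constructor change above leans on value-initialization: naming an array
member with empty parentheses in the initializer list zero-fills it, so the
explicit zeroing loop becomes unnecessary. A minimal sketch of the idiom (the
type and field names here are invented):

struct UseCounts {
  UseCounts() : uses_() {}  // uses_() value-initializes the array: all zeros
  int uses_[8];
};
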
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Mon Jan 24 23:57:56 2011
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Tue Jan 25 02:10:36 2011
@@ -37,157 +37,6 @@
 namespace internal {


-class LGapNode: public ZoneObject {
- public:
-  explicit LGapNode(LOperand* operand)
-      : operand_(operand), resolved_(false), visited_id_(-1) { }
-
-  LOperand* operand() const { return operand_; }
-  bool IsResolved() const { return !IsAssigned() || resolved_; }
-  void MarkResolved() {
-    ASSERT(!IsResolved());
-    resolved_ = true;
-  }
-  int visited_id() const { return visited_id_; }
-  void set_visited_id(int id) {
-    ASSERT(id > visited_id_);
-    visited_id_ = id;
-  }
-
-  bool IsAssigned() const { return assigned_from_.is_set(); }
-  LGapNode* assigned_from() const { return assigned_from_.get(); }
-  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
-  LOperand* operand_;
-  SetOncePointer<LGapNode> assigned_from_;
-  bool resolved_;
-  int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
-    : nodes_(32),
-      identified_cycles_(4),
-      result_(16),
-      next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
-    const ZoneList<LMoveOperands>* moves,
-    LOperand* marker_operand) {
-  nodes_.Rewind(0);
-  identified_cycles_.Rewind(0);
-  result_.Rewind(0);
-  next_visited_id_ = 0;
-
-  for (int i = 0; i < moves->length(); ++i) {
-    LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) RegisterMove(move);
-  }
-
-  for (int i = 0; i < identified_cycles_.length(); ++i) {
-    ResolveCycle(identified_cycles_[i], marker_operand);
-  }
-
-  int unresolved_nodes;
-  do {
-    unresolved_nodes = 0;
-    for (int j = 0; j < nodes_.length(); j++) {
-      LGapNode* node = nodes_[j];
-      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
-        AddResultMove(node->assigned_from(), node);
-        node->MarkResolved();
-      }
-      if (!node->IsResolved()) ++unresolved_nodes;
-    }
-  } while (unresolved_nodes > 0);
-  return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
-  AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
-  result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
-  ZoneList<LOperand*> cycle_operands(8);
-  cycle_operands.Add(marker_operand);
-  LGapNode* cur = start;
-  do {
-    cur->MarkResolved();
-    cycle_operands.Add(cur->operand());
-    cur = cur->assigned_from();
-  } while (cur != start);
-  cycle_operands.Add(marker_operand);
-
-  for (int i = cycle_operands.length() - 1; i > 0; --i) {
-    LOperand* from = cycle_operands[i];
-    LOperand* to = cycle_operands[i - 1];
-    AddResultMove(from, to);
-  }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
-  ASSERT(a != b);
-  LGapNode* cur = a;
-  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
-    cur->set_visited_id(visited_id);
-    cur = cur->assigned_from();
-  }
-
-  return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
-  ASSERT(a != b);
-  return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
-  if (move.source()->IsConstantOperand()) {
-    // Constant moves should be last in the machine code. Therefore add them
-    // first to the result set.
-    AddResultMove(move.source(), move.destination());
-  } else {
-    LGapNode* from = LookupNode(move.source());
-    LGapNode* to = LookupNode(move.destination());
-    if (to->IsAssigned() && to->assigned_from() == from) {
-      move.Eliminate();
-      return;
-    }
-    ASSERT(!to->IsAssigned());
-    if (CanReach(from, to)) {
-      // This introduces a cycle. Save.
-      identified_cycles_.Add(from);
-    }
-    to->set_assigned_from(from);
-  }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
-  for (int i = 0; i < nodes_.length(); ++i) {
-    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
-  }
-
-  // No node found => create a new one.
-  LGapNode* result = new LGapNode(operand);
-  nodes_.Add(result);
-  return result;
-}
-
-
 #define __ masm()->

 bool LCodeGen::GenerateCode() {
@@ -696,86 +545,7 @@


 void LCodeGen::DoParallelMove(LParallelMove* move) {
-  // xmm0 must always be a scratch register.
-  XMMRegister xmm_scratch = xmm0;
-  LUnallocated marker_operand(LUnallocated::NONE);
-
-  Register cpu_scratch = kScratchRegister;
-
-  const ZoneList<LMoveOperands>* moves =
-      resolver_.Resolve(move->move_operands(), &marker_operand);
-  for (int i = moves->length() - 1; i >= 0; --i) {
-    LMoveOperands move = moves->at(i);
-    LOperand* from = move.source();
-    LOperand* to = move.destination();
-    ASSERT(!from->IsDoubleRegister() ||
-           !ToDoubleRegister(from).is(xmm_scratch));
-    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
-    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
-    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
-    if (from->IsConstantOperand()) {
-      LConstantOperand* constant_from = LConstantOperand::cast(from);
-      if (to->IsRegister()) {
-        if (IsInteger32Constant(constant_from)) {
-          __ movl(ToRegister(to), Immediate(ToInteger32(constant_from)));
-        } else {
-          __ Move(ToRegister(to), ToHandle(constant_from));
-        }
-      } else {
-        if (IsInteger32Constant(constant_from)) {
-          __ movl(ToOperand(to), Immediate(ToInteger32(constant_from)));
-        } else {
-          __ Move(ToOperand(to), ToHandle(constant_from));
-        }
-      }
-    } else if (from == &marker_operand) {
-      if (to->IsRegister()) {
-        __ movq(ToRegister(to), cpu_scratch);
-      } else if (to->IsStackSlot()) {
-        __ movq(ToOperand(to), cpu_scratch);
-      } else if (to->IsDoubleRegister()) {
-        __ movsd(ToDoubleRegister(to), xmm_scratch);
-      } else {
-        ASSERT(to->IsDoubleStackSlot());
-        __ movsd(ToOperand(to), xmm_scratch);
-      }
-    } else if (to == &marker_operand) {
-      if (from->IsRegister()) {
-        __ movq(cpu_scratch, ToRegister(from));
-      } else if (from->IsStackSlot()) {
-        __ movq(cpu_scratch, ToOperand(from));
-      } else if (from->IsDoubleRegister()) {
-        __ movsd(xmm_scratch, ToDoubleRegister(from));
-      } else {
-        ASSERT(from->IsDoubleStackSlot());
-        __ movsd(xmm_scratch, ToOperand(from));
-      }
-    } else if (from->IsRegister()) {
-      if (to->IsRegister()) {
-        __ movq(ToRegister(to), ToRegister(from));
-      } else {
-        __ movq(ToOperand(to), ToRegister(from));
-      }
-    } else if (to->IsRegister()) {
-      __ movq(ToRegister(to), ToOperand(from));
-    } else if (from->IsStackSlot()) {
-      ASSERT(to->IsStackSlot());
-      __ push(rax);
-      __ movq(rax, ToOperand(from));
-      __ movq(ToOperand(to), rax);
-      __ pop(rax);
-    } else if (from->IsDoubleRegister()) {
-      ASSERT(to->IsDoubleStackSlot());
-      __ movsd(ToOperand(to), ToDoubleRegister(from));
-    } else if (to->IsDoubleRegister()) {
-      ASSERT(from->IsDoubleStackSlot());
-      __ movsd(ToDoubleRegister(to), ToOperand(from));
-    } else {
-      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
-      __ movsd(xmm_scratch, ToOperand(from));
-      __ movsd(ToOperand(to), xmm_scratch);
-    }
-  }
+  resolver_.Resolve(move);
 }


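Together with the header hunk further down, the wiring becomes minimal:
LCodeGen constructs the resolver with itself as owner and DoParallelMove
simply delegates. A condensed sketch of the resulting shape (not a new hunk,
just the two pieces side by side):

class LCodeGen BASE_EMBEDDED {
  // ...
  LGapResolver resolver_;  // initialized as resolver_(this) in the constructor
};

void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}
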
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.h Wed Jan 19 02:17:18 2011
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.h Tue Jan 25 02:10:36 2011
@@ -34,37 +34,15 @@
 #include "deoptimizer.h"
 #include "safepoint-table.h"
 #include "scopes.h"
+#include "x64/lithium-gap-resolver-x64.h"

 namespace v8 {
 namespace internal {

 // Forward declarations.
 class LDeferredCode;
-class LGapNode;
 class SafepointGenerator;

-class LGapResolver BASE_EMBEDDED {
- public:
-  LGapResolver();
-  const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
-                                         LOperand* marker_operand);
-
- private:
-  LGapNode* LookupNode(LOperand* operand);
-  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
-  bool CanReach(LGapNode* a, LGapNode* b);
-  void RegisterMove(LMoveOperands move);
-  void AddResultMove(LOperand* from, LOperand* to);
-  void AddResultMove(LGapNode* from, LGapNode* to);
-  void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
-  ZoneList<LGapNode*> nodes_;
-  ZoneList<LGapNode*> identified_cycles_;
-  ZoneList<LMoveOperands> result_;
-  int next_visited_id_;
-};
-
-
 class LCodeGen BASE_EMBEDDED {
  public:
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -80,9 +58,23 @@
         scope_(chunk->graph()->info()->scope()),
         status_(UNUSED),
         deferred_(8),
-        osr_pc_offset_(-1) {
+        osr_pc_offset_(-1),
+        resolver_(this) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
+
+  // Simple accessors.
+  MacroAssembler* masm() const { return masm_; }
+
+  // Support for converting LOperands to assembler types.
+  Register ToRegister(LOperand* op) const;
+  XMMRegister ToDoubleRegister(LOperand* op) const;
+  bool IsInteger32Constant(LConstantOperand* op) const;
+  int ToInteger32(LConstantOperand* op) const;
+  bool IsTaggedConstant(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op) const;
+

   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
@@ -129,7 +121,6 @@
   LChunk* chunk() const { return chunk_; }
   Scope* scope() const { return scope_; }
   HGraph* graph() const { return chunk_->graph(); }
-  MacroAssembler* masm() const { return masm_; }

   int GetNextEmittedBlock(int block);
   LInstruction* GetNextInstruction();
@@ -190,13 +181,6 @@

   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
-  Register ToRegister(LOperand* op) const;
-  XMMRegister ToDoubleRegister(LOperand* op) const;
-  bool IsInteger32Constant(LConstantOperand* op) const;
-  int ToInteger32(LConstantOperand* op) const;
-  bool IsTaggedConstant(LConstantOperand* op) const;
-  Handle<Object> ToHandle(LConstantOperand* op) const;
-  Operand ToOperand(LOperand* op) const;

   // Specific math operations - used from DoUnaryMathOperation.
   void DoMathAbs(LUnaryMathOperation* instr);
=======================================
--- /branches/bleeding_edge/tools/gyp/v8.gyp    Thu Jan 20 00:38:01 2011
+++ /branches/bleeding_edge/tools/gyp/v8.gyp    Tue Jan 25 02:10:36 2011
@@ -691,6 +691,8 @@
                 '../../src/x64/jump-target-x64.cc',
                 '../../src/x64/lithium-codegen-x64.cc',
                 '../../src/x64/lithium-codegen-x64.h',
+                '../../src/x64/lithium-gap-resolver-x64.cc',
+                '../../src/x64/lithium-gap-resolver-x64.h',
                 '../../src/x64/lithium-x64.cc',
                 '../../src/x64/lithium-x64.h',
                 '../../src/x64/macro-assembler-x64.cc',
