Revision: 6839
Author: [email protected]
Date: Thu Feb 17 07:25:38 2011
Log: ARM: Port new version of ParallelMove's GapResolver to ARM. Fix an
error in vstr(DoubleRegister, MemOperand), introduced in r6830, where it
was implemented as a vldr.
Review URL: http://codereview.chromium.org/6311010
http://code.google.com/p/v8/source/detail?r=6839
Added:
/branches/bleeding_edge/src/arm/lithium-gap-resolver-arm.cc
/branches/bleeding_edge/src/arm/lithium-gap-resolver-arm.h
Modified:
/branches/bleeding_edge/src/SConscript
/branches/bleeding_edge/src/arm/assembler-arm.cc
/branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
/branches/bleeding_edge/src/arm/lithium-codegen-arm.h
/branches/bleeding_edge/tools/gyp/v8.gyp
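
For context: a parallel move is a set of moves that must behave as if they
executed simultaneously, so a cycle in the move graph (e.g. a swap) needs a
scratch location. The following is a minimal, self-contained C++ model of
the depth-first, cycle-breaking scheme that lithium-gap-resolver-arm.cc
implements below. It is a sketch, not V8 code: locations are indices into a
flat array, a bool stands in for V8's destination-cleared "pending" marker,
a single always-free slot models kSavedValueRegister, and the constants-last
pass and the large-offset MemOperand complication are omitted.

  #include <cstdio>
  #include <vector>

  // Simplified model of the new GapResolver (illustrative, not V8 API).
  struct Move {
    int src, dst;
    bool pending, eliminated;
  };

  struct Resolver {
    std::vector<int>& loc;   // the "registers/stack slots"
    std::vector<Move> moves;
    int root;                // models root_index_
    bool in_cycle;           // models in_cycle_
    int saved, saved_dst;    // model kSavedValueRegister/saved_destination_

    explicit Resolver(std::vector<int>& l)
        : loc(l), root(0), in_cycle(false), saved(0), saved_dst(-1) {}

    void Perform(int index) {
      moves[index].pending = true;  // V8 marks this by clearing destination
      int dst = moves[index].dst;
      // Depth-first: perform every unperformed, unpending move reading dst.
      for (int i = 0; i < (int)moves.size(); ++i) {
        if (!moves[i].eliminated && !moves[i].pending && moves[i].src == dst)
          Perform(i);
      }
      moves[index].pending = false;
      // If the (pending) root move still reads dst we have closed a cycle:
      // spill our source (BreakCycle) and finish this move later.
      if (!moves[root].eliminated && moves[root].src == dst && root != index) {
        in_cycle = true;
        saved = loc[moves[index].src];
        saved_dst = dst;
        moves[index].eliminated = true;
        return;
      }
      loc[dst] = loc[moves[index].src];  // EmitMove
      moves[index].eliminated = true;
    }

    void Resolve() {
      for (int i = 0; i < (int)moves.size(); ++i) {
        if (moves[i].eliminated) continue;
        root = i;
        Perform(i);
        if (in_cycle) {        // RestoreValue
          loc[saved_dst] = saved;
          in_cycle = false;
        }
      }
    }
  };

  int main() {
    std::vector<int> loc = {10, 20, 30};
    Resolver r(loc);
    Move m0 = {0, 1, false, false};  // loc0 -> loc1
    Move m1 = {1, 2, false, false};  // loc1 -> loc2
    Move m2 = {2, 0, false, false};  // loc2 -> loc0: closes the cycle
    r.moves.push_back(m0);
    r.moves.push_back(m1);
    r.moves.push_back(m2);
    r.Resolve();
    std::printf("%d %d %d\n", loc[0], loc[1], loc[2]);  // prints: 30 10 20
    return 0;
  }

Compiled and run, this prints "30 10 20": the three-way rotation resolves
with exactly one spill, which is the property the real resolver relies on
when it dedicates a single scratch register per cycle.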
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/arm/lithium-gap-resolver-arm.cc Thu Feb 17 07:25:38 2011
@@ -0,0 +1,303 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm/lithium-gap-resolver-arm.h"
+#include "arm/lithium-codegen-arm.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = { 9 };
+static const DoubleRegister kSavedDoubleValueRegister = { 0 };
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
+ saved_destination_(NULL) { }
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ root_index_ = i; // Any cycle is found by reaching this move again.
+ PerformMove(i);
+ if (in_cycle_) {
+ RestoreValue();
+ }
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph.
+
+ // We can only find a cycle, when doing a depth-first traversal of moves,
+ // by encountering the starting move again. So by spilling the source of
+ // the starting move, we break the cycle. All moves are then unblocked,
+ // and the starting move is completed by writing the spilled value to
+ // its destination. All other moves from the spilled source have been
+ // completed prior to breaking the cycle.
+ // An additional complication is that moves to MemOperands with large
+ // offsets (more than 1K or 4K) use ip internally to form the address,
+ // so the spilled value must be kept out of ip (see EmitMove).
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack allocated local. Multiple moves can
+ // be pending because this function is recursive.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ PerformMove(i);
+ // If there is a blocking, pending move it must be moves_[root_index_]
+ // and all other moves with the same source as moves_[root_index_] are
+ // successfully executed (because they are cycle-free) by this loop.
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // The move may be blocked on a pending move, which must be the starting move.
+ // In this case, we have a cycle, and we save the source of this move to
+ // a scratch register to break it.
+ LMoveOperands other_move = moves_[root_index_];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ BreakCycle(index);
+ return;
+ }
+
+ // This move is no longer blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+ // We save in a register the value that should end up in the source of
+ // moves_[root_index_]. After performing all moves in the tree rooted
+ // in that move, we restore the value to that source.
+ ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
+ ASSERT(!in_cycle_);
+ in_cycle_ = true;
+ LOperand* source = moves_[index].source();
+ saved_destination_ = moves_[index].destination();
+ if (source->IsRegister()) {
+ __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+ } else if (source->IsStackSlot()) {
+ __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
+ } else if (source->IsDoubleRegister()) {
+ __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+ } else if (source->IsDoubleStackSlot()) {
+ __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+ } else {
+ UNREACHABLE();
+ }
+ // This move will be done by restoring the saved value to the destination.
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+ ASSERT(in_cycle_);
+ ASSERT(saved_destination_ != NULL);
+
+ // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+ if (saved_destination_->IsRegister()) {
+ __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+ } else if (saved_destination_->IsStackSlot()) {
+ __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+ } else if (saved_destination_->IsDoubleRegister()) {
+ __ vmov(cgen_->ToDoubleRegister(saved_destination_),
+ kSavedDoubleValueRegister);
+ } else if (saved_destination_->IsDoubleStackSlot()) {
+ __ vstr(kSavedDoubleValueRegister,
+ cgen_->ToMemOperand(saved_destination_));
+ } else {
+ UNREACHABLE();
+ }
+
+ in_cycle_ = false;
+ saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+
+ if (source->IsRegister()) {
+ Register source_register = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ __ str(source_register, cgen_->ToMemOperand(destination));
+ }
+
+ } else if (source->IsStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ ldr(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ if (!destination_operand.OffsetIsUint12Encodable()) {
+ // ip is overwritten while saving the value to the destination.
+ // Therefore we can't use ip. It is OK if the read from the source
+ // destroys ip, since that happens before the value is read.
+ __ vldr(kSavedDoubleValueRegister.low(), source_operand);
+ __ vstr(kSavedDoubleValueRegister.low(), destination_operand);
+ } else {
+ __ ldr(ip, source_operand);
+ __ str(ip, destination_operand);
+ }
+ } else {
+ __ ldr(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, destination_operand);
+ }
+ }
+
+ } else if (source->IsConstantOperand()) {
+ Operand source_operand = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ __ mov(cgen_->ToRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ mov(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, destination_operand);
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ vmov(cgen_->ToDoubleRegister(destination), source_register);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ __ vstr(source_register, destination_operand);
+ }
+
+ } else if (source->IsDoubleStackSlot()) {
+ MemOperand source_operand = cgen_->ToMemOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ MemOperand destination_operand = cgen_->ToMemOperand(destination);
+ if (in_cycle_) {
+ // kSavedDoubleValueRegister was used to break the cycle,
+ // but kSavedValueRegister is free.
+ MemOperand source_high_operand =
+ cgen_->ToHighMemOperand(source);
+ MemOperand destination_high_operand =
+ cgen_->ToHighMemOperand(destination);
+ __ ldr(kSavedValueRegister, source_operand);
+ __ str(kSavedValueRegister, destination_operand);
+ __ ldr(kSavedValueRegister, source_high_operand);
+ __ str(kSavedValueRegister, destination_high_operand);
+ } else {
+ __ vldr(kSavedDoubleValueRegister, source_operand);
+ __ vstr(kSavedDoubleValueRegister, destination_operand);
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
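
Concretely, for a cycle swapping a core register with a stack slot, say the
parallel move {r1 -> [fp, #-12], [fp, #-12] -> r1}, the code above emits a
sequence along these lines (a sketch: kSavedValueRegister is r9 per the
definition at the top of the file, and the slot offset is invented):

  ldr r9, [fp, #-12]   @ BreakCycle: spill the blocking source
  str r1, [fp, #-12]   @ EmitMove: the root move, now unblocked
  mov r1, r9           @ RestoreValue: finish the cycle from the spill

Note that the store of r1 lands between the spill and the restore, which is
why the saved value must live in a register the moves themselves never use.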
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/arm/lithium-gap-resolver-arm.h Thu Feb 17 07:25:38 2011
@@ -0,0 +1,84 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // If a cycle is found in the series of moves, save the blocking value to
+ // a scratch register. The cycle must be found by hitting the root of the
+ // depth-first search.
+ void BreakCycle(int index);
+
+ // After a cycle has been resolved, restore the value from the scratch
+ // register to its proper destination.
+ void RestoreValue();
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ int root_index_;
+ bool in_cycle_;
+ LOperand* saved_destination_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
=======================================
--- /branches/bleeding_edge/src/SConscript Fri Feb 11 04:25:41 2011
+++ /branches/bleeding_edge/src/SConscript Thu Feb 17 07:25:38 2011
@@ -153,6 +153,7 @@
arm/jump-target-arm.cc
arm/lithium-arm.cc
arm/lithium-codegen-arm.cc
+ arm/lithium-gap-resolver-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
arm/register-allocator-arm.cc
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.cc Thu Feb 17 02:07:13 2011
+++ /branches/bleeding_edge/src/arm/assembler-arm.cc Thu Feb 17 07:25:38 2011
@@ -1957,7 +1957,7 @@
const Condition cond) {
ASSERT(!operand.rm().is_valid());
ASSERT(operand.am_ == Offset);
- vldr(src, operand.rn(), operand.offset(), cond);
+ vstr(src, operand.rn(), operand.offset(), cond);
}
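
The fix above is a copy-paste bug from r6830: the MemOperand overload of
vstr delegated to the load primitive. For contrast, a sketch of the
corrected pair of wrappers (the vldr counterpart and the surrounding context
are reconstructed; only the changed vstr line appears verbatim in the hunk):

  void Assembler::vldr(const DwVfpRegister dst, const MemOperand& operand,
                       const Condition cond) {
    ASSERT(!operand.rm().is_valid());
    ASSERT(operand.am_ == Offset);
    vldr(dst, operand.rn(), operand.offset(), cond);  // load primitive
  }

  void Assembler::vstr(const DwVfpRegister src, const MemOperand& operand,
                       const Condition cond) {
    ASSERT(!operand.rm().is_valid());
    ASSERT(operand.am_ == Offset);
    vstr(src, operand.rn(), operand.offset(), cond);  // was: vldr(src, ...)
  }

The new gap resolver is what exercises the vstr overload for double stores,
which is why the bug surfaces, and is fixed, in this same commit.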
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Wed Feb 16 06:48:41 2011
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Thu Feb 17 07:25:38 2011
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arm/lithium-codegen-arm.h"
+#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
@@ -54,157 +55,6 @@
};
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
- : nodes_(32),
- identified_cycles_(4),
- result_(16),
- next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
- const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand) {
- nodes_.Rewind(0);
- identified_cycles_.Rewind(0);
- result_.Rewind(0);
- next_visited_id_ = 0;
-
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i], marker_operand);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
- ZoneList<LOperand*> cycle_operands(8);
- cycle_operands.Add(marker_operand);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- cycle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- cycle_operands.Add(marker_operand);
-
- for (int i = cycle_operands.length() - 1; i > 0; --i) {
- LOperand* from = cycle_operands[i];
- LOperand* to = cycle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.source()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.source(), move.destination());
- } else {
- LGapNode* from = LookupNode(move.source());
- LGapNode* to = LookupNode(move.destination());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a cycle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -464,7 +314,6 @@
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
- // TODO(regis): Revisit.
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
@@ -478,6 +327,21 @@
return MemOperand(fp, -(index - 1) * kPointerSize);
}
}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+ ASSERT(op->IsDoubleStackSlot());
+ int index = op->index();
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, context,
+ // and the first word of the double in the fixed part of the frame.
+ return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
+ } else {
+ // Incoming parameter. Skip the return address and the first word of
+ // the double.
+ return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
+ }
+}
void LCodeGen::WriteTranslation(LEnvironment* environment,
@@ -787,116 +651,7 @@
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // d0 must always be a scratch register.
- DoubleRegister dbl_scratch = d0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register core_scratch = scratch0();
- bool destroys_core_scratch = false;
-
- const ZoneList<LMoveOperands>* moves =
- resolver_.Resolve(move->move_operands(), &marker_operand);
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.source();
- LOperand* to = move.destination();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(dbl_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
- if (from == &marker_operand) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), core_scratch);
- ASSERT(destroys_core_scratch);
- } else if (to->IsStackSlot()) {
- __ str(core_scratch, ToMemOperand(to));
- ASSERT(destroys_core_scratch);
- } else if (to->IsDoubleRegister()) {
- __ vmov(ToDoubleRegister(to), dbl_scratch);
- } else {
- ASSERT(to->IsDoubleStackSlot());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister() || from->IsConstantOperand()) {
- __ mov(core_scratch, ToOperand(from));
- destroys_core_scratch = true;
- } else if (from->IsStackSlot()) {
- __ ldr(core_scratch, ToMemOperand(from));
- destroys_core_scratch = true;
- } else if (from->IsDoubleRegister()) {
- __ vmov(dbl_scratch, ToDoubleRegister(from));
- } else {
- ASSERT(from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
- }
- } else if (from->IsConstantOperand()) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsStackSlot());
- __ mov(ip, ToOperand(from));
- __ str(ip, ToMemOperand(to));
- }
- } else if (from->IsRegister()) {
- if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsStackSlot());
- __ str(ToRegister(from), ToMemOperand(to));
- }
- } else if (to->IsRegister()) {
- ASSERT(from->IsStackSlot());
- __ ldr(ToRegister(to), ToMemOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ ldr(ip, ToMemOperand(from));
- __ str(ip, ToMemOperand(to));
- } else if (from->IsDoubleRegister()) {
- if (to->IsDoubleRegister()) {
- __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
- } else {
- ASSERT(to->IsDoubleStackSlot());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
- }
- } else if (to->IsDoubleRegister()) {
- ASSERT(from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- // TODO(regis): Why is vldr not taking a MemOperand?
- // __ vldr(dbl_scratch, ToMemOperand(from));
- MemOperand from_operand = ToMemOperand(from);
- __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
- // TODO(regis): Why is vstr not taking a MemOperand?
- // __ vstr(dbl_scratch, ToMemOperand(to));
- MemOperand to_operand = ToMemOperand(to);
- __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
- }
- }
-
- if (destroys_core_scratch) {
- __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
- }
-
- LInstruction* next = GetNextInstruction();
- if (next != NULL && next->IsLazyBailout()) {
- int pc = masm()->pc_offset();
- safepoints_.SetPcAfterGap(pc);
- }
+ resolver_.Resolve(move);
}
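
ToHighMemOperand is the piece that lets EmitMove copy a double stack slot
one word at a time when the double scratch register is busy breaking a
cycle. A quick check of its offset arithmetic (a sketch: kPointerSize is 4
on ARM, and HighWordOffset is an illustrative name, not V8 code):

  #include <cassert>

  const int kPointerSize = 4;  // ARM word size

  // Mirrors the two branches of ToHighMemOperand; offsets are fp-relative.
  int HighWordOffset(int index) {
    if (index >= 0) {                                   // local/spill slot
      return -(index + 3) * kPointerSize + kPointerSize;
    }
    return -(index - 1) * kPointerSize + kPointerSize;  // incoming parameter
  }

  int main() {
    assert(HighWordOffset(2) == -16);  // local slot 2: high word at [fp, #-16]
    assert(HighWordOffset(-2) == 16);  // parameter: high word at [fp, #+16]
    return 0;
  }

In both branches the result is one kPointerSize above the corresponding
ToMemOperand offset, i.e. the upper word of the two-word double.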
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.h Wed Feb 16 06:48:41 2011
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.h Thu Feb 17 07:25:38 2011
@@ -29,7 +29,7 @@
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
#include "arm/lithium-arm.h"
-
+#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
@@ -39,31 +39,8 @@
// Forward declarations.
class LDeferredCode;
-class LGapNode;
class SafepointGenerator;
-class LGapResolver BASE_EMBEDDED {
- public:
- LGapResolver();
- const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand);
-
- private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- int next_visited_id_;
-};
-
-
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -79,9 +56,34 @@
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+
+ // Support for converting LOperands to assembler types.
+ // LOperand must be a register.
+ Register ToRegister(LOperand* op) const;
+
+ // LOperand is loaded into scratch, unless already a register.
+ Register EmitLoadRegister(LOperand* op, Register scratch);
+
+ // LOperand must be a double register.
+ DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+ // LOperand is loaded into dbl_scratch, unless already a double register.
+ DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DoubleRegister dbl_scratch);
+ int ToInteger32(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op);
+ MemOperand ToMemOperand(LOperand* op) const;
+ // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+ MemOperand ToHighMemOperand(LOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
@@ -136,7 +138,6 @@
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- MacroAssembler* masm() const { return masm_; }
Register scratch0() { return r9; }
DwVfpRegister double_scratch0() { return d0; }
@@ -202,24 +203,6 @@
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
- // LOperand must be a register.
- Register ToRegister(LOperand* op) const;
-
- // LOperand is loaded into scratch, unless already a register.
- Register EmitLoadRegister(LOperand* op, Register scratch);
-
- // LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
-
- // LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
-
- int ToInteger32(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op);
- MemOperand ToMemOperand(LOperand* op) const;
-
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
=======================================
--- /branches/bleeding_edge/tools/gyp/v8.gyp Fri Feb 11 04:25:41 2011
+++ /branches/bleeding_edge/tools/gyp/v8.gyp Thu Feb 17 07:25:38 2011
@@ -598,6 +598,8 @@
'../../src/arm/lithium-arm.h',
'../../src/arm/lithium-codegen-arm.cc',
'../../src/arm/lithium-codegen-arm.h',
+ '../../src/arm/lithium-gap-resolver-arm.cc',
+ '../../src/arm/lithium-gap-resolver-arm.h',
'../../src/arm/macro-assembler-arm.cc',
'../../src/arm/macro-assembler-arm.h',
'../../src/arm/regexp-macro-assembler-arm.cc',