Reviewers: Benedikt Meurer,

Message:
PTAL

Description:
[turbofan][arm64] Optimize shifts combined with truncations or extensions.
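
Roughly, the two patterns this targets look as follows in C++ (an illustrative
sketch with made-up function names, not code from this CL):

  #include <cstdint>

  // A 32-bit value widened to 64 bits, then shifted left by 32..63: the
  // sxtw/uxtw is dead, because every extension bit is shifted out the top.
  int64_t WidenThenShift(int32_t x) {
    return static_cast<int64_t>(x) << 40;  // sxtw + lsl becomes a single lsl
  }

  // A 64-bit right shift whose result is truncated back to 32 bits: the
  // mov (truncation) is folded away, leaving a single lsr.
  int32_t ShiftThenTruncate(uint64_t x) {
    return static_cast<int32_t>(x >> 40);  // lsr + mov32 becomes a single lsr
  }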

[email protected]

Please review this at https://codereview.chromium.org/705983002/

Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files (+93, -0 lines):
  M src/compiler/arm64/instruction-selector-arm64.cc
  M test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc


Index: src/compiler/arm64/instruction-selector-arm64.cc
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 1040131c522141ec0f9e6035a614d9a8d576a24a..151d1aa8b7eac05d1612c657a833c2e0695d0133 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -542,6 +542,17 @@ void InstructionSelector::VisitWord32Shl(Node* node) {


 void InstructionSelector::VisitWord64Shl(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+      m.right().IsInRange(32, 63)) {
+    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+    // 32 bits anyway.
+    Emit(kArm64Lsl, g.DefineAsRegister(node),
+         g.UseRegister(m.left().node()->InputAt(0)),
+         g.UseImmediate(m.right().node()));
+    return;
+  }
   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
 }
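
The reason the extension can be dropped: for a shift amount s in [32, 63], all
32 extension bits sit at positions 32..63 and are shifted out past bit 63, so
the result depends only on the low 32 bits of the input. A standalone check of
that claim (my sketch, not part of the patch):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int32_t x = -2;  // sign bit set, so sign- and zero-extension differ
    for (int s = 32; s <= 63; ++s) {
      const uint64_t sext = static_cast<uint64_t>(static_cast<int64_t>(x));
      const uint64_t zext = static_cast<uint64_t>(static_cast<uint32_t>(x));
      // Shift as uint64_t so the left shift is well-defined for all inputs.
      assert((sext << s) == (zext << s));
    }
    return 0;
  }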

@@ -884,6 +895,18 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {

 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    Int64BinopMatcher m(value);
+    if ((m.IsWord64Sar() && m.right().HasValue() &&
+         (m.right().Value() == 32)) ||
+        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
+      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseImmediate(m.right().node()));
+      return;
+    }
+  }
+
   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
 }
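
Note the asymmetry in the match above: Word64Shr combines for any amount in
[32, 63], but Word64Sar only for exactly 32. At 32, asr and lsr agree on bits
63:32 of the input, which is all the 32-bit truncation keeps; for larger
amounts, asr smears sign bits into the low word where lsr would produce zeros.
A quick check of the 32 case (again a sketch, not part of the patch):

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t x = 0x8000000012345678ULL;  // negative as int64_t
    const int32_t via_asr =
        static_cast<int32_t>(static_cast<int64_t>(x) >> 32);
    const int32_t via_lsr = static_cast<int32_t>(x >> 32);
    assert(via_asr == via_lsr);  // both are bits 63:32 of x: 0x80000000
    // At 33 the two already diverge in bit 31 of the low word:
    assert(static_cast<int32_t>(static_cast<int64_t>(x) >> 33) !=
           static_cast<int32_t>(x >> 33));
    return 0;
  }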

Index: test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 96b8a83eb66ca511f364ed8930d35a821046991b..eee9c58b127ddac04e6439708f3af73c6d0c910d 100644
--- a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -1229,6 +1229,76 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
                         ::testing::ValuesIn(kShiftInstructions));


+TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt64, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsl, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt64, kMachUint32);
+    Node* const p0 = m.Parameter(0);
+    Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsl, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  Node* const p = m.Parameter(0);
+  Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
+  m.Return(t);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(32, s.ToInt64(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt32, kMachInt64);
+    Node* const p = m.Parameter(0);
+    Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(x)));
+    m.Return(t);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Mul and Div instructions.
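
One case the new tests do not pin down is the non-combining side of the Sar
restriction. A hypothetical companion test (not part of this CL, and assuming
the fall-through path emits kArm64Asr followed by the kArm64Mov32 truncation)
might look like:

  TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64SarNotCombined) {
    StreamBuilder m(this, kMachInt32, kMachInt64);
    Node* const p = m.Parameter(0);
    Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(33)));
    m.Return(t);
    Stream s = m.Build();
    ASSERT_EQ(2U, s.size());  // shift and truncation stay separate
    EXPECT_EQ(kArm64Asr, s[0]->arch_opcode());
    EXPECT_EQ(kArm64Mov32, s[1]->arch_opcode());
  }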


