Revision: 25210
Author:   [email protected]
Date:     Fri Nov  7 09:55:39 2014 UTC
Log: [turbofan][arm64] Optimize shifts combined with truncations or extensions.

[email protected]

Review URL: https://codereview.chromium.org/705983002
https://code.google.com/p/v8/source/detail?r=25210
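
For context, a minimal sketch (not part of this change) of the kind of source-level
patterns whose TurboFan graphs the new matchers target. The function names
ShiftOutExtension and TruncateHighWord are made up for illustration only, and the
expected arm64 lowering after this change is noted in the comments:

#include <cstdint>

// Word64Shl(ChangeInt32ToInt64(x), k) with k in [32, 63]: the upper 32 bits are
// shifted out anyway, so the sign/zero extension can be dropped and a single
// "lsl" on the unextended input suffices.
int64_t ShiftOutExtension(int32_t x) {
  return static_cast<int64_t>(x) << 40;
}

// TruncateInt64ToInt32(Word64Sar(x, 32)), or TruncateInt64ToInt32(Word64Shr(x, k))
// with k in [32, 63]: only the bits moved below bit 32 survive the truncation,
// so a single "lsr" replaces the shift followed by a 32-bit move.
int32_t TruncateHighWord(int64_t x) {
  return static_cast<int32_t>(x >> 32);
}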

Modified:
 /branches/bleeding_edge/src/compiler/arm64/instruction-selector-arm64.cc
 /branches/bleeding_edge/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc

=======================================
--- /branches/bleeding_edge/src/compiler/arm64/instruction-selector-arm64.cc Mon Nov 3 10:28:46 2014 UTC
+++ /branches/bleeding_edge/src/compiler/arm64/instruction-selector-arm64.cc Fri Nov 7 09:55:39 2014 UTC
@@ -542,6 +542,17 @@


 void InstructionSelector::VisitWord64Shl(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+      m.right().IsInRange(32, 63)) {
+    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+    // 32 bits anyway.
+    Emit(kArm64Lsl, g.DefineAsRegister(node),
+         g.UseRegister(m.left().node()->InputAt(0)),
+         g.UseImmediate(m.right().node()));
+    return;
+  }
   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
 }

@@ -884,6 +895,18 @@

 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    Int64BinopMatcher m(value);
+    if ((m.IsWord64Sar() && m.right().HasValue() &&
+         (m.right().Value() == 32)) ||
+        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
+      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseImmediate(m.right().node()));
+      return;
+    }
+  }
+
   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
 }
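
For reference (not part of the change): the asymmetry between the two shift
conditions above follows from what survives the truncation. At a shift amount of
exactly 32 an arithmetic and a logical right shift agree on the low 32 bits, while
for larger amounts the arithmetic shift pulls sign bits into the low word, so
Word64Sar is only matched at 32 whereas Word64Shr is matched across 32..63. A
small standalone illustration, assuming two's-complement arithmetic right shifts
as on the targets V8 supports:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t x = static_cast<int64_t>(0xFFFFFF0000000000ULL);  // negative

  // Shift amount 32: asr and lsr produce the same low 32 bits, so
  // TruncateInt64ToInt32(Word64Sar(x, 32)) may be lowered to a single lsr.
  assert(static_cast<int32_t>(x >> 32) ==
         static_cast<int32_t>(static_cast<uint64_t>(x) >> 32));

  // Shift amount 40: the arithmetic shift fills the low word with sign bits,
  // so replacing it with lsr would change the truncated result.
  assert(static_cast<int32_t>(x >> 40) !=
         static_cast<int32_t>(static_cast<uint64_t>(x) >> 40));
  return 0;
}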

=======================================
--- /branches/bleeding_edge/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc Mon Nov 3 10:28:46 2014 UTC
+++ /branches/bleeding_edge/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc Fri Nov 7 09:55:39 2014 UTC
@@ -1229,6 +1229,76 @@
                         ::testing::ValuesIn(kShiftInstructions));


+TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt64, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsl, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt64, kMachUint32);
+    Node* const p0 = m.Parameter(0);
+    Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsl, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  Node* const p = m.Parameter(0);
+  Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
+  m.Return(t);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(32, s.ToInt64(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt32, kMachInt64);
+    Node* const p = m.Parameter(0);
+    Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(x)));
+    m.Return(t);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Mul and Div instructions.

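Not part of this change, but a companion test for the fallback path could look
roughly like the sketch below; it assumes the existing kArm64Sxtw lowering of
ChangeInt32ToInt64, and checks that shift amounts below 32 keep the extension
and still select two instructions:

TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64BelowThirtyTwo) {
  // Hypothetical test, not in this commit: for shift amounts below 32 the
  // upper 32 bits remain observable, so the sign extension must be kept.
  TRACED_FORRANGE(int64_t, x, 1, 31) {
    StreamBuilder m(this, kMachInt64, kMachInt32);
    Node* const p0 = m.Parameter(0);
    Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
    m.Return(n);
    Stream s = m.Build();
    ASSERT_EQ(2U, s.size());
    EXPECT_EQ(kArm64Sxtw, s[0]->arch_opcode());
    EXPECT_EQ(kArm64Lsl, s[1]->arch_opcode());
  }
}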