Revision: 22017
Author:   [email protected]
Date:     Wed Jun 25 15:11:00 2014 UTC
Log: ARM: Use the shifter operand to merge in previous shift instructions.

When possible, we transform sequences of code of the form
    lsl r1, r2, #imm
    add r0, r5, r1
into
    add r0, r5, r2 LSL #imm

This is an adaptation of r21161.
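
With the register-shifted Operand constructor in the assembler (see the
assembler-arm.cc hunk below), the merged form is a single data-processing
instruction. A minimal sketch of the emission, with illustrative register
names and shift amount:

    // Emits "add r0, r5, r2, LSL #2" in one instruction; r0, r5, r2 and the
    // shift amount are placeholders rather than values from this patch.
    __ add(r0, r5, Operand(r2, LSL, 2));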

[email protected]

Review URL: https://codereview.chromium.org/312173002
http://code.google.com/p/v8/source/detail?r=22017

Modified:
 /branches/bleeding_edge/src/arm/assembler-arm.cc
 /branches/bleeding_edge/src/arm/assembler-arm.h
 /branches/bleeding_edge/src/arm/constants-arm.h
 /branches/bleeding_edge/src/arm/lithium-arm.cc
 /branches/bleeding_edge/src/arm/lithium-arm.h
 /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
 /branches/bleeding_edge/src/arm/lithium-codegen-arm.h

=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.cc Tue Jun 24 12:20:12 2014 UTC
+++ /branches/bleeding_edge/src/arm/assembler-arm.cc Wed Jun 25 15:11:00 2014 UTC
@@ -281,6 +281,7 @@

 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
   ASSERT(is_uint5(shift_imm));
+  ASSERT(shift_op != NO_SHIFT);

   rm_ = rm;
   rs_ = no_reg;
@@ -301,7 +302,7 @@


 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
-  ASSERT(shift_op != RRX);
+  ASSERT((shift_op != RRX) && (shift_op != NO_SHIFT));
   rm_ = rm;
   rs_ = no_reg;
   shift_op_ = shift_op;
@@ -957,16 +958,16 @@
 // If this returns true then you have to use the rotate_imm and immed_8
 // that it returns, because it may have already changed the instruction
 // to match them!
-static bool fits_shifter(uint32_t imm32,
-                         uint32_t* rotate_imm,
-                         uint32_t* immed_8,
-                         Instr* instr) {
+bool fits_shifter(uint32_t imm32,
+                  uint32_t* rotate_imm,
+                  uint32_t* immed_8,
+                  Instr* instr) {
   // imm32 must be unsigned.
   for (int rot = 0; rot < 16; rot++) {
     uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
     if ((imm8 <= 0xff)) {
-      *rotate_imm = rot;
-      *immed_8 = imm8;
+      if (rotate_imm != NULL) *rotate_imm = rot;
+      if (immed_8 != NULL) *immed_8 = imm8;
       return true;
     }
   }
@@ -982,7 +983,8 @@
           if (imm32 < 0x10000) {
             *instr ^= kMovwLeaveCCFlip;
             *instr |= EncodeMovwImmediate(imm32);
-            *rotate_imm = *immed_8 = 0;  // Not used for movw.
+            if (rotate_imm != NULL) *rotate_imm = 0;  // Not used for movw.
+            if (immed_8 != NULL) *immed_8 = 0;  // Not used for movw.
             return true;
           }
         }
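
(Not part of the patch: a self-contained sketch of the encoding rule that
fits_shifter() tests above. An ARM data-processing immediate must be an 8-bit
value rotated right by an even amount, which is what the loop over rot checks.)

    // Returns true if imm32 can be encoded as an ARM rotated 8-bit immediate.
    static bool EncodesAsRotatedImm8(uint32_t imm32) {
      for (int rot = 0; rot < 16; rot++) {
        // Rotating left by 2*rot undoes a right rotation by 2*rot; rot == 0 is
        // handled separately to avoid shifting a 32-bit value by 32 bits.
        uint32_t imm8 = (rot == 0)
            ? imm32
            : (imm32 << (2 * rot)) | (imm32 >> (32 - 2 * rot));
        if (imm8 <= 0xff) return true;
      }
      return false;
    }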
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.h Mon Jun 23 11:27:24 2014 UTC
+++ /branches/bleeding_edge/src/arm/assembler-arm.h Wed Jun 25 15:11:00 2014 UTC
@@ -1598,6 +1598,10 @@
 };


+bool fits_shifter(uint32_t imm32, uint32_t* rotate_imm,
+                  uint32_t* immed_8, Instr* instr);
+
+
 class EnsureSpace BASE_EMBEDDED {
  public:
   explicit EnsureSpace(Assembler* assembler) {
=======================================
--- /branches/bleeding_edge/src/arm/constants-arm.h Mon Jun 23 11:27:24 2014 UTC
+++ /branches/bleeding_edge/src/arm/constants-arm.h Wed Jun 25 15:11:00 2014 UTC
@@ -236,6 +236,7 @@
   // as an argument, and will never actually be encoded. The Assembler will
   // detect it and emit the correct ROR shift operand with shift_imm == 0.
   RRX = -1,
+  NO_SHIFT = -2,
   kNumberOfShifts = 4
 };

=======================================
--- /branches/bleeding_edge/src/arm/lithium-arm.cc Tue Jun 24 12:54:59 2014 UTC
+++ /branches/bleeding_edge/src/arm/lithium-arm.cc Wed Jun 25 15:11:00 2014 UTC
@@ -674,6 +674,117 @@
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
   return AssignEnvironment(new(zone()) LDeoptimize);
 }
+
+
+HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
+                                                                HValue** left) {
+  if (!val->representation().IsInteger32()) return NULL;
+  if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL;
+
+  HBinaryOperation* hinstr = HBinaryOperation::cast(val);
+  HValue* hleft = hinstr->left();
+  HValue* hright = hinstr->right();
+  ASSERT(hleft->representation().Equals(hinstr->representation()));
+  ASSERT(hright->representation().Equals(hinstr->representation()));
+
+  if ((hright->IsConstant() &&
+       LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
+      (hinstr->IsCommutative() && hleft->IsConstant() &&
+       LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) {
+    // The constant operand will likely fit in the immediate field. We are
+    // better off with
+    //     mov r1, r2 LSL #imm
+    //     add r0, r1, #imm2
+    // than with
+    //     mov r5, #imm2
+    //     add r0, r5, r2 LSL #imm
+    return NULL;
+  }
+
+  HBitwiseBinaryOperation* shift = NULL;
+  // TODO(aleram): We will miss situations where a shift operation is used by
+  // different instructions both as left and right operands.
+  if (hright->IsBitwiseBinaryShift() &&
+      HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) {
+    shift = HBitwiseBinaryOperation::cast(hright);
+    if (left != NULL) {
+      *left = hleft;
+    }
+  } else if (hinstr->IsCommutative() &&
+             hleft->IsBitwiseBinaryShift() &&
+             HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) {
+    shift = HBitwiseBinaryOperation::cast(hleft);
+    if (left != NULL) {
+      *left = hright;
+    }
+  } else {
+    return NULL;
+  }
+
+  if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) {
+    // Logical shifts right by zero can deoptimize.
+    return NULL;
+  }
+
+  return shift;
+}
+
+
+bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) {
+  if (!shift->representation().IsInteger32()) {
+    return false;
+  }
+  for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) {
+    if (shift != CanTransformToShiftedOp(it.value())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
+    HBinaryOperation* instr) {
+  HValue* left;
+  HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left);
+
+  if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) {
+    return DoShiftedBinaryOp(instr, left, shift);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoShiftedBinaryOp(
+    HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
+  ASSERT(hshift->IsBitwiseBinaryShift());
+  ASSERT(!hshift->IsShr() ||
+         (JSShiftAmountFromHConstant(hshift->right()) > 0));
+
+  LTemplateResultInstruction<1>* res;
+  LOperand* left = UseRegisterAtStart(hleft);
+  LOperand* right = UseRegisterAtStart(hshift->left());
+  LOperand* shift_amount = UseConstant(hshift->right());
+  ShiftOp shift_op;
+  switch (hshift->opcode()) {
+    case HValue::kShl: shift_op = LSL; break;
+    case HValue::kShr: shift_op = LSR; break;
+    case HValue::kSar: shift_op = ASR; break;
+    default: UNREACHABLE(); shift_op = NO_SHIFT;
+  }
+
+  if (hinstr->IsBitwise()) {
+    res = new(zone()) LBitI(left, right, shift_op, shift_amount);
+  } else if (hinstr->IsAdd()) {
+    res = new(zone()) LAddI(left, right, shift_op, shift_amount);
+  } else {
+    ASSERT(hinstr->IsSub());
+    res = new(zone()) LSubI(left, right, shift_op, shift_amount);
+  }
+  if (hinstr->CheckFlag(HValue::kCanOverflow)) {
+    AssignEnvironment(res);
+  }
+  return DefineAsRegister(res);
+}


 LInstruction* LChunkBuilder::DoShift(Token::Value op,
@@ -681,6 +792,11 @@
   if (instr->representation().IsSmiOrInteger32()) {
    ASSERT(instr->left()->representation().Equals(instr->representation()));
    ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+    if (ShiftCanBeOptimizedAway(instr)) {
+      return NULL;
+    }
+
     LOperand* left = UseRegisterAtStart(instr->left());

     HValue* right_value = instr->right();
@@ -690,7 +806,7 @@
     if (right_value->IsConstant()) {
       HConstant* constant = HConstant::cast(right_value);
       right = chunk_->DefineConstantOperand(constant);
-      constant_value = constant->Integer32Value() & 0x1f;
+      constant_value = JSShiftAmountFromHConstant(constant);
      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
       // truncated to smi.
       if (instr->representation().IsSmi() && constant_value > 0) {
@@ -1249,6 +1365,11 @@
    ASSERT(instr->left()->representation().Equals(instr->representation()));
    ASSERT(instr->right()->representation().Equals(instr->representation()));
     ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }

     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1532,6 +1653,11 @@
   if (instr->representation().IsSmiOrInteger32()) {
    ASSERT(instr->left()->representation().Equals(instr->representation()));
    ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }

     if (instr->left()->IsConstant()) {
       // If lhs is constant, do reverse subtraction instead.
@@ -1600,6 +1726,12 @@
   if (instr->representation().IsSmiOrInteger32()) {
    ASSERT(instr->left()->representation().Equals(instr->representation()));
    ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
     LAddI* add = new(zone()) LAddI(left, right);
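
(Not part of the patch: an illustration of the pattern the new LChunkBuilder
helpers match. For a function such as
"function f(a, b) { return a + (b << 2); }", the shift feeding the add has a
constant count and no other uses, so DoAdd() gets a merged LAddI from
TryDoOpWithShiftedRightOperand() and DoShift() returns NULL for the shift
itself. The JavaScript shift count is reduced modulo 32, which is what the
"& 0x1f" in JSShiftAmountFromHConstant() implements; a sketch:)

    // Illustrative helper mirroring the masking used above: ECMAScript takes
    // shift counts modulo 32, so only the low five bits matter.
    static int JSShiftAmount(int32_t shift_count) {
      return shift_count & 0x1f;  // e.g. 33 -> 1, 32 -> 0
    }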
=======================================
--- /branches/bleeding_edge/src/arm/lithium-arm.h Fri Jun 20 08:40:11 2014 UTC
+++ /branches/bleeding_edge/src/arm/lithium-arm.h Wed Jun 25 15:11:00 2014 UTC
@@ -1239,18 +1239,32 @@

 class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(LOperand* left, LOperand* right) {
+  LBitI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
     inputs_[0] = left;
     inputs_[1] = right;
   }
+
+  LBitI(LOperand* left, LOperand* right, ShiftOp shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }

   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+
+  ShiftOp shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }

   Token::Value op() const { return hydrogen()->op(); }

   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
   DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ protected:
+  ShiftOp shift_;
+  LOperand* shift_amount_;
 };


@@ -1277,16 +1291,30 @@

 class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LSubI(LOperand* left, LOperand* right) {
+  LSubI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
     inputs_[0] = left;
     inputs_[1] = right;
   }
+
+  LSubI(LOperand* left, LOperand* right, ShiftOp shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  ShiftOp shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }

   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }

   DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
   DECLARE_HYDROGEN_ACCESSOR(Sub)
+
+ protected:
+  ShiftOp shift_;
+  LOperand* shift_amount_;
 };


@@ -1455,16 +1483,30 @@

 class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LAddI(LOperand* left, LOperand* right) {
+  LAddI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LAddI(LOperand* left, LOperand* right, ShiftOp shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
     inputs_[0] = left;
     inputs_[1] = right;
   }

   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+
+  ShiftOp shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }

   DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
   DECLARE_HYDROGEN_ACCESSOR(Add)
+
+ protected:
+  ShiftOp shift_;
+  LOperand* shift_amount_;
 };


@@ -2855,6 +2897,49 @@
   void AddInstruction(LInstruction* instr, HInstruction* current);

   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+
+  int JSShiftAmountFromHConstant(HValue* constant) {
+    return HConstant::cast(constant)->Integer32Value() & 0x1f;
+  }
+  bool LikelyFitsImmField(HInstruction* instr, int imm) {
+    Instr instr_bits;
+    // All arithmetic and logical operations accept the same range of
+    // immediates. In some cases though, the operation itself can be changed to
+    // get a wider effective range of immediates.
+    if (instr->IsAdd() || instr->IsSub()) {
+      // ADD and SUB can be exchanged with a negate immediate.
+      instr_bits = ADD;
+    } else if (HBitwise::cast(instr)->op() == Token::BIT_AND) {
+      ASSERT(instr->IsBitwise());
+      // AND and BIC can be exchanged with an inverted immediate.
+      instr_bits = AND;
+    } else {
+      ASSERT(instr->IsBitwise());
+      // Use ORR for all other operations, since fits_shifter() can't adapt ORR.
+      instr_bits = ORR;
+    }
+    return fits_shifter(imm, NULL, NULL, &instr_bits);
+  }
+
+  // Indicates if a sequence of the form
+  //   mov r1, r2 LSL #imm
+  //   add r0, r5, r1
+  // can be replaced with:
+  //   add r0, r5, r2 LSL #imm
+  // If this is not possible, the function returns NULL. Otherwise it returns a
+  // pointer to the shift instruction that would be optimized away.
+  HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val,
+                                                   HValue** left = NULL);
+  // Checks if all uses of the shift operation can optimize it away.
+  bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift);
+  // Attempts to merge the binary operation and a previous shift operation into
+  // a single operation. Returns the merged instruction on success, and NULL
+  // otherwise.
+  LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op);
+  LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr,
+                                  HValue* left,
+                                  HBitwiseBinaryOperation* shift);
+
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
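
(Not part of the patch: a sketch of the exchange idea behind
LikelyFitsImmField(). An ADD whose constant operand does not encode directly
may still be a single instruction, because fits_shifter() can flip it to a SUB
with the negated immediate; AND and BIC are paired the same way with the
inverted immediate. Reusing the EncodesAsRotatedImm8() sketch from the
assembler-arm.cc section above:)

    // True if "add rd, rn, #imm" takes one instruction, either directly or
    // rewritten as "sub rd, rn, #-imm". Illustrative only.
    static bool AddImmediateIsSingleInstruction(uint32_t imm) {
      return EncodesAsRotatedImm8(imm) || EncodesAsRotatedImm8(0u - imm);
    }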
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Tue Jun 24 12:54:59 2014 UTC
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Wed Jun 25 15:11:00 2014 UTC
@@ -478,6 +478,19 @@
 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
 }
+
+
+template<class LI>
+Operand LCodeGen::ToShiftedRightOperand(LOperand* right, LI* shift_info) {
+  if (shift_info->shift() == NO_SHIFT) {
+    return ToOperand(right);
+  } else {
+    return Operand(
+        ToRegister(right),
+        shift_info->shift(),
+        JSShiftAmountFromLConstant(shift_info->shift_amount()));
+  }
+}


 bool LCodeGen::IsSmi(LConstantOperand* op) const {
@@ -1712,11 +1725,13 @@
   Register result = ToRegister(instr->result());
   Operand right(no_reg);

+  ASSERT(right_op->IsRegister() || (instr->shift() == NO_SHIFT));
+
   if (right_op->IsStackSlot()) {
     right = Operand(EmitLoadRegister(right_op, ip));
   } else {
     ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
-    right = ToOperand(right_op);
+    right = ToShiftedRightOperand(right_op, instr);
   }

   switch (instr->op()) {
@@ -1773,9 +1788,7 @@
         break;
     }
   } else {
-    // Mask the right_op operand.
-    int value = ToInteger32(LConstantOperand::cast(right_op));
-    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    int shift_count = JSShiftAmountFromLConstant(right_op);
     switch (instr->op()) {
       case Token::ROR:
           if (shift_count != 0) {
@@ -1835,12 +1848,15 @@
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   SBit set_cond = can_overflow ? SetCC : LeaveCC;

+  ASSERT(right->IsRegister() || (instr->shift() == NO_SHIFT));
+
   if (right->IsStackSlot()) {
     Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   } else {
     ASSERT(right->IsRegister() || right->IsConstantOperand());
-    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+    __ sub(ToRegister(result), ToRegister(left),
+           ToShiftedRightOperand(right, instr), set_cond);
   }

   if (can_overflow) {
@@ -2029,12 +2045,15 @@
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   SBit set_cond = can_overflow ? SetCC : LeaveCC;

+  ASSERT(right->IsRegister() || (instr->shift() == NO_SHIFT));
+
   if (right->IsStackSlot()) {
     Register right_reg = EmitLoadRegister(right, ip);
    __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   } else {
     ASSERT(right->IsRegister() || right->IsConstantOperand());
-    __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+    __ add(ToRegister(result), ToRegister(left),
+           ToShiftedRightOperand(right, instr), set_cond);
   }

   if (can_overflow) {
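
(Not part of the patch: how the merged operand flows through the code
generator. For an LAddI carrying shift() == LSL and a constant shift amount of
2, ToShiftedRightOperand() builds the shifted register operand, so the add
above becomes a single instruction, roughly:)

    // Illustrative emission; the registers and shift count are placeholders.
    __ add(ToRegister(result), ToRegister(left),
           Operand(ToRegister(right), LSL, 2), set_cond);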
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.h Tue Jun 3 08:12:43 2014 UTC
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.h Wed Jun 25 15:11:00 2014 UTC
@@ -85,6 +85,13 @@
   MemOperand ToMemOperand(LOperand* op) const;
   // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
   MemOperand ToHighMemOperand(LOperand* op) const;
+
+  template<class LI>
+  Operand ToShiftedRightOperand(LOperand* right, LI* shift_info);
+
+  int JSShiftAmountFromLConstant(LOperand* constant) {
+    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
+  }

   bool IsInteger32(LConstantOperand* op) const;
   bool IsSmi(LConstantOperand* op) const;
