Reviewers: Paul Lind, palfia, kisg, kilvadyb

Description:
MIPS: Fixes for patch sites when long branches are emitted.

This resolves many mjsunit failures when long branches are forced to be emitted.

TEST=
BUG=
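
Background on the failure mode, for reviewers: when a branch target is out
of the 16-bit short-branch range (or when long branches are forced for
testing), MacroAssembler::Branch expands into a multi-instruction sequence
that routes the target through the 'at' register. That has two consequences
this patch deals with: the branch is no longer a single word that can be
rewritten in place, and anything held in 'at' across the branch is lost.
A rough standalone sketch of the two shapes (illustrative mnemonics and
helper names, not the actual assembler code):

  #include <cstdint>
  #include <iostream>
  #include <string>
  #include <vector>

  enum Cond { kEq, kNe, kAlways };

  // Branch from instruction slot 'pc' to instruction slot 'target'.
  std::vector<std::string> EmitBranch(int32_t pc, int32_t target, Cond cond) {
    int32_t delta = target - (pc + 1);                 // relative to delay slot
    bool is_near = delta >= -32768 && delta <= 32767;  // fits the 16-bit field
    if (is_near) {
      // One word (plus delay slot) that a patcher can rewrite in place.
      return {"b<cond> target", "nop"};
    }
    std::vector<std::string> seq;
    if (cond != kAlways) seq.push_back("b<!cond> skip");  // guard the far jump
    seq.push_back("lui at, hi(target)");                  // clobbers 'at'
    seq.push_back("ori at, at, lo(target)");
    seq.push_back("jr  at");
    seq.push_back("nop");                                 // delay slot
    return seq;  // variable length: unsafe at a fixed-size patch site
  }

  int main() {
    std::cout << EmitBranch(0, 100, kEq).size() << " vs "
              << EmitBranch(0, 1 << 20, kEq).size() << " words\n";  // 2 vs 5
    return 0;
  }

The hunks below either pin known-near branches to the short form
(BranchShort) or make sure 'at' is loaded immediately before the
instruction that consumes it.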

Please review this at https://codereview.chromium.org/153983002/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files (+31, -32 lines):
  M src/mips/codegen-mips.cc
  M src/mips/deoptimizer-mips.cc
  M src/mips/full-codegen-mips.cc
  M src/mips/lithium-codegen-mips.cc
  M src/mips/macro-assembler-mips.h
  M src/mips/macro-assembler-mips.cc


Index: src/mips/codegen-mips.cc
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index e5338dbcc15adbcc69b05e77681260ec13c937e2..1b79433d3760ca1cadbde60c17fee339480e4bb2 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -1023,11 +1023,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
     __ Move(double_scratch1, temp3, temp1);
   }
   __ mul_d(result, result, double_scratch1);
-  __ Branch(&done);
+  __ BranchShort(&done);

   __ bind(&zero);
   __ Move(result, kDoubleRegZero);
-  __ Branch(&done);
+  __ BranchShort(&done);

   __ bind(&infinity);
   __ ldc1(result, ExpConstant(2, temp3));
Index: src/mips/deoptimizer-mips.cc
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 0662b17366b4eeeb57f99f44f6d373df1c40ea5c..6bd9ba7b7f14e785423b9f03461161d94dcef950 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -239,13 +239,13 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
   Label pop_loop;
   Label pop_loop_header;
-  __ Branch(&pop_loop_header);
+  __ BranchShort(&pop_loop_header);
   __ bind(&pop_loop);
   __ pop(t0);
   __ sw(t0, MemOperand(a3, 0));
   __ addiu(a3, a3, sizeof(uint32_t));
   __ bind(&pop_loop_header);
-  __ Branch(&pop_loop, ne, a2, Operand(sp));
+  __ BranchShort(&pop_loop, ne, a2, Operand(sp));

   // Compute the output frame in the deoptimizer.
   __ push(a0);  // Preserve deoptimizer object across call.
@@ -280,11 +280,11 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
   __ push(t3);
   __ bind(&inner_loop_header);
-  __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
+  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));

   __ Addu(t0, t0, Operand(kPointerSize));
   __ bind(&outer_loop_header);
-  __ Branch(&outer_push_loop, lt, t0, Operand(a1));
+  __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));

   __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
   for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
Index: src/mips/full-codegen-mips.cc
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 4a7e50420ce30f347323c7b6a949524f4dced4f1..18ee02dc5c4bb7328546b9712079b30e3cc9f328 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -84,7 +84,7 @@ class JumpPatchSite BASE_EMBEDDED {
     __ bind(&patch_site_);
     __ andi(at, reg, 0);
     // Always taken before patched.
-    __ Branch(target, eq, at, Operand(zero_reg));
+    __ BranchShort(target, eq, at, Operand(zero_reg));
   }

   // When initially emitting this ensure that a jump is never generated to skip
@@ -95,7 +95,7 @@ class JumpPatchSite BASE_EMBEDDED {
     __ bind(&patch_site_);
     __ andi(at, reg, 0);
     // Never taken before patched.
-    __ Branch(target, ne, at, Operand(zero_reg));
+    __ BranchShort(target, ne, at, Operand(zero_reg));
   }

   void EmitPatchInfo() {
@@ -2347,13 +2347,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
   // recording binary operation stub, see
   switch (op) {
     case Token::SAR:
-      __ Branch(&stub_call);
       __ GetLeastBitsFromSmi(scratch1, right, 5);
       __ srav(right, left, scratch1);
       __ And(v0, right, Operand(~kSmiTagMask));
       break;
     case Token::SHL: {
-      __ Branch(&stub_call);
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ sllv(scratch1, scratch1, scratch2);
@@ -2363,7 +2361,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
       break;
     }
     case Token::SHR: {
-      __ Branch(&stub_call);
       __ SmiUntag(scratch1, left);
       __ GetLeastBitsFromSmi(scratch2, right, 5);
       __ srlv(scratch1, scratch1, scratch2);
@@ -3055,8 +3052,8 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
   __ JumpIfSmi(v0, if_false);
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
-  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
   Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);

   context()->Plug(if_true, if_false);
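
The JumpPatchSite changes are the heart of the fix: the site records the
'andi at, reg, 0' marker, and the branch emitted right after it is later
rewritten in place by the inline-cache patching code (PatchInlinedSmiCode
on MIPS), flipping the condition between eq and ne. That rewrite assumes a
single short branch at a fixed offset from the marker, which is exactly
what a forced long branch violates, hence BranchShort. A simplified
standalone model of the in-place rewrite (illustrative constants, not the
real patcher; it assumes the branch is the word right after the marker, as
in the emit code above):

  #include <cassert>
  #include <cstdint>

  constexpr uint32_t kBeqOpcode  = 4u << 26;    // MIPS opcode field for beq
  constexpr uint32_t kBneOpcode  = 5u << 26;    // MIPS opcode field for bne
  constexpr uint32_t kOpcodeMask = 0xfc000000;  // top 6 bits

  // Flip the smi-check branch that follows the 'andi' marker at code[marker].
  void FlipInlinedSmiCheck(uint32_t* code, int marker) {
    uint32_t& branch = code[marker + 1];
    uint32_t op = branch & kOpcodeMask;
    assert(op == kBeqOpcode || op == kBneOpcode);  // must be one short branch
    branch = (branch & ~kOpcodeMask) |
             (op == kBeqOpcode ? kBneOpcode : kBeqOpcode);
  }

  int main() {
    uint32_t code[] = {0x30410000,                  // andi at, v0, 0 (marker)
                       kBeqOpcode | 0x0020000a};    // beq at, zero_reg, +10
    FlipInlinedSmiCheck(code, 0);
    assert((code[1] & kOpcodeMask) == kBneOpcode);  // now taken the other way
    return 0;
  }
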
Index: src/mips/lithium-codegen-mips.cc
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index a2df23fc5a88bcd6278f2a46a3b70fc3944bb6bd..440cfc75789890f289d7a9f90a923a087e8c80a2 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -2613,10 +2613,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
   __ li(at, Operand(Handle<Object>(cell)));
   __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
-  __ Branch(&cache_miss, ne, map, Operand(at));
+  __ BranchShort(&cache_miss, ne, map, Operand(at));
   // We use Factory::the_hole_value() on purpose instead of loading from the
   // root array to force relocation to be able to later patch
-  // with true or false.
+  // with true or false. The distance from map check has to be constant.
   __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
   __ Branch(&done);

@@ -3755,6 +3755,7 @@ void LCodeGen::DoPower(LPower* instr) {
     Label no_deopt;
     __ JumpIfSmi(a2, &no_deopt);
     __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
     DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
     __ bind(&no_deopt);
     MathPowStub stub(MathPowStub::TAGGED);
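
Both lithium hunks are about keeping distances and 'at' predictable: the
cache_miss branch sits between the map check and the patchable
li(result, ..., CONSTANT_SIZE), so it must stay a fixed-size short branch
for the "distance from map check" mentioned in the comment to really be
constant; and in DoPower the expected heap-number map is now loaded into
'at' only after JumpIfSmi, because a long branch inside JumpIfSmi would go
through 'at' and wipe it before the DeoptimizeIf compare. A toy standalone
model of that second hazard (placeholder values, not V8 code):

  #include <cassert>
  #include <cstdint>

  struct Regs { uint32_t at; };

  // Stand-in for JumpIfSmi/Branch: with long branches forced, the jump
  // target is materialized in 'at' (lui/ori/jr), destroying its old value.
  void MacroThatMayBranchFar(Regs& r, uint32_t target, bool long_branches) {
    if (long_branches) r.at = target;
  }

  int main() {
    const uint32_t kHeapNumberMap = 0x12345678;  // placeholder constant
    Regs r{kHeapNumberMap};                      // loaded too early...
    MacroThatMayBranchFar(r, 0xdeadbeef, /*long_branches=*/true);
    assert(r.at != kHeapNumberMap);              // ...and now it is gone.
    r.at = kHeapNumberMap;                       // the fix: reload 'at' late
    assert(r.at == kHeapNumberMap);              // value live for the compare
    return 0;
  }
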
Index: src/mips/macro-assembler-mips.cc
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index d8b65663e854cd0228e7b321fda9928057f3e098..07cc7f9ec2672927e3090a145418956db0beaccd 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1563,19 +1563,27 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
     if (is_near(L)) {
       BranchShort(L, cond, rs, rt, bdslot);
     } else {
-      Label skip;
-      Condition neg_cond = NegateCondition(cond);
-      BranchShort(&skip, neg_cond, rs, rt);
-      Jr(L, bdslot);
-      bind(&skip);
+      if (cond != cc_always) {
+        Label skip;
+        Condition neg_cond = NegateCondition(cond);
+        BranchShort(&skip, neg_cond, rs, rt);
+        Jr(L, bdslot);
+        bind(&skip);
+      } else {
+        Jr(L, bdslot);
+      }
     }
   } else {
     if (is_trampoline_emitted()) {
-      Label skip;
-      Condition neg_cond = NegateCondition(cond);
-      BranchShort(&skip, neg_cond, rs, rt);
-      Jr(L, bdslot);
-      bind(&skip);
+      if (cond != cc_always) {
+        Label skip;
+        Condition neg_cond = NegateCondition(cond);
+        BranchShort(&skip, neg_cond, rs, rt);
+        Jr(L, bdslot);
+        bind(&skip);
+      } else {
+        Jr(L, bdslot);
+      }
     } else {
       BranchShort(L, cond, rs, rt, bdslot);
     }
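
The new cc_always arms in Branch() matter for two reasons: negating
"always" is not meaningful (the MIPS NegateCondition helper, as far as I
can tell, asserts against cc_always), so the old code could not safely
take the negated-skip path for unconditional far branches; and an
unconditional far branch needs no guarding skip anyway, so emitting Jr
directly is also two instructions shorter. A sketch of the constraint
(illustrative only, not the real helper):

  #include <cassert>

  enum Condition { eq, ne, lt, ge, cc_always };

  // Sketch: only a real condition has an opposite to guard the skip with.
  Condition NegateCondition(Condition cond) {
    assert(cond != cc_always && "cc_always has no negation");
    switch (cond) {
      case eq: return ne;
      case ne: return eq;
      case lt: return ge;
      case ge: return lt;
      default: return cc_always;  // unreachable
    }
  }

  int main() {
    assert(NegateCondition(eq) == ne);
    return 0;
  }
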
Index: src/mips/macro-assembler-mips.h
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index da22a2be8e118ebf76a5a7b5ec1dd672a815cd91..85347c9e51d1f12dc769b8c7e80b0eb6d240339a 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -169,6 +169,7 @@ class MacroAssembler: public Assembler {

   DECLARE_BRANCH_PROTOTYPES(Branch)
   DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+  DECLARE_BRANCH_PROTOTYPES(BranchShort)

 #undef DECLARE_BRANCH_PROTOTYPES
 #undef COND_TYPED_ARGS
@@ -1570,14 +1571,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
                            int num_reg_arguments,
                            int num_double_arguments);

-  void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
-  void BranchShort(int16_t offset, Condition cond, Register rs,
-                   const Operand& rt,
-                   BranchDelaySlot bdslot = PROTECT);
-  void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
-  void BranchShort(Label* L, Condition cond, Register rs,
-                   const Operand& rt,
-                   BranchDelaySlot bdslot = PROTECT);
   void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
   void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
                           const Operand& rt,

