Revision: 6669
Author: [email protected]
Date: Mon Feb  7 23:49:59 2011
Log: Fixed a number of issues in the x64 Crankshaft port:

- Don't use SmiSub when overflow can occur. It asserts that overflow
  does not happen.

- Actually use CompareICs and signal to Crankshaft whether or not smi
  code was inlined.

- Fix bug in CmpI where 64 bits were compared instead of 32 bits.

- Implement Throw, DeferredStackCheck, StoreKeyedFastElement in
  lithium backend.

BUG=
TEST=

Review URL: http://codereview.chromium.org/6312193
http://code.google.com/p/v8/source/detail?r=6669

Modified:
 /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc
 /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc
 /branches/bleeding_edge/src/messages.js
 /branches/bleeding_edge/src/x64/assembler-x64.h
 /branches/bleeding_edge/src/x64/code-stubs-x64.cc
 /branches/bleeding_edge/src/x64/full-codegen-x64.cc
 /branches/bleeding_edge/src/x64/ic-x64.cc
 /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
 /branches/bleeding_edge/src/x64/lithium-x64.cc

=======================================
--- /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc      Mon Feb  7 01:55:42 2011
+++ /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc      Mon Feb  7 23:49:59 2011
@@ -3949,8 +3949,7 @@
   // Call stub for +1/-1.
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
-  TypeRecordingBinaryOpStub stub(expr->binary_op(),
-                                 NO_OVERWRITE);
+  TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
   EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);

=======================================
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc   Fri Feb  4 07:42:02 2011
+++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc   Mon Feb  7 23:49:59 2011
@@ -2035,7 +2035,10 @@
   ASSERT(result.is(elements));

   // Load the result.
-  __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+  __ mov(result, FieldOperand(elements,
+                              key,
+                              times_pointer_size,
+                              FixedArray::kHeaderSize));

   // Check for the hole value.
   __ cmp(result, Factory::the_hole_value());
@@ -2661,13 +2664,20 @@
         ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
     __ mov(FieldOperand(elements, offset), value);
   } else {
-    __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
+    __ mov(FieldOperand(elements,
+                        key,
+                        times_pointer_size,
+                        FixedArray::kHeaderSize),
            value);
   }

   if (instr->hydrogen()->NeedsWriteBarrier()) {
     // Compute address of modified element and store it into key register.
-    __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+    __ lea(key,
+           FieldOperand(elements,
+                        key,
+                        times_pointer_size,
+                        FixedArray::kHeaderSize));
     __ RecordWrite(elements, key, value);
   }
 }
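
The times_4 -> times_pointer_size change is behavior-neutral on ia32, where
kPointerSize is 4, but it makes the scale factor self-documenting and keeps
this code in step with the x64 port, where elements are pointer-sized at 8
bytes. A minimal standalone sketch of the address arithmetic FieldOperand
expresses (hypothetical constants, not V8 source; the header layout is an
assumption):

  #include <cstdint>
  #include <cstdio>

  const int kPointerSize = sizeof(void*);  // 4 on ia32, 8 on x64
  const int kHeapObjectTag = 1;            // heap objects are tagged pointers
  const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length (assumed)

  // 'elements' is a tagged FixedArray pointer, 'key' an untagged index.
  // FieldOperand(base, index, times_pointer_size, disp) names the operand
  // [base + index * kPointerSize + disp - kHeapObjectTag]:
  uintptr_t ElementAddress(uintptr_t elements, intptr_t key) {
    return elements + key * kPointerSize + kFixedArrayHeaderSize
           - kHeapObjectTag;
  }

  int main() {
    printf("%p\n", reinterpret_cast<void*>(ElementAddress(0x10001, 3)));
    return 0;
  }
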
=======================================
--- /branches/bleeding_edge/src/messages.js     Fri Feb  4 10:36:37 2011
+++ /branches/bleeding_edge/src/messages.js     Mon Feb  7 23:49:59 2011
@@ -316,6 +316,7 @@
       return i;
     }
   }
+
   return -1;
 }

=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.h     Fri Feb  4 06:09:03 2011
+++ /branches/bleeding_edge/src/x64/assembler-x64.h     Mon Feb  7 23:49:59 2011
@@ -565,6 +565,8 @@

   // One byte opcode for test eax,0xXXXXXXXX.
   static const byte kTestEaxByte = 0xA9;
+  // One byte opcode for test al, 0xXX.
+  static const byte kTestAlByte = 0xA8;

   // ---------------------------------------------------------------------------
   // Code generation
=======================================
--- /branches/bleeding_edge/src/x64/code-stubs-x64.cc  Thu Feb  3 07:36:44 2011
+++ /branches/bleeding_edge/src/x64/code-stubs-x64.cc  Mon Feb  7 23:49:59 2011
@@ -4627,10 +4627,10 @@

   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
-    __ SmiSub(rax, rax, rdx);
+    __ subq(rax, rdx);
   } else {
     NearLabel done;
-    __ SmiSub(rdx, rdx, rax);
+    __ subq(rdx, rax);
     __ j(no_overflow, &done);
     // Correct sign of result in case of overflow.
     __ SmiNot(rdx, rdx);
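
As the log says, SmiSub asserts that overflow does not happen, and it can
here: on x64 a smi carries its 32-bit payload in the upper half of the word,
so subtracting two valid smis of opposite sign can overflow 64-bit signed
arithmetic. The raw difference is still non-zero, which is all the equality
path needs, but its sign can be wrong, hence the overflow check and SmiNot
fix-up on the ordered path above. A standalone illustration (hypothetical
helper, not V8 source):

  #include <cstdint>
  #include <cstdio>

  // On x64, a smi's 32-bit payload occupies the upper half of the word.
  uint64_t ToSmi(int32_t value) {
    return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
  }

  int main() {
    uint64_t lhs = ToSmi(INT32_MIN);  // 0x8000000000000000
    uint64_t rhs = ToSmi(1);          // 0x0000000100000000
    uint64_t diff = lhs - rhs;        // overflows as a signed subtraction

    // diff is non-zero, so equality still works, but its sign bit is 0 even
    // though lhs < rhs; an ordered compare must detect the overflow and flip
    // the sign, which is what the SmiNot path above does.
    printf("diff = %016llx  sign bit = %d\n",
           static_cast<unsigned long long>(diff),
           static_cast<int>(diff >> 63));
    return 0;
  }
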
=======================================
--- /branches/bleeding_edge/src/x64/full-codegen-x64.cc        Mon Feb  7 06:15:05 2011
+++ /branches/bleeding_edge/src/x64/full-codegen-x64.cc        Mon Feb  7 23:49:59 2011
@@ -43,6 +43,58 @@

 #define __ ACCESS_MASM(masm_)

+
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm)
+      : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    ASSERT(patch_site_.is_bound() == info_emitted_);
+  }
+
+  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+    __ testb(reg, Immediate(kSmiTagMask));
+    EmitJump(not_carry, target);   // Always taken before patched.
+  }
+
+  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+    __ testb(reg, Immediate(kSmiTagMask));
+    EmitJump(carry, target);  // Never taken before patched.
+  }
+
+  void EmitPatchInfo() {
+    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+    ASSERT(is_int8(delta_to_patch_site));
+    __ testl(rax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+    info_emitted_ = true;
+#endif
+  }
+
+  bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+  // jc will be patched with jz, jnc will become jnz.
+  void EmitJump(Condition cc, NearLabel* target) {
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    ASSERT(cc == carry || cc == not_carry);
+    __ bind(&patch_site_);
+    __ j(cc, target);
+  }
+
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
@@ -728,21 +780,25 @@
     // Perform the comparison as if via '==='.
     __ movq(rdx, Operand(rsp, 0));  // Switch value.
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
-      Label slow_case;
-      __ JumpIfNotBothSmi(rdx, rax, &slow_case);
-      __ SmiCompare(rdx, rax);
+      NearLabel slow_case;
+      __ movq(rcx, rdx);
+      __ or_(rcx, rax);
+      patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+
+      __ cmpq(rdx, rax);
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
       __ jmp(clause->body_target()->entry_label());
       __ bind(&slow_case);
     }

-    CompareFlags flags = inline_smi_code
-        ? NO_SMI_COMPARE_IN_STUB
-        : NO_COMPARE_FLAGS;
-    CompareStub stub(equal, true, flags);
-    __ CallStub(&stub);
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+    EmitCallIC(ic, &patch_site);
+
     __ testq(rax, rax);
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -1522,16 +1578,17 @@
   // Do combined smi check of the operands. Left operand is on the
   // stack (popped into rdx). Right operand is in rax but moved into
   // rcx to make the shifts easier.
-  Label done, stub_call, smi_case;
+  NearLabel done, stub_call, smi_case;
   __ pop(rdx);
   __ movq(rcx, rax);
-  Condition smi = masm()->CheckBothSmi(rdx, rax);
-  __ j(smi, &smi_case);
+  __ or_(rax, rdx);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(rax, &smi_case);

   __ bind(&stub_call);
-  TypeRecordingBinaryOpStub stub(op, mode);
   __ movq(rax, rcx);
-  __ CallStub(&stub);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);

   __ bind(&smi_case);
@@ -3197,7 +3254,9 @@
   }

   // Inline smi case if we are in a loop.
-  Label stub_call, done;
+  NearLabel stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
       __ SmiAddConstant(rax, rax, Smi::FromInt(1));
@@ -3207,8 +3266,7 @@
     __ j(overflow, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    is_smi = masm_->CheckSmi(rax);
-    __ j(is_smi, &done);
+    patch_site.EmitJumpIfSmi(rax, &done);

     __ bind(&stub_call);
     // Call stub. Undo operation first.
@@ -3230,9 +3288,9 @@
     __ movq(rdx, rax);
     __ Move(rax, Smi::FromInt(1));
   }
-  __ CallStub(&stub);
-
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);
+
   // Store the value returned in rax.
   switch (assign_type) {
     case VARIABLE:
@@ -3500,19 +3558,21 @@
       }

       bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
-        Label slow_case;
-        __ JumpIfNotBothSmi(rax, rdx, &slow_case);
-        __ SmiCompare(rdx, rax);
+        NearLabel slow_case;
+        __ movq(rcx, rdx);
+        __ or_(rcx, rax);
+        patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+        __ cmpq(rdx, rax);
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }

-      CompareFlags flags = inline_smi_code
-          ? NO_SMI_COMPARE_IN_STUB
-          : NO_COMPARE_FLAGS;
-      CompareStub stub(cc, strict, flags);
-      __ CallStub(&stub);
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      EmitCallIC(ic, &patch_site);

       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ testq(rax, rax);
@@ -3615,6 +3675,16 @@
       break;
   }
 }
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+  __ call(ic, RelocInfo::CODE_TARGET);
+  if (patch_site != NULL && patch_site->is_bound()) {
+    patch_site->EmitPatchInfo();
+  } else {
+    __ nop();  // Signals no inlined code.
+  }
+}


void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
=======================================
--- /branches/bleeding_edge/src/x64/ic-x64.cc   Thu Feb  3 07:36:44 2011
+++ /branches/bleeding_edge/src/x64/ic-x64.cc   Mon Feb  7 23:49:59 2011
@@ -1671,13 +1671,25 @@
       return no_condition;
   }
 }
+
+
+static bool HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  return *test_instruction_address == Assembler::kTestAlByte;
+}


 void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   HandleScope scope;
   Handle<Code> rewritten;
   State previous_state = GetState();
-  State state = TargetState(previous_state, false, x, y);
+
+  State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
   if (state == GENERIC) {
     CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
     rewritten = stub.GetCode();
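
HasInlinedSmiCode is the reader side of the convention that
JumpPatchSite::EmitPatchInfo and EmitCallIC establish in full-codegen-x64.cc
above: the instruction after the IC call is either a byte-immediate test
starting with kTestAlByte, whose imm8 operand encodes the distance back to
the patchable jc/jnc, or a plain nop meaning no smi code was inlined. A
decoding sketch under that assumption (hypothetical function, not V8 source):

  #include <cstdint>

  const uint8_t kTestAlByte = 0xA8;  // 'test al, imm8', as declared above

  // 'after_call' points at the instruction following the IC call site.
  // Returns the address of the patchable jump, or nullptr when the marker
  // is a nop, i.e. no smi code was inlined at this call site.
  const uint8_t* FindPatchSite(const uint8_t* after_call) {
    if (after_call[0] != kTestAlByte) return nullptr;
    uint8_t delta_to_patch_site = after_call[1];  // imm8 operand of the test
    return after_call - delta_to_patch_site;      // back to the jc/jnc
  }
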
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc     Mon Feb  7 06:15:05 2011
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc     Mon Feb  7 23:49:59 2011
@@ -810,7 +810,13 @@


 void LCodeGen::DoThrow(LThrow* instr) {
-  Abort("Unimplemented: %s", "DoThrow");
+  __ push(ToRegister(instr->InputAt(0)));
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    Comment("Unreachable code.");
+    __ int3();
+  }
 }


@@ -963,7 +969,11 @@


 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  Abort("Unimplemented: %s", "DoDeferredStackCheck");
+  __ Pushad();
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ Popad();
 }


@@ -1022,9 +1032,9 @@
       __ cmpl(ToOperand(left), Immediate(value));
     }
   } else if (right->IsRegister()) {
-    __ cmpq(ToRegister(left), ToRegister(right));
+    __ cmpl(ToRegister(left), ToRegister(right));
   } else {
-    __ cmpq(ToRegister(left), ToOperand(right));
+    __ cmpl(ToRegister(left), ToOperand(right));
   }
 }

@@ -1869,7 +1879,33 @@


 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
-  Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->object());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ movq(FieldOperand(elements, offset), value);
+  } else {
+    __ movq(FieldOperand(elements,
+                         key,
+                         times_pointer_size,
+                         FixedArray::kHeaderSize),
+            value);
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    // Compute address of modified element and store it into key register.
+    __ lea(key, FieldOperand(elements,
+                             key,
+                             times_pointer_size,
+                             FixedArray::kHeaderSize));
+    __ RecordWrite(elements, key, value);
+  }
 }


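The cmpq -> cmpl change above is the CmpI fix called out in the log: the
operands are Integer32 values, so only the low 32 bits of each register are
defined and the upper halves may hold stale bits. A 64-bit compare can then
disagree with the intended 32-bit result. A standalone illustration (not V8
source):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // The low 32 bits hold the same Integer32 value; the upper halves are
    // stale leftovers from earlier computations.
    uint64_t reg_left  = 0xDEADBEEF00000007ull;
    uint64_t reg_right = 0x0000000000000007ull;

    int eq64 = (reg_left == reg_right);             // cmpq view: not equal
    int eq32 = (static_cast<uint32_t>(reg_left) ==
                static_cast<uint32_t>(reg_right));  // cmpl view: equal

    printf("cmpq-style equal: %d, cmpl-style equal: %d\n", eq64, eq32);
    return 0;
  }
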
=======================================
--- /branches/bleeding_edge/src/x64/lithium-x64.cc      Mon Feb  7 06:11:53 2011
+++ /branches/bleeding_edge/src/x64/lithium-x64.cc      Mon Feb  7 23:49:59 2011
@@ -1439,8 +1439,8 @@


 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
-  Abort("Unimplemented: %s", "DoThrow");
-  return NULL;
+  LOperand* value = UseFixed(instr->value(), rax);
+  return MarkAsCall(new LThrow(value), instr);
 }


@@ -1640,8 +1640,20 @@

 LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
     HStoreKeyedFastElement* instr) {
-  Abort("Unimplemented: %s", "DoStoreKeyedFastElement");
-  return NULL;
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
 }

