Revision: 2673
Author: [email protected]
Date: Thu Aug 13 01:00:04 2009
Log: X64: Add an SHL optimization, fix a floating-point bug, fix
xchg rax,r8, and fix printing of "test ?ax, imm" in the disassembler.
Review URL: http://codereview.chromium.org/164399
http://code.google.com/p/v8/source/detail?r=2673

Modified:
  /branches/bleeding_edge/src/x64/codegen-x64.cc
  /branches/bleeding_edge/src/x64/disasm-x64.cc

=======================================
--- /branches/bleeding_edge/src/x64/codegen-x64.cc      Tue Aug 11 08:30:09 2009
+++ /branches/bleeding_edge/src/x64/codegen-x64.cc      Thu Aug 13 01:00:04 2009
@@ -5258,6 +5258,58 @@
        }
        break;

+    case Token::SHL:
+      if (reversed) {
+        Result constant_operand(value);
+        LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                 overwrite_mode);
+      } else {
+        // Only the least significant 5 bits of the shift value are used.
+        // In the slow case, this masking is done inside the runtime call.
+        int shift_value = int_value & 0x1f;
+        operand->ToRegister();
+        if (shift_value == 0) {
+          // Spill operand so it can be overwritten in the slow case.
+          frame_->Spill(operand->reg());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             operand->reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ testl(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          deferred->BindExit();
+          frame_->Push(operand);
+        } else {
+          // Use a fresh temporary for nonzero shift values.
+          Result answer = allocator()->Allocate();
+          ASSERT(answer.is_valid());
+          DeferredInlineSmiOperation* deferred =
+              new DeferredInlineSmiOperation(op,
+                                             answer.reg(),
+                                             operand->reg(),
+                                             smi_value,
+                                             overwrite_mode);
+          __ testl(operand->reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+          __ movl(answer.reg(), operand->reg());
+          ASSERT(kSmiTag == 0);  // adjust code if not the case
+          // We do no shifts, only the Smi conversion, if shift_value is 1.
+          if (shift_value > 1) {
+            __ shll(answer.reg(), Immediate(shift_value - 1));
+          }
+          // Convert int result to Smi, checking that it is in int range.
+          ASSERT(kSmiTagSize == 1);  // adjust code if not the case
+          __ addl(answer.reg(), answer.reg());
+          deferred->Branch(overflow);
+          deferred->BindExit();
+          operand->Unuse();
+          frame_->Push(&answer);
+        }
+      }
+      break;
+
      case Token::BIT_OR:
      case Token::BIT_XOR:
      case Token::BIT_AND: {
@@ -6013,6 +6065,8 @@
          __ testl(key.reg(),
                   Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
          deferred->Branch(not_zero);
+        // Ensure that the smi is zero-extended.  This is not guaranteed.
+        __ movl(key.reg(), key.reg());

          // Check that the receiver is not a smi.
          __ testl(receiver.reg(), Immediate(kSmiTagMask));
@@ -7172,14 +7226,14 @@
    __ jmp(&done);

    __ bind(&load_smi_1);
-  __ sar(kScratchRegister, Immediate(kSmiTagSize));
+  __ sarl(kScratchRegister, Immediate(kSmiTagSize));
    __ push(kScratchRegister);
    __ fild_s(Operand(rsp, 0));
    __ pop(kScratchRegister);
    __ jmp(&done_load_1);

    __ bind(&load_smi_2);
-  __ sar(kScratchRegister, Immediate(kSmiTagSize));
+  __ sarl(kScratchRegister, Immediate(kSmiTagSize));
    __ push(kScratchRegister);
    __ fild_s(Operand(rsp, 0));
    __ pop(kScratchRegister);
@@ -7534,7 +7588,7 @@
          __ j(negative, &non_smi_result);
        }
        // Tag smi result and return.
-      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
        __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
        __ ret(2 * kPointerSize);

=======================================
--- /branches/bleeding_edge/src/x64/disasm-x64.cc       Tue Aug  4 06:46:58 2009
+++ /branches/bleeding_edge/src/x64/disasm-x64.cc       Thu Aug 13 01:00:04 2009
@@ -105,7 +105,6 @@
  static ByteMnemonic zero_operands_instr[] = {
    { 0xC3, UNSET_OP_ORDER, "ret" },
    { 0xC9, UNSET_OP_ORDER, "leave" },
-  { 0x90, UNSET_OP_ORDER, "nop" },
    { 0xF4, UNSET_OP_ORDER, "hlt" },
    { 0xCC, UNSET_OP_ORDER, "int3" },
    { 0x60, UNSET_OP_ORDER, "pushad" },
@@ -1425,7 +1424,7 @@
            default:
              UNREACHABLE();
          }
-        AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"ux",
+        AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"x",
                         operand_size_code(),
                         value);
          break;

--~--~---------~--~----~------------~-------~--~----~
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
-~----------~----~----~----~------~----~------~--~---

Reply via email to