Revision: 17268
Author:   [email protected]
Date:     Fri Oct 18 10:54:45 2013 UTC
Log:      Tweak Math.log on ia32/x64

ia32 and x64 are now more consistent:
1. use the non-transcendental-cache version of log on x64
2. use the negative-infinity constant instead of pushing to the stack and loading into an XMM register
3. remove movdbl and use movsd directly; movdbl seemed confusing

BUG=
[email protected]

Review URL: https://codereview.chromium.org/27197013

Patch from Weiliang Lin <[email protected]>.
http://code.google.com/p/v8/source/detail?r=17268

Modified:
 /branches/bleeding_edge/src/ia32/assembler-ia32.cc
 /branches/bleeding_edge/src/ia32/assembler-ia32.h
 /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc
 /branches/bleeding_edge/src/ia32/code-stubs-ia32.h
 /branches/bleeding_edge/src/ia32/codegen-ia32.cc
 /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
 /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc
 /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc
 /branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.cc
 /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc
 /branches/bleeding_edge/src/ia32/stub-cache-ia32.cc
 /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
 /branches/bleeding_edge/src/x64/lithium-x64.cc
 /branches/bleeding_edge/test/cctest/test-assembler-ia32.cc
 /branches/bleeding_edge/test/cctest/test-disasm-ia32.cc

=======================================
--- /branches/bleeding_edge/src/ia32/assembler-ia32.cc Tue Oct 15 12:51:58 2013 UTC +++ /branches/bleeding_edge/src/ia32/assembler-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -2067,6 +2067,7 @@


 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0xF2);
   EMIT(0x0F);
@@ -2076,6 +2077,7 @@


 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2085,6 +2087,7 @@


 void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2245,18 +2248,6 @@
   XMMRegister code = XMMRegister::from_code(level);
   emit_sse_operand(code, src);
 }
-
-
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
-  EnsureSpace ensure_space(this);
-  movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
-  EnsureSpace ensure_space(this);
-  movsd(dst, src);
-}


 void Assembler::movsd(const Operand& dst, XMMRegister src ) {
=======================================
--- /branches/bleeding_edge/src/ia32/assembler-ia32.h Tue Oct 15 12:51:58 2013 UTC +++ /branches/bleeding_edge/src/ia32/assembler-ia32.h Fri Oct 18 10:54:45 2013 UTC
@@ -1071,16 +1071,15 @@
       movdqu(dst, src);
     }
   }
-
-  // Use either movsd or movlpd.
-  void movdbl(XMMRegister dst, const Operand& src);
-  void movdbl(const Operand& dst, XMMRegister src);

   void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
   void movd(XMMRegister dst, const Operand& src);
   void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
   void movd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, XMMRegister src);
+  void movsd(XMMRegister dst, const Operand& src);
+  void movsd(const Operand& dst, XMMRegister src);
+

   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
@@ -1162,9 +1161,6 @@
   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }

  protected:
-  void movsd(XMMRegister dst, const Operand& src);
-  void movsd(const Operand& dst, XMMRegister src);
-
   void emit_sse_operand(XMMRegister reg, const Operand& adr);
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(Register dst, XMMRegister src);
=======================================
--- /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc Thu Oct 10 10:37:18 2013 UTC +++ /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -454,7 +454,7 @@
     __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      __ movdbl(Operand(esp, i * kDoubleSize), reg);
+      __ movsd(Operand(esp, i * kDoubleSize), reg);
     }
   }
   const int argument_count = 1;
@@ -470,7 +470,7 @@
     CpuFeatureScope scope(masm, SSE2);
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      __ movdbl(reg, Operand(esp, i * kDoubleSize));
+      __ movsd(reg, Operand(esp, i * kDoubleSize));
     }
     __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
   }
@@ -770,7 +770,7 @@
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
     CpuFeatureScope scope(masm, SSE2);
-    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }

@@ -785,7 +785,7 @@
     CpuFeatureScope scope(masm, SSE2);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ sub(esp, Immediate(kDoubleSize));
-    __ movdbl(Operand(esp, 0), xmm1);
+    __ movsd(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
     __ add(esp, Immediate(kDoubleSize));
   }
@@ -798,17 +798,17 @@
     __ ret(kPointerSize);
   } else {  // UNTAGGED.
     CpuFeatureScope scope(masm, SSE2);
-    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();

     // Skip cache and return answer directly, only in untagged case.
     __ bind(&skip_cache);
     __ sub(esp, Immediate(kDoubleSize));
-    __ movdbl(Operand(esp, 0), xmm1);
+    __ movsd(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
     GenerateOperation(masm, type_);
     __ fstp_d(Operand(esp, 0));
-    __ movdbl(xmm1, Operand(esp, 0));
+    __ movsd(xmm1, Operand(esp, 0));
     __ add(esp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
@@ -834,13 +834,13 @@
     __ bind(&runtime_call_clear_stack);
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
-    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+    __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
       __ push(eax);
       __ CallRuntime(RuntimeFunction(), 1);
     }
-    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
 }
@@ -983,7 +983,7 @@
   Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
   __ j(not_equal, not_numbers);  // Argument in edx is not a number.
-  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
   __ bind(&load_eax);
   // Load operand in eax into xmm1, or branch to not_numbers.
   __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
@@ -1001,7 +1001,7 @@
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
-  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
   __ bind(&done);
 }

@@ -1059,7 +1059,7 @@
            factory->heap_number_map());
     __ j(not_equal, &call_runtime);

-    __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
     __ jmp(&unpack_exponent, Label::kNear);

     __ bind(&base_is_smi);
@@ -1075,7 +1075,7 @@
     __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
            factory->heap_number_map());
     __ j(not_equal, &call_runtime);
-    __ movdbl(double_exponent,
+    __ movsd(double_exponent,
               FieldOperand(exponent, HeapNumber::kValueOffset));
   } else if (exponent_type_ == TAGGED) {
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -1083,7 +1083,7 @@
     __ jmp(&int_exponent);

     __ bind(&exponent_not_smi);
-    __ movdbl(double_exponent,
+    __ movsd(double_exponent,
               FieldOperand(exponent, HeapNumber::kValueOffset));
   }

@@ -1178,9 +1178,9 @@
     __ fnclex();  // Clear flags to catch exceptions later.
     // Transfer (B)ase and (E)xponent onto the FPU register stack.
     __ sub(esp, Immediate(kDoubleSize));
-    __ movdbl(Operand(esp, 0), double_exponent);
+    __ movsd(Operand(esp, 0), double_exponent);
     __ fld_d(Operand(esp, 0));  // E
-    __ movdbl(Operand(esp, 0), double_base);
+    __ movsd(Operand(esp, 0), double_base);
     __ fld_d(Operand(esp, 0));  // B, E

     // Exponent is in st(1) and base is in st(0)
@@ -1203,7 +1203,7 @@
     __ test_b(eax, 0x5F);  // We check for all but precision exception.
     __ j(not_zero, &fast_power_failed, Label::kNear);
     __ fstp_d(Operand(esp, 0));
-    __ movdbl(double_result, Operand(esp, 0));
+    __ movsd(double_result, Operand(esp, 0));
     __ add(esp, Immediate(kDoubleSize));
     __ jmp(&done);

@@ -1270,7 +1270,7 @@
     // as heap number in exponent.
     __ bind(&done);
     __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
-    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+    __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
     __ IncrementCounter(counters->math_pow(), 1);
     __ ret(2 * kPointerSize);
   } else {
@@ -1278,8 +1278,8 @@
     {
       AllowExternalCallThatCantCauseGC scope(masm);
       __ PrepareCallCFunction(4, scratch);
-      __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
-      __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+      __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+      __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
       __ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
     }
@@ -1287,7 +1287,7 @@
     // Store it into the (fixed) result register.
     __ sub(esp, Immediate(kDoubleSize));
     __ fstp_d(Operand(esp, 0));
-    __ movdbl(double_result, Operand(esp, 0));
+    __ movsd(double_result, Operand(esp, 0));
     __ add(esp, Immediate(kDoubleSize));

     __ bind(&done);
@@ -4730,7 +4730,7 @@
     __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
            masm->isolate()->factory()->heap_number_map());
     __ j(not_equal, &maybe_undefined1, Label::kNear);
-    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ jmp(&left, Label::kNear);
     __ bind(&right_smi);
     __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
@@ -4742,7 +4742,7 @@
     __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
            masm->isolate()->factory()->heap_number_map());
     __ j(not_equal, &maybe_undefined2, Label::kNear);
-    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
     __ jmp(&done);
     __ bind(&left_smi);
     __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
=======================================
--- /branches/bleeding_edge/src/ia32/code-stubs-ia32.h Tue Sep 24 09:31:07 2013 UTC +++ /branches/bleeding_edge/src/ia32/code-stubs-ia32.h Fri Oct 18 10:54:45 2013 UTC
@@ -444,7 +444,7 @@
         // Save all XMM registers except XMM0.
         for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
           XMMRegister reg = XMMRegister::from_code(i);
-          masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+          masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
         }
       }
     }
@@ -456,7 +456,7 @@
         // Restore all XMM registers except XMM0.
         for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
           XMMRegister reg = XMMRegister::from_code(i);
-          masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+          masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
         }
         masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
=======================================
--- /branches/bleeding_edge/src/ia32/codegen-ia32.cc Tue Oct 15 15:04:29 2013 UTC +++ /branches/bleeding_edge/src/ia32/codegen-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -117,7 +117,7 @@
     CpuFeatureScope use_sse2(&masm, SSE2);
     XMMRegister input = xmm1;
     XMMRegister result = xmm2;
-    __ movdbl(input, Operand(esp, 1 * kPointerSize));
+    __ movsd(input, Operand(esp, 1 * kPointerSize));
     __ push(eax);
     __ push(ebx);

@@ -125,7 +125,7 @@

     __ pop(ebx);
     __ pop(eax);
-    __ movdbl(Operand(esp, 1 * kPointerSize), result);
+    __ movsd(Operand(esp, 1 * kPointerSize), result);
     __ fld_d(Operand(esp, 1 * kPointerSize));
     __ Ret();
   }
@@ -155,9 +155,9 @@
   // Move double input into registers.
   {
     CpuFeatureScope use_sse2(&masm, SSE2);
-    __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
     __ sqrtsd(xmm0, xmm0);
-    __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
     // Load result into floating point register as return value.
     __ fld_d(Operand(esp, 1 * kPointerSize));
     __ Ret();
@@ -462,10 +462,10 @@
       Label medium_handlers, f9_16, f17_32, f33_48, f49_63;

       __ bind(&f9_16);
-      __ movdbl(xmm0, Operand(src, 0));
-      __ movdbl(xmm1, Operand(src, count, times_1, -8));
-      __ movdbl(Operand(dst, 0), xmm0);
-      __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+      __ movsd(xmm0, Operand(src, 0));
+      __ movsd(xmm1, Operand(src, count, times_1, -8));
+      __ movsd(Operand(dst, 0), xmm0);
+      __ movsd(Operand(dst, count, times_1, -8), xmm1);
       MemMoveEmitPopAndReturn(&masm);

       __ bind(&f17_32);
@@ -741,7 +741,7 @@
   XMMRegister the_hole_nan = xmm1;
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope use_sse2(masm, SSE2);
-    __ movdbl(the_hole_nan,
+    __ movsd(the_hole_nan,
               Operand::StaticVariable(canonical_the_hole_nan_reference));
   }
   __ jmp(&entry);
@@ -767,7 +767,7 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(masm, SSE2);
     __ Cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), + __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
               xmm0);
   } else {
     __ push(ebx);
@@ -787,7 +787,7 @@

   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), + __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
               the_hole_nan);
   } else {
     __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
@@ -896,9 +896,9 @@
   // edx: new heap number
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(masm, SSE2);
-    __ movdbl(xmm0,
+    __ movsd(xmm0,
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
-    __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+    __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
   } else {
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
     __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
@@ -1078,20 +1078,20 @@

   Label done;

-  __ movdbl(double_scratch, ExpConstant(0));
+  __ movsd(double_scratch, ExpConstant(0));
   __ xorpd(result, result);
   __ ucomisd(double_scratch, input);
   __ j(above_equal, &done);
   __ ucomisd(input, ExpConstant(1));
-  __ movdbl(result, ExpConstant(2));
+  __ movsd(result, ExpConstant(2));
   __ j(above_equal, &done);
-  __ movdbl(double_scratch, ExpConstant(3));
-  __ movdbl(result, ExpConstant(4));
+  __ movsd(double_scratch, ExpConstant(3));
+  __ movsd(result, ExpConstant(4));
   __ mulsd(double_scratch, input);
   __ addsd(double_scratch, result);
   __ movd(temp2, double_scratch);
   __ subsd(double_scratch, result);
-  __ movdbl(result, ExpConstant(6));
+  __ movsd(result, ExpConstant(6));
   __ mulsd(double_scratch, ExpConstant(5));
   __ subsd(double_scratch, input);
   __ subsd(result, double_scratch);
@@ -1108,7 +1108,7 @@
   __ shl(temp1, 20);
   __ movd(input, temp1);
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
-  __ movdbl(double_scratch, Operand::StaticArray(
+  __ movsd(double_scratch, Operand::StaticArray(
       temp2, times_8, ExternalReference::math_exp_log_table()));
   __ por(input, double_scratch);
   __ mulsd(result, input);
=======================================
--- /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Fri Oct 4 08:17:11 2013 UTC +++ /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -250,7 +250,7 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int offset = i * kDoubleSize;
-      __ movdbl(Operand(esp, offset), xmm_reg);
+      __ movsd(Operand(esp, offset), xmm_reg);
     }
   }

@@ -302,8 +302,8 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       int dst_offset = i * kDoubleSize + double_regs_offset;
       int src_offset = i * kDoubleSize;
-      __ movdbl(xmm0, Operand(esp, src_offset));
-      __ movdbl(Operand(ebx, dst_offset), xmm0);
+      __ movsd(xmm0, Operand(esp, src_offset));
+      __ movsd(Operand(ebx, dst_offset), xmm0);
     }
   }

@@ -388,7 +388,7 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int src_offset = i * kDoubleSize + double_regs_offset;
-      __ movdbl(xmm_reg, Operand(ebx, src_offset));
+      __ movsd(xmm_reg, Operand(ebx, src_offset));
     }
   }

=======================================
--- /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc Wed Sep 25 08:26:11 2013 UTC +++ /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -3310,7 +3310,7 @@
     __ cvtss2sd(xmm1, xmm1);
     __ xorps(xmm0, xmm1);
     __ subsd(xmm0, xmm1);
-    __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+    __ movsd(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
   } else {
     // 0x4130000000000000 is 1.0 x 2^20 as a double.
     __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
=======================================
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Wed Oct 16 12:20:21 2013 UTC +++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -257,7 +257,7 @@
       BitVector* doubles = chunk()->allocated_double_registers();
       BitVector::Iterator save_iterator(doubles);
       while (!save_iterator.Done()) {
-        __ movdbl(MemOperand(esp, count * kDoubleSize),
+        __ movsd(MemOperand(esp, count * kDoubleSize),
XMMRegister::FromAllocationIndex(save_iterator.Current()));
         save_iterator.Advance();
         count++;
@@ -2233,8 +2233,8 @@
       case Token::MOD: {
         // Pass two doubles as arguments on the stack.
         __ PrepareCallCFunction(4, eax);
-        __ movdbl(Operand(esp, 0 * kDoubleSize), left);
-        __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+        __ movsd(Operand(esp, 0 * kDoubleSize), left);
+        __ movsd(Operand(esp, 1 * kDoubleSize), right);
         __ CallCFunction(
             ExternalReference::double_fp_operation(Token::MOD, isolate()),
             4);
@@ -2243,7 +2243,7 @@
         // Store it into the result register.
         __ sub(Operand(esp), Immediate(kDoubleSize));
         __ fstp_d(Operand(esp, 0));
-        __ movdbl(result, Operand(esp, 0));
+        __ movsd(result, Operand(esp, 0));
         __ add(Operand(esp), Immediate(kDoubleSize));
         break;
       }
@@ -2617,7 +2617,7 @@
   if (use_sse2) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->object());
-    __ movdbl(MemOperand(esp, 0), input_reg);
+    __ movsd(MemOperand(esp, 0), input_reg);
   } else {
     __ fstp_d(MemOperand(esp, 0));
   }
@@ -3079,7 +3079,7 @@
     BitVector::Iterator save_iterator(doubles);
     int count = 0;
     while (!save_iterator.Done()) {
-      __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
                 MemOperand(esp, count * kDoubleSize));
       save_iterator.Advance();
       count++;
@@ -3247,7 +3247,7 @@
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
       XMMRegister result = ToDoubleRegister(instr->result());
-      __ movdbl(result, FieldOperand(object, offset));
+      __ movsd(result, FieldOperand(object, offset));
     } else {
       X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
     }
@@ -3401,7 +3401,7 @@
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
-      __ movdbl(ToDoubleRegister(instr->result()), operand);
+      __ movsd(ToDoubleRegister(instr->result()), operand);
     } else {
       X87Mov(ToX87Register(instr->result()), operand);
     }
@@ -3472,7 +3472,7 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister result = ToDoubleRegister(instr->result());
-    __ movdbl(result, double_load_operand);
+    __ movsd(result, double_load_operand);
   } else {
     X87Mov(ToX87Register(instr->result()), double_load_operand);
   }
@@ -3995,7 +3995,7 @@
       ExternalReference::address_of_minus_one_half();

   Label done, round_to_zero, below_one_half, do_not_compensate;
-  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
   __ ucomisd(xmm_scratch, input_reg);
   __ j(above, &below_one_half);

@@ -4009,7 +4009,7 @@
   __ jmp(&done);

   __ bind(&below_one_half);
-  __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
   __ ucomisd(xmm_scratch, input_reg);
   __ j(below_equal, &round_to_zero);

@@ -4196,22 +4196,21 @@
   __ j(equal, &zero, Label::kNear);
   ExternalReference nan =
       ExternalReference::address_of_canonical_non_hole_nan();
-  __ movdbl(input_reg, Operand::StaticVariable(nan));
+  __ movsd(input_reg, Operand::StaticVariable(nan));
   __ jmp(&done, Label::kNear);
   __ bind(&zero);
-  __ push(Immediate(0xFFF00000));
-  __ push(Immediate(0));
-  __ movdbl(input_reg, Operand(esp, 0));
-  __ add(Operand(esp), Immediate(kDoubleSize));
+  ExternalReference ninf =
+      ExternalReference::address_of_negative_infinity();
+  __ movsd(input_reg, Operand::StaticVariable(ninf));
   __ jmp(&done, Label::kNear);
   __ bind(&positive);
   __ fldln2();
   __ sub(Operand(esp), Immediate(kDoubleSize));
-  __ movdbl(Operand(esp, 0), input_reg);
+  __ movsd(Operand(esp, 0), input_reg);
   __ fld_d(Operand(esp, 0));
   __ fyl2x();
   __ fstp_d(Operand(esp, 0));
-  __ movdbl(input_reg, Operand(esp, 0));
+  __ movsd(input_reg, Operand(esp, 0));
   __ add(Operand(esp), Immediate(kDoubleSize));
   __ bind(&done);
 }
@@ -4482,7 +4481,7 @@
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
       XMMRegister value = ToDoubleRegister(instr->value());
-      __ movdbl(FieldOperand(object, offset), value);
+      __ movsd(FieldOperand(object, offset), value);
     } else {
       X87Register value = ToX87Register(instr->value());
       X87Mov(FieldOperand(object, offset), value);
@@ -4632,7 +4631,7 @@
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
-      __ movdbl(operand, ToDoubleRegister(instr->value()));
+      __ movsd(operand, ToDoubleRegister(instr->value()));
     } else {
       X87Mov(operand, ToX87Register(instr->value()));
     }
@@ -4690,11 +4689,11 @@
       __ ucomisd(value, value);
       __ j(parity_odd, &have_value);  // NaN.

-      __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+      __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
       __ bind(&have_value);
     }

-    __ movdbl(double_store_operand, value);
+    __ movsd(double_store_operand, value);
   } else {
     // Can't use SSE2 in the serializer
     if (instr->hydrogen()->IsConstantHoleStore()) {
@@ -5160,7 +5159,7 @@
   __ bind(&done);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope feature_scope(masm(), SSE2);
-    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
+    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
   } else {
     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
@@ -5204,7 +5203,7 @@
   if (use_sse2) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->value());
-    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   } else {
     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
@@ -5347,7 +5346,7 @@
     }

     // Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); + __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

     if (deoptimize_on_minus_zero) {
       XMMRegister xmm_scratch = double_scratch0();
@@ -5369,7 +5368,7 @@

       ExternalReference nan =
           ExternalReference::address_of_canonical_non_hole_nan();
-      __ movdbl(result_reg, Operand::StaticVariable(nan));
+      __ movsd(result_reg, Operand::StaticVariable(nan));
       __ jmp(&done, Label::kNear);
     }
   } else {
@@ -5758,7 +5757,7 @@

   // Heap number
   __ bind(&heap_number);
- __ movdbl(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
   __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
   __ jmp(&done, Label::kNear);

=======================================
--- /branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.cc Thu Jul 25 11:53:38 2013 UTC +++ /branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -326,7 +326,7 @@
         } else {
           __ push(Immediate(upper));
           __ push(Immediate(lower));
-          __ movdbl(dst, Operand(esp, 0));
+          __ movsd(dst, Operand(esp, 0));
           __ add(esp, Immediate(kDoubleSize));
         }
       } else {
@@ -360,7 +360,7 @@
       } else {
         ASSERT(destination->IsDoubleStackSlot());
         Operand dst = cgen_->ToOperand(destination);
-        __ movdbl(dst, src);
+        __ movsd(dst, src);
       }
     } else {
// load from the register onto the stack, store in destination, which must
@@ -378,12 +378,12 @@
       Operand src = cgen_->ToOperand(source);
       if (destination->IsDoubleRegister()) {
         XMMRegister dst = cgen_->ToDoubleRegister(destination);
-        __ movdbl(dst, src);
+        __ movsd(dst, src);
       } else {
         // We rely on having xmm0 available as a fixed scratch register.
         Operand dst = cgen_->ToOperand(destination);
-        __ movdbl(xmm0, src);
-        __ movdbl(dst, xmm0);
+        __ movsd(xmm0, src);
+        __ movsd(dst, xmm0);
       }
     } else {
// load from the stack slot on top of the floating point stack, and then
@@ -486,9 +486,9 @@
                                               : destination);
     Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
-    __ movdbl(xmm0, other);
-    __ movdbl(other, reg);
-    __ movdbl(reg, Operand(xmm0));
+    __ movsd(xmm0, other);
+    __ movsd(other, reg);
+    __ movsd(reg, Operand(xmm0));
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
     CpuFeatureScope scope(cgen_->masm(), SSE2);
     // Double-width memory-to-memory.  Spill on demand to use a general
@@ -499,12 +499,12 @@
     Operand src1 = cgen_->HighOperand(source);
     Operand dst0 = cgen_->ToOperand(destination);
     Operand dst1 = cgen_->HighOperand(destination);
-    __ movdbl(xmm0, dst0);  // Save destination in xmm0.
+    __ movsd(xmm0, dst0);  // Save destination in xmm0.
     __ mov(tmp, src0);  // Then use tmp to copy source to destination.
     __ mov(dst0, tmp);
     __ mov(tmp, src1);
     __ mov(dst1, tmp);
-    __ movdbl(src0, xmm0);
+    __ movsd(src0, xmm0);

   } else {
     // No other combinations are possible.
=======================================
--- /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc Tue Oct 15 16:12:25 2013 UTC +++ /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -232,7 +232,7 @@
   j(not_equal, &done, Label::kNear);

   sub(esp, Immediate(kDoubleSize));
-  movdbl(MemOperand(esp, 0), input_reg);
+  movsd(MemOperand(esp, 0), input_reg);
   SlowTruncateToI(result_reg, esp, 0);
   add(esp, Immediate(kDoubleSize));
   bind(&done);
@@ -344,7 +344,7 @@
     }
   } else if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(this, SSE2);
-    movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     cvttsd2si(result_reg, Operand(xmm0));
     cmp(result_reg, 0x80000000u);
     j(not_equal, &done, Label::kNear);
@@ -361,7 +361,7 @@
     if (input_reg.is(result_reg)) {
       // Input is clobbered. Restore number from double scratch.
       sub(esp, Immediate(kDoubleSize));
-      movdbl(MemOperand(esp, 0), xmm0);
+      movsd(MemOperand(esp, 0), xmm0);
       SlowTruncateToI(result_reg, esp, 0);
       add(esp, Immediate(kDoubleSize));
     } else {
@@ -390,7 +390,7 @@
     ASSERT(!temp.is(no_xmm_reg));
     CpuFeatureScope scope(this, SSE2);

-    movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     cvttsd2si(result_reg, Operand(xmm0));
     Cvtsi2sd(temp, Operand(result_reg));
     ucomisd(xmm0, temp);
@@ -452,7 +452,7 @@
   cmp(src, Immediate(0));
   ExternalReference uint32_bias =
         ExternalReference::address_of_uint32_bias();
-  movdbl(scratch, Operand::StaticVariable(uint32_bias));
+  movsd(scratch, Operand::StaticVariable(uint32_bias));
   Cvtsi2sd(dst, src);
   j(not_sign, &done, Label::kNear);
   addsd(dst, scratch);
@@ -816,9 +816,9 @@
       ExternalReference::address_of_canonical_non_hole_nan();
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope use_sse2(this, SSE2);
-    movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
     bind(&have_double_value);
-    movdbl(FieldOperand(elements, key, times_4,
+    movsd(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset),
            scratch2);
   } else {
@@ -838,7 +838,7 @@
   bind(&is_nan);
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope use_sse2(this, SSE2);
-    movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+    movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
   } else {
     fld_d(Operand::StaticVariable(canonical_nan_reference));
   }
@@ -852,7 +852,7 @@
   if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
     CpuFeatureScope fscope(this, SSE2);
     Cvtsi2sd(scratch2, scratch1);
-    movdbl(FieldOperand(elements, key, times_4,
+    movsd(FieldOperand(elements, key, times_4,
                         FixedDoubleArray::kHeaderSize - elements_offset),
            scratch2);
   } else {
@@ -1068,7 +1068,7 @@
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+      movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
     }
   } else {
     sub(esp, Immediate(argc * kPointerSize));
@@ -1112,7 +1112,7 @@
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
-      movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+      movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
     }
   }

@@ -3070,7 +3070,7 @@
   JumpIfSmi(probe, not_found);
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope fscope(this, SSE2);
-    movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+    movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
   } else {
     fld_d(FieldOperand(object, HeapNumber::kValueOffset));
=======================================
--- /branches/bleeding_edge/src/ia32/stub-cache-ia32.cc Fri Oct 11 14:05:23 2013 UTC
+++ /branches/bleeding_edge/src/ia32/stub-cache-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -879,7 +879,7 @@
                 miss_label, DONT_DO_SMI_CHECK);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+      __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
     } else {
       __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
     }
@@ -887,7 +887,7 @@
     __ bind(&do_store);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+      __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
     } else {
       __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
     }
@@ -1057,14 +1057,14 @@
                 miss_label, DONT_DO_SMI_CHECK);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+      __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
     } else {
       __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
     }
     __ bind(&do_store);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope use_sse2(masm, SSE2);
-      __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+      __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
     } else {
       __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
     }
@@ -2397,7 +2397,7 @@
   // Check if the argument is a heap number and load its value into xmm0.
   Label slow;
   __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
-  __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));

   // Check if the argument is strictly positive. Note this also
   // discards NaN.
@@ -2447,7 +2447,7 @@

   // Return a new heap number.
   __ AllocateHeapNumber(eax, ebx, edx, &slow);
-  __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+  __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
   __ ret(2 * kPointerSize);

   // Return the argument (when it's an already round heap number).
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Wed Oct 16 12:20:21 2013 UTC
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Fri Oct 18 10:54:45 2013 UTC
@@ -3673,10 +3673,35 @@


 void LCodeGen::DoMathLog(LMathLog* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  ASSERT(instr->value()->Equals(instr->result()));
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister xmm_scratch = double_scratch0();
+  Label positive, done, zero;
+  __ xorps(xmm_scratch, xmm_scratch);
+  __ ucomisd(input_reg, xmm_scratch);
+  __ j(above, &positive, Label::kNear);
+  __ j(equal, &zero, Label::kNear);
+  ExternalReference nan =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  Operand nan_operand = masm()->ExternalOperand(nan);
+  __ movsd(input_reg, nan_operand);
+  __ jmp(&done, Label::kNear);
+  __ bind(&zero);
+  ExternalReference ninf =
+      ExternalReference::address_of_negative_infinity();
+  Operand ninf_operand = masm()->ExternalOperand(ninf);
+  __ movsd(input_reg, ninf_operand);
+  __ jmp(&done, Label::kNear);
+  __ bind(&positive);
+  __ fldln2();
+  __ subq(rsp, Immediate(kDoubleSize));
+  __ movsd(Operand(rsp, 0), input_reg);
+  __ fld_d(Operand(rsp, 0));
+  __ fyl2x();
+  __ fstp_d(Operand(rsp, 0));
+  __ movsd(input_reg, Operand(rsp, 0));
+  __ addq(rsp, Immediate(kDoubleSize));
+  __ bind(&done);
 }


=======================================
--- /branches/bleeding_edge/src/x64/lithium-x64.cc Mon Oct 14 06:31:01 2013 UTC
+++ /branches/bleeding_edge/src/x64/lithium-x64.cc Fri Oct 18 10:54:45 2013 UTC
@@ -1216,9 +1216,11 @@


 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), xmm1);
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
   LMathLog* result = new(zone()) LMathLog(input);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  return DefineSameAsFirst(result);
 }


=======================================
--- /branches/bleeding_edge/test/cctest/test-assembler-ia32.cc Wed Oct 16 08:08:41 2013 UTC
+++ /branches/bleeding_edge/test/cctest/test-assembler-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -264,15 +264,15 @@
   Assembler assm(isolate, buffer, sizeof buffer);

   CpuFeatureScope fscope(&assm, SSE2);
-  __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
-  __ movdbl(xmm1, Operand(esp, 3 * kPointerSize));
+  __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
+  __ movsd(xmm1, Operand(esp, 3 * kPointerSize));
   __ addsd(xmm0, xmm1);
   __ mulsd(xmm0, xmm1);
   __ subsd(xmm0, xmm1);
   __ divsd(xmm0, xmm1);
   // Copy xmm0 to st(0) using eight bytes of stack.
   __ sub(esp, Immediate(8));
-  __ movdbl(Operand(esp, 0), xmm0);
+  __ movsd(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
   __ add(esp, Immediate(8));
   __ ret(0);
@@ -313,7 +313,7 @@
   __ cvtsi2sd(xmm0, eax);
   // Copy xmm0 to st(0) using eight bytes of stack.
   __ sub(esp, Immediate(8));
-  __ movdbl(Operand(esp, 0), xmm0);
+  __ movsd(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
   __ add(esp, Immediate(8));
   __ ret(0);
@@ -575,7 +575,7 @@
   MacroAssembler assm(isolate, buffer, sizeof buffer);
   { CpuFeatureScope fscope2(&assm, SSE2);
     CpuFeatureScope fscope41(&assm, SSE4_1);
-    __ movdbl(xmm1, Operand(esp, 4));
+    __ movsd(xmm1, Operand(esp, 4));
     __ extractps(eax, xmm1, 0x1);
     __ ret(0);
   }
=======================================
--- /branches/bleeding_edge/test/cctest/test-disasm-ia32.cc Tue Oct 15 12:51:58 2013 UTC
+++ /branches/bleeding_edge/test/cctest/test-disasm-ia32.cc Fri Oct 18 10:54:45 2013 UTC
@@ -358,8 +358,8 @@
       __ mulsd(xmm1, xmm0);
       __ subsd(xmm1, xmm0);
       __ divsd(xmm1, xmm0);
-      __ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
-      __ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
+      __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
       __ ucomisd(xmm0, xmm1);

       // 128 bit move instructions.

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.

Reply via email to