Revision: 10203
Author: [email protected]
Date: Wed Dec 7 08:15:18 2011
Log: Tweaks on Math.pow (ia32 and x64).
Review URL: http://codereview.chromium.org/8831008
http://code.google.com/p/v8/source/detail?r=10203
Modified:
/branches/bleeding_edge/src/assembler.cc
/branches/bleeding_edge/src/ia32/code-stubs-ia32.cc
/branches/bleeding_edge/src/runtime.cc
/branches/bleeding_edge/src/x64/code-stubs-x64.cc
=======================================
--- /branches/bleeding_edge/src/assembler.cc Tue Dec 6 03:56:56 2011
+++ /branches/bleeding_edge/src/assembler.cc Wed Dec 7 08:15:18 2011
@@ -1115,19 +1115,7 @@
double power_double_double(double x, double y) {
// The checks for special cases can be dropped in ia32 because it has already
// been done in generated code before bailing out here.
-#if !defined(V8_TARGET_ARCH_IA32)
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return power_double_int(x, y_int); // Returns 1.0 for exponent 0.
- }
- if (!isinf(x)) {
- if (y == 0.5) return sqrt(x + 0.0); // -0 must be converted to +0.
- if (y == -0.5) return 1.0 / sqrt(x + 0.0);
- }
-#endif
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return OS::nan_value();
- }
+ if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
return pow(x, y);
}
=======================================
--- /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc Wed Dec 7 01:44:31 2011
+++ /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc Wed Dec 7 08:15:18 2011
@@ -2948,8 +2948,7 @@
const XMMRegister double_exponent = xmm1;
const XMMRegister double_scratch = xmm4;
- Label double_int_runtime, generic_runtime, done;
- Label exponent_not_smi, int_exponent;
+ Label call_runtime, done, exponent_not_smi, int_exponent;
// Save 1 in double_result - we need this several times later on.
__ mov(scratch, Immediate(1));
@@ -2966,7 +2965,7 @@
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ cmp(FieldOperand(base, HeapObject::kMapOffset),
factory->heap_number_map());
- __ j(not_equal, &generic_runtime);
+ __ j(not_equal, &call_runtime);
__ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
@@ -2983,7 +2982,7 @@
__ bind(&exponent_not_smi);
__ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
factory->heap_number_map());
- __ j(not_equal, &generic_runtime);
+ __ j(not_equal, &call_runtime);
__ movdbl(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
@@ -3002,7 +3001,7 @@
__ cvttsd2si(exponent, Operand(double_exponent));
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmp(exponent, Immediate(0x80000000u));
- __ j(equal, &generic_runtime);
+ __ j(equal, &call_runtime);
__ cvtsi2sd(double_scratch, exponent);
// Already ruled out NaNs for exponent.
__ ucomisd(double_exponent, double_scratch);
@@ -3119,33 +3118,35 @@
__ bind(&fast_power_failed);
__ fninit();
__ add(esp, Immediate(kDoubleSize));
- __ jmp(&generic_runtime);
+ __ jmp(&call_runtime);
}
// Calculate power with integer exponent.
__ bind(&int_exponent);
const XMMRegister double_scratch2 = double_exponent;
- __ mov(scratch, exponent); // Back up exponent.
+ __ mov(scratch, exponent); // Back up exponent.
__ movsd(double_scratch, double_base); // Back up base.
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label while_true, no_multiply;
- const uint32_t kClearSignBitMask = 0x7FFFFFFF;
- __ and_(exponent, Immediate(kClearSignBitMask));
+ Label no_neg, while_true, no_multiply;
+ __ test(scratch, scratch);
+ __ j(positive, &no_neg, Label::kNear);
+ __ neg(scratch);
+ __ bind(&no_neg);
__ bind(&while_true);
- __ shr(exponent, 1);
+ __ shr(scratch, 1);
__ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(double_result, double_base);
+ __ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
- __ mulsd(double_base, double_base);
+ __ mulsd(double_scratch, double_scratch);
__ j(not_zero, &while_true);
// scratch has the original value of the exponent - if the exponent is
// negative, return 1/result.
- __ test(scratch, scratch);
+ __ test(exponent, exponent);
__ j(positive, &done);
__ divsd(double_scratch2, double_result);
__ movsd(double_result, double_scratch2);
@@ -3153,47 +3154,36 @@
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ xorps(double_scratch2, double_scratch2);
__ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
- __ j(equal, &double_int_runtime);
+ // double_exponent aliased as double_scratch2 has already been overwritten
+ // and may not have contained the exponent value in the first place when the
+ // exponent is a smi. We reset it with exponent value before bailing out.
+ __ j(not_equal, &done);
+ __ cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
__ bind(&done);
- __ AllocateHeapNumber(exponent, scratch, base, &generic_runtime);
- __ movdbl(FieldOperand(exponent, HeapNumber::kValueOffset), double_result);
+ __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
-
- // The arguments are still on the stack.
- __ bind(&generic_runtime);
- __ bind(&double_int_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
} else {
- __ jmp(&done);
-
- Label return_from_runtime;
- __ bind(&generic_runtime);
+ __ bind(&call_runtime);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(4, exponent);
+ __ PrepareCallCFunction(4, scratch);
__ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
__ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
}
- __ jmp(&return_from_runtime, Label::kNear);
-
- __ bind(&double_int_runtime);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(4, exponent);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_scratch);
- __ mov(Operand(esp, 1 * kDoubleSize), scratch);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()), 4);
- }
-
- __ bind(&return_from_runtime);
// Return value is in st(0) on ia32.
// Store it into the (fixed) result register.
__ sub(esp, Immediate(kDoubleSize));
@@ -3202,6 +3192,7 @@
__ add(esp, Immediate(kDoubleSize));
__ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1);
__ ret(0);
}
}
=======================================
--- /branches/bleeding_edge/src/runtime.cc Wed Dec 7 00:43:18 2011
+++ /branches/bleeding_edge/src/runtime.cc Wed Dec 7 08:15:18 2011
@@ -7398,7 +7398,8 @@
return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
-
+// Slow version of Math.pow. We check for fast paths for special cases.
+// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -7414,25 +7415,36 @@
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- // Returning a smi would not confuse crankshaft as this part of code is only
- // run if SSE2 was not available, in which case crankshaft is disabled.
- if (y == 0) return Smi::FromInt(1); // Returns 1 if exponent is 0.
- return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
+ int y_int = static_cast<int>(y);
+ double result;
+ if (y == y_int) {
+ result = power_double_int(x, y_int); // Returns 1 if exponent is 0.
+ } else if (y == 0.5) {
+ result = (isinf(x)) ? V8_INFINITY : sqrt(x + 0.0); // Convert -0 to +0.
+ } else if (y == -0.5) {
+ result = (isinf(x)) ? 0 : 1.0 / sqrt(x + 0.0); // Convert -0 to +0.
+ } else {
+ result = power_double_double(x, y);
+ }
+ if (isnan(result)) return isolate->heap()->nan_value();
+ return isolate->heap()->AllocateHeapNumber(result);
}
-// Fast version of Math.pow if we know that y is not an integer and
-// y is not -0.5 or 0.5. Used as slowcase from codegen.
+// Fast version of Math.pow if we know that y is not an integer and y is not
+// -0.5 or 0.5. Used as slow case from fullcodegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
+ isolate->counters()->math_pow()->Increment();
+
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (y == 0) {
return Smi::FromInt(1);
- } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return isolate->heap()->nan_value();
} else {
- return isolate->heap()->AllocateHeapNumber(pow(x, y));
+ double result = power_double_double(x, y);
+ if (isnan(result)) return isolate->heap()->nan_value();
+ return isolate->heap()->AllocateHeapNumber(result);
}
}
=======================================
--- /branches/bleeding_edge/src/x64/code-stubs-x64.cc Wed Dec 7 01:44:31 2011
+++ /branches/bleeding_edge/src/x64/code-stubs-x64.cc Wed Dec 7 08:15:18 2011
@@ -2004,8 +2004,7 @@
const XMMRegister double_exponent = xmm1;
const XMMRegister double_scratch = xmm4;
- Label double_int_runtime, generic_runtime, done;
- Label exponent_not_smi, int_exponent;
+ Label call_runtime, done, exponent_not_smi, int_exponent;
// Save 1 in double_result - we need this several times later on.
__ movq(scratch, Immediate(1));
@@ -2021,7 +2020,7 @@
__ JumpIfSmi(base, &base_is_smi, Label::kNear);
__ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &generic_runtime);
+ __ j(not_equal, &call_runtime);
__ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
@@ -2038,7 +2037,7 @@
__ bind(&exponent_not_smi);
__ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &generic_runtime);
+ __ j(not_equal, &call_runtime);
__ movsd(double_exponent, FieldOperand(exponent,
HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
@@ -2055,7 +2054,7 @@
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
__ cmpl(exponent, Immediate(0x80000000u));
- __ j(equal, &generic_runtime);
+ __ j(equal, &call_runtime);
__ cvtlsi2sd(double_scratch, exponent);
// Already ruled out NaNs for exponent.
__ ucomisd(double_exponent, double_scratch);
@@ -2169,7 +2168,7 @@
__ bind(&fast_power_failed);
__ fninit();
__ addq(rsp, Immediate(kDoubleSize));
- __ jmp(&generic_runtime);
+ __ jmp(&call_runtime);
}
// Calculate power with integer exponent.
@@ -2181,9 +2180,11 @@
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label while_true, no_multiply;
- const uint32_t kClearSignBitMask = 0x7FFFFFFF;
- __ andl(scratch, Immediate(kClearSignBitMask));
+ Label no_neg, while_true, no_multiply;
+ __ testl(scratch, scratch);
+ __ j(positive, &no_neg, Label::kNear);
+ __ negl(scratch);
+ __ bind(&no_neg);
__ bind(&while_true);
__ shrl(scratch, Immediate(1));
@@ -2194,8 +2195,7 @@
__ mulsd(double_scratch, double_scratch);
__ j(not_zero, &while_true);
- // scratch has the original value of the exponent - if the exponent is
- // negative, return 1/result.
+ // If the exponent is negative, return 1/result.
__ testl(exponent, exponent);
__ j(greater, &done);
__ divsd(double_scratch2, double_result);
@@ -2204,27 +2204,28 @@
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ xorps(double_scratch2, double_scratch2);
__ ucomisd(double_scratch2, double_result);
- __ j(equal, &double_int_runtime);
+ // double_exponent aliased as double_scratch2 has already been overwritten
+ // and may not have contained the exponent value in the first place when the
+ // input was a smi. We reset it with exponent value before bailing out.
+ __ j(not_equal, &done);
+ __ cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
// The stub is called from non-optimized code, which expects the result
// as heap number in eax.
__ bind(&done);
- __ AllocateHeapNumber(rax, rcx, &generic_runtime);
+ __ AllocateHeapNumber(rax, rcx, &call_runtime);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+ __ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
-
- // The arguments are still on the stack.
- __ bind(&generic_runtime);
- __ bind(&double_int_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
} else {
- __ jmp(&done);
-
- Label return_from_runtime;
- StubRuntimeCallHelper callhelper;
- __ bind(&generic_runtime);
+ __ bind(&call_runtime);
// Move base to the correct argument register. Exponent is already in xmm1.
__ movsd(xmm0, double_base);
ASSERT(double_exponent.is(xmm1));
@@ -2234,27 +2235,13 @@
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 2);
}
- __ jmp(&return_from_runtime, Label::kNear);
-
- __ bind(&double_int_runtime);
- // Move base to the correct argument register.
- __ movsd(xmm0, double_base);
- // Exponent is already in the correct argument register:
- // edi (not rdi) on Linux and edx on Windows.
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(2);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()), 2);
- }
-
- __ bind(&return_from_runtime);
// Return value is in xmm0.
__ movsd(double_result, xmm0);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1);
__ ret(0);
}
}
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev