Revision: 11464
Author: [email protected]
Date: Mon Apr 30 04:54:34 2012
Log: Revert r11425 because of V8 benchmark performance regression.
Original CL: https://chromiumcodereview.appspot.com/10202007 "Re-enable
optimization for hot functions that have optimization disabled due to many
deopts."
Review URL: https://chromiumcodereview.appspot.com/10265008
http://code.google.com/p/v8/source/detail?r=11464
Modified:
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/objects-inl.h
/branches/bleeding_edge/src/objects.cc
/branches/bleeding_edge/src/objects.h
/branches/bleeding_edge/src/runtime-profiler.cc
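
For context, the heuristic that this revert removes (added in r11425 and visible in the objects-inl.h and runtime-profiler.cc hunks below) re-enabled optimization for a deopt-disabled function once it had stayed hot for a power-of-two number of retry attempts. Below is a simplified standalone C++ sketch of that back-off check, using illustrative names rather than the real V8 types (the Smi::kMaxValue wrap-around of the counter is omitted):

#include <cstdio>

// Illustrative stand-in for SharedFunctionInfo's bookkeeping fields.
struct FunctionState {
  int opt_reenable_tries = 0;      // times we considered re-enabling
  bool optimization_disabled = true;
};

// Returns true when optimization is re-enabled on this attempt.
// Re-enabling happens when the previous try count is a power of two
// and at least 4 (4, 8, 16, ...), so retries back off geometrically.
bool TryReenableOptimization(FunctionState* f) {
  int tries = f->opt_reenable_tries;
  f->opt_reenable_tries = tries + 1;
  if (tries >= 4 && (((tries - 1) & tries) == 0)) {
    f->optimization_disabled = false;
    return true;
  }
  return false;
}

int main() {
  FunctionState f;
  for (int attempt = 0; attempt < 40; ++attempt) {
    if (TryReenableOptimization(&f)) {
      printf("optimization re-enabled at attempt %d\n", attempt);
      f.optimization_disabled = true;  // pretend it deopted again
    }
  }
  return 0;
}

In the reverted change this was only attempted after the function had accumulated kProfilerTicksBeforeReenablingOptimization (250) profiler ticks. The revert drops both the opt_reenable_tries field and that profiler path, restoring the simple behavior of skipping any function with optimization_disabled() set.
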
=======================================
--- /branches/bleeding_edge/src/heap.cc Thu Apr 26 02:11:45 2012
+++ /branches/bleeding_edge/src/heap.cc Mon Apr 30 04:54:34 2012
@@ -3015,6 +3015,7 @@
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
share->set_deopt_counter(FLAG_deopt_every_n_times);
+ share->set_ic_age(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
@@ -3028,8 +3029,6 @@
share->set_compiler_hints(0);
share->set_this_property_assignments_count(0);
share->set_opt_count(0);
- share->set_ic_age(0);
- share->set_opt_reenable_tries(0);
return share;
}
=======================================
--- /branches/bleeding_edge/src/objects-inl.h Tue Apr 24 07:05:07 2012
+++ /branches/bleeding_edge/src/objects-inl.h Mon Apr 30 04:54:34 2012
@@ -3511,6 +3511,7 @@
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
kThisPropertyAssignmentsOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
@@ -3561,8 +3562,6 @@
SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
SMI_ACCESSORS(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
-SMI_ACCESSORS(SharedFunctionInfo, opt_reenable_tries, kOptReenableTriesOffset)
#else
#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
@@ -3616,11 +3615,6 @@
PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ast_node_count,
kAstNodeCountOffset)
PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, deopt_counter,
kDeoptCounterOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ic_age, kICAgeOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
- opt_reenable_tries,
- kOptReenableTriesOffset)
#endif
@@ -3660,7 +3654,6 @@
// it will not be counted as optimizable code.
if ((code()->kind() == Code::FUNCTION) && disable) {
code()->set_optimizable(false);
- code()->set_profiler_ticks(0);
}
}
@@ -3695,23 +3688,6 @@
bool SharedFunctionInfo::is_classic_mode() {
return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
}
-
-
-void SharedFunctionInfo::TryReenableOptimization() {
- int tries = opt_reenable_tries();
- if (tries == Smi::kMaxValue) {
- tries = 0;
- }
- set_opt_reenable_tries(tries + 1);
- // We reenable optimization whenever the number of tries is a large
- // enough power of 2.
- if (tries >= 4 && (((tries - 1) & tries) == 0)) {
- set_optimization_disabled(false);
- set_opt_count(0);
- code()->set_optimizable(true);
- }
-}
-
BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
kExtendedModeFunction)
=======================================
--- /branches/bleeding_edge/src/objects.cc Thu Apr 26 09:19:14 2012
+++ /branches/bleeding_edge/src/objects.cc Mon Apr 30 04:54:34 2012
@@ -7871,7 +7871,6 @@
code()->set_optimizable(true);
}
set_opt_count(0);
- set_opt_reenable_tries(0);
}
}
=======================================
--- /branches/bleeding_edge/src/objects.h Tue Apr 24 07:05:07 2012
+++ /branches/bleeding_edge/src/objects.h Mon Apr 30 04:54:34 2012
@@ -5461,13 +5461,6 @@
inline int opt_count();
inline void set_opt_count(int opt_count);
- // Number of times we tried to reenable optimization.
- inline int opt_reenable_tries();
- inline void set_opt_reenable_tries(int opt_reenable_tries);
-
- inline void TryReenableOptimization();
-
-
// Source size of this function.
int SourceSize();
@@ -5524,10 +5517,13 @@
kInferredNameOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInitialMapOffset + kPointerSize;
+ // ic_age is a Smi field. It could be grouped with another Smi field into a
+ // PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
+ static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
+ kICAgeOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
@@ -5547,10 +5543,10 @@
kThisPropertyAssignmentsCountOffset + kPointerSize;
static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
- static const int kICAgeOffset = kDeoptCounterOffset + kPointerSize;
- static const int kOptReenableTriesOffset = kICAgeOffset + kPointerSize;
+
+
// Total size.
- static const int kSize = kOptReenableTriesOffset + kPointerSize;
+ static const int kSize = kDeoptCounterOffset + kPointerSize;
#else
// The only reason to use smi fields instead of int fields
// is to allow iteration without maps decoding during
@@ -5562,7 +5558,7 @@
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
+ kICAgeOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
@@ -5589,11 +5585,8 @@
static const int kAstNodeCountOffset = kOptCountOffset + kIntSize;
static const int kDeoptCounterOffset = kAstNodeCountOffset + kIntSize;
- static const int kICAgeOffset = kDeoptCounterOffset + kIntSize;
- static const int kOptReenableTriesOffset = kICAgeOffset + kIntSize;
-
// Total size.
- static const int kSize = kOptReenableTriesOffset + kIntSize;
+ static const int kSize = kDeoptCounterOffset + kIntSize;
#endif
=======================================
--- /branches/bleeding_edge/src/runtime-profiler.cc Tue Apr 24 07:05:07 2012
+++ /branches/bleeding_edge/src/runtime-profiler.cc Mon Apr 30 04:54:34 2012
@@ -65,17 +65,11 @@
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
-// If the function optimization was disabled due to high deoptimization count,
-// but the function is hot and has been seen on the stack this number of times,
-// then we try to reenable optimization for this function.
-static const int kProfilerTicksBeforeReenablingOptimization = 250;
// If a function does not have enough type info (according to
// FLAG_type_info_threshold), but has seen a huge number of ticks,
// optimize it as it is.
static const int kTicksWhenNotEnoughTypeInfo = 100;
// We only have one byte to store the number of ticks.
-STATIC_ASSERT(kProfilerTicksBeforeOptimization < 256);
-STATIC_ASSERT(kProfilerTicksBeforeReenablingOptimization < 256);
STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to be optimized
@@ -269,8 +263,7 @@
}
}
- SharedFunctionInfo* shared = function->shared();
- Code* shared_code = shared->code();
+ Code* shared_code = function->shared()->code();
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsMarkedForLazyRecompilation()) {
@@ -279,32 +272,20 @@
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
+
+ // Do not record non-optimizable functions.
+ if (!function->IsOptimizable()) continue;
+ if (function->shared()->optimization_disabled()) continue;
// Only record top-level code on top of the execution stack and
// avoid optimizing excessively large scripts since top-level code
// will be executed only once.
const int kMaxToplevelSourceSize = 10 * 1024;
- if (shared->is_toplevel() &&
- (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
+ if (function->shared()->is_toplevel()
+ && (frame_count > 1
+ || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
continue;
}
-
- // Do not record non-optimizable functions.
- if (shared->optimization_disabled()) {
- if (shared->opt_count() >= Compiler::kDefaultMaxOptCount) {
- // If optimization was disabled due to many deoptimizations,
- // then check if the function is hot and try to reenable optimization.
- int ticks = shared_code->profiler_ticks();
- if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
- shared_code->set_profiler_ticks(0);
- shared->TryReenableOptimization();
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- }
- }
- continue;
- }
- if (!function->IsOptimizable()) continue;
if (FLAG_watch_ic_patching) {
int ticks = shared_code->profiler_ticks();
@@ -328,7 +309,7 @@
}
}
} else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
@@ -338,7 +319,7 @@
} else { // !FLAG_watch_ic_patching
samples[sample_count++] = function;
- int function_size = shared->SourceSize();
+ int function_size = function->shared()->SourceSize();
int threshold_size_factor = (function_size > kSizeLimit)
? sampler_threshold_size_factor_
: 1;