Reviewers: Jakob,
Description:
Reset opt count on optimized code map lookup failure.
[email protected]
BUG=309723
Please review this at https://codereview.chromium.org/59823002/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files (+39, -11 lines):
M src/code-stubs-hydrogen.cc
M src/compiler.cc
M src/factory.cc
M src/heap.cc
M src/hydrogen-instructions.h
M src/objects.cc
M src/runtime-profiler.cc
Index: src/code-stubs-hydrogen.cc
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index dfa5ecd8cf840a75086eb5b0d5bac86b839aceb3..2022cac6404030fdce0af08a91ffb253df1b5b1a 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -1191,6 +1191,23 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
loop_builder.Break();
}
done_check.Else();
+    {
+      // The optimization count should be tied to the context, e.g. as an
+      // entry in the optimized code map. But for simplicity's sake we just
+      // reset the opt count if the optimized code map lookup fails,
+      // because no optimized code exists for the current context.
+      HValue* opt_count_and_bailout_reason = Add<HLoadNamedField>(
+          shared_info, HObjectAccess::ForOptCountAndBailoutReason());
+      int32_t opt_count_mask =
+          ((1 << SharedFunctionInfo::OptCountBits::kSize) - 1)
+          << SharedFunctionInfo::OptCountBits::kShift;
+      HValue* mask = Add<HConstant>(~opt_count_mask);
+      HValue* result = Add<HBitwise>(
+          Token::BIT_AND, opt_count_and_bailout_reason, mask);
+      Add<HStoreNamedField>(shared_info,
+                            HObjectAccess::ForOptCountAndBailoutReason(),
+                            result);
+    }
done_check.End();
}
restore_check.End();
@@ -1241,8 +1258,6 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
                        HObjectAccess::ForSharedFunctionInfoPointer(),
                        shared_info);
  Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
-                       shared_info);
-  Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
                        context());
  // Initialize the code pointer in the function to be the one
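
Note for reviewers: the bit twiddling in the first hunk is easy to misread, so
here is a minimal standalone sketch of the intended mask semantics. The
kOptCountShift/kOptCountSize constants are illustrative stand-ins for
SharedFunctionInfo::OptCountBits; this is not the actual V8 API.

  #include <cassert>
  #include <cstdint>

  static const int kOptCountShift = 0;
  static const int kOptCountSize = 22;

  // Clear only the opt count bits of the packed word; the bailout
  // reason bits above them stay untouched.
  static int32_t ResetOptCount(int32_t opt_count_and_bailout_reason) {
    int32_t opt_count_mask =
        ((1 << kOptCountSize) - 1) << kOptCountShift;
    return opt_count_and_bailout_reason & ~opt_count_mask;
  }

  int main() {
    int32_t packed = (7 << kOptCountShift) | (3 << kOptCountSize);
    packed = ResetOptCount(packed);
    assert((packed >> kOptCountSize) == 3);  // bailout reason survives
    assert((packed & (((1 << kOptCountSize) - 1) << kOptCountShift)) == 0);
    return 0;
  }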
Index: src/compiler.cc
diff --git a/src/compiler.cc b/src/compiler.cc
index ed0a0c8e69b028729943f93024e3f3373b7a056c..e86baa02aa3faee01657ee2a7bc980dfc25210b0 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -262,8 +262,11 @@ static bool AlwaysFullCompiler(Isolate* isolate) {
void RecompileJob::RecordOptimizationStats() {
Handle<JSFunction> function = info()->closure();
- int opt_count = function->shared()->opt_count();
- function->shared()->set_opt_count(opt_count + 1);
+ if (!function->IsOptimized()) {
+ // Concurrent recompilation and OSR may race. Increment only once.
+ int opt_count = function->shared()->opt_count();
+ function->shared()->set_opt_count(opt_count + 1);
+ }
double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
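
Note for reviewers: the IsOptimized() guard exists because the concurrent
recompiler and OSR can both finish a job for the same function, and only the
first completion should bump the counter. A toy illustration of the pattern
(types are invented, not V8's):

  struct SharedSketch { int opt_count = 0; };
  struct FunctionSketch {
    SharedSketch shared;
    bool optimized = false;
    bool IsOptimized() const { return optimized; }
  };

  void RecordOptimizationStats(FunctionSketch* f) {
    if (!f->IsOptimized()) {
      // A racing second job observes IsOptimized() == true and skips the
      // increment, so opt_count rises at most once per optimization.
      f->shared.opt_count++;
    }
    f->optimized = true;  // stands in for installing the optimized code
  }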
Index: src/factory.cc
diff --git a/src/factory.cc b/src/factory.cc
index 400132e14b520f1bf5ff7992d4c59e090d9dde88..39a1795d3e950dab49f4b13feea8fb2311c0abd2 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -711,6 +711,12 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
// Caching of optimized code enabled and optimized code found.
function_info->InstallFromOptimizedCodeMap(*result, index);
return result;
+  } else {
+    // The optimization count should be tied to the context, similar to the
+    // optimized code map. But for simplicity's sake we just reset the opt
+    // count whenever the optimized code map lookup fails. This happens only
+    // when the function has never been optimized for the current context.
+    function_info->set_opt_count(0);
}
if (isolate()->use_crankshaft() &&
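
Note for reviewers: condensed, the factory-side flow is the sketch below
(toy types and names, only the opt count reset mirrors the patch). A code
map miss means the function has never been optimized for this context, so
the optimization budget starts over.

  struct SharedInfoSketch {
    int opt_count = 0;
    int cached_context_id = -1;  // toy one-entry "optimized code map"
    bool HasOptimizedCodeFor(int context_id) const {
      return context_id == cached_context_id;
    }
  };

  void OnNewClosureFromSharedInfo(SharedInfoSketch* shared, int context_id) {
    if (!shared->HasOptimizedCodeFor(context_id)) {
      // Code map miss: never optimized for this context, reset the budget.
      shared->opt_count = 0;
    }
  }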
Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index ecd959da6716ee680e1c8307dc7594dfce8c2d46..a87cce2289fa90ce73055ca901cc170232b2b6fe 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1150,8 +1150,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
isolate_->counters()->objs_since_last_full()->Set(0);
- contexts_disposed_ = 0;
-
flush_monomorphic_ics_ = false;
}
@@ -5761,6 +5759,7 @@ bool Heap::IdleNotification(int hint) {
size_factor * IncrementalMarking::kAllocatedThreshold;
if (contexts_disposed_ > 0) {
+ contexts_disposed_ = 0;
if (hint >= kMaxHint) {
// The embedder is requesting a lot of GC work after context disposal,
// we age inline caches so that they don't keep objects from
@@ -5775,8 +5774,8 @@ bool Heap::IdleNotification(int hint) {
"idle notification: contexts disposed");
} else {
AdvanceIdleIncrementalMarking(step_size);
- contexts_disposed_ = 0;
}
+
// After context disposal there is likely a lot of garbage remaining, reset
// the idle notification counters in order to trigger more incremental GCs
// on subsequent idle notifications.
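
Note for reviewers: the heap.cc change makes IdleNotification the single
consumer of contexts_disposed_; MarkCompact no longer clears it behind the
notification's back, and the counter is now reset no matter which branch
handles the disposal. Simplified shape (illustrative; only
contexts_disposed_ mirrors the real member):

  struct HeapSketch {
    int contexts_disposed_ = 0;
    static const int kMaxHint = 1000;

    void CollectAllGarbage() { /* full GC */ }
    void AdvanceIdleIncrementalMarking() { /* one marking step */ }

    void IdleNotification(int hint) {
      if (contexts_disposed_ > 0) {
        contexts_disposed_ = 0;  // consumed exactly once, in both branches
        if (hint >= kMaxHint) {
          CollectAllGarbage();
        } else {
          AdvanceIdleIncrementalMarking();
        }
      }
    }
  };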
Index: src/hydrogen-instructions.h
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 80773bf14789e8524dc4c46184588641d6396495..bcd6641049e710d85080ebfe1dfe47d36c74dd6b 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -5843,6 +5843,11 @@ class HObjectAccess V8_FINAL {
SharedFunctionInfo::kOptimizedCodeMapOffset);
}
+  static HObjectAccess ForOptCountAndBailoutReason() {
+    return HObjectAccess(kInobject,
+                         SharedFunctionInfo::kOptCountAndBailoutReasonOffset);
+  }
+
static HObjectAccess ForFunctionContextPointer() {
return HObjectAccess(kInobject, JSFunction::kContextOffset);
}
Index: src/objects.cc
diff --git a/src/objects.cc b/src/objects.cc
index 8bb603a985db87e6a8c0d78be94d081b74777f67..8c6dc357a32c29fbac5d976db93d8ee1e164ba5c 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -9580,7 +9580,7 @@ MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
if (value->IsSmi()) {
// No optimized code map.
ASSERT_EQ(0, Smi::cast(value)->value());
- // Crate 3 entries per context {context, code, literals}.
+ // Create 3 entries per context {context, code, literals}.
MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
if (!maybe->To(&new_code_map)) return maybe;
new_code_map->set(kEntriesStart + 0, native_context);
@@ -10231,9 +10231,9 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
set_optimization_disabled(false);
code()->set_optimizable(true);
}
- set_opt_count(0);
- set_deopt_count(0);
}
+ set_opt_count(0);
+ set_deopt_count(0);
}
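
Note for reviewers: after this move, ResetForNewContext clears the counters
unconditionally rather than only when optimization had been disabled, which
matches the idea that the opt/deopt budgets are per-context. Roughly (toy
struct, not SharedFunctionInfo):

  struct ResetSketch {
    bool optimization_disabled = false;
    int opt_count = 0;
    int deopt_count = 0;

    void ResetForNewContext() {
      if (optimization_disabled) {
        optimization_disabled = false;  // give the function another chance
      }
      // Now outside the if: every new context starts with a clean slate.
      opt_count = 0;
      deopt_count = 0;
    }
  };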
Index: src/runtime-profiler.cc
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 7c900b37d90a6aa87c9ce2f2cb3c124f2268bd7b..c40f7ea61f412f29dbe5d8d2d8e9b323846dcd37 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -306,7 +306,7 @@ void RuntimeProfiler::OptimizeNow() {
// Do not record non-optimizable functions.
if (shared->optimization_disabled()) {
- if (shared->deopt_count() >= FLAG_max_opt_count) {
+ if (shared->opt_count() >= FLAG_max_opt_count) {
// If optimization was disabled due to many deoptimizations,
// then check if the function is hot and try to reenable optimization.
int ticks = shared_code->profiler_ticks();
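
Note for reviewers: with the s/deopt_count/opt_count/ fix, the reenable
heuristic keys off the same counter that the rest of this CL resets per
context. In spirit (thresholds invented; kMaxOptCount stands in for
FLAG_max_opt_count):

  static const int kMaxOptCount = 10;        // stand-in for FLAG_max_opt_count
  static const int kProfilerTicksLimit = 6;  // invented hotness threshold

  bool ShouldTryReenableOptimization(bool optimization_disabled,
                                     int opt_count, int profiler_ticks) {
    // Only functions that were disabled after exhausting their opt budget
    // and are still hot get another chance; the per-context opt count
    // reset means the budget refills when a new context shows up.
    return optimization_disabled &&
           opt_count >= kMaxOptCount &&
           profiler_ticks >= kProfilerTicksLimit;
  }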