Revision: 17866
Author: [email protected]
Date: Tue Nov 19 11:52:47 2013 UTC
Log: Make number of available threads isolate-dependent and expose it
to ResourceConstraints.
[email protected]
BUG=v8:2991
LOG=Y
Review URL: https://codereview.chromium.org/68203029
http://code.google.com/p/v8/source/detail?r=17866
Modified:
/branches/bleeding_edge/include/v8.h
/branches/bleeding_edge/src/api.cc
/branches/bleeding_edge/src/compiler.h
/branches/bleeding_edge/src/execution.cc
/branches/bleeding_edge/src/heap-profiler.cc
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/heap.h
/branches/bleeding_edge/src/hydrogen.cc
/branches/bleeding_edge/src/isolate.cc
/branches/bleeding_edge/src/isolate.h
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/objects.cc
/branches/bleeding_edge/src/optimizing-compiler-thread.cc
/branches/bleeding_edge/src/optimizing-compiler-thread.h
/branches/bleeding_edge/src/runtime-profiler.cc
/branches/bleeding_edge/src/runtime.cc
/branches/bleeding_edge/src/sweeper-thread.cc
/branches/bleeding_edge/src/sweeper-thread.h
/branches/bleeding_edge/src/v8.cc
/branches/bleeding_edge/test/cctest/test-deoptimization.cc
/branches/bleeding_edge/test/cctest/test-heap.cc
=======================================
--- /branches/bleeding_edge/include/v8.h Thu Nov 14 11:37:32 2013 UTC
+++ /branches/bleeding_edge/include/v8.h Tue Nov 19 11:52:47 2013 UTC
@@ -3817,17 +3817,23 @@
void set_max_young_space_size(int value) { max_young_space_size_ =
value; }
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int value) { max_old_space_size_ = value; }
- int max_executable_size() { return max_executable_size_; }
+ int max_executable_size() const { return max_executable_size_; }
void set_max_executable_size(int value) { max_executable_size_ = value; }
uint32_t* stack_limit() const { return stack_limit_; }
// Sets an address beyond which the VM's stack may not grow.
void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+ int max_available_threads() const { return max_available_threads_; }
+ // Set the number of threads available to V8, assuming at least 1.
+ void set_max_available_threads(int value) {
+ max_available_threads_ = value;
+ }
private:
int max_young_space_size_;
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
+ int max_available_threads_;
};
=======================================
--- /branches/bleeding_edge/src/api.cc Thu Nov 14 11:52:24 2013 UTC
+++ /branches/bleeding_edge/src/api.cc Tue Nov 19 11:52:47 2013 UTC
@@ -563,7 +563,8 @@
: max_young_space_size_(0),
max_old_space_size_(0),
max_executable_size_(0),
- stack_limit_(NULL) { }
+ stack_limit_(NULL),
+ max_available_threads_(0) { }
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory) {
@@ -599,6 +600,8 @@
set_max_old_space_size(700 * lump_of_memory);
set_max_executable_size(256 * lump_of_memory);
}
+
+ set_max_available_threads(0);
}
@@ -627,6 +630,8 @@
uintptr_t limit =
reinterpret_cast<uintptr_t>(constraints->stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
}
+
+ isolate->set_max_available_threads(constraints->max_available_threads());
return true;
}
=======================================
--- /branches/bleeding_edge/src/compiler.h Fri Nov 15 10:52:05 2013 UTC
+++ /branches/bleeding_edge/src/compiler.h Tue Nov 19 11:52:47 2013 UTC
@@ -302,12 +302,12 @@
}
void AbortDueToDependencyChange() {
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
abort_due_to_dependency_ = true;
}
bool HasAbortedDueToDependencyChange() {
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
return abort_due_to_dependency_;
}
=======================================
--- /branches/bleeding_edge/src/execution.cc Mon Sep 16 16:09:07 2013 UTC
+++ /branches/bleeding_edge/src/execution.cc Tue Nov 19 11:52:47 2013 UTC
@@ -951,7 +951,7 @@
Deoptimizer::DeoptimizeAll(isolate);
}
if (stack_guard->IsInstallCodeRequest()) {
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(isolate->concurrent_recompilation_enabled());
stack_guard->Continue(INSTALL_CODE);
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
=======================================
--- /branches/bleeding_edge/src/heap-profiler.cc Thu Nov 14 15:14:37 2013
UTC
+++ /branches/bleeding_edge/src/heap-profiler.cc Tue Nov 19 11:52:47 2013
UTC
@@ -177,7 +177,7 @@
Isolate* isolate = heap()->isolate();
HandleScope scope(isolate);
- if (FLAG_concurrent_recompilation) {
+ if (isolate->concurrent_recompilation_enabled()) {
isolate->optimizing_compiler_thread()->Flush();
}
=======================================
--- /branches/bleeding_edge/src/heap.cc Tue Nov 19 10:17:33 2013 UTC
+++ /branches/bleeding_edge/src/heap.cc Tue Nov 19 11:52:47 2013 UTC
@@ -465,7 +465,7 @@
store_buffer()->GCPrologue();
- if (FLAG_concurrent_osr) {
+ if (isolate()->concurrent_osr_enabled()) {
isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
}
}
@@ -662,7 +662,7 @@
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
- if (FLAG_concurrent_recompilation) {
+ if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
isolate()->optimizing_compiler_thread()->Flush();
@@ -763,7 +763,7 @@
int Heap::NotifyContextDisposed() {
- if (FLAG_concurrent_recompilation) {
+ if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
@@ -6581,6 +6581,14 @@
+ property_cell_space_->SizeOfObjects()
+ lo_space_->SizeOfObjects();
}
+
+
+bool Heap::AdvanceSweepers(int step_size) {
+ ASSERT(isolate()->num_sweeper_threads() == 0);
+ bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+ sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+ return sweeping_complete;
+}
intptr_t Heap::PromotedExternalMemorySize() {
@@ -6729,9 +6737,6 @@
store_buffer()->SetUp();
if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
-#ifdef DEBUG
- relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif // DEBUG
return true;
}
@@ -6876,6 +6881,7 @@
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
+ relocation_mutex_ = NULL;
}
@@ -7954,16 +7960,5 @@
OS::MemCopy(object_sizes_last_time_, object_sizes_,
sizeof(object_sizes_));
ClearObjectStats();
}
-
-
-Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_concurrent_recompilation) {
- heap_->relocation_mutex_->Lock();
-#ifdef DEBUG
-    heap_->relocation_mutex_locked_by_optimizer_thread_ =
-        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
-#endif // DEBUG
- }
-}
} } // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/heap.h Fri Nov 15 13:31:13 2013 UTC
+++ /branches/bleeding_edge/src/heap.h Tue Nov 19 11:52:47 2013 UTC
@@ -1699,12 +1699,7 @@
old_pointer_space()->IsLazySweepingComplete();
}
- bool AdvanceSweepers(int step_size) {
- ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
- bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
- sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
- return sweeping_complete;
- }
+ bool AdvanceSweepers(int step_size);
bool EnsureSweepersProgressed(int step_size) {
bool sweeping_complete =
old_data_space()->EnsureSweeperProgress(step_size);
@@ -1833,22 +1828,18 @@
// only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
- explicit RelocationLock(Heap* heap);
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_concurrent_recompilation) {
+ heap_->relocation_mutex_->Lock();
+ }
+ }
+
~RelocationLock() {
if (FLAG_concurrent_recompilation) {
-#ifdef DEBUG
- heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif // DEBUG
heap_->relocation_mutex_->Unlock();
}
}
-
-#ifdef DEBUG
- static bool IsLockedByOptimizerThread(Heap* heap) {
- return heap->relocation_mutex_locked_by_optimizer_thread_;
- }
-#endif // DEBUG
private:
Heap* heap_;
=======================================
--- /branches/bleeding_edge/src/hydrogen.cc Tue Nov 19 11:41:04 2013 UTC
+++ /branches/bleeding_edge/src/hydrogen.cc Tue Nov 19 11:52:47 2013 UTC
@@ -2906,7 +2906,7 @@
void HGraph::FinalizeUniqueness() {
DisallowHeapAllocation no_gc;
- ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstructionIterator it(blocks()->at(i)); !it.Done();
it.Advance()) {
it.Current()->FinalizeUniqueness();
@@ -10331,7 +10331,7 @@
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
- ASSERT(!FLAG_concurrent_recompilation);
+ ASSERT(!chunk->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, chunk->graph(), chunk);
@@ -10339,7 +10339,7 @@
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
- ASSERT(!FLAG_concurrent_recompilation);
+ ASSERT(!graph->isolate()->concurrent_recompilation_enabled());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
Trace(name, graph, NULL);
=======================================
--- /branches/bleeding_edge/src/isolate.cc Fri Nov 15 10:52:05 2013 UTC
+++ /branches/bleeding_edge/src/isolate.cc Tue Nov 19 11:52:47 2013 UTC
@@ -129,22 +129,6 @@
v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
}
-
-
-int SystemThreadManager::NumberOfParallelSystemThreads(
- ParallelSystemComponent type) {
-  int number_of_threads = Min(CPU::NumberOfProcessorsOnline(), kMaxThreads);
- ASSERT(number_of_threads > 0);
- if (number_of_threads == 1) {
- return 0;
- }
- if (type == PARALLEL_SWEEPING) {
- return number_of_threads;
- } else if (type == CONCURRENT_SWEEPING) {
- return number_of_threads - 1;
- }
- return 1;
-}
// Create a dummy thread that will wait forever on a semaphore. The only
@@ -1790,6 +1774,8 @@
deferred_handles_head_(NULL),
optimizing_compiler_thread_(NULL),
sweeper_thread_(NULL),
+ num_sweeper_threads_(0),
+ max_available_threads_(0),
stress_deopt_count_(0) {
id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1);
TRACE_ISOLATE(constructor);
@@ -1882,18 +1868,20 @@
debugger()->UnloadDebugger();
#endif
- if (FLAG_concurrent_recompilation) {
+ if (concurrent_recompilation_enabled()) {
optimizing_compiler_thread_->Stop();
delete optimizing_compiler_thread_;
+ optimizing_compiler_thread_ = NULL;
}
- if (FLAG_sweeper_threads > 0) {
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i]->Stop();
- delete sweeper_thread_[i];
- }
- delete[] sweeper_thread_;
+ for (int i = 0; i < num_sweeper_threads_; i++) {
+ sweeper_thread_[i]->Stop();
+ delete sweeper_thread_[i];
+ sweeper_thread_[i] = NULL;
}
+ delete[] sweeper_thread_;
+ sweeper_thread_ = NULL;
+
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
@@ -2217,11 +2205,6 @@
deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
- if (FLAG_concurrent_recompilation) {
- optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
- optimizing_compiler_thread_->Start();
- }
-
const bool create_heap_objects = (des == NULL);
if (create_heap_objects && !heap_.CreateHeapObjects()) {
V8::FatalProcessOutOfMemory("heap object creation");
@@ -2239,6 +2222,31 @@
builtins_.SetUp(this, create_heap_objects);
if (create_heap_objects) heap_.CreateStubsRequiringBuiltins();
+
+ // Set default value if not yet set.
+ // TODO(yangguo): move this to ResourceConstraints::ConfigureDefaults
+  // once ResourceConstraints becomes an argument to the Isolate constructor.
+ if (max_available_threads_ < 1) {
+ // Choose the default between 1 and 4.
+    max_available_threads_ = Max(Min(CPU::NumberOfProcessorsOnline(), 4), 1);
+ }
+
+  num_sweeper_threads_ =
+      SweeperThread::NumberOfThreads(max_available_threads_);
+
+ if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
+ PrintF("Concurrent recompilation has been disabled for tracing.\n");
+ } else if (OptimizingCompilerThread::Enabled(max_available_threads_)) {
+ optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
+ optimizing_compiler_thread_->Start();
+ }
+
+ if (num_sweeper_threads_ > 0) {
+ sweeper_thread_ = new SweeperThread*[num_sweeper_threads_];
+ for (int i = 0; i < num_sweeper_threads_; i++) {
+ sweeper_thread_[i] = new SweeperThread(this);
+ sweeper_thread_[i]->Start();
+ }
+ }
// Only preallocate on the first initialization.
if (FLAG_preallocate_message_memory && preallocated_message_space_ ==
NULL) {
@@ -2333,14 +2341,6 @@
NumberToStringStub::InstallDescriptors(this);
NewStringAddStub::InstallDescriptors(this);
}
-
- if (FLAG_sweeper_threads > 0) {
- sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
- sweeper_thread_[i] = new SweeperThread(this);
- sweeper_thread_[i]->Start();
- }
- }
initialized_from_snapshot_ = (des != NULL);
=======================================
--- /branches/bleeding_edge/src/isolate.h Thu Nov 7 16:35:27 2013 UTC
+++ /branches/bleeding_edge/src/isolate.h Tue Nov 19 11:52:47 2013 UTC
@@ -301,20 +301,6 @@
};
-class SystemThreadManager {
- public:
- enum ParallelSystemComponent {
- PARALLEL_SWEEPING,
- CONCURRENT_SWEEPING,
- CONCURRENT_RECOMPILATION
- };
-
- static int NumberOfParallelSystemThreads(ParallelSystemComponent type);
-
- static const int kMaxThreads = 4;
-};
-
-
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
@@ -1111,18 +1097,40 @@
bool IsDeferredHandle(Object** location);
#endif // DEBUG
+ void set_max_available_threads(int value) {
+ max_available_threads_ = value;
+ }
+
+ bool concurrent_recompilation_enabled() {
+ // Thread is only available with flag enabled.
+ ASSERT(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL;
+ }
+
+ bool concurrent_osr_enabled() {
+ // Thread is only available with flag enabled.
+ ASSERT(optimizing_compiler_thread_ == NULL ||
+ FLAG_concurrent_recompilation);
+ return optimizing_compiler_thread_ != NULL && FLAG_concurrent_osr;
+ }
+
OptimizingCompilerThread* optimizing_compiler_thread() {
return optimizing_compiler_thread_;
}
- // PreInits and returns a default isolate. Needed when a new thread tries
-  // to create a Locker for the first time (the lock itself is in the isolate).
- // TODO(svenpanne) This method is on death row...
- static v8::Isolate* GetDefaultIsolateForLocking();
+  int num_sweeper_threads() {
+    return num_sweeper_threads_;
+  }
SweeperThread** sweeper_threads() {
return sweeper_thread_;
}
+
+ // PreInits and returns a default isolate. Needed when a new thread tries
+  // to create a Locker for the first time (the lock itself is in the isolate).
+ // TODO(svenpanne) This method is on death row...
+ static v8::Isolate* GetDefaultIsolateForLocking();
int id() const { return static_cast<int>(id_); }
@@ -1373,6 +1381,11 @@
DeferredHandles* deferred_handles_head_;
OptimizingCompilerThread* optimizing_compiler_thread_;
SweeperThread** sweeper_thread_;
+ int num_sweeper_threads_;
+
+ // TODO(yangguo): This will become obsolete once ResourceConstraints
+ // becomes an argument to Isolate constructor.
+ int max_available_threads_;
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Tue Nov 19 10:17:33 2013 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Tue Nov 19 11:52:47 2013 UTC
@@ -564,7 +564,7 @@
void MarkCompactCollector::StartSweeperThreads() {
sweeping_pending_ = true;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
}
@@ -572,7 +572,7 @@
void MarkCompactCollector::WaitUntilSweepingCompleted() {
ASSERT(sweeping_pending_ == true);
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
sweeping_pending_ = false;
@@ -586,7 +586,7 @@
intptr_t MarkCompactCollector::
StealMemoryFromSweeperThreads(PagedSpace* space) {
intptr_t freed_bytes = 0;
- for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
}
space->AddToAccountingStats(freed_bytes);
@@ -4112,8 +4112,10 @@
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
- if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
- if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ if (isolate()->num_sweeper_threads() > 0) {
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ }
if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
=======================================
--- /branches/bleeding_edge/src/objects.cc Mon Nov 18 17:18:14 2013 UTC
+++ /branches/bleeding_edge/src/objects.cc Tue Nov 19 11:52:47 2013 UTC
@@ -9498,7 +9498,7 @@
ASSERT(!IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
ASSERT(!shared()->is_generator());
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Marking ");
PrintName();
@@ -9516,7 +9516,7 @@
ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(GetIsolate()->concurrent_recompilation_enabled());
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queueing ");
PrintName();
=======================================
--- /branches/bleeding_edge/src/optimizing-compiler-thread.cc Thu Nov 7
16:25:20 2013 UTC
+++ /branches/bleeding_edge/src/optimizing-compiler-thread.cc Tue Nov 19
11:52:47 2013 UTC
@@ -370,8 +370,13 @@
#ifdef DEBUG
+bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
+ return isolate->concurrent_recompilation_enabled() &&
+ isolate->optimizing_compiler_thread()->IsOptimizerThread();
+}
+
+
bool OptimizingCompilerThread::IsOptimizerThread() {
- if (!FLAG_concurrent_recompilation) return false;
LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
=======================================
--- /branches/bleeding_edge/src/optimizing-compiler-thread.h Wed Oct 16
16:27:17 2013 UTC
+++ /branches/bleeding_edge/src/optimizing-compiler-thread.h Tue Nov 19
11:52:47 2013 UTC
@@ -95,8 +95,13 @@
// should make sure that we do not hold onto stale jobs indefinitely.
AddToOsrBuffer(NULL);
}
+
+ static bool Enabled(int max_available) {
+ return (FLAG_concurrent_recompilation && max_available > 1);
+ }
#ifdef DEBUG
+ static bool IsOptimizerThread(Isolate* isolate);
bool IsOptimizerThread();
#endif
=======================================
--- /branches/bleeding_edge/src/runtime-profiler.cc Wed Sep 25 08:26:11
2013 UTC
+++ /branches/bleeding_edge/src/runtime-profiler.cc Tue Nov 19 11:52:47
2013 UTC
@@ -139,8 +139,9 @@
}
- if (FLAG_concurrent_recompilation
&& !isolate_->bootstrapper()->IsActive()) {
- if (FLAG_concurrent_osr &&
+ if (isolate_->concurrent_recompilation_enabled() &&
+ !isolate_->bootstrapper()->IsActive()) {
+ if (isolate_->concurrent_osr_enabled() &&
isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
// Do not attempt regular recompilation if we already queued this
for OSR.
// TODO(yangguo): This is necessary so that we don't install
optimized
=======================================
--- /branches/bleeding_edge/src/runtime.cc Mon Nov 18 15:16:22 2013 UTC
+++ /branches/bleeding_edge/src/runtime.cc Tue Nov 19 11:52:47 2013 UTC
@@ -8424,7 +8424,7 @@
return isolate->heap()->undefined_value();
}
function->shared()->code()->set_profiler_ticks(0);
- ASSERT(FLAG_concurrent_recompilation);
+ ASSERT(isolate->concurrent_recompilation_enabled());
if (!Compiler::RecompileConcurrent(function)) {
function->ReplaceCode(function->shared()->code());
}
@@ -8561,7 +8561,7 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConcurrentRecompilationSupported)
{
HandleScope scope(isolate);
- return FLAG_concurrent_recompilation
+ return isolate->concurrent_recompilation_enabled()
? isolate->heap()->true_value() : isolate->heap()->false_value();
}
@@ -8619,7 +8619,8 @@
}
}
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- if (FLAG_concurrent_recompilation && sync_with_compiler_thread) {
+ if (isolate->concurrent_recompilation_enabled() &&
+ sync_with_compiler_thread) {
while (function->IsInRecompileQueue()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
OS::Sleep(50);
@@ -8697,7 +8698,7 @@
Handle<Code> result = Handle<Code>::null();
BailoutId ast_id = BailoutId::None();
- if (FLAG_concurrent_osr) {
+ if (isolate->concurrent_osr_enabled()) {
if (isolate->optimizing_compiler_thread()->
IsQueuedForOSR(function, pc_offset)) {
// Still waiting for the optimizing compiler thread to finish.
Carry on.
=======================================
--- /branches/bleeding_edge/src/sweeper-thread.cc Mon Sep 2 12:26:06 2013
UTC
+++ /branches/bleeding_edge/src/sweeper-thread.cc Tue Nov 19 11:52:47 2013
UTC
@@ -105,4 +105,14 @@
void SweeperThread::WaitForSweeperThread() {
end_sweeping_semaphore_.Wait();
}
+
+
+int SweeperThread::NumberOfThreads(int max_available) {
+ if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
+ if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;
+ if (FLAG_concurrent_sweeping) return max_available - 1;
+ ASSERT(FLAG_parallel_sweeping);
+ return max_available;
+}
+
} } // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/sweeper-thread.h Mon Sep 2 12:26:06 2013
UTC
+++ /branches/bleeding_edge/src/sweeper-thread.h Tue Nov 19 11:52:47 2013
UTC
@@ -51,6 +51,8 @@
void WaitForSweeperThread();
intptr_t StealMemory(PagedSpace* space);
+ static int NumberOfThreads(int max_available);
+
private:
Isolate* isolate_;
Heap* heap_;
=======================================
--- /branches/bleeding_edge/src/v8.cc Fri Nov 8 10:55:01 2013 UTC
+++ /branches/bleeding_edge/src/v8.cc Tue Nov 19 11:52:47 2013 UTC
@@ -178,38 +178,6 @@
FLAG_gc_global = true;
FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
-
- if (FLAG_concurrent_recompilation &&
- (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs)) {
- FLAG_concurrent_recompilation = false;
- FLAG_concurrent_osr = false;
- PrintF("Concurrent recompilation has been disabled for tracing.\n");
- }
-
- if (FLAG_sweeper_threads <= 0) {
- if (FLAG_concurrent_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::CONCURRENT_SWEEPING);
- } else if (FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = SystemThreadManager::
- NumberOfParallelSystemThreads(
- SystemThreadManager::PARALLEL_SWEEPING);
- }
- if (FLAG_sweeper_threads == 0) {
- FLAG_concurrent_sweeping = false;
- FLAG_parallel_sweeping = false;
- }
- } else if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) {
- FLAG_sweeper_threads = 0;
- }
-
- if (FLAG_concurrent_recompilation &&
- SystemThreadManager::NumberOfParallelSystemThreads(
- SystemThreadManager::CONCURRENT_RECOMPILATION) == 0) {
- FLAG_concurrent_recompilation = false;
- FLAG_concurrent_osr = false;
- }
Sampler::SetUp();
CPU::SetUp();
=======================================
--- /branches/bleeding_edge/test/cctest/test-deoptimization.cc Thu Sep 19
09:46:15 2013 UTC
+++ /branches/bleeding_edge/test/cctest/test-deoptimization.cc Tue Nov 19
11:52:47 2013 UTC
@@ -77,27 +77,23 @@
// Utility class to set --allow-natives-syntax and --nouse-inlining when
// constructed and return to their default state when destroyed.
-class AllowNativesSyntaxNoInliningNoConcurrent {
+class AllowNativesSyntaxNoInlining {
public:
- AllowNativesSyntaxNoInliningNoConcurrent()
+ AllowNativesSyntaxNoInlining()
: allow_natives_syntax_(i::FLAG_allow_natives_syntax),
- use_inlining_(i::FLAG_use_inlining),
- concurrent_recompilation_(i::FLAG_concurrent_recompilation) {
+ use_inlining_(i::FLAG_use_inlining) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_use_inlining = false;
- i::FLAG_concurrent_recompilation = false;
}
- ~AllowNativesSyntaxNoInliningNoConcurrent() {
+ ~AllowNativesSyntaxNoInlining() {
i::FLAG_allow_natives_syntax = allow_natives_syntax_;
i::FLAG_use_inlining = use_inlining_;
- i::FLAG_concurrent_recompilation = concurrent_recompilation_;
}
private:
bool allow_natives_syntax_;
bool use_inlining_;
- bool concurrent_recompilation_;
};
@@ -341,13 +337,14 @@
TEST(DeoptimizeBinaryOperationADDString) {
+ i::FLAG_concurrent_recompilation = false;
+ AllowNativesSyntaxNoInlining options;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* f_source = "function f(x, y) { return x + y; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
// Compile function f and collect to type feedback to insert binary op
stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -405,7 +402,7 @@
binary_op);
char* f_source = f_source_buffer.start();
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile function f and collect to type feedback to insert binary op
stub
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -431,6 +428,7 @@
TEST(DeoptimizeBinaryOperationADD) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -443,6 +441,7 @@
TEST(DeoptimizeBinaryOperationSUB) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -455,6 +454,7 @@
TEST(DeoptimizeBinaryOperationMUL) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -467,6 +467,7 @@
TEST(DeoptimizeBinaryOperationDIV) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -479,6 +480,7 @@
TEST(DeoptimizeBinaryOperationMOD) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -491,13 +493,14 @@
TEST(DeoptimizeCompare) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* f_source = "function f(x, y) { return x < y; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile function f and collect to type feedback to insert compare ic
// call in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -534,6 +537,7 @@
TEST(DeoptimizeLoadICStoreIC) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -544,7 +548,7 @@
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile functions and collect to type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
@@ -614,6 +618,7 @@
TEST(DeoptimizeLoadICStoreICNested) {
+ i::FLAG_concurrent_recompilation = false;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -624,7 +629,7 @@
const char* g2_source = "function g2(x, y) { x[y] = 1; };";
{
- AllowNativesSyntaxNoInliningNoConcurrent options;
+ AllowNativesSyntaxNoInlining options;
// Compile functions and collect to type feedback to insert ic
// calls in the optimized code.
i::FLAG_prepare_always_opt = true;
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Thu Nov 14 17:30:48
2013 UTC
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Tue Nov 19 11:52:47
2013 UTC
@@ -2963,8 +2963,6 @@
// after the first time the accessor is fired. We use external string
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
- FLAG_use_ic = false; // ICs retain objects.
- FLAG_concurrent_recompilation = false;
v8::HandleScope scope(CcTest::isolate());
SourceResource* resource = new SourceResource(i::StrDup(source));
{
@@ -2987,6 +2985,8 @@
TEST(ReleaseStackTraceData) {
+ FLAG_use_ic = false; // ICs retain objects.
+ FLAG_concurrent_recompilation = false;
CcTest::InitializeVM();
static const char* source1 = "var error = null; "
/* Normal Error */ "try { "
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.