Revision: 16095
Author:   [email protected]
Date:     Wed Aug  7 02:33:09 2013
Log: Re-reland "Flush parallel recompilation queues on context dispose notification"

BUG=
[email protected], [email protected]

Review URL: https://codereview.chromium.org/22379002
http://code.google.com/p/v8/source/detail?r=16095

Added:
 /branches/bleeding_edge/test/mjsunit/regress/regress-prepare-break-while-recompile.js
Modified:
 /branches/bleeding_edge/src/api.cc
 /branches/bleeding_edge/src/compiler.cc
 /branches/bleeding_edge/src/debug.cc
 /branches/bleeding_edge/src/factory.cc
 /branches/bleeding_edge/src/heap.cc
 /branches/bleeding_edge/src/heap.h
 /branches/bleeding_edge/src/isolate.cc
 /branches/bleeding_edge/src/isolate.h
 /branches/bleeding_edge/src/liveedit.cc
 /branches/bleeding_edge/src/objects.cc
 /branches/bleeding_edge/src/optimizing-compiler-thread.cc
 /branches/bleeding_edge/src/optimizing-compiler-thread.h
 /branches/bleeding_edge/src/runtime.cc
 /branches/bleeding_edge/test/cctest/test-deoptimization.cc
 /branches/bleeding_edge/test/cctest/test-heap.cc

=======================================
--- /dev/null
+++ /branches/bleeding_edge/test/mjsunit/regress/regress-prepare-break-while-recompile.js Wed Aug 7 02:33:09 2013
@@ -0,0 +1,62 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --parallel-recompilation-delay=300
+
+if (!%IsParallelRecompilationSupported()) {
+  print("Parallel recompilation is disabled. Skipping this test.");
+  quit();
+}
+
+Debug = debug.Debug
+
+function foo() {
+  var x = 1;
+  return x;
+}
+
+function bar() {
+  var x = 2;
+  return x;
+}
+
+foo();
+// Mark and trigger parallel optimization.
+%OptimizeFunctionOnNextCall(foo, "parallel");
+foo();
+
+// Set break points on an unrelated function. This clears both optimized
+// and (shared) unoptimized code on foo, and sets both to lazy-compile builtin.
+// Clear the break point immediately after to deactivate the debugger.
+Debug.setBreakPoint(bar, 0, 0);
+Debug.clearAllBreakPoints();
+
+// Install optimized code when parallel optimization finishes.
+// This needs to be able to deal with shared code being a builtin.
+assertUnoptimized(foo, "sync");
+
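
The scenario this test pins down: foo is sitting in the recompilation queue (the 300ms --parallel-recompilation-delay keeps it there) when Debug.setBreakPoint resets every function, foo included, to the lazy-compile builtin. When the background compile finishes, installing the result must cope with the shared code now being a builtin. A minimal sketch of that check, with made-up stand-in types (none of this is V8's actual install logic):

  #include <cstdio>
  #include <string>

  // Stand-ins for v8::internal types; names and logic are illustrative only.
  struct Code { std::string kind; };

  struct SharedFunctionInfo { Code* code; };   // (shared) unoptimized code

  struct JSFunction {
    SharedFunctionInfo* shared;
    Code* code;                                // currently installed code
  };

  Code lazy_compile{"lazy-compile builtin"};
  Code full_codegen{"unoptimized code"};
  Code optimized{"optimized code"};

  // Roughly what preparing for break points does to every function.
  void ResetForDebugging(JSFunction* f) {
    f->code = &lazy_compile;
    f->shared->code = &lazy_compile;
  }

  // Installing a finished background compile has to tolerate the shared
  // code having become a builtin in the meantime.
  void InstallOptimized(JSFunction* f, Code* result) {
    if (f->shared->code == &lazy_compile) {
      std::puts("shared code is a builtin; not installing optimized code");
      return;                                  // foo stays unoptimized
    }
    f->code = result;
  }

  int main() {
    SharedFunctionInfo shared{&full_codegen};
    JSFunction foo{&shared, &full_codegen};
    ResetForDebugging(&foo);      // break point set while compile in flight
    InstallOptimized(&foo, &optimized);
    std::printf("foo runs: %s\n", foo.code->kind.c_str());
  }
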
=======================================
--- /branches/bleeding_edge/src/api.cc  Wed Aug  7 02:11:39 2013
+++ /branches/bleeding_edge/src/api.cc  Wed Aug  7 02:33:09 2013
@@ -781,7 +781,6 @@
   i::Context* last_context =
       isolate->handle_scope_implementer()->RestoreContext();
   isolate->set_context(last_context);
-  isolate->set_context_exit_happened(true);
 }


=======================================
--- /branches/bleeding_edge/src/compiler.cc     Fri Aug  2 02:53:11 2013
+++ /branches/bleeding_edge/src/compiler.cc     Wed Aug  7 02:33:09 2013
@@ -972,7 +972,9 @@

   if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
     if (FLAG_trace_parallel_recompilation) {
-      PrintF("  ** Compilation queue, will retry opting on next run.\n");
+      PrintF("  ** Compilation queue full, will retry optimizing ");
+      closure->PrintName();
+      PrintF(" on next run.\n");
     }
     return;
   }
=======================================
--- /branches/bleeding_edge/src/debug.cc        Tue Aug  6 06:34:51 2013
+++ /branches/bleeding_edge/src/debug.cc        Wed Aug  7 02:33:09 2013
@@ -2044,6 +2044,10 @@
   // If preparing for the first break point make sure to deoptimize all
   // functions as debugging does not work with optimized code.
   if (!has_break_points_) {
+    if (FLAG_parallel_recompilation) {
+      isolate_->optimizing_compiler_thread()->Flush();
+    }
+
     Deoptimizer::DeoptimizeAll(isolate_);

     Handle<Code> lazy_compile =
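
The ordering in the hunk above matters: the queues are flushed before DeoptimizeAll, so no in-flight job can finish afterwards and hand optimized code to functions the debugger has just reset to lazy-compile. A toy single-threaded sketch of the constraint, with made-up names:

  #include <cstdio>
  #include <queue>

  struct Job { const char* function_name; };

  struct RecompileQueue {
    std::queue<Job> jobs;
    void Flush() { while (!jobs.empty()) jobs.pop(); }
  };

  void DeoptimizeAll() { std::puts("deoptimized all functions"); }

  // Flush first, then deoptimize: a job surviving the flush could finish
  // later and install optimized code while break points are active.
  void PrepareForBreakPoints(RecompileQueue* queue, bool parallel) {
    if (parallel) queue->Flush();
    DeoptimizeAll();
  }

  int main() {
    RecompileQueue q;
    q.jobs.push({"foo"});
    PrepareForBreakPoints(&q, /*parallel=*/true);
    std::printf("jobs left: %zu\n", q.jobs.size());
  }
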
=======================================
--- /branches/bleeding_edge/src/factory.cc      Tue Aug  6 12:14:51 2013
+++ /branches/bleeding_edge/src/factory.cc      Wed Aug  7 02:33:09 2013
@@ -1215,6 +1215,7 @@
   shared->set_num_literals(literals_array_size);
   if (is_generator) {
     shared->set_instance_class_name(isolate()->heap()->Generator_string());
+    shared->DisableOptimization(kGenerator);
   }
   return shared;
 }
=======================================
--- /branches/bleeding_edge/src/heap.cc Mon Aug  5 05:52:53 2013
+++ /branches/bleeding_edge/src/heap.cc Wed Aug  7 02:33:09 2013
@@ -701,6 +701,16 @@

   return next_gc_likely_to_collect_more;
 }
+
+
+int Heap::NotifyContextDisposed() {
+  if (FLAG_parallel_recompilation) {
+    // Flush the queued recompilation tasks.
+    isolate()->optimizing_compiler_thread()->Flush();
+  }
+  flush_monomorphic_ics_ = true;
+  return ++contexts_disposed_;
+}


 void Heap::PerformScavenge() {
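
For context: Heap::NotifyContextDisposed is reached through V8's public embedder API, and the method moved out of the header because it now does more than bump a counter. A sketch of the embedder side, against the v8.h of this era (the call exists, but this is not a buildable sample against modern V8):

  #include <v8.h>

  // Called by the embedder (e.g. Chromium) when a page's context goes away.
  void OnFrameTeardown() {
    // Ends up in Heap::NotifyContextDisposed: bumps contexts_disposed_,
    // sets flush_monomorphic_ics_, and with this patch also flushes the
    // parallel recompilation queues when --parallel-recompilation is on.
    v8::V8::ContextDisposedNotification();
  }
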
=======================================
--- /branches/bleeding_edge/src/heap.h  Mon Aug  5 02:46:23 2013
+++ /branches/bleeding_edge/src/heap.h  Wed Aug  7 02:33:09 2013
@@ -1254,10 +1254,7 @@
   void EnsureHeapIsIterable();

   // Notify the heap that a context has been disposed.
-  int NotifyContextDisposed() {
-    flush_monomorphic_ics_ = true;
-    return ++contexts_disposed_;
-  }
+  int NotifyContextDisposed();

   // Utility to invoke the scavenger. This is needed in test code to
   // ensure correct callback for weak global handles.
=======================================
--- /branches/bleeding_edge/src/isolate.cc      Mon Aug  5 02:46:23 2013
+++ /branches/bleeding_edge/src/isolate.cc      Wed Aug  7 02:33:09 2013
@@ -1783,7 +1783,6 @@
       regexp_stack_(NULL),
       date_cache_(NULL),
       code_stub_interface_descriptors_(NULL),
-      context_exit_happened_(false),
       initialized_from_snapshot_(false),
       cpu_profiler_(NULL),
       heap_profiler_(NULL),
=======================================
--- /branches/bleeding_edge/src/isolate.h       Mon Aug  5 02:46:23 2013
+++ /branches/bleeding_edge/src/isolate.h       Wed Aug  7 02:33:09 2013
@@ -1061,13 +1061,6 @@
   void SetTopLookupResult(LookupResult* top) {
     thread_local_top_.top_lookup_result_ = top;
   }
-
-  bool context_exit_happened() {
-    return context_exit_happened_;
-  }
-  void set_context_exit_happened(bool context_exit_happened) {
-    context_exit_happened_ = context_exit_happened;
-  }

   bool initialized_from_snapshot() { return initialized_from_snapshot_; }

@@ -1317,10 +1310,6 @@
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
   CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;

-  // The garbage collector should be a little more aggressive when it knows
-  // that a context was recently exited.
-  bool context_exit_happened_;
-
   // True if this isolate was initialized from a snapshot.
   bool initialized_from_snapshot_;

=======================================
--- /branches/bleeding_edge/src/liveedit.cc     Tue Jul 30 10:00:05 2013
+++ /branches/bleeding_edge/src/liveedit.cc     Wed Aug  7 02:33:09 2013
@@ -1290,6 +1290,7 @@
     if (code_scope_info->IsFixedArray()) {
       shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
     }
+    shared_info->DisableOptimization(kLiveEdit);
   }

   if (shared_info->debug_info()->IsDebugInfo()) {
=======================================
--- /branches/bleeding_edge/src/objects.cc      Tue Aug  6 07:38:30 2013
+++ /branches/bleeding_edge/src/objects.cc      Wed Aug  7 02:33:09 2013
@@ -9222,6 +9222,7 @@
   ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() ||
          code()->optimizable());
+  ASSERT(!shared()->is_generator());
   set_code_no_write_barrier(
       GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
   // No write barrier required, since the builtin is part of the root set.
@@ -9232,10 +9233,8 @@
   ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
   ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
-  if (!FLAG_parallel_recompilation) {
-    JSFunction::MarkForLazyRecompilation();
-    return;
-  }
+  ASSERT(!shared()->is_generator());
+  ASSERT(FLAG_parallel_recompilation);
   if (FLAG_trace_parallel_recompilation) {
     PrintF("  ** Marking ");
     PrintName();
=======================================
--- /branches/bleeding_edge/src/optimizing-compiler-thread.cc Wed Jul 31 00:51:46 2013
+++ /branches/bleeding_edge/src/optimizing-compiler-thread.cc Wed Aug  7 02:33:09 2013
@@ -60,12 +60,25 @@
       OS::Sleep(FLAG_parallel_recompilation_delay);
     }

-    if (Acquire_Load(&stop_thread_)) {
-      stop_semaphore_->Signal();
-      if (FLAG_trace_parallel_recompilation) {
-        time_spent_total_ = OS::Ticks() - epoch;
-      }
-      return;
+    switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
+      case CONTINUE:
+        break;
+      case STOP:
+        if (FLAG_trace_parallel_recompilation) {
+          time_spent_total_ = OS::Ticks() - epoch;
+        }
+        stop_semaphore_->Signal();
+        return;
+      case FLUSH:
+        // The main thread is blocked, waiting for the stop semaphore.
+        { AllowHandleDereference allow_handle_dereference;
+          FlushInputQueue(true);
+        }
+        Release_Store(&queue_length_, static_cast<AtomicWord>(0));
+        Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+        stop_semaphore_->Signal();
+        // Return to start of consumer loop.
+        continue;
     }

     int64_t compiling_start = 0;
@@ -82,7 +95,9 @@

 void OptimizingCompilerThread::CompileNext() {
   OptimizingCompiler* optimizing_compiler = NULL;
-  input_queue_.Dequeue(&optimizing_compiler);
+  bool result = input_queue_.Dequeue(&optimizing_compiler);
+  USE(result);
+  ASSERT(result);
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));

   // The function may have already been optimized by OSR.  Simply continue.
@@ -100,28 +115,63 @@
   }
   output_queue_.Enqueue(optimizing_compiler);
 }
+
+
+void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
+  OptimizingCompiler* optimizing_compiler;
+  // The optimizing compiler is allocated in the CompilationInfo's zone.
+  while (input_queue_.Dequeue(&optimizing_compiler)) {
+    // This should not block, since we have one signal on the input queue
+    // semaphore corresponding to each element in the input queue.
+    input_queue_semaphore_->Wait();
+    CompilationInfo* info = optimizing_compiler->info();
+    if (restore_function_code) {
+      Handle<JSFunction> function = info->closure();
+      function->ReplaceCode(function->shared()->code());
+    }
+    delete info;
+  }
+}
+
+
+void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+  OptimizingCompiler* optimizing_compiler;
+  // The optimizing compiler is allocated in the CompilationInfo's zone.
+  while (output_queue_.Dequeue(&optimizing_compiler)) {
+    CompilationInfo* info = optimizing_compiler->info();
+    if (restore_function_code) {
+      Handle<JSFunction> function = info->closure();
+      function->ReplaceCode(function->shared()->code());
+    }
+    delete info;
+  }
+}
+
+
+void OptimizingCompilerThread::Flush() {
+  ASSERT(!IsOptimizerThread());
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+  input_queue_semaphore_->Signal();
+  stop_semaphore_->Wait();
+  FlushOutputQueue(true);
+}


 void OptimizingCompilerThread::Stop() {
   ASSERT(!IsOptimizerThread());
-  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
   input_queue_semaphore_->Signal();
   stop_semaphore_->Wait();

   if (FLAG_parallel_recompilation_delay != 0) {
     // Barrier when loading queue length is not necessary since the write
     // happens in CompileNext on the same thread.
+    // This is used only for testing.
     while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
     InstallOptimizedFunctions();
   } else {
-    OptimizingCompiler* optimizing_compiler;
-    // The optimizing compiler is allocated in the CompilationInfo's zone.
-    while (input_queue_.Dequeue(&optimizing_compiler)) {
-      delete optimizing_compiler->info();
-    }
-    while (output_queue_.Dequeue(&optimizing_compiler)) {
-      delete optimizing_compiler->info();
-    }
+    FlushInputQueue(false);
+    FlushOutputQueue(false);
   }

   if (FLAG_trace_parallel_recompilation) {
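
The flush handshake added above works as follows: the main thread stores FLUSH into the tri-state flag, signals the input-queue semaphore so the worker wakes even when the queue is empty, and blocks on the stop semaphore; the worker drains the input queue (restoring each function's code), resets the flag to CONTINUE, signals back, and resumes its loop; the main thread then drains the output queue itself. A self-contained sketch of that handshake using standard C++20 primitives in place of V8's Semaphore/Atomic wrappers (a real implementation also needs a thread-safe queue; V8 uses a lock-free UnboundQueue):

  #include <atomic>
  #include <cstdio>
  #include <queue>
  #include <semaphore>
  #include <thread>

  enum StopFlag { CONTINUE, STOP, FLUSH };

  std::atomic<StopFlag> stop_thread{CONTINUE};
  std::counting_semaphore<> input_sem{0};   // one signal per queued job
  std::binary_semaphore stop_sem{0};
  std::queue<int> input_queue;

  void Worker() {
    for (;;) {
      input_sem.acquire();                  // a job or a command wake-up
      switch (stop_thread.load(std::memory_order_acquire)) {
        case CONTINUE:
          break;
        case STOP:
          stop_sem.release();
          return;
        case FLUSH:
          // The main thread is blocked on stop_sem, so the queue is ours.
          // Consume one pending signal per drained job to keep the
          // semaphore count in step with the queue length.
          while (!input_queue.empty()) {
            input_queue.pop();
            input_sem.acquire();
          }
          stop_thread.store(CONTINUE, std::memory_order_release);
          stop_sem.release();
          continue;                         // back to the loop top
      }
      std::printf("compiling job %d\n", input_queue.front());
      input_queue.pop();
    }
  }

  void Flush() {                            // called from the main thread
    stop_thread.store(FLUSH, std::memory_order_release);
    input_sem.release();                    // wake the worker if it is idle
    stop_sem.acquire();                     // wait for the drain to finish
  }

  int main() {
    std::thread worker(Worker);
    input_queue.push(1);
    input_sem.release();
    Flush();                                // job 1 may compile or be flushed
    stop_thread.store(STOP, std::memory_order_release);
    input_sem.release();
    stop_sem.acquire();
    worker.join();
  }
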
=======================================
--- /branches/bleeding_edge/src/optimizing-compiler-thread.h Wed Jul 31 00:51:46 2013
+++ /branches/bleeding_edge/src/optimizing-compiler-thread.h Wed Aug  7 02:33:09 2013
@@ -54,13 +54,13 @@
       install_mutex_(OS::CreateMutex()),
       time_spent_compiling_(0),
       time_spent_total_(0) {
-    NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+    NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
     NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
   }

   void Run();
   void Stop();
-  void CompileNext();
+  void Flush();
   void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
   void InstallOptimizedFunctions();

@@ -92,6 +92,13 @@
   }

  private:
+  enum StopFlag { CONTINUE, STOP, FLUSH };
+
+  void FlushInputQueue(bool restore_function_code);
+  void FlushOutputQueue(bool restore_function_code);
+
+  void CompileNext();
+
 #ifdef DEBUG
   int thread_id_;
   Mutex* thread_id_mutex_;
=======================================
--- /branches/bleeding_edge/src/runtime.cc      Tue Aug  6 20:40:44 2013
+++ /branches/bleeding_edge/src/runtime.cc      Wed Aug  7 02:33:09 2013
@@ -3013,6 +3013,7 @@
   JavaScriptFrame* frame = stack_iterator.frame();

   ASSERT_EQ(frame->function(), generator_object->function());
+  ASSERT(frame->function()->is_compiled());

   STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0);
   STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0);
@@ -8487,8 +8488,7 @@
   }
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
-    while (function->IsMarkedForParallelRecompilation() ||
-           function->IsInRecompileQueue() ||
+    while (function->IsInRecompileQueue() ||
            function->IsMarkedForInstallingRecompiledCode()) {
       isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
       OS::Sleep(50);
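
This loop is what backs the "sync" argument of assertUnoptimized/assertOptimized in the test above: the main thread polls until the function has left the background pipeline, installing any finished candidates as it waits. The shape of the pattern, reduced to standard C++ with made-up names:

  #include <atomic>
  #include <chrono>
  #include <cstdio>
  #include <thread>

  std::atomic<bool> in_pipeline{true};

  void InstallOptimizedFunctions() { /* drain the output queue */ }

  int main() {
    std::thread background([] {
      std::this_thread::sleep_for(std::chrono::milliseconds(120));
      in_pipeline = false;                 // background compile finished
    });
    // Same shape as the loop in runtime.cc above: poll, installing any
    // finished candidates, until the function leaves the pipeline.
    while (in_pipeline) {
      InstallOptimizedFunctions();
      std::this_thread::sleep_for(std::chrono::milliseconds(50));
    }
    background.join();
    std::puts("function left the compile pipeline");
  }
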
=======================================
--- /branches/bleeding_edge/test/cctest/test-deoptimization.cc Tue Jul 30 01:35:48 2013
+++ /branches/bleeding_edge/test/cctest/test-deoptimization.cc Wed Aug  7 02:33:09 2013
@@ -77,23 +77,27 @@

 // Utility class to set --allow-natives-syntax and --nouse-inlining when
 // constructed and return to their default state when destroyed.
-class AllowNativesSyntaxNoInlining {
+class AllowNativesSyntaxNoInliningNoParallel {
  public:
-  AllowNativesSyntaxNoInlining()
+  AllowNativesSyntaxNoInliningNoParallel()
       : allow_natives_syntax_(i::FLAG_allow_natives_syntax),
-        use_inlining_(i::FLAG_use_inlining) {
+        use_inlining_(i::FLAG_use_inlining),
+        parallel_recompilation_(i::FLAG_parallel_recompilation) {
     i::FLAG_allow_natives_syntax = true;
     i::FLAG_use_inlining = false;
+    i::FLAG_parallel_recompilation = false;
   }

-  ~AllowNativesSyntaxNoInlining() {
+  ~AllowNativesSyntaxNoInliningNoParallel() {
     i::FLAG_allow_natives_syntax = allow_natives_syntax_;
     i::FLAG_use_inlining = use_inlining_;
+    i::FLAG_parallel_recompilation = parallel_recompilation_;
   }

  private:
   bool allow_natives_syntax_;
   bool use_inlining_;
+  bool parallel_recompilation_;
 };


@@ -343,7 +347,7 @@
   const char* f_source = "function f(x, y) { return x + y; };";

   {
-    AllowNativesSyntaxNoInlining options;
+    AllowNativesSyntaxNoInliningNoParallel options;
     // Compile function f and collect to type feedback to insert binary op stub
     // call in the optimized code.
     i::FLAG_prepare_always_opt = true;
@@ -401,7 +405,7 @@
                binary_op);
   char* f_source = f_source_buffer.start();

-  AllowNativesSyntaxNoInlining options;
+  AllowNativesSyntaxNoInliningNoParallel options;
  // Compile function f and collect to type feedback to insert binary op stub
   // call in the optimized code.
   i::FLAG_prepare_always_opt = true;
@@ -493,7 +497,7 @@
   const char* f_source = "function f(x, y) { return x < y; };";

   {
-    AllowNativesSyntaxNoInlining options;
+    AllowNativesSyntaxNoInliningNoParallel options;
     // Compile function f and collect to type feedback to insert compare ic
     // call in the optimized code.
     i::FLAG_prepare_always_opt = true;
@@ -540,7 +544,7 @@
   const char* g2_source = "function g2(x, y) { x[y] = 1; };";

   {
-    AllowNativesSyntaxNoInlining options;
+    AllowNativesSyntaxNoInliningNoParallel options;
     // Compile functions and collect to type feedback to insert ic
     // calls in the optimized code.
     i::FLAG_prepare_always_opt = true;
@@ -620,7 +624,7 @@
   const char* g2_source = "function g2(x, y) { x[y] = 1; };";

   {
-    AllowNativesSyntaxNoInlining options;
+    AllowNativesSyntaxNoInliningNoParallel options;
     // Compile functions and collect to type feedback to insert ic
     // calls in the optimized code.
     i::FLAG_prepare_always_opt = true;
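
The renamed AllowNativesSyntaxNoInliningNoParallel helper is the usual RAII save/override/restore pattern for global flags, extended with one more flag. A generic version of the same idea (a sketch, not V8's helper):

  #include <cassert>

  // Scoped flag override: saves the old value in the constructor and
  // restores it in the destructor. V8's class above does this by hand
  // for three specific flags.
  template <typename T>
  class ScopedFlag {
   public:
    ScopedFlag(T* flag, T value) : flag_(flag), old_(*flag) { *flag = value; }
    ~ScopedFlag() { *flag_ = old_; }
    ScopedFlag(const ScopedFlag&) = delete;
    ScopedFlag& operator=(const ScopedFlag&) = delete;
   private:
    T* flag_;
    T old_;
  };

  bool FLAG_parallel_recompilation = true;  // stand-in for i::FLAG_...

  int main() {
    {
      ScopedFlag<bool> no_parallel(&FLAG_parallel_recompilation, false);
      assert(!FLAG_parallel_recompilation);
    }
    assert(FLAG_parallel_recompilation);    // restored on scope exit
  }
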
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Mon Aug  5 05:52:53 2013
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Wed Aug  7 02:33:09 2013
@@ -2826,6 +2826,7 @@
   // to check whether the data is being released since the external string
   // resource's callback is fired when the external string is GC'ed.
   FLAG_use_ic = false;  // ICs retain objects.
+  FLAG_parallel_recompilation = false;
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
   SourceResource* resource = new SourceResource(i::StrDup(source));
