Reviewers: jarin, Michael Achenbach

Description:
d8: Leak context_mutex_ so it will never be destroyed while locked

Calling quit() from d8 calls exit(), which runs static destructors. If
context_mutex_ is statically allocated, its destructor calls
pthread_mutex_destroy.

When running d8 in "isolates" mode, another thread may still be running. If
that thread calls CreateEvaluationContext, it locks context_mutex_. If the
mutex is destroyed while it is locked, pthread_mutex_destroy returns an error.
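
For illustration only (not part of the CL), here is a minimal standalone
sketch of that failure mode: one thread still holds a pthread mutex while
another destroys it, which is effectively what exit() does via the static
destructor. POSIX leaves this undefined; implementations commonly report
EBUSY.

  #include <pthread.h>
  #include <cstdio>

  static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

  void* Worker(void*) {
    pthread_mutex_lock(&mutex);  // Grab the lock and never release it.
    return nullptr;
  }

  int main() {
    pthread_t thread;
    pthread_create(&thread, nullptr, Worker, nullptr);
    pthread_join(thread, nullptr);
    // Mirrors the static destructor run by exit(): destroy a locked mutex.
    int result = pthread_mutex_destroy(&mutex);
    std::printf("pthread_mutex_destroy returned %d\n", result);  // Commonly EBUSY.
    return 0;
  }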

This CL changes these statically allocated Mutexes to LazyMutexes, which are
leaked at exit instead of being destroyed.
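
As a sketch of the resulting pattern (the names below are illustrative, not
the actual d8 members): a LazyMutex has no destructor, is safe to
zero-initialize statically (as the diff below does), and constructs the
underlying Mutex on first use via Pointer().

  #include "src/base/platform/mutex.h"

  // Plain static storage: no dynamic initializer and, crucially, no destructor.
  static v8::base::LazyMutex example_mutex_ = LAZY_MUTEX_INITIALIZER;

  void GuardedWork() {
    // Pointer() lazily creates the Mutex on first call, then returns it.
    v8::base::LockGuard<v8::base::Mutex> lock_guard(example_mutex_.Pointer());
    // ... critical section ...
  }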

BUG=v8:4279
[email protected]
[email protected]
LOG=n

Please review this at https://codereview.chromium.org/1240553003/

Base URL: https://chromium.googlesource.com/v8/v8.git@master

Affected files (+8, -8 lines):
  M src/d8.h
  M src/d8.cc


Index: src/d8.cc
diff --git a/src/d8.cc b/src/d8.cc
index b68e4ba729f324b8ff695ea8fb10a118681107d7..344325e1b90c284112d90401ccf229a04a789625 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -202,11 +202,11 @@ CounterMap* Shell::counter_map_;
 base::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
 CounterCollection Shell::local_counters_;
 CounterCollection* Shell::counters_ = &local_counters_;
-base::Mutex Shell::context_mutex_;
+base::LazyMutex Shell::context_mutex_;
 const base::TimeTicks Shell::kInitialTicks =
     base::TimeTicks::HighResolutionNow();
 Persistent<Context> Shell::utility_context_;
-base::Mutex Shell::workers_mutex_;
+base::LazyMutex Shell::workers_mutex_;
 bool Shell::allow_new_workers_ = true;
 i::List<Worker*> Shell::workers_;
 i::List<SharedArrayBuffer::Contents> Shell::externalized_shared_contents_;
@@ -699,7 +699,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
   }

   {
-    base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
+    base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
     if (!allow_new_workers_) return;

     Worker* worker = new Worker;
@@ -1203,7 +1203,7 @@ void Shell::InitializeDebugger(Isolate* isolate) {
 Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
 #ifndef V8_SHARED
   // This needs to be a critical section since this is not thread-safe
-  base::LockGuard<base::Mutex> lock_guard(&context_mutex_);
+  base::LockGuard<base::Mutex> lock_guard(context_mutex_.Pointer());
 #endif  // !V8_SHARED
   // Initialize the global objects
   Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
@@ -2252,7 +2252,7 @@ void Shell::CleanupWorkers() {
   // create a new Worker, it would deadlock.
   i::List<Worker*> workers_copy;
   {
-    base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
+    base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
     allow_new_workers_ = false;
     workers_copy.AddAll(workers_);
     workers_.Clear();
@@ -2266,7 +2266,7 @@ void Shell::CleanupWorkers() {

   // Now that all workers are terminated, we can re-enable Worker creation.
   {
-    base::LockGuard<base::Mutex> lock_guard(&workers_mutex_);
+    base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
     allow_new_workers_ = true;
   }

Index: src/d8.h
diff --git a/src/d8.h b/src/d8.h
index bb5592bec4f49c2d98edba0de324c686a97b903e..ae61dcc63e0ed1df20ff6e8e690713c7950ef437 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -481,10 +481,10 @@ class Shell : public i::AllStatic {
   static CounterCollection local_counters_;
   static CounterCollection* counters_;
   static base::OS::MemoryMappedFile* counters_file_;
-  static base::Mutex context_mutex_;
+  static base::LazyMutex context_mutex_;
   static const base::TimeTicks kInitialTicks;

-  static base::Mutex workers_mutex_;
+  static base::LazyMutex workers_mutex_;
   static bool allow_new_workers_;
   static i::List<Worker*> workers_;
   static i::List<SharedArrayBuffer::Contents> externalized_shared_contents_;

