LeetcodeCourageK opened a new issue, #1782:
URL: https://github.com/apache/incubator-brpc/issues/1782

   I created multiple threads (std::thread) that all push elements to the same
   BlockingQueue. Soon after the program starts, it crashes with a segmentation
   fault at "std::unique_lock<std::mutex> lock(mutex_)".
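   For context, here is a minimal sketch of the setup (a hypothetical
   simplification for illustration; the real BlockingQueue and thread function
   are larger, and the names below are illustrative):

   #include <condition_variable>
   #include <mutex>
   #include <queue>
   #include <thread>
   #include <vector>

   // Hypothetical reduction of the queue in question.
   template <typename T>
   class BlockingQueue {
   public:
       void push(const T& value) {
           std::unique_lock<std::mutex> lock(mutex_);  // the line that crashes
           queue_.push(value);
           not_empty_.notify_one();
       }

       T pop() {
           std::unique_lock<std::mutex> lock(mutex_);
           not_empty_.wait(lock, [this] { return !queue_.empty(); });
           T value = queue_.front();
           queue_.pop();
           return value;
       }

   private:
       std::mutex mutex_;
       std::condition_variable not_empty_;
       std::queue<T> queue_;
   };

   int main() {
       BlockingQueue<unsigned int> queue;

       // Several std::thread producers pushing into the same queue.
       std::vector<std::thread> producers;
       for (unsigned int i = 0; i < 8; ++i) {
           producers.emplace_back([&queue, i] { queue.push(i); });
       }
       for (auto& t : producers) {
           t.join();
       }
       return 0;
   }

   The stack trace at the crash is as follows: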
   
________________________________________
   libpthread.so.0!pthread_mutex_lock (unknown source:0)
   libdataloader_server.so!bthread::pthread_mutex_lock_impl() (\work\incubator-brpc\src\bthread\mutex.cpp:549)
   libdataloader_server.so!pthread_mutex_lock(pthread_mutex_t * __mutex) (\work\incubator-brpc\src\bthread\mutex.cpp:812)
   libdataloader_server.so!pthread_mutex_lock(pthread_mutex_t * __mutex) (\work\incubator-brpc\src\bthread\mutex.cpp:811)
   libdataloader_server.so!__gthread_mutex_lock(__gthread_mutex_t * __mutex) (\usr\include\x86_64-linux-gnu\c++\9\bits\gthr-default.h:749)
   libdataloader_server.so!std::mutex::lock(std::mutex * const this) (\usr\include\c++\9\bits\std_mutex.h:100)
   libdataloader_server.so!std::unique_lock<std::mutex>::lock(std::unique_lock<std::mutex> * const this) (\usr\include\c++\9\bits\unique_lock.h:141)
   libdataloader_server.so!std::unique_lock<std::mutex>::unique_lock(std::unique_lock<std::mutex> * const this, std::unique_lock<std::mutex>::mutex_type & __m) (\usr\include\c++\9\bits\unique_lock.h:71)
   libdataloader_server.so!std::__invoke<void (*&)(unsigned int const&), unsigned long&>(void (*&)(const unsigned int &) __fn) (\usr\include\c++\9\bits\invoke.h:95)
   libdataloader_server.so!std::_Bind<void (*(unsigned long))(unsigned int const&)>::__call<void, , 0ul>(std::tuple<>&&, std::_Index_tuple<0ul>)(std::_Bind<void (*(long unsigned int))(unsigned int const&)> * const this, std::tuple<> && __args) (\usr\include\c++\9\functional:400)
   libdataloader_server.so!std::_Bind<void (*(unsigned long))(unsigned int const&)>::operator()<, void>()(std::_Bind<void (*(long unsigned int))(unsigned int const&)> * const this) (\usr\include\c++\9\functional:484)
   libdataloader_server.so!std::__invoke_impl<void, std::_Bind<void (*(unsigned long))(unsigned int const&)>>(std::__invoke_other, std::_Bind<void (*(unsigned long))(unsigned int const&)>&&)(std::_Bind<void (*(long unsigned int))(unsigned int const&)> && __f) (\usr\include\c++\9\bits\invoke.h:60)
   libdataloader_server.so!std::__invoke<std::_Bind<void (*(unsigned long))(unsigned int const&)>>(std::_Bind<void (*(unsigned long))(unsigned int const&)>&&)(std::_Bind<void (*(long unsigned int))(unsigned int const&)> && __fn) (\usr\include\c++\9\bits\invoke.h:95)
   libdataloader_server.so!std::thread::_Invoker<std::tuple<std::_Bind<void (*(unsigned long))(unsigned int const&)> > >::_M_invoke<0ul>(std::_Index_tuple<0ul>)(std::thread::_Invoker<std::tuple<std::_Bind<void (*(long unsigned int))(unsigned int const&)> > > * const this) (\usr\include\c++\9\thread:244)
   libdataloader_server.so!std::thread::_Invoker<std::tuple<std::_Bind<void (*(unsigned long))(unsigned int const&)> > >::operator()()(std::thread::_Invoker<std::tuple<std::_Bind<void (*(long unsigned int))(unsigned int const&)> > > * const this) (\usr\include\c++\9\thread:251)
   libdataloader_server.so!std::thread::_State_impl<std::thread::_Invoker<std::tuple<std::_Bind<void (*(unsigned long))(unsigned int const&)> > > >::_M_run()(std::thread::_State_impl<std::thread::_Invoker<std::tuple<std::_Bind<void (*(long unsigned int))(unsigned int const&)> > > > * const this) (\usr\include\c++\9\thread:195)


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

