Update of /cvsroot/boost/boost/boost/asio/detail
In directory sc8-pr-cvs3.sourceforge.net:/tmp/cvs-serv652
Modified Files:
epoll_reactor.hpp kqueue_reactor.hpp select_reactor.hpp
Log Message:
Always restart epoll_reactor operations if the callback handler indicates
that they should be restarted, even if there was an error associated with
the file descriptor, to ensure that operations don't get "lost".
Don't cleanup pending timer objects while the reactor lock is held, since
the destructors for the objects may try to make calls back into the
reactor.
Index: epoll_reactor.hpp
===================================================================
RCS file: /cvsroot/boost/boost/boost/asio/detail/epoll_reactor.hpp,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -d -r1.8 -r1.9
--- epoll_reactor.hpp 4 Jan 2007 05:53:00 -0000 1.8
+++ epoll_reactor.hpp 31 Jul 2007 11:36:09 -0000 1.9
@@ -332,7 +332,10 @@
std::size_t cancel_timer(timer_queue<Time_Traits>& timer_queue, void* token)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
- return timer_queue.cancel_timer(token);
+ std::size_t n = timer_queue.cancel_timer(token);
+ if (n > 0)
+ interrupter_.interrupt();
+ return n;
}
private:
@@ -348,16 +351,13 @@
read_op_queue_.dispatch_cancellations();
write_op_queue_.dispatch_cancellations();
except_op_queue_.dispatch_cancellations();
+ for (std::size_t i = 0; i < timer_queues_.size(); ++i)
+ timer_queues_[i]->dispatch_cancellations();
// Check if the thread is supposed to stop.
if (stop_thread_)
{
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
return;
}
@@ -366,12 +366,7 @@
if (!block && read_op_queue_.empty() && write_op_queue_.empty()
&& except_op_queue_.empty() && all_timer_queues_are_empty())
{
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
return;
}
@@ -399,59 +394,44 @@
}
else
{
- if (events[i].events & (EPOLLERR | EPOLLHUP))
- {
- boost::system::error_code ec;
- except_op_queue_.dispatch_all_operations(descriptor, ec);
- read_op_queue_.dispatch_all_operations(descriptor, ec);
- write_op_queue_.dispatch_all_operations(descriptor, ec);
+ bool more_reads = false;
+ bool more_writes = false;
+ bool more_except = false;
+ boost::system::error_code ec;
- epoll_event ev = { 0, { 0 } };
- ev.events = 0;
- ev.data.fd = descriptor;
- epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
- }
+ // Exception operations must be processed first to ensure that any
+ // out-of-band data is read before normal data.
+ if (events[i].events & (EPOLLPRI | EPOLLERR | EPOLLHUP))
+ more_except = except_op_queue_.dispatch_operation(descriptor, ec);
else
- {
- bool more_reads = false;
- bool more_writes = false;
- bool more_except = false;
- boost::system::error_code ec;
-
- // Exception operations must be processed first to ensure that any
- // out-of-band data is read before normal data.
- if (events[i].events & EPOLLPRI)
- more_except = except_op_queue_.dispatch_operation(descriptor, ec);
- else
- more_except = except_op_queue_.has_operation(descriptor);
+ more_except = except_op_queue_.has_operation(descriptor);
- if (events[i].events & EPOLLIN)
- more_reads = read_op_queue_.dispatch_operation(descriptor, ec);
- else
- more_reads = read_op_queue_.has_operation(descriptor);
+ if (events[i].events & (EPOLLIN | EPOLLERR | EPOLLHUP))
+ more_reads = read_op_queue_.dispatch_operation(descriptor, ec);
+ else
+ more_reads = read_op_queue_.has_operation(descriptor);
- if (events[i].events & EPOLLOUT)
- more_writes = write_op_queue_.dispatch_operation(descriptor, ec);
- else
- more_writes = write_op_queue_.has_operation(descriptor);
+ if (events[i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP))
+ more_writes = write_op_queue_.dispatch_operation(descriptor, ec);
+ else
+ more_writes = write_op_queue_.has_operation(descriptor);
- epoll_event ev = { 0, { 0 } };
- ev.events = EPOLLERR | EPOLLHUP;
- if (more_reads)
- ev.events |= EPOLLIN;
- if (more_writes)
- ev.events |= EPOLLOUT;
- if (more_except)
- ev.events |= EPOLLPRI;
- ev.data.fd = descriptor;
- int result = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
- if (result != 0)
- {
- ec = boost::system::error_code(errno, boost::system::native_ecat);
- read_op_queue_.dispatch_all_operations(descriptor, ec);
- write_op_queue_.dispatch_all_operations(descriptor, ec);
- except_op_queue_.dispatch_all_operations(descriptor, ec);
- }
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLERR | EPOLLHUP;
+ if (more_reads)
+ ev.events |= EPOLLIN;
+ if (more_writes)
+ ev.events |= EPOLLOUT;
+ if (more_except)
+ ev.events |= EPOLLPRI;
+ ev.data.fd = descriptor;
+ int result = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
+ if (result != 0)
+ {
+ ec = boost::system::error_code(errno, boost::system::native_ecat);
+ read_op_queue_.dispatch_all_operations(descriptor, ec);
+ write_op_queue_.dispatch_all_operations(descriptor, ec);
+ except_op_queue_.dispatch_all_operations(descriptor, ec);
}
}
}
@@ -459,19 +439,17 @@
write_op_queue_.dispatch_cancellations();
except_op_queue_.dispatch_cancellations();
for (std::size_t i = 0; i < timer_queues_.size(); ++i)
+ {
timer_queues_[i]->dispatch_timers();
+ timer_queues_[i]->dispatch_cancellations();
+ }
// Issue any pending cancellations.
for (size_t i = 0; i < pending_cancellations_.size(); ++i)
cancel_ops_unlocked(pending_cancellations_[i]);
pending_cancellations_.clear();
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
}
// Run the select loop in the thread.
@@ -567,6 +545,22 @@
interrupter_.interrupt();
}
+ // Clean up operations and timers. We must not hold the lock since the
+ // destructors may make calls back into this reactor. We make a copy of the
+ // vector of timer queues since the original may be modified while the lock
+ // is not held.
+ void cleanup_operations_and_timers(
+ boost::asio::detail::mutex::scoped_lock& lock)
+ {
+ timer_queues_for_cleanup_ = timer_queues_;
+ lock.unlock();
+ read_op_queue_.cleanup_operations();
+ write_op_queue_.cleanup_operations();
+ except_op_queue_.cleanup_operations();
+ for (std::size_t i = 0; i < timer_queues_for_cleanup_.size(); ++i)
+ timer_queues_for_cleanup_[i]->cleanup_timers();
+ }
+
// Mutex to protect access to internal data.
boost::asio::detail::mutex mutex_;
@@ -591,6 +585,10 @@
// The timer queues.
std::vector<timer_queue_base*> timer_queues_;
+ // A copy of the timer queues, used when cleaning up timers. The copy is
+ // stored as a class data member to avoid unnecessary memory allocation.
+ std::vector<timer_queue_base*> timer_queues_for_cleanup_;
+
// The descriptors that are pending cancellation.
std::vector<socket_type> pending_cancellations_;
Index: kqueue_reactor.hpp
===================================================================
RCS file: /cvsroot/boost/boost/boost/asio/detail/kqueue_reactor.hpp,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -d -r1.10 -r1.11
--- kqueue_reactor.hpp 7 Apr 2007 08:54:58 -0000 1.10
+++ kqueue_reactor.hpp 31 Jul 2007 11:36:10 -0000 1.11
@@ -322,7 +322,10 @@
std::size_t cancel_timer(timer_queue<Time_Traits>& timer_queue, void* token)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
- return timer_queue.cancel_timer(token);
+ std::size_t n = timer_queue.cancel_timer(token);
+ if (n > 0)
+ interrupter_.interrupt();
+ return n;
}
private:
@@ -338,16 +341,13 @@
read_op_queue_.dispatch_cancellations();
write_op_queue_.dispatch_cancellations();
except_op_queue_.dispatch_cancellations();
+ for (std::size_t i = 0; i < timer_queues_.size(); ++i)
+ timer_queues_[i]->dispatch_cancellations();
// Check if the thread is supposed to stop.
if (stop_thread_)
{
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
return;
}
@@ -356,12 +356,7 @@
if (!block && read_op_queue_.empty() && write_op_queue_.empty()
&& except_op_queue_.empty() && all_timer_queues_are_empty())
{
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
return;
}
@@ -467,19 +462,17 @@
write_op_queue_.dispatch_cancellations();
except_op_queue_.dispatch_cancellations();
for (std::size_t i = 0; i < timer_queues_.size(); ++i)
+ {
timer_queues_[i]->dispatch_timers();
+ timer_queues_[i]->dispatch_cancellations();
+ }
// Issue any pending cancellations.
for (std::size_t i = 0; i < pending_cancellations_.size(); ++i)
cancel_ops_unlocked(pending_cancellations_[i]);
pending_cancellations_.clear();
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
}
// Run the select loop in the thread.
@@ -574,6 +567,22 @@
interrupter_.interrupt();
}
+ // Clean up operations and timers. We must not hold the lock since the
+ // destructors may make calls back into this reactor. We make a copy of the
+ // vector of timer queues since the original may be modified while the lock
+ // is not held.
+ void cleanup_operations_and_timers(
+ boost::asio::detail::mutex::scoped_lock& lock)
+ {
+ timer_queues_for_cleanup_ = timer_queues_;
+ lock.unlock();
+ read_op_queue_.cleanup_operations();
+ write_op_queue_.cleanup_operations();
+ except_op_queue_.cleanup_operations();
+ for (std::size_t i = 0; i < timer_queues_for_cleanup_.size(); ++i)
+ timer_queues_for_cleanup_[i]->cleanup_timers();
+ }
+
// Mutex to protect access to internal data.
boost::asio::detail::mutex mutex_;
@@ -598,6 +607,10 @@
// The timer queues.
std::vector<timer_queue_base*> timer_queues_;
+ // A copy of the timer queues, used when cleaning up timers. The copy is
+ // stored as a class data member to avoid unnecessary memory allocation.
+ std::vector<timer_queue_base*> timer_queues_for_cleanup_;
+
// The descriptors that are pending cancellation.
std::vector<socket_type> pending_cancellations_;
Index: select_reactor.hpp
===================================================================
RCS file: /cvsroot/boost/boost/boost/asio/detail/select_reactor.hpp,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -d -r1.7 -r1.8
--- select_reactor.hpp 4 Jan 2007 05:53:00 -0000 1.7
+++ select_reactor.hpp 31 Jul 2007 11:36:10 -0000 1.8
@@ -230,7 +230,10 @@
std::size_t cancel_timer(timer_queue<Time_Traits>& timer_queue, void* token)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
- return timer_queue.cancel_timer(token);
+ std::size_t n = timer_queue.cancel_timer(token);
+ if (n > 0)
+ interrupter_.interrupt();
+ return n;
}
private:
@@ -246,16 +249,13 @@
read_op_queue_.dispatch_cancellations();
write_op_queue_.dispatch_cancellations();
except_op_queue_.dispatch_cancellations();
+ for (std::size_t i = 0; i < timer_queues_.size(); ++i)
+ timer_queues_[i]->dispatch_cancellations();
// Check if the thread is supposed to stop.
if (stop_thread_)
{
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
return;
}
@@ -264,12 +264,7 @@
if (!block && read_op_queue_.empty() && write_op_queue_.empty()
&& except_op_queue_.empty() && all_timer_queues_are_empty())
{
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
return;
}
@@ -322,19 +317,17 @@
write_op_queue_.dispatch_cancellations();
}
for (std::size_t i = 0; i < timer_queues_.size(); ++i)
+ {
timer_queues_[i]->dispatch_timers();
+ timer_queues_[i]->dispatch_cancellations();
+ }
// Issue any pending cancellations.
for (size_t i = 0; i < pending_cancellations_.size(); ++i)
cancel_ops_unlocked(pending_cancellations_[i]);
pending_cancellations_.clear();
- // Clean up operations. We must not hold the lock since the operations may
- // make calls back into this reactor.
- lock.unlock();
- read_op_queue_.cleanup_operations();
- write_op_queue_.cleanup_operations();
- except_op_queue_.cleanup_operations();
+ cleanup_operations_and_timers(lock);
}
// Run the select loop in the thread.
@@ -415,6 +408,22 @@
interrupter_.interrupt();
}
+ // Clean up operations and timers. We must not hold the lock since the
+ // destructors may make calls back into this reactor. We make a copy of the
+ // vector of timer queues since the original may be modified while the lock
+ // is not held.
+ void cleanup_operations_and_timers(
+ boost::asio::detail::mutex::scoped_lock& lock)
+ {
+ timer_queues_for_cleanup_ = timer_queues_;
+ lock.unlock();
+ read_op_queue_.cleanup_operations();
+ write_op_queue_.cleanup_operations();
+ except_op_queue_.cleanup_operations();
+ for (std::size_t i = 0; i < timer_queues_for_cleanup_.size(); ++i)
+ timer_queues_for_cleanup_[i]->cleanup_timers();
+ }
+
// Mutex to protect access to internal data.
boost::asio::detail::mutex mutex_;
@@ -436,6 +445,10 @@
// The timer queues.
std::vector<timer_queue_base*> timer_queues_;
+ // A copy of the timer queues, used when cleaning up timers. The copy is
+ // stored as a class data member to avoid unnecessary memory allocation.
+ std::vector<timer_queue_base*> timer_queues_for_cleanup_;
+
// The descriptors that are pending cancellation.
std::vector<socket_type> pending_cancellations_;
-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems? Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
_______________________________________________
Boost-cvs mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/boost-cvs