diff --git a/ryu/base/app_manager.py b/ryu/base/app_manager.py
index 3d5d895..60cf88d 100644
--- a/ryu/base/app_manager.py
+++ b/ryu/base/app_manager.py
@@ -29,6 +29,7 @@ import logging
 import sys
 import os
 import gc
+import traceback
 
 from ryu import cfg
 from ryu import utils
@@ -37,6 +38,7 @@ from ryu.controller.handler import register_instance, get_dependent_services
 from ryu.controller.controller import Datapath
 from ryu.controller import event
 from ryu.controller.event import EventRequestBase, EventReplyBase
+from ryu.controller.ofp_event import EventOFPStateChange
 from ryu.lib import hub
 from ryu.ofproto import ofproto_protocol
 
@@ -157,12 +159,28 @@ class RyuApp(object):
         self.observers = {}     # ev_cls -> observer-name -> states:set
         self.threads = []
         self.main_thread = None
-        self.events = hub.Queue(128)
+        self.events = hub.Queue(64)
+        self.sc_events = hub.Queue(32)
+        self._events_sem = hub.BoundedSemaphore(self.events.maxsize)
+        self._sc_events_sem = hub.BoundedSemaphore(self.sc_events.maxsize)
+        self._event_get_timeout = 5
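+        # The bounded semaphores mirror the queue capacities: a producer
+        # in _send_event() blocks on acquire() until a slot frees up, so
+        # its non-blocking put() can never overflow the queue.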
+
         if hasattr(self.__class__, 'LOGGER_NAME'):
             self.logger = logging.getLogger(self.__class__.LOGGER_NAME)
         else:
             self.logger = logging.getLogger(self.name)
         self.CONF = cfg.CONF
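+        # Re-registering an identical opt (once per RyuApp instance) is
+        # effectively a no-op for oslo.config, so this is safe in __init__.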
+        self.CONF.register_opts([
+            cfg.FloatOpt('handler-execution-timeout',
+                         default=10.0,
+                         help='Maximum time, in seconds, to permit handlers to run.')
+        ])
+        self._handler_execution_timeout = self.CONF.handler_execution_timeout
 
         # prevent accidental creation of instances of this class outside RyuApp
         class _EventThreadStop(event.EventBase):
@@ -279,15 +297,51 @@
 
     def _event_loop(self):
         while self.is_active or not self.events.empty():
-            ev, state = self.events.get()
-            if ev == self._event_stop:
+            ev = state = None
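+            # Service state-change events ahead of ordinary events so
+            # that notifications such as datapath death are not starved
+            # behind a backlog on the main queue.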
+            if self.sc_events.qsize():
+                try:
+                    ev, state = self.sc_events.get(timeout=self._event_get_timeout)
+                except hub.QueueEmpty:
+                    pass
+                else:
+                    self._sc_events_sem.release()
+
+            if ev is None:
+                try:
+                    ev, state = self.events.get(timeout=self._event_get_timeout)
+                except hub.QueueEmpty:
+                    pass
+                else:
+                    self._events_sem.release()
+
+            if (ev is None) or (ev == self._event_stop):
                 continue
             handlers = self.get_handlers(ev, state)
             for handler in handlers:
-                handler(ev)
+                handler_execution_timeout = hub.Timeout(self._handler_execution_timeout)
+                try:
+                    handler(ev)
+                except hub.Timeout as t:
+                    # A timeout raised inside the handler is not ours; re-raise it.
+                    if t is not handler_execution_timeout:
+                        raise
+                    LOG.error('%s: Handler exceeded maximum execution time; aborted.', self.name)
+                    LOG.error('%s: Backtrace from offending handler [%s] servicing event [%s] follows.',
+                              self.name, handler.__name__, ev.__class__.__name__)
+                    LOG.error('%s', traceback.format_exc())
+                finally:
+                    handler_execution_timeout.cancel()
 
     def _send_event(self, ev, state):
-        self.events.put((ev, state))
+        if isinstance(ev, EventOFPStateChange):
+            self._sc_events_sem.acquire()
+            self.sc_events.put((ev, state), block=False)
+        else:
+            self._events_sem.acquire()
+            self.events.put((ev, state), block=False)
 
     def send_event(self, name, ev, state=None):
         """
@@ -336,7 +379,7 @@ class RyuApp(object):
 
 
 class AppManager(object):
-    # singletone
+    # singleton
     _instance = None
 
     @staticmethod
@@ -519,8 +573,11 @@
         app.stop()
         self._close(app)
         events = app.events
+        sc_events = app.sc_events
         if not events.empty():
-            app.logger.debug('%s events remians %d', app.name, events.qsize())
+            app.logger.debug('%s has %d events remaining', app.name, events.qsize())
+        if not sc_events.empty():
+            app.logger.debug('%s has %d state change events remaining', app.name, sc_events.qsize())
 
     def close(self):
         def close_all(close_dict):
diff --git a/ryu/controller/controller.py b/ryu/controller/controller.py
index 25b8776..0a2d426 100644
--- a/ryu/controller/controller.py
+++ b/ryu/controller/controller.py
@@ -30,7 +30,7 @@ from ryu.lib.hub import StreamServer
 import traceback
 import random
 import ssl
-from socket import IPPROTO_TCP, TCP_NODELAY, timeout as SocketTimeout, error as SocketError
+from socket import IPPROTO_TCP, TCP_NODELAY, SHUT_RDWR, timeout as SocketTimeout
 import warnings
 
 import ryu.base.app_manager
@@ -41,8 +41,10 @@ from ryu.ofproto import ofproto_protocol
 from ryu.ofproto import ofproto_v1_0
 from ryu.ofproto import nx_match
 
-from ryu.controller import handler
 from ryu.controller import ofp_event
+from ryu.controller.handler import HANDSHAKE_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
+
+from ryu.exception import RyuException
 
 from ryu.lib.dpid import dpid_to_str
 
@@ -58,8 +60,16 @@ CONF.register_cli_opts([
     cfg.StrOpt('ctl-privkey', default=None, help='controller private key'),
     cfg.StrOpt('ctl-cert', default=None, help='controller certificate'),
     cfg.StrOpt('ca-certs', default=None, help='CA certificates'),
-    cfg.FloatOpt('socket-timeout', default=5.0, help='Time, in seconds, to await completion of socket operations.')
 ])
+CONF.register_opts([
+    cfg.FloatOpt('socket-timeout',
+                 default=5.0,
+                 help='Time, in seconds, to await completion of socket operations.')
+])
+
+
+class DatapathShutdown(RyuException):
+    message = 'Datapath shutdown was requested'
 
 
 class OpenFlowController(object):
@@ -102,9 +112,24 @@
     def deactivate(self):
         try:
             method(self)
+        except DatapathShutdown:
+            if self.socket is not None:
+                self.socket.close()
+                self.socket = None
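+            # throw() raises DatapathShutdown inside the peer greenthread
+            # so that both loops unwind promptly.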
+            if self.recv_thread:
+                self.recv_thread.throw(DatapathShutdown)
+            if self.send_thread:
+                self.send_thread.throw(DatapathShutdown)
         finally:
-            self.send_active = False
-            self.set_state(handler.DEAD_DISPATCHER)
+            if self.socket is not None:
+                try:
+                    self.socket.shutdown(SHUT_RDWR)
+                except (EOFError, IOError):
+                    pass
+            if self.state != DEAD_DISPATCHER:
+                self.set_state(DEAD_DISPATCHER)
     return deactivate
 
 
@@ -117,19 +142,27 @@ class Datapath(ofproto_protocol.ProtocolDesc):
         self.socket.settimeout(CONF.socket_timeout)
         self.address = address
 
-        self.send_active = True
+        self.send_thread = None
+        self.recv_thread = None
+
         self.close_requested = False
 
         # The limit is arbitrary. We need to limit queue size to
         # prevent it from eating memory up
         self.send_q = hub.Queue(16)
+        self._send_q_sem = hub.BoundedSemaphore(self.send_q.maxsize)
+        self._send_q_timeout = 5
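+        # A connection that has not finished the handshake and appeared
+        # in dpset within this window is presumed dead (see _recv_loop).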
+        self._unknown_datapath_timeout = 20
 
         self.xid = random.randint(0, self.ofproto.MAX_XID)
         self.id = None  # datapath_id is unknown yet
         self._ports = None
         self.flow_format = ofproto_v1_0.NXFF_OPENFLOW10
         self.ofp_brick = ryu.base.app_manager.lookup_service_brick('ofp_event')
-        self.set_state(handler.HANDSHAKE_DISPATCHER)
+        self.dpset = ryu.base.app_manager.lookup_service_brick('dpset')
+        self.set_state(HANDSHAKE_DISPATCHER)
 
     def _get_ports(self):
         if (self.ofproto_parser is not None and
@@ -163,64 +196,90 @@ class Datapath(ofproto_protocol.ProtocolDesc):
     # Low level socket handling layer
     @_deactivate
     def _recv_loop(self):
-        buf = bytearray()
-        required_len = ofproto_common.OFP_HEADER_SIZE
-
-        count = 0
-        while True:
-            ret = ""
-
-            try:
-                ret = self.socket.recv(required_len)
-            except SocketTimeout:
-                if not self.close_requested:
-                    continue
-            except SocketError:
-                self.close_requested = True
-
-            if (len(ret) == 0) or (self.close_requested):
-                self.socket.close()
-                break
-
-            buf += ret
-            while len(buf) >= required_len:
-                (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
-                required_len = msg_len
-                if len(buf) < required_len:
-                    break
-
-                msg = ofproto_parser.msg(
-                    self, version, msg_type, msg_len, xid, buf[:msg_len])
-                # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
-                if msg:
-                    ev = ofp_event.ofp_msg_to_ev(msg)
-                    self.ofp_brick.send_event_to_observers(ev, self.state)
-
-                    dispatchers = lambda x: x.callers[ev.__class__].dispatchers
-                    handlers = [handler for handler in
-                                self.ofp_brick.get_handlers(ev) if
-                                self.state in dispatchers(handler)]
-                    for handler in handlers:
-                        handler(ev)
-
-                buf = buf[required_len:]
-                required_len = ofproto_common.OFP_HEADER_SIZE
-
-                # We need to schedule other greenlets. Otherwise, ryu
-                # can't accept new switches or handle the existing
-                # switches. The limit is arbitrary. We need the better
-                # approach in the future.
-                count += 1
-                if count > 2048:
-                    count = 0
-                    hub.sleep(0)
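+        # When the timer fires, hub.Timeout raises the supplied exception
+        # (here DatapathShutdown) in this greenthread.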
+        unknown_datapath_timeout = hub.Timeout(self._unknown_datapath_timeout, DatapathShutdown)
+        try:
+            buf = bytearray()
+            required_len = ofproto_common.OFP_HEADER_SIZE
+
+            count = 0
+            while True:
+                ret = ""
+
+                try:
+                    ret = self.socket.recv(required_len)
+                except SocketTimeout:
+                    if not self.close_requested:
+                        continue
+                except (AttributeError, EOFError, IOError):
+                    self.close_requested = True
+
+                if (len(ret) == 0) or (self.close_requested):
+                    raise DatapathShutdown
+
+                if self.id and (self.state == MAIN_DISPATCHER):
+                    # Guard: the dpset service may not be loaded at all.
+                    registered_dp = self.dpset.get(self.id) if self.dpset else None
+                    if registered_dp and (registered_dp.recv_thread == self.recv_thread):
+                        unknown_datapath_timeout.cancel()
+
+                buf += ret
+                while len(buf) >= required_len:
+                    (version, msg_type, msg_len, xid) = ofproto_parser.header(buf)
+                    required_len = msg_len
+                    if len(buf) < required_len:
+                        break
+
+                    msg = ofproto_parser.msg(
+                        self, version, msg_type, msg_len, xid, buf[:msg_len])
+                    # LOG.debug('queue msg %s cls %s', msg, msg.__class__)
+                    if msg:
+                        ev = ofp_event.ofp_msg_to_ev(msg)
+                        self.ofp_brick.send_event_to_observers(ev, self.state)
+
+                        dispatchers = lambda x: x.callers[ev.__class__].dispatchers
+                        handlers = [handler for handler in
+                                    self.ofp_brick.get_handlers(ev) if
+                                    self.state in dispatchers(handler)]
+                        for handler in handlers:
+                            handler(ev)
+
+                    buf = buf[required_len:]
+                    required_len = ofproto_common.OFP_HEADER_SIZE
+
+                    # We need to schedule other greenlets. Otherwise, ryu
+                    # can't accept new switches or handle the existing
+                    # switches. The limit is arbitrary. We need the better
+                    # approach in the future.
+                    count += 1
+                    if count > 2048:
+                        count = 0
+                        hub.sleep(0)
+        finally:
+            # Get rid of the recv_thread reference.
+            self.recv_thread = None
+            # Cancel timeout, if we haven't already.
+            unknown_datapath_timeout.cancel()
 
     @_deactivate
     def _send_loop(self):
         try:
-            while self.send_active:
-                buf = self.send_q.get()
-                self.socket.sendall(buf)
+            while self.socket is not None:
+                buf = None
+                try:
+                    buf = self.send_q.get(timeout=self._send_q_timeout)
+                except hub.QueueEmpty:
+                    pass
+                else:
+                    self._send_q_sem.release()
+
+                if buf is None:
+                    continue
+                if self.socket:
+                    self.socket.sendall(buf)
+                else:
+                    raise DatapathShutdown
         except IOError as ioe:
             LOG.debug("Socket error while sending data to switch at address %s: [%d] %s",
                       self.address, ioe.errno, ioe.strerror)
@@ -228,17 +287,35 @@ class Datapath(ofproto_protocol.ProtocolDesc):
             q = self.send_q
             # first, clear self.send_q to prevent new references.
             self.send_q = None
-            # there might be threads currently blocking in send_q.put().
-            # unblock them by draining the queue.
+            # Next, get rid of the send_thread reference.
+            self.send_thread = None
+            # Drain the send_q, releasing the associated semaphore for each entry.
+            # This should release all threads waiting to acquire the semaphore.
             try:
                 while q.get(block=False):
-                    pass
+                    self._send_q_sem.release()
             except hub.QueueEmpty:
                 pass
 
     def send(self, buf):
+        acquired = False
+        dp_closed = True
+
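+        # Block for queue space first; if send_q disappeared while we
+        # waited (the datapath is being torn down), hand the slot back
+        # and drop the buffer rather than raising.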
         if self.send_q:
-            self.send_q.put(buf)
+            acquired = self._send_q_sem.acquire()
+
+        if self.send_q and acquired:
+            dp_closed = False
+            self.send_q.put(buf, block=False)
+        elif acquired:
+            self._send_q_sem.release()
+
+        if dp_closed:
+            LOG.debug('Datapath in process of terminating; send() to %s discarded.',
+                      self.address)
 
     def set_xid(self, msg):
         self.xid += 1
@@ -255,17 +332,17 @@ class Datapath(ofproto_protocol.ProtocolDesc):
         self.send(msg.buf)
 
     def serve(self):
-        send_thr = hub.spawn(self._send_loop)
+        # self.recv_thread *MUST* be set before we spawn the send loop!
+        self.recv_thread = hub.getcurrent()
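+        # _deactivate() in the send loop throws into self.recv_thread,
+        # so it must already refer to this greenthread.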
+        self.send_thread = hub.spawn(self._send_loop)
 
         # send hello message immediately
         hello = self.ofproto_parser.OFPHello(self)
         self.send_msg(hello)
 
-        try:
-            self._recv_loop()
-        finally:
-            hub.kill(send_thr)
-            hub.joinall([send_thr])
+        self._recv_loop()
 
     #
     # Utility methods for convenience
diff --git a/ryu/lib/hub.py b/ryu/lib/hub.py
index 5621147..e982e9f 100644
--- a/ryu/lib/hub.py
+++ b/ryu/lib/hub.py
@@ -90,11 +90,15 @@ if HUB_TYPE == 'eventlet':
             except greenlet.GreenletExit:
                 pass
 
-    Queue = eventlet.queue.Queue
+
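+    # LightQueue omits Queue's task_done()/join() machinery, which Ryu
+    # does not use, making it a cheaper drop-in here.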
+    Queue = eventlet.queue.LightQueue
     QueueEmpty = eventlet.queue.Empty
     Semaphore = eventlet.semaphore.Semaphore
     BoundedSemaphore = eventlet.semaphore.BoundedSemaphore
 
+
     class StreamServer(object):
         def __init__(self, listen_info, handle=None, backlog=None,
                      spawn='default', **ssl_args):
@@ -120,19 +124,24 @@ if HUB_TYPE == 'eventlet':
                 sock, addr = self.server.accept()
                 spawn(self.handle, sock, addr)
 
+
     class LoggingWrapper(object):
         def write(self, message):
             LOG.info(message.rstrip('\n'))
 
+
     class WSGIServer(StreamServer):
         def serve_forever(self):
             self.logger = LoggingWrapper()
             eventlet.wsgi.server(self.server, self.handle, self.logger)
 
+
     WebSocketWSGI = websocket.WebSocketWSGI
 
+
     Timeout = eventlet.timeout.Timeout
 
+
     class Event(object):
         def __init__(self):
             self._ev = eventlet.event.Event()
@@ -144,7 +153,7 @@ if HUB_TYPE == 'eventlet':
 
         def _broadcast(self):
             self._ev.send()
-            # because eventlet Event doesn't allow mutiple send() on an event,
+            # because eventlet Event doesn't allow multiple send() on an event,
             # re-create the underlying event.
             # note: _ev.reset() is obsolete.
             self._ev = eventlet.event.Event()
