Hello community,

here is the log from the commit of package python3-jupyter_ipykernel for 
openSUSE:Factory checked in at 2016-03-07 13:28:25
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python3-jupyter_ipykernel (Old)
 and      /work/SRC/openSUSE:Factory/.python3-jupyter_ipykernel.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python3-jupyter_ipykernel"

Changes:
--------
--- 
/work/SRC/openSUSE:Factory/python3-jupyter_ipykernel/python3-jupyter_ipykernel.changes
      2016-01-09 23:13:52.000000000 +0100
+++ 
/work/SRC/openSUSE:Factory/.python3-jupyter_ipykernel.new/python3-jupyter_ipykernel.changes
 2016-03-07 13:29:55.000000000 +0100
@@ -1,0 +2,20 @@
+Sat Feb 27 16:39:07 UTC 2016 - [email protected]
+
+- update to version 4.3.1:
+  * Fix Windows Python 3.5 incompatibility caused by faulthandler
+    patch in 4.3
+
+- changes from version 4.3.0:
+  * Publish all IO in a thread, via :class:`IOPubThread`. This solves
+    the problem of requiring :meth:`sys.stdout.flush` to be called in
+    the notebook to produce output promptly during long-running cells.
+  * Remove references to outdated IPython guiref in kernel banner.
+  * Patch faulthandler to use sys.__stderr__ instead of forwarded
+    sys.stderr, which has no fileno when forwarded.
+  * Deprecate some vestiges of the Big Split: -
+    :func:`ipykernel.find_connection_file` is deprecated. Use
+    :func:`jupyter_client.find_connection_file` instead. - Various
+    pieces of code specific to IPython parallel are deprecated in
+    ipykernel and moved to ipyparallel.
+
+-------------------------------------------------------------------

Old:
----
  ipykernel-4.2.2.tar.gz

New:
----
  ipykernel-4.3.1.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python3-jupyter_ipykernel.spec ++++++
--- /var/tmp/diff_new_pack.xRhaZg/_old  2016-03-07 13:29:55.000000000 +0100
+++ /var/tmp/diff_new_pack.xRhaZg/_new  2016-03-07 13:29:55.000000000 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           python3-jupyter_ipykernel
-Version:        4.2.2
+Version:        4.3.1
 Release:        0
 Summary:        IPython Kernel for Jupyter
 License:        BSD-3-Clause

++++++ ipykernel-4.2.2.tar.gz -> ipykernel-4.3.1.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/PKG-INFO new/ipykernel-4.3.1/PKG-INFO
--- old/ipykernel-4.2.2/PKG-INFO        2016-01-02 14:18:13.000000000 +0100
+++ new/ipykernel-4.3.1/PKG-INFO        2016-02-26 14:37:08.000000000 +0100
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: ipykernel
-Version: 4.2.2
+Version: 4.3.1
 Summary: IPython Kernel for Jupyter
 Home-page: http://ipython.org
 Author: IPython Development Team
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/docs/changelog.rst 
new/ipykernel-4.3.1/docs/changelog.rst
--- old/ipykernel-4.2.2/docs/changelog.rst      2016-01-02 14:17:21.000000000 
+0100
+++ new/ipykernel-4.3.1/docs/changelog.rst      2016-02-26 14:25:25.000000000 
+0100
@@ -1,6 +1,30 @@
 Changes in IPython kernel
 =========================
 
+4.3
+---
+
+4.3.1
+*****
+
+- Fix Windows Python 3.5 incompatibility caused by faulthandler patch in 4.3
+
+4.3.0
+*****
+
+`4.3.0 on GitHub <https://github.com/ipython/ipykernel/milestones/4.3>`__
+
+- Publish all IO in a thread, via :class:`IOPubThread`.
+  This solves the problem of requiring :meth:`sys.stdout.flush` to be called 
in the notebook to produce output promptly during long-running cells.
+- Remove references to outdated IPython guiref in kernel banner.
+- Patch faulthandler to use ``sys.__stderr__`` instead of forwarded 
``sys.stderr``,
+  which has no fileno when forwarded.
+- Deprecate some vestiges of the Big Split:
+  - :func:`ipykernel.find_connection_file` is deprecated. Use 
:func:`jupyter_client.find_connection_file` instead.
+  - Various pieces of code specific to IPython parallel are deprecated in 
ipykernel
+  and moved to ipyparallel.
+
+
 4.2
 ---
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/_version.py 
new/ipykernel-4.3.1/ipykernel/_version.py
--- old/ipykernel-4.2.2/ipykernel/_version.py   2016-01-02 14:17:50.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/_version.py   2016-02-26 14:32:27.000000000 
+0100
@@ -1,4 +1,4 @@
-version_info = (4, 2, 2)
+version_info = (4, 3, 1)
 __version__ = '.'.join(map(str, version_info))
 
 kernel_protocol_version_info = (5, 0)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/codeutil.py 
new/ipykernel-4.3.1/ipykernel/codeutil.py
--- old/ipykernel-4.2.2/ipykernel/codeutil.py   2015-11-13 10:25:37.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/codeutil.py   2016-02-22 13:54:46.000000000 
+0100
@@ -13,6 +13,9 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
 
+import warnings
+warnings.warn("ipykernel.codeutil is deprecated. It has moved to 
ipyparallel.serialize", DeprecationWarning)
+
 import sys
 import types
 try:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/connect.py 
new/ipykernel-4.3.1/ipykernel/connect.py
--- old/ipykernel-4.2.2/ipykernel/connect.py    2015-06-15 22:26:41.000000000 
+0200
+++ new/ipykernel-4.3.1/ipykernel/connect.py    2016-02-25 15:47:39.000000000 
+0100
@@ -8,6 +8,7 @@
 import json
 import sys
 from subprocess import Popen, PIPE
+import warnings
 
 from IPython.core.profiledir import ProfileDir
 from IPython.paths import get_ipython_dir
@@ -37,18 +38,9 @@
 
 
 def find_connection_file(filename='kernel-*.json', profile=None):
-    """find a connection file, and return its absolute path.
-
-    The current working directory and the profile's security
-    directory will be searched for the file if it is not given by
-    absolute path.
-
-    If profile is unspecified, then the current running application's
-    profile will be used, or 'default', if not run from IPython.
-
-    If the argument does not match an existing file, it will be interpreted as 
a
-    fileglob, and the matching file in the profile's security dir with
-    the latest access time will be used.
+    """DEPRECATED: find a connection file, and return its absolute path.
+    
+    THIS FUNCTION IS DEPRECATED. Use jupyter_client.find_connection_file 
instead.
 
     Parameters
     ----------
@@ -62,6 +54,10 @@
     -------
     str : The absolute path of the connection file.
     """
+    
+    import warnings
+    warnings.warn("""ipykernel.find_connection_file is deprecated, use 
jupyter_client.find_connection_file""",
+        DeprecationWarning, stacklevel=2)
     from IPython.core.application import BaseIPythonApplication as IPApp
     try:
         # quick check for absolute path, before going through logic
@@ -85,6 +81,28 @@
     return jupyter_client.find_connection_file(filename, path=['.', 
security_dir])
 
 
+def _find_connection_file(connection_file, profile=None):
+    """Return the absolute path for a connection file
+    
+    - If nothing specified, return current Kernel's connection file
+    - If profile specified, show deprecation warning about finding connection 
files in profiles
+    - Otherwise, call jupyter_client.find_connection_file
+    """
+    if connection_file is None:
+        # get connection file from current kernel
+        return get_connection_file()
+    else:
+        # connection file specified, allow shortnames:
+        if profile is not None:
+            warnings.warn(
+                "Finding connection file by profile is deprecated.",
+                DeprecationWarning, stacklevel=3,
+            )
+            return find_connection_file(connection_file, profile=profile)
+        else:
+            return jupyter_client.find_connection_file(connection_file)
+
+
 def get_connection_info(connection_file=None, unpack=False, profile=None):
     """Return the connection information for the current Kernel.
 
@@ -100,22 +118,14 @@
     unpack : bool [default: False]
         if True, return the unpacked dict, otherwise just the string contents
         of the file.
-    profile : str [optional]
-        The name of the profile to use when searching for the connection file,
-        if different from the current IPython session or 'default'.
-
+    profile : DEPRECATED
 
     Returns
     -------
     The connection dictionary of the current kernel, as string or dict,
     depending on `unpack`.
     """
-    if connection_file is None:
-        # get connection file from current kernel
-        cf = get_connection_file()
-    else:
-        # connection file specified, allow shortnames:
-        cf = find_connection_file(connection_file, profile=profile)
+    cf = _find_connection_file(connection_file, profile)
 
     with open(cf) as f:
         info = f.read()
@@ -144,10 +154,7 @@
         IPython Kernel will be used, which is only allowed from inside a 
kernel.
     argv : list [optional]
         Any extra args to be passed to the console.
-    profile : str [optional]
-        The name of the profile to use when searching for the connection file,
-        if different from the current IPython session or 'default'.
-
+    profile : DEPRECATED
 
     Returns
     -------
@@ -155,11 +162,7 @@
     """
     argv = [] if argv is None else argv
 
-    if connection_file is None:
-        # get connection file from current kernel
-        cf = get_connection_file()
-    else:
-        cf = find_connection_file(connection_file, profile=profile)
+    cf = _find_connection_file(connection_file, profile)
 
     cmd = ';'.join([
         "from IPython.qt.console import qtconsoleapp",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/datapub.py 
new/ipykernel-4.3.1/ipykernel/datapub.py
--- old/ipykernel-4.2.2/ipykernel/datapub.py    2015-11-13 10:25:37.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/datapub.py    2016-02-22 13:54:46.000000000 
+0100
@@ -1,6 +1,9 @@
 """Publishing native (typically pickled) objects.
 """
 
+import warnings
+warnings.warn("ipykernel.datapub is deprecated. It has moved to 
ipyparallel.datapub", DeprecationWarning)
+
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
 
@@ -53,5 +56,7 @@
     data : dict
         The data to be published. Think of it as a namespace.
     """
+    warnings.warn("ipykernel.datapub is deprecated. It has moved to 
ipyparallel.datapub", DeprecationWarning)
+    
     from ipykernel.zmqshell import ZMQInteractiveShell
     ZMQInteractiveShell.instance().data_pub.publish_data(data)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/inprocess/ipkernel.py 
new/ipykernel-4.3.1/ipykernel/inprocess/ipkernel.py
--- old/ipykernel-4.2.2/ipykernel/inprocess/ipkernel.py 2016-01-02 
14:15:12.000000000 +0100
+++ new/ipykernel-4.3.1/ipykernel/inprocess/ipkernel.py 2016-02-22 
13:54:46.000000000 +0100
@@ -14,6 +14,7 @@
 from ipykernel.zmqshell import ZMQInteractiveShell
 
 from .socket import DummySocket
+from ..iostream import OutStream, BackgroundSocket, IOPubThread
 
 #-----------------------------------------------------------------------------
 # Main kernel class
@@ -49,13 +50,23 @@
     shell_class = Type(allow_none=True)
     shell_streams = List()
     control_stream = Any()
-    iopub_socket = Instance(DummySocket, ())
+    _underlying_iopub_socket = Instance(DummySocket, ())
+    iopub_thread = Instance(IOPubThread)
+    def _iopub_thread_default(self):
+        thread = IOPubThread(self._underlying_iopub_socket)
+        thread.start()
+        return thread
+    
+    iopub_socket = Instance(BackgroundSocket)
+    def _iopub_socket_default(self):
+        return self.iopub_thread.background_socket
+    
     stdin_socket = Instance(DummySocket, ())
 
     def __init__(self, **traits):
         super(InProcessKernel, self).__init__(**traits)
 
-        self.iopub_socket.on_trait_change(self._io_dispatch, 'message_sent')
+        self._underlying_iopub_socket.on_trait_change(self._io_dispatch, 
'message_sent')
         self.shell.kernel = self
 
     def execute_request(self, stream, ident, parent):
@@ -128,12 +139,10 @@
         return InProcessInteractiveShell
 
     def _stdout_default(self):
-        from ipykernel.iostream import OutStream
-        return OutStream(self.session, self.iopub_socket, u'stdout', 
pipe=False)
+        return OutStream(self.session, self.iopub_thread, u'stdout')
 
     def _stderr_default(self):
-        from ipykernel.iostream import OutStream
-        return OutStream(self.session, self.iopub_socket, u'stderr', 
pipe=False)
+        return OutStream(self.session, self.iopub_thread, u'stderr')
 
 #-----------------------------------------------------------------------------
 # Interactive shell subclass
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/iostream.py 
new/ipykernel-4.3.1/ipykernel/iostream.py
--- old/ipykernel-4.2.2/ipykernel/iostream.py   2016-01-02 14:15:12.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/iostream.py   2016-02-22 13:54:46.000000000 
+0100
@@ -1,21 +1,27 @@
+# coding: utf-8
 """Wrappers for forwarding stdout/stderr over zmq"""
 
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
 
+from __future__ import print_function
+import atexit
 import os
 import threading
-import time
+import sys
 import uuid
+import warnings
 from io import StringIO, UnsupportedOperation
 
 import zmq
 from zmq.eventloop.ioloop import IOLoop
+from zmq.eventloop.zmqstream import ZMQStream
 
 from jupyter_client.session import extract_header
 
 from ipython_genutils import py3compat
 from ipython_genutils.py3compat import unicode_type
+
 from IPython.utils.warn import warn
 
 #-----------------------------------------------------------------------------
@@ -26,166 +32,261 @@
 CHILD = 1
 
 #-----------------------------------------------------------------------------
-# Stream classes
+# IO classes
 #-----------------------------------------------------------------------------
 
-class OutStream(object):
-    """A file like object that publishes the stream to a 0MQ PUB socket."""
-
-    # The time interval between automatic flushes, in seconds.
-    _subprocess_flush_limit = 256
-    flush_interval = 0.05
-    topic=None
-
-    def __init__(self, session, pub_socket, name, pipe=True):
-        self.encoding = 'UTF-8'
-        self.session = session
-        self.pub_socket = pub_socket
-        self.name = name
-        self.topic = b'stream.' + py3compat.cast_bytes(name)
-        self.parent_header = {}
-        self._new_buffer()
-        self._buffer_lock = threading.Lock()
+class IOPubThread(object):
+    """An object for sending IOPub messages in a background thread
+    
+    prevents a blocking main thread
+    
+    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
+    whose IO is always run in a thread.
+    """
+
+    def __init__(self, socket, pipe=False):
+        self.socket = socket
+        self.background_socket = BackgroundSocket(self)
         self._master_pid = os.getpid()
-        self._master_thread = threading.current_thread().ident
-        self._pipe_pid = os.getpid()
         self._pipe_flag = pipe
+        self.io_loop = IOLoop()
         if pipe:
             self._setup_pipe_in()
+        self.thread = threading.Thread(target=self._thread_main)
+        self.thread.daemon = True
+
+    def _thread_main(self):
+        """The inner loop that's actually run in a thread"""
+        self.io_loop.start()
+        self.io_loop.close()
 
     def _setup_pipe_in(self):
         """setup listening pipe for subprocesses"""
-        ctx = self.pub_socket.context
+        ctx = self.socket.context
 
         # use UUID to authenticate pipe messages
         self._pipe_uuid = uuid.uuid4().bytes
 
-        self._pipe_in = ctx.socket(zmq.PULL)
-        self._pipe_in.linger = 0
+        pipe_in = ctx.socket(zmq.PULL)
+        pipe_in.linger = 0
         try:
-            self._pipe_port = 
self._pipe_in.bind_to_random_port("tcp://127.0.0.1")
+            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
         except zmq.ZMQError as e:
-            warn("Couldn't bind IOStream to 127.0.0.1: %s" % e +
+            warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                 "\nsubprocess output will be unavailable."
             )
             self._pipe_flag = False
-            self._pipe_in.close()
-            del self._pipe_in
+            pipe_in.close()
             return
-        self._pipe_poller = zmq.Poller()
-        self._pipe_poller.register(self._pipe_in, zmq.POLLIN)
-        if IOLoop.initialized():
-            # subprocess flush should trigger flush
-            # if kernel is idle
-            IOLoop.instance().add_handler(self._pipe_in,
-                lambda s, event: self.flush(),
-                IOLoop.READ,
-            )
+        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
+        self._pipe_in.on_recv(self._handle_pipe_msg)
+    
+    def _handle_pipe_msg(self, msg):
+        """handle a pipe message from a subprocess"""
+        if not self._pipe_flag or not self._is_master_process():
+            return
+        if msg[0] != self._pipe_uuid:
+            print("Bad pipe message: %s", msg, file=sys.__stderr__)
+            return
+        self.send_multipart(msg[1:])
 
     def _setup_pipe_out(self):
         # must be new context after fork
         ctx = zmq.Context()
-        self._pipe_pid = os.getpid()
-        self._pipe_out = ctx.socket(zmq.PUSH)
-        self._pipe_out_lock = threading.Lock()
-        self._pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
+        pipe_out = ctx.socket(zmq.PUSH)
+        pipe_out.linger = 3000 # 3s timeout for pipe_out sends before 
discarding the message
+        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
+        return ctx, pipe_out
 
     def _is_master_process(self):
         return os.getpid() == self._master_pid
 
-    def _is_master_thread(self):
-        return threading.current_thread().ident == self._master_thread
-
-    def _have_pipe_out(self):
-        return os.getpid() == self._pipe_pid
-
     def _check_mp_mode(self):
         """check for forks, and switch to zmq pipeline if necessary"""
         if not self._pipe_flag or self._is_master_process():
             return MASTER
         else:
-            if not self._have_pipe_out():
-                self._flush_buffer()
-                # setup a new out pipe
-                self._setup_pipe_out()
             return CHILD
+    
+    def start(self):
+        """Start the IOPub thread"""
+        self.thread.start()
+        # make sure we don't prevent process exit
+        # I'm not sure why setting daemon=True above isn't enough, but it 
doesn't appear to be.
+        atexit.register(self.stop)
+    
+    def stop(self):
+        """Stop the IOPub thread"""
+        if not self.thread.is_alive():
+            return
+        self.io_loop.add_callback(self.io_loop.stop)
+        self.thread.join()
+    
+    def close(self):
+        self.socket.close()
+        self.socket = None
+
+    @property
+    def closed(self):
+        return self.socket is None
+    
+    def send_multipart(self, *args, **kwargs):
+        """send_multipart schedules actual zmq send in my thread.
+        
+        If my thread isn't running (e.g. forked process), send immediately.
+        """
+
+        if self.thread.is_alive():
+            self.io_loop.add_callback(lambda : self._really_send(*args, 
**kwargs))
+        else:
+            self._really_send(*args, **kwargs)
+    
+    def _really_send(self, msg, *args, **kwargs):
+        """The callback that actually sends messages"""
+        mp_mode = self._check_mp_mode()
+
+        if mp_mode != CHILD:
+            # we are master, do a regular send
+            self.socket.send_multipart(msg, *args, **kwargs)
+        else:
+            # we are a child, pipe to master
+            # new context/socket for every pipe-out
+            # since forks don't teardown politely, use ctx.term to ensure send 
has completed
+            ctx, pipe_out = self._setup_pipe_out()
+            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
+            pipe_out.close()
+            ctx.term()
+
+
+class BackgroundSocket(object):
+    """Wrapper around IOPub thread that provides zmq send[_multipart]"""
+    io_thread = None
+    
+    def __init__(self, io_thread):
+        self.io_thread = io_thread
+    
+    def __getattr__(self, attr):
+        """Wrap socket attr access for backward-compatibility"""
+        if attr.startswith('__') and attr.endswith('__'):
+            # don't wrap magic methods
+            super(BackgroundSocket, self).__getattr__(attr)
+        if hasattr(self.io_thread.socket, attr):
+            warnings.warn("Accessing zmq Socket attribute %s on 
BackgroundSocket" % attr,
+                DeprecationWarning, stacklevel=2)
+            return getattr(self.io_thread.socket, attr)
+        super(BackgroundSocket, self).__getattr__(attr)
+    
+    def __setattr__(self, attr, value):
+        if attr == 'io_thread' or (attr.startswith('__' and 
attr.endswith('__'))):
+            super(BackgroundSocket, self).__setattr__(attr, value)
+        else:
+            warnings.warn("Setting zmq Socket attribute %s on 
BackgroundSocket" % attr,
+                DeprecationWarning, stacklevel=2)
+            setattr(self.io_thread.socket, attr, value)
+    
+    def send(self, msg, *args, **kwargs):
+        return self.send_multipart([msg], *args, **kwargs)
+
+    def send_multipart(self, *args, **kwargs):
+        """Schedule send in IO thread"""
+        return self.io_thread.send_multipart(*args, **kwargs)
+
+
+class OutStream(object):
+    """A file like object that publishes the stream to a 0MQ PUB socket.
+    
+    Output is handed off to an IO Thread
+    """
+
+    # The time interval between automatic flushes, in seconds.
+    flush_interval = 0.2
+    topic=None
+
+    def __init__(self, session, pub_thread, name, pipe=None):
+        if pipe is not None:
+            warnings.warn("pipe argument to OutStream is deprecated and 
ignored",
+                DeprecationWarning)
+        self.encoding = 'UTF-8'
+        self.session = session
+        if not isinstance(pub_thread, IOPubThread):
+            # Backward-compat: given socket, not thread. Wrap in a thread.
+            warnings.warn("OutStream should be created with IOPubThread, not 
%r" % pub_thread,
+                DeprecationWarning, stacklevel=2)
+            pub_thread = IOPubThread(pub_thread)
+            pub_thread.start()
+        self.pub_thread = pub_thread
+        self.name = name
+        self.topic = b'stream.' + py3compat.cast_bytes(name)
+        self.parent_header = {}
+        self._master_pid = os.getpid()
+        self._flush_lock = threading.Lock()
+        self._flush_timeout = None
+        self._io_loop = pub_thread.io_loop
+        self._new_buffer()
+
+    def _is_master_process(self):
+        return os.getpid() == self._master_pid
 
     def set_parent(self, parent):
         self.parent_header = extract_header(parent)
 
     def close(self):
-        self.pub_socket = None
+        self.pub_thread = None
 
     @property
     def closed(self):
-        return self.pub_socket is None
-
-    def _flush_from_subprocesses(self):
-        """flush possible pub data from subprocesses into my buffer"""
-        if not self._pipe_flag or not self._is_master_process():
-            return
-        for i in range(self._subprocess_flush_limit):
-            if self._pipe_poller.poll(0):
-                msg = self._pipe_in.recv_multipart()
-                if msg[0] != self._pipe_uuid:
-                    continue
-                else:
-                    self._buffer.write(msg[1].decode(self.encoding, 'replace'))
-                    # this always means a flush,
-                    # so reset our timer
-                    self._start = 0
-            else:
-                break
+        return self.pub_thread is None
 
     def _schedule_flush(self):
-        """schedule a flush in the main thread
-
-        only works with a tornado/pyzmq eventloop running
+        """schedule a flush in the IO thread
+        
+        call this on write, to indicate that flush should be called soon.
         """
-        if IOLoop.initialized():
-            IOLoop.instance().add_callback(self.flush)
-        else:
-            # no async loop, at least force the timer
-            self._start = 0
-
-    def flush(self):
-        """trigger actual zmq send"""
-        if self.pub_socket is None:
-            raise ValueError(u'I/O operation on closed file')
-
-        mp_mode = self._check_mp_mode()
-
-        if mp_mode != CHILD:
-            # we are master
-            if not self._is_master_thread():
-                # sub-threads must not trigger flush directly,
-                # but at least they can schedule an async flush, or force the 
timer.
-                self._schedule_flush()
+        with self._flush_lock:
+            if self._flush_timeout is not None:
                 return
+            # None indicates there's no flush scheduled.
+            # Use a non-None placeholder to indicate that a flush is scheduled
+            # to avoid races while we wait for _schedule_in_thread below to 
fire in the io thread.
+            self._flush_timeout = 'placeholder'
+        
+        # add_timeout has to be handed to the io thread with add_callback
+        def _schedule_in_thread():
+            self._flush_timeout = 
self._io_loop.call_later(self.flush_interval, self._flush)
+        self._io_loop.add_callback(_schedule_in_thread)
 
-            self._flush_from_subprocesses()
+    def flush(self):
+        """trigger actual zmq send
+        
+        send will happen in the background thread
+        """
+        if self.pub_thread.thread.is_alive():
+            self._io_loop.add_callback(self._flush)
+            # wait for flush to actually get through:
+            evt = threading.Event()
+            self._io_loop.add_callback(evt.set)
+            evt.wait()
+        else:
+            self._flush()
+    
+    def _flush(self):
+        """This is where the actual send happens.
+        
+        _flush should generally be called in the IO thread,
+        unless the thread has been destroyed (e.g. forked subprocess).
+        """
+        with self._flush_lock:
+            self._flush_timeout = None
             data = self._flush_buffer()
-
-            if data:
-                content = {u'name':self.name, u'text':data}
-                msg = self.session.send(self.pub_socket, u'stream', 
content=content,
-                                       parent=self.parent_header, 
ident=self.topic)
-
-                if hasattr(self.pub_socket, 'flush'):
-                    # socket itself has flush (presumably ZMQStream)
-                    self.pub_socket.flush()
-        else:
-            with self._pipe_out_lock:
-                string = self._flush_buffer()
-                tracker = self._pipe_out.send_multipart([
-                    self._pipe_uuid,
-                    string.encode(self.encoding, 'replace'),
-                ], copy=False, track=True)
-                try:
-                    tracker.wait(1)
-                except:
-                    pass
-
+        if data:
+            # FIXME: this disables Session's fork-safe check,
+            # since pub_thread is itself fork-safe.
+            # There should be a better way to do this.
+            self.session.pid = os.getpid()
+            content = {u'name':self.name, u'text':data}
+            self.session.send(self.pub_thread, u'stream', content=content,
+                parent=self.parent_header, ident=self.topic)
+    
     def isatty(self):
         return False
 
@@ -205,14 +306,14 @@
         raise UnsupportedOperation("IOStream has no fileno.")
 
     def write(self, string):
-        if self.pub_socket is None:
+        if self.pub_thread is None:
             raise ValueError('I/O operation on closed file')
         else:
             # Make sure that we're handling unicode
             if not isinstance(string, unicode_type):
                 string = string.decode(self.encoding, 'replace')
 
-            is_child = (self._check_mp_mode() == CHILD)
+            is_child = (not self._is_master_process())
             self._buffer.write(string)
             if is_child:
                 # newlines imply flush in subprocesses
@@ -220,16 +321,11 @@
                 # and this helps.
                 if '\n' in string:
                     self.flush()
-            # do we want to check subprocess flushes on write?
-            # self._flush_from_subprocesses()
-            current_time = time.time()
-            if self._start < 0:
-                self._start = current_time
-            elif current_time - self._start > self.flush_interval:
-                self.flush()
+            else:
+                self._schedule_flush()
 
     def writelines(self, sequence):
-        if self.pub_socket is None:
+        if self.pub_thread is None:
             raise ValueError('I/O operation on closed file')
         else:
             for string in sequence:
@@ -239,11 +335,11 @@
         """clear the current buffer and return the current buffer data"""
         data = u''
         if self._buffer is not None:
-            data = self._buffer.getvalue()
-            self._buffer.close()
-        self._new_buffer()
+            buf = self._buffer
+            self._new_buffer()
+            data = buf.getvalue()
+            buf.close()
         return data
 
     def _new_buffer(self):
         self._buffer = StringIO()
-        self._start = -1
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/ipkernel.py 
new/ipykernel-4.3.1/ipykernel/ipkernel.py
--- old/ipykernel-4.2.2/ipykernel/ipkernel.py   2015-11-13 10:25:37.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/ipkernel.py   2016-02-22 13:54:46.000000000 
+0100
@@ -11,7 +11,6 @@
 
 from .comm import CommManager
 from .kernelbase import Kernel as KernelBase
-from .serialize import serialize_object, unpack_apply_message
 from .zmqshell import ZMQInteractiveShell
 
 
@@ -51,8 +50,6 @@
         self.shell.displayhook.topic = self._topic('execute_result')
         self.shell.display_pub.session = self.session
         self.shell.display_pub.pub_socket = self.iopub_socket
-        self.shell.data_pub.session = self.session
-        self.shell.data_pub.pub_socket = self.iopub_socket
 
         # TMP - hack while developing
         self.shell._reply_content = None
@@ -124,6 +121,33 @@
         super(IPythonKernel, self).set_parent(ident, parent)
         self.shell.set_parent(parent)
 
+    def init_metadata(self, parent):
+        """Initialize metadata.
+        
+        Run at the beginning of each execution request.
+        """
+        md = super(IPythonKernel, self).init_metadata(parent)
+        # FIXME: remove deprecated ipyparallel-specific code
+        # This is required for ipyparallel < 5.0
+        md.update({
+            'dependencies_met' : True,
+            'engine' : self.ident,
+        })
+        return md
+    
+    def finish_metadata(self, parent, metadata, reply_content):
+        """Finish populating metadata.
+        
+        Run after completing an execution request.
+        """
+        # FIXME: remove deprecated ipyparallel-specific code
+        # This is required by ipyparallel < 5.0
+        metadata['status'] = reply_content['status']
+        if reply_content['status'] == 'error' and reply_content['ename'] == 
'UnmetDependency':
+                metadata['dependencies_met'] = False
+
+        return metadata
+
     def _forward_input(self, allow_stdin=False):
         """Forward raw_input and getpass to the current frontend.
 
@@ -198,10 +222,11 @@
         # runlines.  We'll need to clean up this logic later.
         if shell._reply_content is not None:
             reply_content.update(shell._reply_content)
-            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, 
method='execute')
-            reply_content['engine_info'] = e_info
             # reset after use
             shell._reply_content = None
+            # FIXME: deprecate piece for ipyparallel:
+            e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, 
method='execute')
+            reply_content['engine_info'] = e_info
 
         if 'traceback' in reply_content:
             self.log.info("Exception in execute request:\n%s", 
'\n'.join(reply_content['traceback']))
@@ -289,6 +314,7 @@
         return r
 
     def do_apply(self, content, bufs, msg_id, reply_metadata):
+        from .serialize import serialize_object, unpack_apply_message
         shell = self.shell
         try:
             working = shell.user_ns
@@ -328,18 +354,17 @@
             reply_content = {}
             if shell._reply_content is not None:
                 reply_content.update(shell._reply_content)
-                e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, 
method='apply')
-                reply_content['engine_info'] = e_info
                 # reset after use
                 shell._reply_content = None
+                
+                # FIXME: deprecate piece for ipyparallel:
+                e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, 
method='apply')
+                reply_content['engine_info'] = e_info
 
             self.send_response(self.iopub_socket, u'error', reply_content,
                                 ident=self._topic('error'))
             self.log.info("Exception in apply request:\n%s", 
'\n'.join(reply_content['traceback']))
             result_buf = []
-
-            if reply_content['ename'] == 'UnmetDependency':
-                reply_metadata['dependencies_met'] = False
         else:
             reply_content = {'status' : 'ok'}
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/kernelapp.py 
new/ipykernel-4.3.1/ipykernel/kernelapp.py
--- old/ipykernel-4.2.2/ipykernel/kernelapp.py  2016-01-02 14:15:14.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/kernelapp.py  2016-02-26 14:25:25.000000000 
+0100
@@ -33,6 +33,7 @@
 from jupyter_client.connect import ConnectionFileMixin
 
 # local imports
+from .iostream import IOPubThread
 from .heartbeat import Heartbeat
 from .ipkernel import IPythonKernel
 from .parentpoller import ParentPollerUnix, ParentPollerWindows
@@ -231,11 +232,6 @@
         self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
         self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
 
-        self.iopub_socket = context.socket(zmq.PUB)
-        self.iopub_socket.linger = 1000
-        self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
-        self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
-
         self.stdin_socket = context.socket(zmq.ROUTER)
         self.stdin_socket.linger = 1000
         self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
@@ -245,6 +241,19 @@
         self.control_socket.linger = 1000
         self.control_port = self._bind_socket(self.control_socket, 
self.control_port)
         self.log.debug("control ROUTER Channel on port: %i" % 
self.control_port)
+        
+        self.init_iopub(context)
+
+    def init_iopub(self, context):
+        self.iopub_socket = context.socket(zmq.PUB)
+        self.iopub_socket.linger = 1000
+        self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
+        self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
+        self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True)
+        self.iopub_thread.start()
+        # backward-compat: wrap iopub socket API in background thread
+        self.iopub_socket = self.iopub_thread.background_socket
+        
 
     def init_heartbeat(self):
         """start the heart beating"""
@@ -298,11 +307,38 @@
         """Redirect input streams and set a display hook."""
         if self.outstream_class:
             outstream_factory = import_item(str(self.outstream_class))
-            sys.stdout = outstream_factory(self.session, self.iopub_socket, 
u'stdout')
-            sys.stderr = outstream_factory(self.session, self.iopub_socket, 
u'stderr')
+            sys.stdout = outstream_factory(self.session, self.iopub_thread, 
u'stdout')
+            sys.stderr = outstream_factory(self.session, self.iopub_thread, 
u'stderr')
         if self.displayhook_class:
             displayhook_factory = import_item(str(self.displayhook_class))
             sys.displayhook = displayhook_factory(self.session, 
self.iopub_socket)
+        
+        self.patch_io()
+    
+    def patch_io(self):
+        """Patch important libraries that can't handle sys.stdout forwarding"""
+        try:
+            import faulthandler
+        except ImportError:
+            pass
+        else:
+            # Warning: this is a monkeypatch of `faulthandler.enable`, watch 
for possible
+            # updates to the upstream API and update accordingly (up-to-date 
as of Python 3.5):
+            # 
https://docs.python.org/3/library/faulthandler.html#faulthandler.enable
+
+            # change default file to __stderr__ from forwarded stderr
+            faulthandler_enable = faulthandler.enable
+            def enable(file=sys.__stderr__, all_threads=True, **kwargs):
+                return faulthandler_enable(file=file, all_threads=all_threads, 
**kwargs)
+
+            faulthandler.enable = enable
+
+            if hasattr(faulthandler, 'register'):
+                faulthandler_register = faulthandler.register
+                def register(signum, file=sys.__stderr__, all_threads=True, 
chain=False, **kwargs):
+                    return faulthandler_register(signum, file=file, 
all_threads=all_threads,
+                                                 chain=chain, **kwargs)
+                faulthandler.register = register
 
     def init_signal(self):
         signal.signal(signal.SIGINT, signal.SIG_IGN)
@@ -316,6 +352,7 @@
 
         kernel = kernel_factory(parent=self, session=self.session,
                                 shell_streams=[shell_stream, control_stream],
+                                iopub_thread=self.iopub_thread,
                                 iopub_socket=self.iopub_socket,
                                 stdin_socket=self.stdin_socket,
                                 log=self.log,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/kernelbase.py 
new/ipykernel-4.3.1/ipykernel/kernelbase.py
--- old/ipykernel-4.2.2/ipykernel/kernelbase.py 2016-01-02 14:15:12.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/kernelbase.py 2016-02-22 13:54:46.000000000 
+0100
@@ -49,8 +49,9 @@
     profile_dir = Instance('IPython.core.profiledir.ProfileDir', 
allow_none=True)
     shell_streams = List()
     control_stream = Instance(ZMQStream, allow_none=True)
-    iopub_socket = Instance(zmq.Socket, allow_none=True)
-    stdin_socket = Instance(zmq.Socket, allow_none=True)
+    iopub_socket = Any()
+    iopub_thread = Any()
+    stdin_socket = Any()
     log = Instance(logging.Logger, allow_none=True)
 
     # identities:
@@ -111,25 +112,29 @@
     # Track execution count here. For IPython, we override this to use the
     # execution count we store in the shell.
     execution_count = 0
-
+    
+    msg_types = [
+        'execute_request', 'complete_request',
+        'inspect_request', 'history_request',
+        'comm_info_request', 'kernel_info_request',
+        'connect_request', 'shutdown_request',
+        'is_complete_request',
+        # deprecated:
+        'apply_request',
+    ]
+    # add deprecated ipyparallel control messages
+    control_msg_types = msg_types + ['clear_request', 'abort_request']
 
     def __init__(self, **kwargs):
         super(Kernel, self).__init__(**kwargs)
 
         # Build dict of handlers for message types
-        msg_types = [ 'execute_request', 'complete_request',
-                      'inspect_request', 'history_request',
-                      'comm_info_request', 'kernel_info_request',
-                      'connect_request', 'shutdown_request',
-                      'apply_request', 'is_complete_request',
-                    ]
         self.shell_handlers = {}
-        for msg_type in msg_types:
+        for msg_type in self.msg_types:
             self.shell_handlers[msg_type] = getattr(self, msg_type)
 
-        control_msg_types = msg_types + [ 'clear_request', 'abort_request' ]
         self.control_handlers = {}
-        for msg_type in control_msg_types:
+        for msg_type in self.control_msg_types:
             self.control_handlers[msg_type] = getattr(self, msg_type)
 
 
@@ -164,6 +169,25 @@
         sys.stderr.flush()
         self._publish_status(u'idle')
 
+    def should_handle(self, stream, msg, idents):
+        """Check whether a shell-channel message should be handled
+        
+        Allows subclasses to prevent handling of certain messages (e.g. 
aborted requests).
+        """
+        msg_id = msg['header']['msg_id']
+        if msg_id in self.aborted:
+            msg_type = msg['header']['msg_type']
+            # is it safe to assume a msg_id will not be resubmitted?
+            self.aborted.remove(msg_id)
+            reply_type = msg_type.split('_')[0] + '_reply'
+            status = {'status' : 'aborted'}
+            md = {'engine' : self.ident}
+            md.update(status)
+            self.session.send(stream, reply_type, metadata=md,
+                        content=status, parent=msg, ident=idents)
+            return False
+        return True
+
     def dispatch_shell(self, stream, msg):
         """dispatch shell requests"""
         # flush control requests first
@@ -191,15 +215,7 @@
         self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
         self.log.debug('   Content: %s\n   --->\n   ', msg['content'])
 
-        if msg_id in self.aborted:
-            self.aborted.remove(msg_id)
-            # is it safe to assume a msg_id will not be resubmitted?
-            reply_type = msg_type.split('_')[0] + '_reply'
-            status = {'status' : 'aborted'}
-            md = {'engine' : self.ident}
-            md.update(status)
-            self.session.send(stream, reply_type, metadata=md,
-                        content=status, parent=msg, ident=idents)
+        if not self.should_handle(stream, msg, idents):
             return
 
         handler = self.shell_handlers.get(msg_type, None)
@@ -288,17 +304,6 @@
     # Kernel request handlers
     
#---------------------------------------------------------------------------
 
-    def _make_metadata(self, other=None):
-        """init metadata dict, for execute/apply_reply"""
-        new_md = {
-            'dependencies_met' : True,
-            'engine' : self.ident,
-            'started': datetime.now(),
-        }
-        if other:
-            new_md.update(other)
-        return new_md
-
     def _publish_execute_input(self, code, parent, execution_count):
         """Publish the code request on the iopub stream."""
 
@@ -340,6 +345,22 @@
         """
         return self.session.send(stream, msg_or_type, content, 
self._parent_header,
                                  ident, buffers, track, header, metadata)
+    
+    def init_metadata(self, parent):
+        """Initialize metadata.
+        
+        Run at the beginning of execution requests.
+        """
+        return {
+            'started': datetime.now(),
+        }
+    
+    def finish_metadata(self, parent, metadata, reply_content):
+        """Finish populating metadata.
+        
+        Run after completing an execution request.
+        """
+        return metadata
 
     def execute_request(self, stream, ident, parent):
         """handle an execute_request"""
@@ -358,7 +379,7 @@
 
         stop_on_error = content.get('stop_on_error', True)
 
-        md = self._make_metadata(parent['metadata'])
+        metadata = self.init_metadata(parent)
 
         # Re-broadcast our input for the benefit of listening clients, and
         # start computing output
@@ -380,14 +401,10 @@
 
         # Send the reply.
         reply_content = json_clean(reply_content)
-
-        md['status'] = reply_content['status']
-        if reply_content['status'] == 'error' and \
-                        reply_content['ename'] == 'UnmetDependency':
-                md['dependencies_met'] = False
+        metadata = self.finish_metadata(parent, metadata, reply_content)
 
         reply_msg = self.session.send(stream, u'execute_reply',
-                                      reply_content, parent, metadata=md,
+                                      reply_content, parent, metadata=metadata,
                                       ident=ident)
 
         self.log.debug("%s", reply_msg)
@@ -532,10 +549,11 @@
                 }
 
     
#---------------------------------------------------------------------------
-    # Engine methods
+    # Engine methods (DEPRECATED)
     
#---------------------------------------------------------------------------
 
     def apply_request(self, stream, ident, parent):
+        self.log.warn("""apply_request is deprecated in kernel_base, moving to 
ipyparallel.""")
         try:
             content = parent[u'content']
             bufs = parent[u'buffers']
@@ -544,31 +562,30 @@
             self.log.error("Got bad msg: %s", parent, exc_info=True)
             return
 
-        md = self._make_metadata(parent['metadata'])
+        md = self.init_metadata(parent)
 
         reply_content, result_buf = self.do_apply(content, bufs, msg_id, md)
-
-        # put 'ok'/'error' status in header, for scheduler introspection:
-        md['status'] = reply_content['status']
-
+        
         # flush i/o
         sys.stdout.flush()
         sys.stderr.flush()
 
+        md = self.finish_metadata(parent, md, reply_content)
+
         self.session.send(stream, u'apply_reply', reply_content,
                     parent=parent, ident=ident,buffers=result_buf, metadata=md)
 
     def do_apply(self, content, bufs, msg_id, reply_metadata):
-        """Override in subclasses to support the IPython parallel framework.
-        """
+        """DEPRECATED"""
         raise NotImplementedError
 
     
#---------------------------------------------------------------------------
-    # Control messages
+    # Control messages (DEPRECATED)
     
#---------------------------------------------------------------------------
 
     def abort_request(self, stream, ident, parent):
         """abort a specific msg by id"""
+        self.log.warn("abort_request is deprecated in kernel_base. It is only 
part of IPython parallel")
         msg_ids = parent['content'].get('msg_ids', None)
         if isinstance(msg_ids, string_types):
             msg_ids = [msg_ids]
@@ -584,15 +601,13 @@
 
     def clear_request(self, stream, idents, parent):
         """Clear our namespace."""
+        self.log.warn("clear_request is deprecated in kernel_base. It is only 
part of IPython parallel")
         content = self.do_clear()
         self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
                 content = content)
 
     def do_clear(self):
-        """Override in subclasses to clear the namespace
-
-        This is only required for IPython.parallel.
-        """
+        """DEPRECATED"""
         raise NotImplementedError
 
     
#---------------------------------------------------------------------------
@@ -601,10 +616,7 @@
 
     def _topic(self, topic):
         """prefixed topic for IOPub messages"""
-        if self.int_id >= 0:
-            base = "engine.%i" % self.int_id
-        else:
-            base = "kernel.%s" % self.ident
+        base = "kernel.%s" % self.ident
 
         return py3compat.cast_bytes("%s.%s" % (base, topic))
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/kernelspec.py 
new/ipykernel-4.3.1/ipykernel/kernelspec.py
--- old/ipykernel-4.2.2/ipykernel/kernelspec.py 2015-12-08 22:50:06.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/kernelspec.py 2016-02-22 13:54:46.000000000 
+0100
@@ -140,7 +140,7 @@
     
     def start(self):
         import argparse
-        parser = argparse.ArgumentParser(
+        parser = argparse.ArgumentParser(prog=self.name,
             description="Install the IPython kernel spec.")
         parser.add_argument('--user', action='store_true',
             help="Install for the current user instead of system-wide")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/log.py 
new/ipykernel-4.3.1/ipykernel/log.py
--- old/ipykernel-4.2.2/ipykernel/log.py        2015-11-13 10:25:37.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/log.py        2016-02-22 13:54:46.000000000 
+0100
@@ -2,6 +2,9 @@
 
 from zmq.log.handlers import PUBHandler
 
+import warnings
+warnings.warn("ipykernel.log is deprecated. It has moved to 
ipyparallel.engine.log", DeprecationWarning)
+
 class EnginePUBHandler(PUBHandler):
     """A simple PUBHandler subclass that sets root_topic"""
     engine=None
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/pickleutil.py 
new/ipykernel-4.3.1/ipykernel/pickleutil.py
--- old/ipykernel-4.2.2/ipykernel/pickleutil.py 2015-11-13 10:25:37.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/pickleutil.py 2016-02-22 13:54:46.000000000 
+0100
@@ -4,6 +4,9 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
 
+import warnings
+warnings.warn("ipykernel.pickleutil is deprecated. It has moved to 
ipyparallel.", DeprecationWarning)
+
 import copy
 import logging
 import sys
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/serialize.py 
new/ipykernel-4.3.1/ipykernel/serialize.py
--- old/ipykernel-4.2.2/ipykernel/serialize.py  2015-11-13 10:25:37.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/serialize.py  2016-02-22 13:54:46.000000000 
+0100
@@ -3,6 +3,9 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
 
+import warnings
+warnings.warn("ipykernel.serialize is deprecated. It has moved to 
ipyparallel.serialize", DeprecationWarning)
+
 try:
     import cPickle
     pickle = cPickle
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/tests/test_connect.py 
new/ipykernel-4.3.1/ipykernel/tests/test_connect.py
--- old/ipykernel-4.2.2/ipykernel/tests/test_connect.py 2015-05-08 
22:15:04.000000000 +0200
+++ new/ipykernel-4.3.1/ipykernel/tests/test_connect.py 2016-02-22 
13:54:46.000000000 +0100
@@ -52,10 +52,12 @@
         connect.write_connection_file(cf, **sample_info)
         json_info = connect.get_connection_info(cf)
         info = connect.get_connection_info(cf, unpack=True)
-
+    
     nt.assert_equal(type(json_info), type(""))
-    nt.assert_equal(info, sample_info)
+    sub_info = {k:v for k,v in info.items() if k in sample_info}
+    nt.assert_equal(sub_info, sample_info)
 
     info2 = json.loads(json_info)
     info2['key'] = str_to_bytes(info2['key'])
-    nt.assert_equal(info2, sample_info)
+    sub_info2 = {k:v for k,v in info.items() if k in sample_info}
+    nt.assert_equal(sub_info2, sample_info)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/tests/test_kernel.py 
new/ipykernel-4.3.1/ipykernel/tests/test_kernel.py
--- old/ipykernel-4.2.2/ipykernel/tests/test_kernel.py  2016-01-02 
14:15:14.000000000 +0100
+++ new/ipykernel-4.3.1/ipykernel/tests/test_kernel.py  2016-02-22 
13:54:46.000000000 +0100
@@ -7,6 +7,7 @@
 import io
 import os.path
 import sys
+import time
 
 import nose.tools as nt
 
@@ -19,12 +20,12 @@
                     flush_channels, wait_for_idle)
 
 
-def _check_mp_mode(kc, expected=False, stream="stdout"):
+def _check_master(kc, expected=True, stream="stdout"):
     execute(kc=kc, code="import sys")
     flush_channels(kc)
-    msg_id, content = execute(kc=kc, code="print (sys.%s._check_mp_mode())" % 
stream)
+    msg_id, content = execute(kc=kc, code="print 
(sys.%s._is_master_process())" % stream)
     stdout, stderr = assemble_output(kc.iopub_channel)
-    nt.assert_equal(eval(stdout.strip()), expected)
+    nt.assert_equal(stdout.strip(), repr(expected))
 
 
 # printing tests
@@ -37,7 +38,7 @@
         stdout, stderr = assemble_output(iopub)
         nt.assert_equal(stdout, 'hi\n')
         nt.assert_equal(stderr, '')
-        _check_mp_mode(kc, expected=False)
+        _check_master(kc, expected=True)
 
 
 def test_sys_path():
@@ -61,29 +62,27 @@
     with new_kernel() as kc:
         iopub = kc.iopub_channel
 
-        _check_mp_mode(kc, expected=False)
+        _check_master(kc, expected=True)
         flush_channels(kc)
         np = 5
         code = '\n'.join([
             "from __future__ import print_function",
+            "import time",
             "import multiprocessing as mp",
             "pool = [mp.Process(target=print, args=('hello', i,)) for i in 
range(%i)]" % np,
             "for p in pool: p.start()",
-            "for p in pool: p.join()"
+            "for p in pool: p.join()",
+            "time.sleep(0.5),"
         ])
 
-        expected = '\n'.join([
-            "hello %s" % i for i in range(np)
-        ]) + '\n'
-
         msg_id, content = execute(kc=kc, code=code)
         stdout, stderr = assemble_output(iopub)
         nt.assert_equal(stdout.count("hello"), np, stdout)
         for n in range(np):
             nt.assert_equal(stdout.count(str(n)), 1, stdout)
         nt.assert_equal(stderr, '')
-        _check_mp_mode(kc, expected=False)
-        _check_mp_mode(kc, expected=False, stream="stderr")
+        _check_master(kc, expected=True)
+        _check_master(kc, expected=True, stream="stderr")
 
 
 def test_subprocess_noprint():
@@ -104,8 +103,8 @@
         nt.assert_equal(stdout, '')
         nt.assert_equal(stderr, '')
 
-        _check_mp_mode(kc, expected=False)
-        _check_mp_mode(kc, expected=False, stream="stderr")
+        _check_master(kc, expected=True)
+        _check_master(kc, expected=True, stream="stderr")
 
 
 @dec.knownfailureif(sys.platform == 'win32', "subprocess prints fail on 
Windows")
@@ -126,8 +125,8 @@
         nt.assert_equal(stdout, '')
         nt.assert_true("ValueError" in stderr, stderr)
 
-        _check_mp_mode(kc, expected=False)
-        _check_mp_mode(kc, expected=False, stream="stderr")
+        _check_master(kc, expected=True)
+        _check_master(kc, expected=True, stream="stderr")
 
 # raw_input tests
 
@@ -189,6 +188,20 @@
         nt.assert_in(u'a=1', content)
         nt.assert_in(u'b=u"abcþ"', content)
 
+
[email protected]_without('faulthandler')
+def test_smoke_faulthandler():
+    with kernel() as kc:
+        code = u'\n'.join([
+            'import faulthandler, signal',
+            'faulthandler.enable()',
+            'faulthandler.register(signal.SIGTERM)',
+        ])
+        _, reply = execute(code, kc=kc)
+        print(_)
+        nt.assert_equal(reply['status'], 'ok', reply.get('traceback', ''))
+
+
 def test_help_output():
     """ipython kernel --help-all works"""
     tt.help_all_output_test('kernel')
@@ -226,3 +239,19 @@
         nt.assert_greater(len(matches), 0)
         for match in matches:
             nt.assert_equal(match[:2], 'a.')
+
+
+def test_shutdown():
+    """Kernel exits after polite shutdown_request"""
+    with new_kernel() as kc:
+        km = kc.parent
+        execute(u'a = 1', kc=kc)
+        wait_for_idle(kc)
+        kc.shutdown()
+        for i in range(100): # 10s timeout
+            if km.is_alive():
+                time.sleep(.1)
+            else:
+                break
+        nt.assert_false(km.is_alive())
+
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/ipykernel/zmqshell.py 
new/ipykernel-4.3.1/ipykernel/zmqshell.py
--- old/ipykernel-4.2.2/ipykernel/zmqshell.py   2015-12-08 22:50:06.000000000 
+0100
+++ new/ipykernel-4.3.1/ipykernel/zmqshell.py   2016-02-26 14:25:34.000000000 
+0100
@@ -19,6 +19,7 @@
 import os
 import sys
 import time
+import warnings
 
 from zmq.eventloop import ioloop
 
@@ -32,7 +33,7 @@
 from IPython.core.magics import MacroToEdit, CodeMagics
 from IPython.core.magic import magics_class, line_magic, Magics
 from IPython.core import payloadpage
-from IPython.core.usage import default_gui_banner
+from IPython.core.usage import default_banner
 from IPython.display import display, Javascript
 from ipykernel import (
     get_connection_file, get_connection_info, connect_qtconsole
@@ -45,7 +46,6 @@
 from traitlets import Instance, Type, Dict, CBool, CBytes, Any
 from IPython.utils.warn import error
 from ipykernel.displayhook import ZMQShellDisplayHook
-from ipykernel.datapub import ZMQDataPublisher
 from jupyter_client.session import extract_header
 from jupyter_client.session import Session
 
@@ -345,12 +345,12 @@
 
     displayhook_class = Type(ZMQShellDisplayHook)
     display_pub_class = Type(ZMQDisplayPublisher)
-    data_pub_class = Type(ZMQDataPublisher)
+    data_pub_class = Type('ipykernel.datapub.ZMQDataPublisher')
     kernel = Any()
     parent_header = Any()
 
     def _banner1_default(self):
-        return default_gui_banner
+        return default_banner
 
     # Override the traitlet in the parent class, because there's no point using
     # readline for the kernel. Can be removed when the readline code is moved
@@ -398,6 +398,25 @@
     def init_hooks(self):
         super(ZMQInteractiveShell, self).init_hooks()
         self.set_hook('show_in_pager', page.as_hook(payloadpage.page), 99)
+    
+    def init_data_pub(self):
+        """Delay datapub init until request, for deprecation warnings"""
+        pass
+    
+    @property
+    def data_pub(self):
+        if not hasattr(self, '_data_pub'):
+            warnings.warn("InteractiveShell.data_pub is deprecated outside 
IPython parallel.", 
+                DeprecationWarning, stacklevel=2)
+            
+            self._data_pub = self.data_pub_class(parent=self)
+            self._data_pub.session = self.display_pub.session
+            self._data_pub.pub_socket = self.display_pub.pub_socket
+        return self._data_pub
+    
+    @data_pub.setter
+    def data_pub(self, pub):
+        self._data_pub = pub
 
     def ask_exit(self):
         """Engage the exit actions."""
@@ -454,7 +473,8 @@
         self.parent_header = parent
         self.displayhook.set_parent(parent)
         self.display_pub.set_parent(parent)
-        self.data_pub.set_parent(parent)
+        if hasattr(self, '_data_pub'):
+            self.data_pub.set_parent(parent)
         try:
             sys.stdout.set_parent(parent)
         except AttributeError:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/ipykernel-4.2.2/setup.py new/ipykernel-4.3.1/setup.py
--- old/ipykernel-4.2.2/setup.py        2015-08-12 03:12:09.000000000 +0200
+++ new/ipykernel-4.3.1/setup.py        2016-02-22 13:54:46.000000000 +0100
@@ -83,6 +83,7 @@
     'ipython>=4.0.0',
     'traitlets',
     'jupyter_client',
+    'tornado>=4.0',
 ]
 
 extras_require = setuptools_args['extras_require'] = {


Reply via email to