Hello community,

here is the log from the commit of package python-dask for openSUSE:Factory 
checked in at 2018-09-18 11:48:46
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-dask (Old)
 and      /work/SRC/openSUSE:Factory/.python-dask.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-dask"

Tue Sep 18 11:48:46 2018 rev:8 rq:636248 version:0.19.2

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-dask/python-dask.changes  2018-09-11 17:17:59.183346691 +0200
+++ /work/SRC/openSUSE:Factory/.python-dask.new/python-dask.changes     2018-09-18 11:49:08.927555181 +0200
@@ -1,0 +2,22 @@
+Mon Sep 17 14:54:42 UTC 2018 - Arun Persaud <a...@gmx.de>
+
+- update to version 0.19.2:
+  * Array
+    + apply_gufunc implements automatic inference of function output
+      dtypes (:pr:`3936`) Markus Gonser
+    + Fix array histogram range error when array has nans (#3980)
+      James Bourbeau
+    + Issue 3937 follow up, int type checks. (#3956) Yu Feng
+    + from_array: add @martindurant's explanation of how hashing is
+      done for an array. (#3965) Mark Harfouche
+    + Support gradient with coordinate (#3949) Keisuke Fujii
+  * Core
+    + Fix use of has_keyword with partial in Python 2.7 (#3966) Mark
+      Harfouche
+    + Set pyarrow as default for HDFS (#3957) Matthew Rocklin
+  * Documentation
+    + Use dask_sphinx_theme (#3963) Matthew Rocklin
+    + Use JupyterLab in Binder links from main page Matthew Rocklin
+    + DOC: fixed sphinx syntax (#3960) Tom Augspurger
+
+-------------------------------------------------------------------
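
The headline item is ``apply_gufunc`` inferring output dtypes automatically. A
minimal sketch of the new behaviour (assuming dask 0.19.2; it mirrors the
``stats`` example in the updated test suite further below):

    import numpy as np
    import dask.array as da

    def stats(x):
        return np.mean(x, axis=-1), np.std(x, axis=-1)

    a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))

    # Before 0.19.2 omitting output_dtypes raised a ValueError; now dask
    # probes ``stats`` with tiny dummy arrays to infer the output dtypes.
    mean, std = da.apply_gufunc(stats, "(i)->(),()", a)
    print(mean.compute().shape)  # (10, 20)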

Old:
----
  dask-0.19.1.tar.gz

New:
----
  dask-0.19.2.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-dask.spec ++++++
--- /var/tmp/diff_new_pack.BCZNSs/_old  2018-09-18 11:49:10.735553319 +0200
+++ /var/tmp/diff_new_pack.BCZNSs/_new  2018-09-18 11:49:10.735553319 +0200
@@ -12,7 +12,7 @@
 # license that conforms to the Open Source Definition (Version 1.9)
 # published by the Open Source Initiative.
 
-# Please submit bugfixes or comments via http://bugs.opensuse.org/
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
 #
 
 
@@ -22,7 +22,7 @@
 # python(2/3)-distributed has a dependency loop with python(2/3)-dask
 %bcond_with     test_distributed
 Name:           python-dask
-Version:        0.19.1
+Version:        0.19.2
 Release:        0
 Summary:        Minimal task scheduling abstraction
 License:        BSD-3-Clause

++++++ dask-0.19.1.tar.gz -> dask-0.19.2.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/PKG-INFO new/dask-0.19.2/PKG-INFO
--- old/dask-0.19.1/PKG-INFO    2018-09-06 14:15:04.000000000 +0200
+++ new/dask-0.19.2/PKG-INFO    2018-09-17 15:54:10.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.2
 Name: dask
-Version: 0.19.1
+Version: 0.19.2
 Summary: Parallel PyData with Task Scheduling
 Home-page: http://github.com/dask/dask/
 Author: Matthew Rocklin
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/_version.py 
new/dask-0.19.2/dask/_version.py
--- old/dask-0.19.1/dask/_version.py    2018-09-06 14:15:04.000000000 +0200
+++ new/dask-0.19.2/dask/_version.py    2018-09-17 15:54:10.000000000 +0200
@@ -11,8 +11,8 @@
 {
  "dirty": false,
  "error": null,
- "full-revisionid": "40b5d7b07c9db16e7cbd70be1bc8738ce94fe32c",
- "version": "0.19.1"
+ "full-revisionid": "0fb0f876bb974cfca1458903540ac389e6f89019",
+ "version": "0.19.2"
 }
 '''  # END VERSION_JSON
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/chunk.py 
new/dask-0.19.2/dask/array/chunk.py
--- old/dask-0.19.1/dask/array/chunk.py 2018-08-20 17:50:20.000000000 +0200
+++ new/dask-0.19.2/dask/array/chunk.py 2018-09-14 15:59:39.000000000 +0200
@@ -11,6 +11,8 @@
 from ..core import flatten
 from ..utils import ignoring
 
+from numbers import Integral
+
 try:
     from numpy import broadcast_to
 except ImportError:  # pragma: no cover
@@ -171,7 +173,7 @@
     array([[ 7,  8,  9, 10],
            [13, 14, 15, 16]])
     """
-    if isinstance(axes, int):
+    if isinstance(axes, Integral):
         axes = [axes] * x.ndim
     if isinstance(axes, dict):
         axes = [axes.get(i, 0) for i in range(x.ndim)]
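
Context for the many int -> Integral changes in this release: NumPy integer
scalars are not instances of Python's built-in ``int`` on Python 3, but they
are registered with the ``numbers`` ABCs, so checks against ``Integral``
accept them. A quick illustration (results as on CPython 3, 64-bit):

    import numpy as np
    from numbers import Integral

    axes = np.int64(2)
    print(isinstance(axes, int))       # False - the old check missed this
    print(isinstance(axes, Integral))  # True - the new check accepts it
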
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/core.py 
new/dask-0.19.2/dask/array/core.py
--- old/dask-0.19.1/dask/array/core.py  2018-09-06 13:45:35.000000000 +0200
+++ new/dask-0.19.2/dask/array/core.py  2018-09-17 15:46:38.000000000 +0200
@@ -4,7 +4,7 @@
 from functools import partial, wraps
 from itertools import product
 import math
-from numbers import Number
+from numbers import Number, Integral
 import operator
 from operator import add, getitem, mul
 import os
@@ -38,7 +38,7 @@
                      is_integer, IndexCallable, funcname, derived_from,
                      SerializableLock, ensure_dict, Dispatch, factors,
                      parse_bytes, has_keyword, M)
-from ..compatibility import (unicode, long, zip_longest, apply,
+from ..compatibility import (unicode, zip_longest, apply,
                              Iterable, Iterator, Mapping)
 from ..core import quote
 from ..delayed import Delayed, to_task_dask
@@ -93,7 +93,7 @@
     if isinstance(b, tuple) and any(x is None for x in b):
         b2 = tuple(x for x in b if x is not None)
         b3 = tuple(None if x is None else slice(None, None)
-                   for x in b if not isinstance(x, (int, long)))
+                   for x in b if not isinstance(x, Integral))
         return getter(a, b2, asarray=asarray, lock=lock)[b3]
 
     if lock:
@@ -549,7 +549,38 @@
     return concatenate(arrays, axis=axes[0])
 
 
-def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=True):
+def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype='dtype', nout=None):
+    """
+    Tries to infer output dtype of ``func`` for a small set of input arguments.
+
+    Parameters
+    ----------
+    func: Callable
+        Function for which output dtype is to be determined
+
+    args: List of array like
+        Arguments to the function, which would usually be used. Only
+        attributes ``ndim`` and ``dtype`` are used.
+
+    kwargs: dict
+        Additional ``kwargs`` to the ``func``
+
+    funcname: String
+        Name of calling function to improve potential error messages
+
+    suggest_dtype: None/False or String
+        If not ``None``, adds a suggestion to the potential error message to
+        specify a dtype via the specified kwarg. Defaults to ``'dtype'``.
+
+    nout: None or Int
+        ``None`` if function returns single output, integer if many.
+        Defaults to ``None``.
+
+    Returns
+    -------
+    : dtype or List of dtype
+        One or many dtypes (depending on ``nout``)
+    """
     args = [np.ones((1,) * x.ndim, dtype=x.dtype)
             if isinstance(x, Array) else x for x in args]
     try:
@@ -559,7 +590,7 @@
         exc_type, exc_value, exc_traceback = sys.exc_info()
         tb = ''.join(traceback.format_tb(exc_traceback))
         suggest = ("Please specify the dtype explicitly using the "
-                   "`dtype` kwarg.\n\n") if suggest_dtype else ""
+                   "`{dtype}` kwarg.\n\n".format(dtype=suggest_dtype)) if 
suggest_dtype else ""
         msg = ("`dtype` inference failed in `{0}`.\n\n"
                "{1}"
                "Original error is below:\n"
@@ -572,7 +603,7 @@
         msg = None
     if msg is not None:
         raise ValueError(msg)
-    return o.dtype
+    return o.dtype if nout is None else tuple(e.dtype for e in o)
 
 
 def map_blocks(func, *args, **kwargs):
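
The probing idea behind ``apply_infer_dtype`` is to call ``func`` on
one-element dummy arrays that mimic the real inputs' ``ndim`` and ``dtype``
and read the dtype(s) off the result. A self-contained sketch of that idea
(``infer_dtype_sketch`` is an illustrative name, not dask API):

    import numpy as np

    def infer_dtype_sketch(func, ndims_dtypes, nout=None):
        # Build one-element dummies matching each input's ndim and dtype
        probes = [np.ones((1,) * nd, dtype=dt) for nd, dt in ndims_dtypes]
        out = func(*probes)
        # Single output -> one dtype; multiple outputs -> tuple of dtypes
        return out.dtype if nout is None else tuple(o.dtype for o in out)

    print(infer_dtype_sketch(np.add, [(2, 'i4'), (2, 'f8')]))  # float64
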
@@ -2306,6 +2337,9 @@
         -1 as a blocksize indicates the size of the corresponding dimension.
     name : str, optional
         The key name to use for the array. Defaults to a hash of ``x``.
+        By default, hashing uses Python's standard sha1. This behaviour can be
+        changed by installing cityhash, xxhash or murmurhash. If installed,
+        a large-factor speedup can be obtained in the tokenisation step.
         Use ``name=False`` to generate a random name instead of hashing (fast)
     lock : bool or Lock, optional
         If ``x`` doesn't support concurrent reads then provide a lock here, or
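
The added doc text points at a practical knob: ``from_array`` hashes the
whole array to build a deterministic key, which can be slow for large
arrays. A short sketch of the trade-off (cityhash, xxhash and murmurhash
are optional accelerators and are not required here):

    import numpy as np
    import dask.array as da

    x = np.random.random((2000, 2000))

    d1 = da.from_array(x, chunks=1000)              # deterministic name, hashes x
    d2 = da.from_array(x, chunks=1000, name=False)  # random name, skips hashing
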
@@ -2834,7 +2868,7 @@
             if ind in adjust_chunks:
                 if callable(adjust_chunks[ind]):
                     chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))
-                elif isinstance(adjust_chunks[ind], int):
+                elif isinstance(adjust_chunks[ind], Integral):
                     chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])
                 elif isinstance(adjust_chunks[ind], (tuple, list)):
                     chunks[i] = tuple(adjust_chunks[ind])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/creation.py 
new/dask-0.19.2/dask/array/creation.py
--- old/dask-0.19.1/dask/array/creation.py      2018-09-06 13:45:35.000000000 +0200
+++ new/dask-0.19.2/dask/array/creation.py      2018-09-14 15:59:39.000000000 +0200
@@ -442,7 +442,7 @@
       An array where all elements are equal to zero, except for the `k`-th
       diagonal, whose values are equal to one.
     """
-    if not isinstance(chunks, int):
+    if not isinstance(chunks, Integral):
         raise ValueError('chunks must be an int')
 
     token = tokenize(N, chunk, M, k, dtype)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/gufunc.py 
new/dask-0.19.2/dask/array/gufunc.py
--- old/dask-0.19.1/dask/array/gufunc.py        2018-06-22 00:08:14.000000000 +0200
+++ new/dask-0.19.2/dask/array/gufunc.py        2018-09-17 15:46:38.000000000 +0200
@@ -9,7 +9,7 @@
 except ImportError:
     from toolz import concat, merge, unique
 
-from .core import Array, asarray, atop, getitem
+from .core import Array, asarray, atop, getitem, apply_infer_dtype
 from .. import sharedict
 from ..core import flatten
 
@@ -83,8 +83,11 @@
         According to the specification of numpy.gufunc signature [2]_
     *args : numeric
         Input arrays or scalars to the callable function.
-    output_dtypes : dtype or list of dtypes, keyword only
-        dtype or list of output dtypes.
+    output_dtypes : Optional, dtype or list of dtypes, keyword only
+        Valid numpy dtype specification or list thereof.
+        If not given, a call of ``func`` with a small set of data
+        is performed in order to try to automatically determine the
+        output dtypes.
     output_sizes : dict, optional, keyword only
         Optional mapping from dimension names to sizes for outputs. Only used if
         new core dimensions (not found on inputs) appear on outputs.
@@ -142,16 +145,19 @@
     ## Determine nout: nout = None for functions of one direct return; nout = int for return tuples
     nout = None if not isinstance(core_output_dimss, list) else len(core_output_dimss)
 
-    ## Assert output_dtypes
+    ## Determine and handle output_dtypes
     if output_dtypes is None:
-        raise ValueError("Must specify `output_dtypes` of output array(s)")
-    elif isinstance(output_dtypes, str):
-        otypes = list(output_dtypes)
-        output_dtypes = otypes[0] if nout is None else otypes
-    elif isinstance(output_dtypes, (tuple, list)):
+        output_dtypes = apply_infer_dtype(func, args, kwargs, "apply_gufunc", "output_dtypes", nout)
+
+    if isinstance(output_dtypes, (tuple, list)):
         if nout is None:
-            raise ValueError("Must specify single dtype for `output_dtypes` 
for function with one output")
-        otypes = output_dtypes
+            if len(output_dtypes) > 1:
+                raise ValueError(("Must specify single dtype or list of one 
dtype "
+                                  "for `output_dtypes` for function with one 
output"))
+            otypes = output_dtypes
+            output_dtypes = output_dtypes[0]
+        else:
+            otypes = output_dtypes
     else:
         if nout is not None:
             raise ValueError("Must specify tuple of dtypes for `output_dtypes` 
for function with multiple outputs")
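
With this change a single-output function accepts either a scalar dtype or a
one-element sequence for ``output_dtypes``, and omitting the keyword
triggers inference. A small sketch of the accepted forms (assuming
dask >= 0.19.2):

    import numpy as np
    import dask.array as da

    x = da.ones((4,), chunks=2)

    # Single output: a scalar dtype or a one-element sequence both work
    y1 = da.apply_gufunc(np.negative, "()->()", x, output_dtypes=float)
    y2 = da.apply_gufunc(np.negative, "()->()", x, output_dtypes=[float])
    y3 = da.apply_gufunc(np.negative, "()->()", x)  # inferred
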
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/optimization.py 
new/dask-0.19.2/dask/array/optimization.py
--- old/dask-0.19.1/dask/array/optimization.py  2018-06-22 00:08:14.000000000 +0200
+++ new/dask-0.19.2/dask/array/optimization.py  2018-09-14 15:59:39.000000000 +0200
@@ -10,6 +10,7 @@
 from ..optimization import cull, fuse, inline_functions
 from ..utils import ensure_dict
 
+from numbers import Integral
 
 # All get* functions the optimizations know about
 GETTERS = (getter, getter_nofancy, getter_inline, getitem)
@@ -184,7 +185,7 @@
     # x[:, [1, 2], :][0, :, :] -> x[0, [1, 2], :] or
     # x[0, :, :][:, [1, 2], :] -> x[0, [1, 2], :]
     for f, n in zip_longest(fancy, normal, fillvalue=slice(None)):
-        if type(f) is not list and isinstance(n, int):
+        if type(f) is not list and isinstance(n, Integral):
             raise NotImplementedError("Can't handle normal indexing with "
                                       "integers and fancy indexing if the "
                                       "integers and fancy indices don't "
@@ -229,7 +230,7 @@
     if isinstance(b, slice):
         b = normalize_slice(b)
 
-    if isinstance(a, slice) and isinstance(b, int):
+    if isinstance(a, slice) and isinstance(b, Integral):
         if b < 0:
             raise NotImplementedError()
         return a.start + b * a.step
@@ -252,7 +253,7 @@
 
     if isinstance(b, list):
         return [fuse_slice(a, bb) for bb in b]
-    if isinstance(a, list) and isinstance(b, (int, slice)):
+    if isinstance(a, list) and isinstance(b, (Integral, slice)):
         return a[b]
 
     if isinstance(a, tuple) and not isinstance(b, tuple):
@@ -276,7 +277,7 @@
         result = list()
         for i in range(len(a)):
             #  axis ceased to exist  or we're out of b
-            if isinstance(a[i], int) or j == len(b):
+            if isinstance(a[i], Integral) or j == len(b):
                 result.append(a[i])
                 continue
             while b[j] is None:  # insert any Nones on the rhs
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/reductions.py 
new/dask-0.19.2/dask/array/reductions.py
--- old/dask-0.19.1/dask/array/reductions.py    2018-09-06 13:45:35.000000000 +0200
+++ new/dask-0.19.2/dask/array/reductions.py    2018-09-14 15:59:39.000000000 +0200
@@ -6,6 +6,8 @@
 from math import factorial, log, ceil
 
 import numpy as np
+from numbers import Integral
+
 from toolz import compose, partition_all, get, accumulate, pluck
 
 from . import chunk
@@ -20,7 +22,6 @@
 from ..utils import ignoring, funcname, Dispatch
 from .. import config, sharedict
 
-
 # Generic functions to support chunks of different types
 empty_lookup = Dispatch('empty')
 empty_lookup.register((object, np.ndarray), np.empty)
@@ -124,7 +125,7 @@
     """
     if axis is None:
         axis = tuple(range(x.ndim))
-    if isinstance(axis, int):
+    if isinstance(axis, Integral):
         axis = (axis,)
     axis = validate_axis(axis, x.ndim)
 
@@ -159,7 +160,7 @@
     split_every = split_every or config.get('split_every', 4)
     if isinstance(split_every, dict):
         split_every = dict((k, split_every.get(k, 2)) for k in axis)
-    elif isinstance(split_every, int):
+    elif isinstance(split_every, Integral):
         n = builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)
         split_every = dict.fromkeys(axis, n)
     else:
@@ -446,7 +447,7 @@
 
 def moment(a, order, axis=None, dtype=None, keepdims=False, ddof=0,
            split_every=None, out=None):
-    if not isinstance(order, int) or order < 0:
+    if not isinstance(order, Integral) or order < 0:
         raise ValueError("Order must be an integer >= 0")
 
     if order < 2:
@@ -605,7 +606,7 @@
     if axis is None:
         axis = tuple(range(x.ndim))
         ravel = True
-    elif isinstance(axis, int):
+    elif isinstance(axis, Integral):
         axis = validate_axis(axis, x.ndim)
         axis = (axis,)
         ravel = x.ndim == 1
@@ -710,7 +711,7 @@
         axis = 0
     if dtype is None:
         dtype = getattr(func(np.empty((0,), dtype=x.dtype)), 'dtype', object)
-    assert isinstance(axis, int)
+    assert isinstance(axis, Integral)
     axis = validate_axis(axis, x.ndim)
 
     m = x.map_blocks(func, axis=axis, dtype=dtype)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/routines.py 
new/dask-0.19.2/dask/array/routines.py
--- old/dask-0.19.1/dask/array/routines.py      2018-08-20 17:50:20.000000000 +0200
+++ new/dask-0.19.2/dask/array/routines.py      2018-09-17 15:46:38.000000000 +0200
@@ -5,10 +5,10 @@
 import warnings
 from distutils.version import LooseVersion
 from functools import wraps, partial
-from numbers import Number, Real, Integral
+from numbers import Real, Integral
 
 import numpy as np
-from toolz import concat, merge, sliding_window, interleave
+from toolz import concat, sliding_window, interleave
 
 from .. import sharedict
 from ..compatibility import Iterable
@@ -215,9 +215,9 @@
         left_axes = tuple(range(lhs.ndim - 1, lhs.ndim - axes - 1, -1))
         right_axes = tuple(range(0, axes))
 
-    if isinstance(left_axes, int):
+    if isinstance(left_axes, Integral):
         left_axes = (left_axes,)
-    if isinstance(right_axes, int):
+    if isinstance(right_axes, Integral):
         right_axes = (right_axes,)
     if isinstance(left_axes, list):
         left_axes = tuple(left_axes)
@@ -426,26 +426,30 @@
     return r
 
 
-def _gradient_kernel(f, grad_varargs, grad_kwargs):
-    return np.gradient(f, *grad_varargs, **grad_kwargs)
+def _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):
+    """
+    x: nd-array
+        array of one block
+    coord: 1d-array or scalar
+        coordinate along which the gradient is computed.
+    axis: int
+        axis along which the gradient is computed
+    array_locs:
+        actual locations along the axis; None if the coordinate is a scalar
+    grad_kwargs:
+        keyword arguments to be passed to np.gradient
+    """
+    block_loc = block_id[axis]
+    if array_locs is not None:
+        coord = coord[array_locs[0][block_loc]:array_locs[1][block_loc]]
+    grad = np.gradient(x, coord, axis=axis, **grad_kwargs)
+    return grad
 
 
 @wraps(np.gradient)
 def gradient(f, *varargs, **kwargs):
     f = asarray(f)
 
-    if not all([isinstance(e, Number) for e in varargs]):
-        raise NotImplementedError("Only numeric scalar spacings supported.")
-
-    if varargs == ():
-        varargs = (1,)
-    if len(varargs) == 1:
-        varargs = f.ndim * varargs
-    if len(varargs) != f.ndim:
-        raise TypeError(
-            "Spacing must either be a scalar or a scalar per dimension."
-        )
-
     kwargs["edge_order"] = math.ceil(kwargs.get("edge_order", 1))
     if kwargs["edge_order"] > 2:
         raise ValueError("edge_order must be less than or equal to 2.")
@@ -465,26 +469,59 @@
 
     axis = tuple(ax % f.ndim for ax in axis)
 
+    if varargs == ():
+        varargs = (1,)
+    if len(varargs) == 1:
+        varargs = len(axis) * varargs
+    if len(varargs) != len(axis):
+        raise TypeError(
+            "Spacing must either be a single scalar, or a scalar / 1d-array "
+            "per axis"
+        )
+
     if issubclass(f.dtype.type, (np.bool8, Integral)):
         f = f.astype(float)
     elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:
         f = f.astype(float)
 
-    r = [
-        f.map_overlap(
+    results = []
+    for i, ax in enumerate(axis):
+        for c in f.chunks[ax]:
+            if np.min(c) < kwargs["edge_order"] + 1:
+                raise ValueError(
+                    'Chunk size must be larger than edge_order + 1. '
+                    'Minimum chunk for axis {} is {}. Rechunk to '
+                    'proceed.'.format(ax, np.min(c)))
+
+        if np.isscalar(varargs[i]):
+            array_locs = None
+        else:
+            if isinstance(varargs[i], Array):
+                raise NotImplementedError(
+                    'dask array coordinate is not supported.')
+            # coordinate position for each block taking overlap into account
+            chunk = np.array(f.chunks[ax])
+            array_loc_stop = np.cumsum(chunk) + 1
+            array_loc_start = array_loc_stop - chunk - 2
+            array_loc_stop[-1] -= 1
+            array_loc_start[0] = 0
+            array_locs = (array_loc_start, array_loc_stop)
+
+        results.append(f.map_overlap(
             _gradient_kernel,
             dtype=f.dtype,
             depth={j: 1 if j == ax else 0 for j in range(f.ndim)},
             boundary="none",
-            grad_varargs=(varargs[i],),
-            grad_kwargs=merge(kwargs, {"axis": ax}),
-        )
-        for i, ax in enumerate(axis)
-    ]
+            coord=varargs[i],
+            axis=ax,
+            array_locs=array_locs,
+            grad_kwargs=kwargs,
+        ))
+
     if drop_result_list:
-        r = r[0]
+        results = results[0]
 
-    return r
+    return results
 
 
 @wraps(np.bincount)
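
``da.gradient`` now accepts per-axis spacing as a 1d coordinate array (a
NumPy array; dask-array coordinates are explicitly rejected), matching
``np.gradient`` from NumPy 1.13 onwards. A minimal sketch (chunks along the
differentiated axis must be at least ``edge_order + 1`` long):

    import numpy as np
    import dask.array as da

    coord = np.exp(np.arange(20))       # non-uniform coordinate, plain numpy
    f = da.random.random((20,), chunks=10)

    g = da.gradient(f, coord)           # spacing given as a coordinate array
    print(np.allclose(g.compute(), np.gradient(f.compute(), coord)))  # True
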
@@ -590,17 +627,17 @@
     name = 'histogram-sum-' + token
 
     # Map the histogram to all bins
-    def block_hist(x, weights=None):
-        return np.histogram(x, bins, weights=weights)[0][np.newaxis]
+    def block_hist(x, range=None, weights=None):
+        return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]
 
     if weights is None:
-        dsk = {(name, i, 0): (block_hist, k)
+        dsk = {(name, i, 0): (block_hist, k, range)
                for i, k in enumerate(flatten(a.__dask_keys__()))}
         dtype = np.histogram([])[0].dtype
     else:
         a_keys = flatten(a.__dask_keys__())
         w_keys = flatten(weights.__dask_keys__())
-        dsk = {(name, i, 0): (block_hist, k, w)
+        dsk = {(name, i, 0): (block_hist, k, range, w)
                for i, (k, w) in enumerate(zip(a_keys, w_keys))}
         dtype = weights.dtype
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/slicing.py 
new/dask-0.19.2/dask/array/slicing.py
--- old/dask-0.19.1/dask/array/slicing.py       2018-09-06 13:45:35.000000000 +0200
+++ new/dask-0.19.2/dask/array/slicing.py       2018-09-14 15:59:39.000000000 +0200
@@ -172,7 +172,7 @@
     where_none = [i for i, ind in enumerate(index) if ind is None]
     where_none_orig = list(where_none)
     for i, x in enumerate(where_none):
-        n = sum(isinstance(ind, int) for ind in index[:x])
+        n = sum(isinstance(ind, Integral) for ind in index[:x])
         if n:
             where_none[i] -= n
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/tests/test_array_core.py 
new/dask-0.19.2/dask/array/tests/test_array_core.py
--- old/dask-0.19.1/dask/array/tests/test_array_core.py 2018-09-06 13:45:35.000000000 +0200
+++ new/dask-0.19.2/dask/array/tests/test_array_core.py 2018-09-17 15:46:38.000000000 +0200
@@ -1184,14 +1184,12 @@
     def foo(x):
         raise RuntimeError("Woops")
 
-    try:
+    with pytest.raises(ValueError) as e:
         dx.map_blocks(foo)
-    except Exception as e:
-        assert e.args[0].startswith("`dtype` inference failed")
-        assert "Please specify the dtype explicitly" in e.args[0]
-        assert 'RuntimeError' in e.args[0]
-    else:
-        assert False, "Should have errored"
+    msg = str(e.value)
+    assert msg.startswith("`dtype` inference failed")
+    assert "Please specify the dtype explicitly" in msg
+    assert 'RuntimeError' in msg
 
 
 def test_from_function_requires_block_args():
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/tests/test_gufunc.py 
new/dask-0.19.2/dask/array/tests/test_gufunc.py
--- old/dask-0.19.1/dask/array/tests/test_gufunc.py     2018-07-08 16:21:38.000000000 +0200
+++ new/dask-0.19.2/dask/array/tests/test_gufunc.py     2018-09-17 15:46:38.000000000 +0200
@@ -73,7 +73,7 @@
     def stats(x):
         return np.mean(x, axis=-1), np.std(x, axis=-1)
     a = da.random.normal(size=(10, 20, 30), chunks=(5, 5, 30))
-    mean, std = apply_gufunc(stats, "(i)->(),()", a, output_dtypes="ff", vectorize=vectorize)
+    mean, std = apply_gufunc(stats, "(i)->(),()", a, output_dtypes=("f", "f"), vectorize=vectorize)
     assert mean.compute().shape == (10, 20)
     assert std.compute().shape == (10, 20)
 
@@ -187,6 +187,17 @@
     assert_eq(y, np.ones((2, 3), dtype=float))
 
 
+@pytest.mark.parametrize('output_dtypes', [int, (int,)])
+def test_apply_gufunc_output_dtypes(output_dtypes):
+    def foo(x):
+        return y
+    x = np.random.randn(10)
+    y = x.astype(int)
+    dy = apply_gufunc(foo, "()->()", x, output_dtypes=output_dtypes)
+    assert_eq(y, dy)
+
+
 def test_gufunc_two_inputs():
     def foo(x, y):
         return np.einsum('...ij,...jk->ik', x, y)
@@ -283,3 +294,45 @@
     with pytest.raises(ValueError) as excinfo:
         da.apply_gufunc(foo, "(),()->()", a, b, output_dtypes=float, allow_rechunk=False)
     assert "with different chunksize present" in str(excinfo.value)
+
+
+def test_apply_gufunc_infer_dtype():
+    x = np.arange(50).reshape((5, 10))
+    y = np.arange(10)
+    dx = da.from_array(x, chunks=5)
+    dy = da.from_array(y, chunks=5)
+
+    def foo(x, *args, **kwargs):
+        cast = kwargs.pop('cast', 'i8')
+        return (x + sum(args)).astype(cast)
+
+    dz = apply_gufunc(foo, "(),(),()->()", dx, dy, 1)
+    z = foo(dx, dy, 1)
+    assert_eq(dz, z)
+
+    dz = apply_gufunc(foo, "(),(),()->()", dx, dy, 1, cast='f8')
+    z = foo(dx, dy, 1, cast='f8')
+    assert_eq(dz, z)
+
+    dz = apply_gufunc(foo, "(),(),()->()", dx, dy, 1, cast='f8', 
output_dtypes='f8')
+    z = foo(dx, dy, 1, cast='f8')
+    assert_eq(dz, z)
+
+    def foo(x):
+        raise RuntimeError("Woops")
+
+    with pytest.raises(ValueError) as e:
+        apply_gufunc(foo, "()->()", dx)
+    msg = str(e.value)
+    assert msg.startswith("`dtype` inference failed")
+    assert "Please specify the dtype explicitly" in msg
+    assert 'RuntimeError' in msg
+
+    # Multiple outputs
+    def foo(x, y):
+        return x + y, x - y
+
+    z0, z1 = apply_gufunc(foo, "(),()->(),()", dx, dy)
+
+    assert_eq(z0, dx + dy)
+    assert_eq(z1, dx - dy)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/array/tests/test_routines.py 
new/dask-0.19.2/dask/array/tests/test_routines.py
--- old/dask-0.19.1/dask/array/tests/test_routines.py   2018-08-20 17:50:20.000000000 +0200
+++ new/dask-0.19.2/dask/array/tests/test_routines.py   2018-09-17 15:46:38.000000000 +0200
@@ -449,11 +449,16 @@
     [(10, 15, 20), (), 2],
     [(10, 15, 20), (), -1],
     [(10, 15, 20), (), (0, 2)],
+    [(10, 15, 20), (np.exp(np.arange(10)), np.exp(np.arange(20)), ), (0, 2)],
+    [(10, 15, 20), (0.5, np.exp(np.arange(20)), ), (0, 2)],
+    [(10, 15, 20), (np.exp(np.arange(20)), ), -1],
 ])
 @pytest.mark.parametrize('edge_order', [
     1,
     2
 ])
+@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
+                    reason="Old np.gradient does not support coordinate.")
 def test_gradient(shape, varargs, axis, edge_order):
     a = np.random.randint(0, 10, shape)
     d_a = da.from_array(a, chunks=(len(shape) * (5,)))
@@ -546,6 +551,15 @@
     assert_eq(a1, a2)
     assert_eq(b1, b2)
 
+
+def test_histogram_bins_range_with_nan_array():
+    # Regression test for issue #3977
+    v = da.from_array(np.array([-2, np.nan, 2]), chunks=1)
+    (a1, b1) = da.histogram(v, bins=10, range=(-3, 3))
+    (a2, b2) = np.histogram(v, bins=10, range=(-3, 3))
+    assert_eq(a1, a2)
+    assert_eq(b1, b2)
+
 
 def test_histogram_return_type():
     v = da.random.random(100, chunks=10)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/bytes/core.py 
new/dask-0.19.2/dask/bytes/core.py
--- old/dask-0.19.1/dask/bytes/core.py  2018-09-06 14:02:54.000000000 +0200
+++ new/dask-0.19.2/dask/bytes/core.py  2018-09-14 15:59:39.000000000 +0200
@@ -419,13 +419,13 @@
     A filesystem class
     """
     if driver == 'auto':
-        for d in ['hdfs3', 'pyarrow']:
+        for d in ['pyarrow', 'hdfs3']:
             try:
                 return get_hdfs_driver(d)
             except RuntimeError:
                 pass
         else:
-            raise RuntimeError("Please install either `hdfs3` or `pyarrow`")
+            raise RuntimeError("Please install either `pyarrow` (preferred) or 
`hdfs3`")
 
     elif driver == 'hdfs3':
         import_required('hdfs3', "`hdfs3` not installed")
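
For context, the 'auto' branch now simply tries the drivers in preference
order. A self-contained sketch of that pattern (``pick_hdfs_driver`` and
``get_driver`` are illustrative names, not dask API):

    def pick_hdfs_driver(get_driver):
        # Try the preferred driver first, then fall back to the legacy one
        for name in ['pyarrow', 'hdfs3']:
            try:
                return get_driver(name)
            except RuntimeError:
                pass
        raise RuntimeError("Please install either `pyarrow` (preferred) or `hdfs3`")
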
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/dataframe/categorical.py 
new/dask-0.19.2/dask/dataframe/categorical.py
--- old/dask-0.19.1/dask/dataframe/categorical.py       2018-09-06 13:45:35.000000000 +0200
+++ new/dask-0.19.2/dask/dataframe/categorical.py       2018-09-14 15:59:39.000000000 +0200
@@ -3,6 +3,7 @@
 from collections import defaultdict
 import pandas as pd
 from toolz import partition_all
+from numbers import Integral
 
 from ..base import tokenize, compute_as_if_collection
 from .accessor import Accessor
@@ -105,7 +106,7 @@
         split_every = 16
     elif split_every is False:
         split_every = df.npartitions
-    elif not isinstance(split_every, int) or split_every < 2:
+    elif not isinstance(split_every, Integral) or split_every < 2:
         raise ValueError("split_every must be an integer >= 2")
 
     token = tokenize(df, columns, index, split_every)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/dataframe/core.py 
new/dask-0.19.2/dask/dataframe/core.py
--- old/dask-0.19.1/dask/dataframe/core.py      2018-09-06 13:45:35.000000000 +0200
+++ new/dask-0.19.2/dask/dataframe/core.py      2018-09-14 15:59:39.000000000 +0200
@@ -10,7 +10,7 @@
 from toolz import merge, first, unique, partition_all, remove
 import pandas as pd
 import numpy as np
-from numbers import Number
+from numbers import Number, Integral
 
 try:
     from chest import Chest as Cache
@@ -1218,12 +1218,12 @@
         """
         from dask.dataframe.rolling import Rolling
 
-        if isinstance(window, int):
+        if isinstance(window, Integral):
             if window < 0:
                 raise ValueError('window must be >= 0')
 
         if min_periods is not None:
-            if not isinstance(min_periods, int):
+            if not isinstance(min_periods, Integral):
                 raise ValueError('min_periods must be an integer')
             if min_periods < 0:
                 raise ValueError('min_periods must be >= 0')
@@ -1234,7 +1234,7 @@
     @derived_from(pd.DataFrame)
     def diff(self, periods=1, axis=0):
         axis = self._validate_axis(axis)
-        if not isinstance(periods, int):
+        if not isinstance(periods, Integral):
             raise TypeError("periods must be an integer")
 
         if axis == 1:
@@ -1248,7 +1248,7 @@
     @derived_from(pd.DataFrame)
     def shift(self, periods=1, freq=None, axis=0):
         axis = self._validate_axis(axis)
-        if not isinstance(periods, int):
+        if not isinstance(periods, Integral):
             raise TypeError("periods must be an integer")
 
         if axis == 1:
@@ -2279,7 +2279,7 @@
 
     @derived_from(pd.Series)
     def autocorr(self, lag=1, split_every=False):
-        if not isinstance(lag, int):
+        if not isinstance(lag, Integral):
             raise TypeError("lag must be an integer")
         return self.corr(self if lag == 0 else self.shift(lag),
                          split_every=split_every)
@@ -3480,7 +3480,7 @@
         split_every = 8
     elif split_every is False:
         split_every = npartitions
-    elif split_every < 2 or not isinstance(split_every, int):
+    elif split_every < 2 or not isinstance(split_every, Integral):
         raise ValueError("split_every must be an integer >= 2")
 
     token_key = tokenize(token or (chunk, aggregate), meta, args,
@@ -3831,7 +3831,7 @@
 
     if split_every is False:
         split_every = df.npartitions
-    elif split_every < 2 or not isinstance(split_every, int):
+    elif split_every < 2 or not isinstance(split_every, Integral):
         raise ValueError("split_every must be an integer >= 2")
 
     df = df._get_numeric_data()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/dataframe/rolling.py 
new/dask-0.19.2/dask/dataframe/rolling.py
--- old/dask-0.19.1/dask/dataframe/rolling.py   2018-07-22 15:22:13.000000000 +0200
+++ new/dask-0.19.2/dask/dataframe/rolling.py   2018-09-14 15:59:39.000000000 +0200
@@ -4,6 +4,7 @@
 
 import pandas as pd
 from pandas.core.window import Rolling as pd_Rolling
+from numbers import Integral
 
 from ..base import tokenize
 from ..utils import M, funcname, derived_from
@@ -18,11 +19,11 @@
            "window size. Try using ``df.repartition`` "
            "to increase the partition size.")
 
-    if prev_part is not None and isinstance(before, int):
+    if prev_part is not None and isinstance(before, Integral):
         if prev_part.shape[0] != before:
             raise NotImplementedError(msg)
 
-    if next_part is not None and isinstance(after, int):
+    if next_part is not None and isinstance(after, Integral):
         if next_part.shape[0] != after:
             raise NotImplementedError(msg)
     # We validate that the window isn't too large for timedeltas in map_overlap
@@ -69,8 +70,8 @@
             raise TypeError("Must have a `DatetimeIndex` when using string offset "
                             "for `before` and `after`")
     else:
-        if not (isinstance(before, int) and before >= 0 and
-                isinstance(after, int) and after >= 0):
+        if not (isinstance(before, Integral) and before >= 0 and
+                isinstance(after, Integral) and after >= 0):
             raise ValueError("before and after must be positive integers")
 
     if 'token' in kwargs:
@@ -102,7 +103,7 @@
         "Try using ``df.repartition`` to increase the partition size"
     )
 
-    if before and isinstance(before, int):
+    if before and isinstance(before, Integral):
         dsk.update({(name_a, i): (M.tail, (df_name, i), before)
                     for i in range(df.npartitions - 1)})
         prevs = [None] + [(name_a, i) for i in range(df.npartitions - 1)]
@@ -117,7 +118,7 @@
     else:
         prevs = [None] * df.npartitions
 
-    if after and isinstance(after, int):
+    if after and isinstance(after, Integral):
         dsk.update({(name_b, i): (M.head, (df_name, i), after)
                     for i in range(1, df.npartitions)})
         nexts = [(name_b, i) for i in range(1, df.npartitions)] + [None]
@@ -219,7 +220,7 @@
         or multiple (False).
         """
         return (self.axis in (1, 'columns') or
-                (isinstance(self.window, int) and self.window <= 1) or
+                (isinstance(self.window, Integral) and self.window <= 1) or
                 self.obj.npartitions == 1)
 
     def _call_method(self, method_name, *args, **kwargs):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/tests/test_multiprocessing.py 
new/dask-0.19.2/dask/tests/test_multiprocessing.py
--- old/dask-0.19.1/dask/tests/test_multiprocessing.py  2018-06-22 00:08:14.000000000 +0200
+++ new/dask-0.19.2/dask/tests/test_multiprocessing.py  2018-09-14 17:39:36.000000000 +0200
@@ -9,18 +9,22 @@
 
 from dask import compute, delayed
 from dask.context import set_options
-from dask.multiprocessing import get, _dumps, _loads, remote_exception
+from dask.multiprocessing import get, _dumps, remote_exception
 from dask.utils_test import inc
 
 
 def test_pickle_globals():
-    """ For the function f(x) defined below, the only globals added in pickling
-    should be 'np' and '__builtins__'"""
-    def f(x):
-        return np.sin(x) + np.cos(x)
-
-    assert set(['np', '__builtins__']) == set(
-        _loads(_dumps(f)).__globals__.keys())
+    """ Unrelated globals should not be included in serialized bytes """
+    def unrelated_function(a):
+        return np.array([a])
+
+    def my_small_function(a, b):
+        return a + b
+
+    b = _dumps(my_small_function)
+    assert b'my_small_function' in b
+    assert b'unrelated_function' not in b
+    assert b'numpy' not in b
 
 
 def bad():
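
The rewritten test asserts a property rather than an exact set of globals:
``_dumps`` in dask.multiprocessing is backed by cloudpickle, which
serializes only the globals a function actually references. A hedged sketch
of that property, using cloudpickle directly:

    import cloudpickle

    def unrelated_function(a):
        return a * 2

    def my_small_function(a, b):
        return a + b

    blob = cloudpickle.dumps(my_small_function)
    assert b'my_small_function' in blob       # the function itself is there
    assert b'unrelated_function' not in blob  # unreferenced globals are not
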
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/tests/test_utils.py 
new/dask-0.19.2/dask/tests/test_utils.py
--- old/dask-0.19.1/dask/tests/test_utils.py    2018-06-22 00:08:14.000000000 +0200
+++ new/dask-0.19.2/dask/tests/test_utils.py    2018-09-14 15:59:39.000000000 +0200
@@ -11,7 +11,7 @@
                         memory_repr, methodcaller, M, skip_doctest,
                         SerializableLock, funcname, ndeepmap, ensure_dict,
                         extra_titles, asciitable, itemgetter, partial_by_order,
-                        effective_get)
+                        effective_get, has_keyword)
 from dask.utils_test import inc
 
 
@@ -330,3 +330,15 @@
 
     assert any('dask.base.get_scheduler' in str(warning)
                for warning in record.list)
+
+
+def test_has_keyword():
+    def foo(a, b, c=None):
+        pass
+    assert has_keyword(foo, 'a')
+    assert has_keyword(foo, 'b')
+    assert has_keyword(foo, 'c')
+
+    bar = functools.partial(foo, a=1)
+    assert has_keyword(bar, 'b')
+    assert has_keyword(bar, 'c')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask/utils.py 
new/dask-0.19.2/dask/utils.py
--- old/dask-0.19.1/dask/utils.py       2018-08-20 17:50:20.000000000 +0200
+++ new/dask-0.19.2/dask/utils.py       2018-09-14 15:59:39.000000000 +0200
@@ -1022,6 +1022,9 @@
         if PY3:
             return keyword in inspect.signature(func).parameters
         else:
-            return keyword in inspect.getargspec(func).args
+            if isinstance(func, functools.partial):
+                return keyword in inspect.getargspec(func.func).args
+            else:
+                return keyword in inspect.getargspec(func).args
     except Exception:
         return False
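
Background on the fix: on Python 2.7, ``inspect.getargspec()`` rejects
``functools.partial`` objects, so ``has_keyword`` always hit the ``except``
branch and returned ``False``; the fix inspects the wrapped ``func``
instead. Usage mirroring the new test:

    import functools
    from dask.utils import has_keyword

    def foo(a, b, c=None):
        pass

    bar = functools.partial(foo, a=1)
    print(has_keyword(bar, 'b'))  # True (was False on Python 2.7 pre-fix)
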
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask.egg-info/PKG-INFO 
new/dask-0.19.2/dask.egg-info/PKG-INFO
--- old/dask-0.19.1/dask.egg-info/PKG-INFO      2018-09-06 14:15:04.000000000 +0200
+++ new/dask-0.19.2/dask.egg-info/PKG-INFO      2018-09-17 15:54:10.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.2
 Name: dask
-Version: 0.19.1
+Version: 0.19.2
 Summary: Parallel PyData with Task Scheduling
 Home-page: http://github.com/dask/dask/
 Author: Matthew Rocklin
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/dask.egg-info/SOURCES.txt 
new/dask-0.19.2/dask.egg-info/SOURCES.txt
--- old/dask-0.19.1/dask.egg-info/SOURCES.txt   2018-09-06 14:15:04.000000000 +0200
+++ new/dask-0.19.2/dask.egg-info/SOURCES.txt   2018-09-17 15:54:10.000000000 +0200
@@ -270,7 +270,6 @@
 docs/source/_static/main-page.css
 docs/source/_static/profile.html
 docs/source/_static/stacked_profile.html
-docs/source/_static/style.css
 docs/source/_templates/layout.html
 docs/source/examples/array-extend.rst
 docs/source/examples/array-hdf5.rst
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/docs/source/_static/style.css 
new/dask-0.19.2/docs/source/_static/style.css
--- old/dask-0.19.1/docs/source/_static/style.css       2018-07-07 13:43:24.000000000 +0200
+++ new/dask-0.19.2/docs/source/_static/style.css       1970-01-01 01:00:00.000000000 +0100
@@ -1,88 +0,0 @@
-@import url("theme.css");
-
-.rst-content h1,h2,h3,h4,h5{
-  font-family: 'Garamond', 'Georgia', serif;
-  font-weight: normal;
-}
-
-.rst-content h1 {
-  font-size: 240%;
-}
-
-.rst-content h2 {
-  font-size: 180%;
-}
-
-.rst-content h3 {
-  font-size: 160%;
-}
-
-.wy-menu-vertical li ul li a,
-.wy-menu-vertical li.current a,
-.wy-menu-vertical a {
-  color: #ECB172;
-}
-
-.wy-menu-vertical li.toctree-l2.current>a:hover,
-.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a:hover,
-.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a:hover,
-.wy-menu-vertical li.current a:hover {
-  background: #404040;
-  color: #ECB172;
-}
-
-.wy-menu-vertical a:hover {
-  background: #303030;
-}
-
-.wy-side-nav-search>a img.logo,
-.wy-side-nav-search .wy-dropdown>a img.logo {
-  width: 10rem
-}
-
-.wy-side-nav-search {
-  background-color: black;
-}
-
-.wy-side-nav-search>div.version {
-  display: none;
-}
-
-.wy-menu-vertical header,
-.wy-menu-vertical p.caption {
-  color: #D67548;
-}
-
-.wy-menu-vertical li.on a,
-.wy-menu-vertical li.current>a {
-  background: #303030;
-  border-top: none;
-  border-bottom: none;
-}
-
-
-.wy-menu-vertical ul,
-.wy-side-scroll {
-  background-color: #101010;
-}
-
-.wy-menu-vertical li.toctree-l2.current>a,
-.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,
-.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,
-.wy-menu-vertical li ul {
-  background-color: #303030;
-}
-
-/* Mobile */
-
-.wy-nav-top {
-  background: black;
-}
-
-.wy-nav-top a {
-  color: #ECB172;
-}
-
-.wy-nav-top i {
-  color: #ECB172;
-}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/docs/source/array-slicing.rst 
new/dask-0.19.2/docs/source/array-slicing.rst
--- old/dask-0.19.1/docs/source/array-slicing.rst       2018-07-07 13:43:24.000000000 +0200
+++ new/dask-0.19.2/docs/source/array-slicing.rst       2018-09-14 15:59:39.000000000 +0200
@@ -7,8 +7,8 @@
 *  Slicing by integers and slices ``x[0, :5]``
 *  Slicing by lists/arrays of integers  ``x[[1, 2, 4]]``
 *  Slicing by lists/arrays of booleans ``x[[False, True, True, False, True]]``
-*  Slicing one `~dask.array.Array` with a `~dask.array.Array` of bools ``x[x > 0]``
-*  Slicing one `~dask.array.Array` with a zero or one-dimensional `~dask.array.Array`
+*  Slicing one :class:`~dask.array.Array` with a :class:`~dask.array.Array` of bools ``x[x > 0]``
+*  Slicing one :class:`~dask.array.Array` with a zero or one-dimensional :class:`~dask.array.Array`
    of ints ``a[b.argtopk(5)]``
 
 It does not currently support the following:
@@ -19,7 +19,7 @@
    issue. Also users interested in this should take a look at
    :attr:`~dask.array.Array.vindex`.
 
-*  Slicing one `~dask.array.Array` with a multi-dimensional `~dask.array.Array` of ints
+*  Slicing one :class:`~dask.array.Array` with a multi-dimensional :class:`~dask.array.Array` of ints
 
 Efficiency
 ----------
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/docs/source/changelog.rst 
new/dask-0.19.2/docs/source/changelog.rst
--- old/dask-0.19.1/docs/source/changelog.rst   2018-09-06 14:12:47.000000000 +0200
+++ new/dask-0.19.2/docs/source/changelog.rst   2018-09-17 15:50:37.000000000 +0200
@@ -1,7 +1,7 @@
 Changelog
 =========
 
-0.19.2 / YYYY-MM-DD
+X.XX.X / YYYY-MM-DD
 -------------------
 
 Array
@@ -9,6 +9,7 @@
 
 -
 
+
 Dataframe
 +++++++++
 
@@ -25,6 +26,32 @@
 -
 
 
+0.19.2 / 2018-09-17
+-------------------
+
+Array
++++++
+
+-  ``apply_gufunc`` implements automatic inference of function output dtypes (:pr:`3936`) `Markus Gonser`_
+-  Fix array histogram range error when array has nans (#3980) `James Bourbeau`_
+-  Issue 3937 follow up, int type checks. (#3956) `Yu Feng`_
+-  from_array: add @martindurant's explanation of how hashing is done for an array. (#3965) `Mark Harfouche`_
+-  Support gradient with coordinate (#3949) `Keisuke Fujii`_
+
+Core
+++++
+
+-  Fix use of has_keyword with partial in Python 2.7 (#3966) `Mark Harfouche`_
+-  Set pyarrow as default for HDFS (#3957) `Matthew Rocklin`_
+
+Documentation
++++++++++++++
+
+-  Use dask_sphinx_theme (#3963) `Matthew Rocklin`_
+-  Use JupyterLab in Binder links from main page `Matthew Rocklin`_
+-  DOC: fixed sphinx syntax (#3960) `Tom Augspurger`_
+
+
 0.19.1 / 2018-09-06
 -------------------
 
@@ -60,6 +87,7 @@
 Array
 +++++
 
+-  Support coordinate in gradient (:pr:`3949`) `Keisuke Fujii`_
 -  Fix argtopk split_every bug (:pr:`3810`) `Guido Imperiale`_
-  Ensure result computing dask.array.isnull() always gives a numpy array (:pr:`3825`) `Stephan Hoyer`_
-  Support concatenate for scipy.sparse in dask array (:pr:`3836`) `Matthew Rocklin`_
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/docs/source/conf.py 
new/dask-0.19.2/docs/source/conf.py
--- old/dask-0.19.1/docs/source/conf.py 2018-08-20 17:50:20.000000000 +0200
+++ new/dask-0.19.2/docs/source/conf.py 2018-09-14 15:59:39.000000000 +0200
@@ -96,7 +96,7 @@
 
 # -- Options for HTML output ---------------------------------------------------
 
-html_theme = 'sphinx_rtd_theme'
+html_theme = 'dask_sphinx_theme'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
@@ -117,7 +117,7 @@
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = "images/dask_horizontal_white_no_pad.svg"
+# html_logo = "images/dask_horizontal_white_no_pad.svg"
 
 
 # The name of an image file (within the static path) to use as favicon of the
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/docs/source/docs.rst 
new/dask-0.19.2/docs/source/docs.rst
--- old/dask-0.19.1/docs/source/docs.rst        2018-06-22 00:08:14.000000000 +0200
+++ new/dask-0.19.2/docs/source/docs.rst        2018-09-14 15:59:39.000000000 +0200
@@ -2,9 +2,9 @@
 Dask
 ====
 
-*Dask is a flexible parallel computing library for analytic computing.*
+*Dask is a flexible library for parallel computing in Python.*
 
-Dask is composed of two components:
+Dask is composed of two parts:
 
 1.  **Dynamic task scheduling** optimized for computation.  This is similar to
     *Airflow, Luigi, Celery, or Make*, but optimized for interactive
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/docs/source/index.html 
new/dask-0.19.2/docs/source/index.html
--- old/dask-0.19.1/docs/source/index.html      2018-09-06 13:45:35.000000000 +0200
+++ new/dask-0.19.2/docs/source/index.html      2018-09-14 15:59:39.000000000 +0200
@@ -67,7 +67,7 @@
         enabling performance at scale for the tools you love
       </p>
         <a class="btn outline-dask btn-lg" href="docs.html">Learn More</a>
-        <a class="btn solid-dask btn-lg" 
href="https://mybinder.org/v2/gh/dask/dask-examples/master"; role="button">Try 
Now &raquo;</a>
+        <a class="btn solid-dask btn-lg" 
href="https://mybinder.org/v2/gh/dask/dask-examples/master?urlpath=lab"; 
role="button">Try Now &raquo;</a>
       </div>
       <div class="product-device box-shadow d-none d-md-block"></div>
       <div class="product-device product-device-2 box-shadow d-none 
d-md-block"></div>
@@ -94,7 +94,7 @@
            <p>Dask arrays scale Numpy workflows, enabling multi-dimensional data analysis in earth science, satellite imagery, genomics, biomedical applications, and machine learning algorithms.</p>
            <p>
              <a class="btn btn-outline-secondary" href="array.html" role="button">Learn More &raquo;</a>
-              <a class="btn btn-secondary" href="https://mybinder.org/v2/gh/dask/dask-examples/master?filepath=array.ipynb" role="button">Try Now &raquo;</a>
+              <a class="btn btn-secondary" href="https://mybinder.org/v2/gh/dask/dask-examples/master?urlpath=lab/tree/array.ipynb" role="button">Try Now &raquo;</a>
             </p>
           </div><!-- /.col-lg-4 -->
           <div class="col-lg-4">
@@ -103,7 +103,7 @@
            <p>Dask dataframes scale Pandas workflows, enabling applications in time series, business intelligence, and general data munging on big data.</p>
            <p>
              <a class="btn btn-outline-secondary" href="dataframe.html" role="button">Learn More &raquo;</a>
-              <a class="btn btn-secondary" href="https://mybinder.org/v2/gh/dask/dask-examples/master?filepath=dataframe.ipynb" role="button">Try Now &raquo;</a>
+              <a class="btn btn-secondary" href="https://mybinder.org/v2/gh/dask/dask-examples/master?urlpath=lab/tree/dataframe.ipynb" role="button">Try Now &raquo;</a>
             </p>
           </div><!-- /.col-lg-4 -->
           <div class="col-lg-4">
@@ -113,7 +113,7 @@
            <p>Dask-ML scales machine learning APIs like Scikit-Learn and XGBoost to enable scalable training and prediction on large models and large datasets.</p>
            <p>
              <a class="btn btn-outline-secondary" href="https://dask-ml.readthedocs.org/en/latest/" role="button">Learn More &raquo;</a>
-              <a class="btn btn-secondary" href="https://mybinder.org/v2/gh/dask/dask-examples/master?filepath=machine-learning.ipynb" role="button">Try Now &raquo;</a>
+              <a class="btn btn-secondary" href="https://mybinder.org/v2/gh/dask/dask-examples/master?urlpath=lab/tree/machine-learning.ipynb" role="button">Try Now &raquo;</a>
             </p>
           </div><!-- /.col-lg-4 -->
         </div><!-- /.row -->
@@ -187,7 +187,7 @@
 
         <div class="text-center">
           <a class="btn btn-outline-secondary btn-lg" href="docs.html" 
role="button">Learn More &raquo;</a>
-          <a class="btn btn-secondary btn-lg" 
href="https://mybinder.org/v2/gh/dask/dask-examples/master"; role="button">Try 
Now &raquo;</a>
+          <a class="btn btn-secondary btn-lg" 
href="https://mybinder.org/v2/gh/dask/dask-examples/master?urlpath=lab"; 
role="button">Try Now &raquo;</a>
         </div>
 
         <hr class="featurette-divider"></hr>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/dask-0.19.1/docs/source/support.rst 
new/dask-0.19.2/docs/source/support.rst
--- old/dask-0.19.1/docs/source/support.rst     2018-06-22 00:08:14.000000000 +0200
+++ new/dask-0.19.2/docs/source/support.rst     2018-09-14 17:39:36.000000000 +0200
@@ -31,6 +31,7 @@
.. _`Stack Overflow with the #dask tag`: http://stackoverflow.com/questions/tagged/dask
 .. _`Github issue tracker`: https://github.com/dask/dask/issues/
 
+
 Asking for help
 ---------------
 
@@ -38,16 +39,31 @@
 new to using the project.  There are a few things you can do to improve the
 likelihood of quickly getting a good answer.
 
-1.  **Ask questions in the right place**.  In particular we strongly prefer the use
-    of StackOverflow and Github issues over Gitter chat.  Github and
+1.  **Ask questions in the right place**:  We strongly prefer the use
+    of StackOverflow or Github issues over Gitter chat.  Github and
     StackOverflow are more easily searchable by future users and so are more
     efficient for everyone's time.  Gitter chat is strictly reserved for
     developer and community discussion.
-2.  **Create a minimal example**.  It is ideal to create `minimal, complete,
+
+    If you have a general question about how something should work or
+    want best practices then use Stack Overflow.  If you think you have found a
+    bug then use GitHub.
+
+2.  **Ask only in one place**: Please restrict yourself to posting your
+    question in only one place (likely Stack Overflow or Github) and don't post
+    in both.
+
+3.  **Create a minimal example**:  It is ideal to create `minimal, complete,
     verifiable examples <https://stackoverflow.com/help/mcve>`_.  This
     significantly reduces the time that answerers spend understanding your
     situation and so results in higher quality answers more quickly.
 
+    See also `this blogpost
+    <http://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports>`_
+    about crafting minimal bug reports.  These have a much higher likelihood of
+    being answered.
+
+
 Paid support
 ------------
 

