I have now tested it.  (The dask tests are run in autopkgtest, not build.)

The attached is what I have so far, but it had these failures. The first two happen with or without 969648.patch and (from debci results) appear to be triggered by the new fsspec, but the last is a *regression* caused by this patch.

=================================== FAILURES ===================================
_________________________________ test_errors __________________________________

dir_server = '/tmp/tmpuxg_g6b8'

    def test_errors(dir_server):
        f = open_files("http://localhost:8999/doesnotexist")[0]
        with pytest.raises(requests.exceptions.RequestException):
            with f as f:
>               f.read()

/usr/lib/python3/dist-packages/dask/bytes/tests/test_http.py:117:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3/dist-packages/fsspec/implementations/http.py:343: in read
    self._fetch_all()
/usr/lib/python3/dist-packages/fsspec/asyn.py:121: in wrapper
    return maybe_sync(func, self, *args, **kwargs)
/usr/lib/python3/dist-packages/fsspec/asyn.py:100: in maybe_sync
    return sync(loop, func, *args, **kwargs)
/usr/lib/python3/dist-packages/fsspec/asyn.py:71: in sync
    raise exc.with_traceback(tb)
/usr/lib/python3/dist-packages/fsspec/asyn.py:55: in f
    result[0] = await future
/usr/lib/python3/dist-packages/fsspec/implementations/http.py:360: in async_fetch_all
    r.raise_for_status()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ClientResponse(http://localhost:8999/doesnotexist) [404 File not found]> <CIMultiDictProxy('Server': 'SimpleHTTP/0.6 ...19 Oct 2020 17:38:10 GMT', 'Connection': 'close', 'Content-Type': 'text/html;charset=utf-8', 'Content-Length': '469')>


    def raise_for_status(self) -> None:
        if 400 <= self.status:
            # reason should always be not None for a started response
            assert self.reason is not None
            self.release()
>           raise ClientResponseError(
                self.request_info,
                self.history,
                status=self.status,
                message=self.reason,
                headers=self.headers)
E aiohttp.client_exceptions.ClientResponseError: 404, message='File not found', url=URL('http://localhost:8999/doesnotexist')

/usr/lib/python3/dist-packages/aiohttp/client_reqrep.py:941: ClientResponseError
----------------------------- Captured stderr call -----------------------------
127.0.0.1 - - [19/Oct/2020 17:38:10] code 404, message File not found
127.0.0.1 - - [19/Oct/2020 17:38:10] "HEAD /doesnotexist HTTP/1.1" 404 -
127.0.0.1 - - [19/Oct/2020 17:38:10] code 404, message File not found
127.0.0.1 - - [19/Oct/2020 17:38:10] "GET /doesnotexist HTTP/1.1" 404 -
________________________ test_urlpath_inference_errors _________________________

    def test_urlpath_inference_errors():
        # Empty list
        with pytest.raises(ValueError, match="empty"):
            get_fs_token_paths([])

        # Protocols differ
        with pytest.raises(ValueError, match="the same protocol"):
            get_fs_token_paths(["s3://test/path.csv", "/other/path.csv"])

        # Options differ
        with pytest.raises(ValueError, match="the same file-system options"):
            get_fs_token_paths(
                [
                    "ftp://myuser@node.com/test/path.csv",
                    "ftp://otheruser@node.com/other/path.csv",
                ]
            )

        # Unknown type
        with pytest.raises(TypeError):
>           get_fs_token_paths(
                {
                    "sets/are.csv",
                    "unordered/so/they.csv",
                    "should/not/be.csv",
                    "allowed.csv",
                }
            )
E           Failed: DID NOT RAISE <class 'TypeError'>

/usr/lib/python3/dist-packages/dask/bytes/tests/test_local.py:86: Failed
______________ test_time_rolling_methods[window3-std-args6-True] _______________

method = 'std', args = (), window = <5 * Seconds>, check_less_precise = {}

    @pytest.mark.parametrize(
        "method,args,check_less_precise", rolling_method_args_check_less_precise
    )
    @pytest.mark.parametrize("window", ["1S", "2S", "3S", pd.offsets.Second(5)])
    def test_time_rolling_methods(method, args, window, check_less_precise):
        if dd._compat.PANDAS_GT_110:
            check_less_precise = {}
        else:
            check_less_precise = {"check_less_precise": check_less_precise}

        # DataFrame
        if method == "apply":
            kwargs = {"raw": False}
        else:
            kwargs = {}
        prolling = ts.rolling(window)
        drolling = dts.rolling(window)
>       assert_eq(
            getattr(prolling, method)(*args, **kwargs),
            getattr(drolling, method)(*args, **kwargs),
            **check_less_precise,
        )

/usr/lib/python3/dist-packages/dask/dataframe/tests/test_rolling.py:288:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3/dist-packages/dask/dataframe/utils.py:807: in assert_eq
    tm.assert_frame_equal(a, b, **kwargs)
pandas/_libs/testing.pyx:67: in pandas._libs.testing.assert_almost_equal
    ???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

>   ???
E   AssertionError: DataFrame.iloc[:, 3] (column name="d") are different
E
E   DataFrame.iloc[:, 3] (column name="d") values are different (2.5 %)
E [index]: [2016-01-01T00:00:00.000000000, 2016-01-01T00:00:03.000000000, 2016-01-01T00:00:05.000000000, 2016-01-01T00:00:06.000000000, 2016-01-01T00:00:09.000000000, 2016-01-01T00:00:10.000000000, 2016-01-01T00:00:12.000000000, 2016-01-01T00:00:15.000000000, 2016-01-01T00:00:18.000000000, 2016-01-01T00:00:20.000000000, 2016-01-01T00:00:21.000000000, 2016-01-01T00:00:24.000000000, 2016-01-01T00:00:25.000000000, 2016-01-01T00:00:27.000000000, 2016-01-01T00:00:30.000000000, 2016-01-01T00:00:33.000000000, 2016-01-01T00:00:35.000000000, 2016-01-01T00:00:36.000000000, 2016-01-01T00:00:39.000000000, 2016-01-01T00:00:40.000000000, 2016-01-01T00:00:42.000000000, 2016-01-01T00:00:45.000000000, 2016-01-01T00:00:48.000000000, 2016-01-01T00:00:50.000000000, 2016-01-01T00:00:51.000000000, 2016-01-01T00:00:54.000000000, 2016-01-01T00:00:55.000000000, 2016-01-01T00:00:57.000000000, 2016-01-01T00:01:00.000000000, 2016-01-01T00:01:03.000000000, 2016-01-01T00:01:05.000000000, 2016-01-01T00:01:06.000000000, 2016-01-01T00:01:09.000000000, 2016-01-01T00:01:10.000000000, 2016-01-01T00:01:12.000000000, 2016-01-01T00:01:15.000000000, 2016-01-01T00:01:18.000000000, 2016-01-01T00:01:20.000000000, 2016-01-01T00:01:21.000000000, 2016-01-01T00:01:24.000000000] E [left]: [nan, 2.8284271247461903, 27.57716446627535, 21.221058723196002, 17.15614564327702, 8.88819441731558, 8.88819441731558, 16.970562748477132, 31.112698372208087, 38.18376618407356, 30.088757590391353, 37.753587026047384, 33.94603560555096, 33.6501609703926, 4.76837158203125e-07, 8.485281374238571, 27.577164466275327, 34.122328955294556, 37.16629297271027, 31.224989991991986, 18.08314132002511, 5.656854249492363, 18.384776310850242, 4.949747468305947, 31.0859024854247, 29.399546481762837, 32.41913015489466, 35.67912554982255, 24.041630560342607, 4.949747468305879, 9.192388155425114, 28.74601421646719, 30.664855018951805, 27.64657905299196, 8.020806277010639, 45.96194077712561, 16.263455967290692, 14.849242404917563, 
25.106440076867422, 27.537852736430537] E [right]: [nan, 2.8284271247461903, 27.57716446627535, 21.221058723196002, 17.15614564327702, 8.88819441731558, 8.88819441731558, 16.970562748477132, 31.112698372208087, 38.18376618407356, 30.088757590391353, 37.753587026047384, 33.94603560555096, 33.6501609703926, 0.0, 8.48528137423857, 27.577164466275352, 34.122328955294556, 37.16629297271028, 31.224989991991993, 18.08314132002513, 5.656854249492411, 18.384776310850246, 4.949747468305879, 31.0859024854247, 29.399546481762837, 32.41913015489466, 35.67912554982255, 24.041630560342615, 4.949747468305821, 9.192388155425112, 28.74601421646718, 30.6648550189518, 27.646579052991953, 8.020806277010646, 45.96194077712559, 16.2634559672906, 14.849242404917517, 25.1064400768674, 27.537852736430512]

pandas/_libs/testing.pyx:182: AssertionError
diff -Nru dask-2.11.0+dfsg/debian/changelog dask-2.11.0+dfsg/debian/changelog
--- dask-2.11.0+dfsg/debian/changelog   2020-02-26 21:01:52.000000000 +0000
+++ dask-2.11.0+dfsg/debian/changelog   2020-10-19 08:38:20.000000000 +0100
@@ -1,3 +1,10 @@
+dask (2.11.0+dfsg-1.1) UNRELEASED; urgency=medium
+
+  * Non-maintainer upload.
+  * Fix test failures with pandas 1.1 (Closes: #969648)
+
+ -- Rebecca N. Palmer <rebecca_palmer@zoho.com>  Mon, 19 Oct 2020 08:38:20 +0100
+
 dask (2.11.0+dfsg-1) unstable; urgency=medium
 
   * New upstream release
diff -Nru dask-2.11.0+dfsg/debian/control dask-2.11.0+dfsg/debian/control
--- dask-2.11.0+dfsg/debian/control     2020-02-11 22:26:36.000000000 +0000
+++ dask-2.11.0+dfsg/debian/control     2020-10-19 08:38:20.000000000 +0100
@@ -12,7 +12,7 @@
                python3-all,
                python3-cloudpickle <!nodoc>,
                python3-dask-sphinx-theme <!nodoc>,
-               python3-distributed <!nodoc>,
+# skip for cycle breaking               python3-distributed <!nodoc>,
                python3-fsspec,
                python3-numpydoc <!nodoc>,
                python3-pandas (>= 0.19.0) <!nodoc>,
diff -Nru dask-2.11.0+dfsg/debian/patches/969648.patch dask-2.11.0+dfsg/debian/patches/969648.patch
--- dask-2.11.0+dfsg/debian/patches/969648.patch        1970-01-01 01:00:00.000000000 +0100
+++ dask-2.11.0+dfsg/debian/patches/969648.patch        2020-10-19 08:38:20.000000000 +0100
@@ -0,0 +1,352 @@
+Description: Avoid test failures with pandas 1.1.x
+
+Origin: based on upstream f212b76fefeb93298205d7d224cbc1f7ed387ce9 + 8eeb0e0194ef0561b4202f42de06c0b7fc0784b9
+Author: Tom Augspurger, Julia Signell, Rebecca Palmer
+Bug-Debian: https://bugs.debian.org/969648
+Forwarded: not-needed
+
+--- a/dask/dataframe/_compat.py
++++ b/dask/dataframe/_compat.py
+@@ -9,6 +9,7 @@ PANDAS_VERSION = LooseVersion(pd.__versi
+ PANDAS_GT_0240 = PANDAS_VERSION >= LooseVersion("0.24.0")
+ PANDAS_GT_0250 = PANDAS_VERSION >= LooseVersion("0.25.0")
+ PANDAS_GT_100 = PANDAS_VERSION >= LooseVersion("1.0.0")
++PANDAS_GT_110 = PANDAS_VERSION >= LooseVersion("1.1.0")
+ HAS_INT_NA = PANDAS_GT_0240
+ 
+ 
+--- a/dask/dataframe/core.py
++++ b/dask/dataframe/core.py
+@@ -2354,7 +2354,7 @@ Dask Name: {name}, {task} tasks"""
+         else:
+             is_anchored = offset.isAnchored()
+ 
+-        include_right = is_anchored or not hasattr(offset, "_inc")
++        include_right = is_anchored or not hasattr(offset, "delta")
+ 
+         if end == self.npartitions - 1:
+             divs = self.divisions
+@@ -3930,7 +3930,7 @@ class DataFrame(_Frame):
+             left_index=on is None,
+             right_index=True,
+             left_on=on,
+-            suffixes=[lsuffix, rsuffix],
++            suffixes=(lsuffix, rsuffix),
+             npartitions=npartitions,
+             shuffle=shuffle,
+         )
+--- a/dask/dataframe/tests/test_dataframe.py
++++ b/dask/dataframe/tests/test_dataframe.py
+@@ -36,6 +36,9 @@ dsk = {
+ meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
+ d = dd.DataFrame(dsk, "x", meta, [0, 5, 9, 9])
+ full = d.compute()
++CHECK_FREQ = {}
++if dd._compat.PANDAS_GT_110:
++    CHECK_FREQ["check_freq"] = False
+ 
+ 
+ def test_dataframe_doc():
+@@ -200,7 +203,18 @@ def test_index_names():
+     assert ddf.index.compute().name == "x"
+ 
+ 
+-@pytest.mark.parametrize("npartitions", [1, pytest.param(2, marks=pytest.mark.xfail)])
++@pytest.mark.parametrize(
++    "npartitions",
++    [
++        1,
++        pytest.param(
++            2,
++            marks=pytest.mark.xfail(
++                not dd._compat.PANDAS_GT_110, reason="Fixed upstream."
++            ),
++        ),
++    ],
++)
+ def test_timezone_freq(npartitions):
+     s_naive = pd.Series(pd.date_range("20130101", periods=10))
+     s_aware = pd.Series(pd.date_range("20130101", periods=10, tz="US/Eastern"))
+@@ -359,12 +373,48 @@ def test_describe_numeric(method, test_v
+         (None, None, None, ["c", "d", "g"]),  # numeric + bool
+         (None, None, None, ["c", "d", "f", "g"]),  # numeric + bool + timedelta
+         (None, None, None, ["f", "g"]),  # bool + timedelta
+-        ("all", None, None, None),
+-        (["number"], None, [0.25, 0.5], None),
+-        ([np.timedelta64], None, None, None),
+-        (["number", "object"], None, [0.25, 0.75], None),
+-        (None, ["number", "object"], None, None),
+-        (["object", "datetime", "bool"], None, None, None),
++        pytest.param(
++            "all",
++            None,
++            None,
++            None,
++            marks=pytest.mark.xfail(dd._compat.PANDAS_GT_110, reason="upstream changes"),
++        ),
++        pytest.param(
++            ["number"],
++            None,
++            [0.25, 0.5],
++            None,
++            marks=pytest.mark.xfail(dd._compat.PANDAS_GT_110, reason="upstream changes"),
++        ),
++        pytest.param(
++            [np.timedelta64],
++            None,
++            None,
++            None,
++            marks=pytest.mark.xfail(dd._compat.PANDAS_GT_110, reason="upstream changes"),
++        ),
++        pytest.param(
++            ["number", "object"],
++            None,
++            [0.25, 0.75],
++            None,
++            marks=pytest.mark.xfail(dd._compat.PANDAS_GT_110, reason="upstream changes"),
++        ),
++        pytest.param(
++            None,
++            ["number", "object"],
++            None,
++            None,
++            marks=pytest.mark.xfail(dd._compat.PANDAS_GT_110, reason="upstream changes"),
++        ),
++        pytest.param(
++            ["object", "datetime", "bool"],
++            None,
++            None,
++            None,
++            marks=pytest.mark.xfail(dd._compat.PANDAS_GT_110, reason="upstream changes"),
++        ),
+     ],
+ )
+ def test_describe(include, exclude, percentiles, subset):
+@@ -2434,15 +2484,17 @@ def test_to_timestamp():
+     index = pd.period_range(freq="A", start="1/1/2001", end="12/1/2004")
+     df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]}, index=index)
+     ddf = dd.from_pandas(df, npartitions=3)
+-    assert_eq(ddf.to_timestamp(), df.to_timestamp())
++    assert_eq(ddf.to_timestamp(), df.to_timestamp(), **CHECK_FREQ)
+     assert_eq(
+         ddf.to_timestamp(freq="M", how="s").compute(),
+         df.to_timestamp(freq="M", how="s"),
++        **CHECK_FREQ
+     )
+     assert_eq(ddf.x.to_timestamp(), df.x.to_timestamp())
+     assert_eq(
+         ddf.x.to_timestamp(freq="M", how="s").compute(),
+         df.x.to_timestamp(freq="M", how="s"),
++        **CHECK_FREQ
+     )
+ 
+ 
+--- a/dask/dataframe/tests/test_extensions.py
++++ b/dask/dataframe/tests/test_extensions.py
+@@ -41,7 +41,11 @@ def test_reduction():
+     dser = dd.from_pandas(ser, 2)
+     assert_eq(ser.mean(skipna=False), dser.mean(skipna=False))
+ 
+-    assert_eq(ser.to_frame().mean(skipna=False), dser.to_frame().mean(skipna=False))
++    # It's unclear whether this can be reliably provided, at least with the current
++    # implementation, which uses pandas.DataFrame.sum(), returning a (homogenous)
++    # series which has potentially cast values.
++
++    # assert_eq(ser.to_frame().mean(skipna=False), dser.to_frame().mean(skipna=False))
+ 
+ 
+ def test_scalar():
+--- a/dask/dataframe/tests/test_indexing.py
++++ b/dask/dataframe/tests/test_indexing.py
+@@ -18,6 +18,9 @@ dsk = {
+ meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
+ d = dd.DataFrame(dsk, "x", meta, [0, 5, 9, 9])
+ full = d.compute()
++CHECK_FREQ = {}
++if dd._compat.PANDAS_GT_110:
++    CHECK_FREQ["check_freq"] = False
+ 
+ 
+ def test_loc():
+@@ -359,24 +362,35 @@ def test_loc_timestamp_str():
+     assert_eq(df.loc["2011-01-02"], ddf.loc["2011-01-02"])
+     assert_eq(df.loc["2011-01-02":"2011-01-10"], ddf.loc["2011-01-02":"2011-01-10"])
+     # same reso, dask result is always DataFrame
+-    assert_eq(df.loc["2011-01-02 10:00"].to_frame().T, ddf.loc["2011-01-02 10:00"])
++    assert_eq(
++        df.loc["2011-01-02 10:00"].to_frame().T,
++        ddf.loc["2011-01-02 10:00"],
++        **CHECK_FREQ
++    )
+ 
+     # series
+-    assert_eq(df.A.loc["2011-01-02"], ddf.A.loc["2011-01-02"])
+-    assert_eq(df.A.loc["2011-01-02":"2011-01-10"], ddf.A.loc["2011-01-02":"2011-01-10"])
++    assert_eq(df.A.loc["2011-01-02"], ddf.A.loc["2011-01-02"], **CHECK_FREQ)
++    assert_eq(
++        df.A.loc["2011-01-02":"2011-01-10"],
++        ddf.A.loc["2011-01-02":"2011-01-10"],
++        **CHECK_FREQ
++    )
+ 
+     # slice with timestamp (dask result must be DataFrame)
+     assert_eq(
+         df.loc[pd.Timestamp("2011-01-02")].to_frame().T,
+         ddf.loc[pd.Timestamp("2011-01-02")],
++        **CHECK_FREQ
+     )
+     assert_eq(
+         df.loc[pd.Timestamp("2011-01-02") : pd.Timestamp("2011-01-10")],
+         ddf.loc[pd.Timestamp("2011-01-02") : pd.Timestamp("2011-01-10")],
++        **CHECK_FREQ
+     )
+     assert_eq(
+         df.loc[pd.Timestamp("2011-01-02 10:00")].to_frame().T,
+         ddf.loc[pd.Timestamp("2011-01-02 10:00")],
++        **CHECK_FREQ
+     )
+ 
+     df = pd.DataFrame(
+--- a/dask/dataframe/tests/test_rolling.py
++++ b/dask/dataframe/tests/test_rolling.py
+@@ -2,6 +2,7 @@ import pandas as pd
+ import pytest
+ import numpy as np
+ 
++import dask.array as da
+ import dask.dataframe as dd
+ from dask.dataframe.utils import assert_eq, PANDAS_VERSION
+ 
+@@ -143,6 +144,10 @@ rolling_method_args_check_less_precise =
+ @pytest.mark.parametrize("window", [1, 2, 4, 5])
+ @pytest.mark.parametrize("center", [True, False])
+ def test_rolling_methods(method, args, window, center, check_less_precise):
++    if dd._compat.PANDAS_GT_110:
++        check_less_precise = {}
++    else:
++        check_less_precise = {"check_less_precise": check_less_precise}
+     # DataFrame
+     prolling = df.rolling(window, center=center)
+     drolling = ddf.rolling(window, center=center)
+@@ -154,7 +159,7 @@ def test_rolling_methods(method, args, w
+     assert_eq(
+         getattr(prolling, method)(*args, **kwargs),
+         getattr(drolling, method)(*args, **kwargs),
+-        check_less_precise=check_less_precise,
++        **check_less_precise,
+     )
+ 
+     # Series
+@@ -163,7 +168,7 @@ def test_rolling_methods(method, args, w
+     assert_eq(
+         getattr(prolling, method)(*args, **kwargs),
+         getattr(drolling, method)(*args, **kwargs),
+-        check_less_precise=check_less_precise,
++        **check_less_precise,
+     )
+ 
+ 
+@@ -268,6 +273,11 @@ def test_time_rolling_constructor():
+ )
+ @pytest.mark.parametrize("window", ["1S", "2S", "3S", pd.offsets.Second(5)])
+ def test_time_rolling_methods(method, args, window, check_less_precise):
++    if dd._compat.PANDAS_GT_110:
++        check_less_precise = {}
++    else:
++        check_less_precise = {"check_less_precise": check_less_precise}
++
+     # DataFrame
+     if method == "apply":
+         kwargs = {"raw": False}
+@@ -278,7 +288,7 @@ def test_time_rolling_methods(method, ar
+     assert_eq(
+         getattr(prolling, method)(*args, **kwargs),
+         getattr(drolling, method)(*args, **kwargs),
+-        check_less_precise=check_less_precise,
++        **check_less_precise,
+     )
+ 
+     # Series
+@@ -287,7 +297,7 @@ def test_time_rolling_methods(method, ar
+     assert_eq(
+         getattr(prolling, method)(*args, **kwargs),
+         getattr(drolling, method)(*args, **kwargs),
+-        check_less_precise=check_less_precise,
++        **check_less_precise,
+     )
+ 
+ 
+--- a/dask/dataframe/tests/test_shuffle.py
++++ b/dask/dataframe/tests/test_shuffle.py
+@@ -35,6 +35,9 @@ dsk = {
+ meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
+ d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
+ full = d.compute()
++CHECK_FREQ = {}
++if dd._compat.PANDAS_GT_110:
++    CHECK_FREQ["check_freq"] = False
+ 
+ 
+ shuffle_func = shuffle  # conflicts with keyword argument
+@@ -735,7 +738,7 @@ def test_set_index_on_empty():
+         ddf = ddf[ddf.y > df.y.max()].set_index("x")
+         expected_df = df[df.y > df.y.max()].set_index("x")
+ 
+-        assert assert_eq(ddf, expected_df)
++        assert assert_eq(ddf, expected_df, **CHECK_FREQ)
+         assert ddf.npartitions == 1
+ 
+ 
+@@ -914,8 +917,8 @@ def test_set_index_timestamp():
+         assert ts1.value == ts2.value
+         assert ts1.tz == ts2.tz
+ 
+-    assert_eq(df2, ddf_new_div)
+-    assert_eq(df2, ddf.set_index("A"))
++    assert_eq(df2, ddf_new_div, **CHECK_FREQ)
++    assert_eq(df2, ddf.set_index("A"), **CHECK_FREQ)
+ 
+ 
+ @pytest.mark.parametrize("compression", [None, "ZLib"])
+--- a/dask/dataframe/tests/test_utils_dataframe.py
++++ b/dask/dataframe/tests/test_utils_dataframe.py
+@@ -128,7 +128,7 @@ def test_meta_nonempty():
+             "E": np.int32(1),
+             "F": pd.Timestamp("2016-01-01"),
+             "G": pd.date_range("2016-01-01", periods=3, tz="America/New_York"),
+-            "H": pd.Timedelta("1 hours", "ms"),
++            "H": pd.Timedelta("1 hours"),
+             "I": np.void(b" "),
+             "J": pd.Categorical([UNKNOWN_CATEGORIES] * 3),
+         },
+@@ -146,7 +146,7 @@ def test_meta_nonempty():
+     assert df3["E"][0].dtype == "i4"
+     assert df3["F"][0] == pd.Timestamp("1970-01-01 00:00:00")
+     assert df3["G"][0] == pd.Timestamp("1970-01-01 00:00:00", tz="America/New_York")
+-    assert df3["H"][0] == pd.Timedelta("1", "ms")
++    assert df3["H"][0] == pd.Timedelta("1")
+     assert df3["I"][0] == "foo"
+     assert df3["J"][0] == UNKNOWN_CATEGORIES
+ 
+--- a/dask/dataframe/tseries/tests/test_resample.py
++++ b/dask/dataframe/tseries/tests/test_resample.py
+@@ -7,6 +7,10 @@ from dask.dataframe.utils import assert_
+ from dask.dataframe._compat import PANDAS_GT_0240
+ import dask.dataframe as dd
+ 
++CHECK_FREQ = {}
++if dd._compat.PANDAS_GT_110:
++    CHECK_FREQ["check_freq"] = False
++
+ 
+ def resample(df, freq, how="mean", **kwargs):
+     return getattr(df.resample(freq, **kwargs), how)()
+@@ -117,7 +121,7 @@ def test_series_resample_non_existent_da
+     result = ddf.resample("1D").mean()
+     expected = df.resample("1D").mean()
+ 
+-    assert_eq(result, expected)
++    assert_eq(result, expected, **CHECK_FREQ)
+ 
+ 
+ @pytest.mark.skipif(PANDAS_VERSION <= "0.23.4", reason="quantile not in 0.23")
diff -Nru dask-2.11.0+dfsg/debian/patches/series dask-2.11.0+dfsg/debian/patches/series
--- dask-2.11.0+dfsg/debian/patches/series      2020-02-26 04:55:14.000000000 +0000
+++ dask-2.11.0+dfsg/debian/patches/series      2020-10-19 08:28:31.000000000 +0100
@@ -3,3 +3,4 @@
 reproducible-version.patch
 Use-the-packaged-MathJax.patch
 use-local-intersphinx.patch
+969648.patch
diff -Nru dask-2.11.0+dfsg/debian/tests/control dask-2.11.0+dfsg/debian/tests/control
--- dask-2.11.0+dfsg/debian/tests/control       2020-02-11 22:26:36.000000000 +0000
+++ dask-2.11.0+dfsg/debian/tests/control       2020-10-19 08:38:20.000000000 +0100
@@ -6,6 +6,7 @@
  ; done
 Depends: @, python3-all,
          python3-ipython,
+         python3-aiohttp,
          python3-bcolz,
          python3-blosc,
          python3-boto,

Reply via email to