Hello community,

here is the log from the commit of package python-pandas-datareader for openSUSE:Factory checked in at 2019-08-05 10:42:21
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-pandas-datareader (Old)
 and      /work/SRC/openSUSE:Factory/.python-pandas-datareader.new.4126 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-pandas-datareader"

Mon Aug  5 10:42:21 2019 rev:4 rq:720882 version:0.7.4

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-pandas-datareader/python-pandas-datareader.changes        2018-09-18 11:44:22.531851782 +0200
+++ /work/SRC/openSUSE:Factory/.python-pandas-datareader.new.4126/python-pandas-datareader.changes      2019-08-05 10:42:30.867291017 +0200
@@ -1,0 +2,23 @@
+Sat Aug  3 15:12:10 UTC 2019 - Arun Persaud <a...@gmx.de>
+
+- specfile:
+  * update copyright year
+
+- update to version 0.7.4:
+  * This release restores Python 2.7 compatibility for the 0.7 branch.
+
+- changes from version 0.7.3:
+  * This is a patch release that fixes a break due to changes in
+    pandas 0.25. It is otherwise identical to v0.7.0. It fixes issues
+    missed in v0.7.2.
+
+- changes from version 0.7.2:
+  * This is a patch release that fixes a break due to changes in
+    pandas 0.25. It is otherwise identical to v0.7.0. It fixes issues
+    missed in v0.7.1.
+
+- changes from version 0.7.1:
+  * This is a patch release that fixes a break due to changes in
+    pandas 0.25. It is otherwise identical to v0.7.0.
+
+-------------------------------------------------------------------
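
The 0.7.1 through 0.7.4 entries above all trace back to the same root cause: pandas 0.25 dropped Python 2 support and removed most of the public pandas.compat helpers (string_types, StringIO, bytes_to_str, and friends), so pandas-datareader's imports of that module started failing. The diffs further down replace every such import with a vendored pandas_datareader.compat shim. As a rough, hypothetical sketch of the failure mode and the usual guarded-import workaround (illustrative only, not code from the package):

    # Hypothetical sketch: prefer the old pandas.compat names where they still
    # exist, otherwise fall back to local definitions (Python 3 assumed here).
    try:
        from pandas.compat import string_types, StringIO   # pandas < 0.25
    except ImportError:                                     # pandas >= 0.25
        from io import StringIO
        string_types = (str,)

    def url_for(symbols):
        # Same type check the readers perform before building an API URL.
        if not isinstance(symbols, string_types):
            raise ValueError('data name must be string')
        return 'https://example.invalid/{0}/csv'.format(symbols)

    print(url_for('GOOG'))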

Old:
----
  pandas-datareader-0.7.0.tar.gz

New:
----
  pandas-datareader-0.7.4.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-pandas-datareader.spec ++++++
--- /var/tmp/diff_new_pack.QNMWpD/_old  2019-08-05 10:42:31.339290963 +0200
+++ /var/tmp/diff_new_pack.QNMWpD/_new  2019-08-05 10:42:31.343290962 +0200
@@ -1,7 +1,7 @@
 #
 # spec file for package python-pandas-datareader
 #
-# Copyright (c) 2018 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -20,7 +20,7 @@
 # Tests require an internet connection
 %bcond_with tests
 Name:           python-pandas-datareader
-Version:        0.7.0
+Version:        0.7.4
 Release:        0
 Summary:        Data readers extracted from the pandas codebase
 License:        BSD-3-Clause

++++++ pandas-datareader-0.7.0.tar.gz -> pandas-datareader-0.7.4.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/PKG-INFO new/pandas-datareader-0.7.4/PKG-INFO
--- old/pandas-datareader-0.7.0/PKG-INFO        2018-09-12 12:38:50.000000000 +0200
+++ new/pandas-datareader-0.7.4/PKG-INFO        2019-07-29 10:23:34.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: pandas-datareader
-Version: 0.7.0
+Version: 0.7.4
 Summary: Data readers extracted from the pandas codebase,should be compatible with recent pandas versions
 Home-page: https://github.com/pydata/pandas-datareader
 Author: The PyData Development Team
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/_version.py new/pandas-datareader-0.7.4/pandas_datareader/_version.py
--- old/pandas-datareader-0.7.0/pandas_datareader/_version.py   2018-09-12 12:38:50.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/_version.py   2019-07-29 10:23:34.000000000 +0200
@@ -8,11 +8,11 @@
 
 version_json = '''
 {
- "date": "2018-09-11T23:51:52+0100",
+ "date": "2019-07-29T09:22:01+0100",
  "dirty": false,
  "error": null,
- "full-revisionid": "44ee8c84b46c8f9aff299140a83c6a11887bfca1",
- "version": "0.7.0"
+ "full-revisionid": "b44f4d365006bb63805834616cea319ca6c49d15",
+ "version": "0.7.4"
 }
 '''  # END VERSION_JSON
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/bankofcanada.py new/pandas-datareader-0.7.4/pandas_datareader/bankofcanada.py
--- old/pandas-datareader-0.7.0/pandas_datareader/bankofcanada.py       2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/bankofcanada.py       2019-07-29 08:56:18.000000000 +0200
@@ -1,6 +1,6 @@
 from __future__ import unicode_literals
 
-import pandas.compat as compat
+from pandas_datareader.compat import string_types
 
 from pandas_datareader.base import _BaseReader
 
@@ -17,7 +17,7 @@
     @property
     def url(self):
         """API URL"""
-        if not isinstance(self.symbols, compat.string_types):
+        if not isinstance(self.symbols, string_types):
             raise ValueError('data name must be string')
 
         return '{0}/{1}/csv'.format(self._URL, self.symbols)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/base.py new/pandas-datareader-0.7.4/pandas_datareader/base.py
--- old/pandas-datareader-0.7.0/pandas_datareader/base.py       2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/base.py       2019-07-29 09:57:13.000000000 +0200
@@ -4,11 +4,11 @@
 
 import requests
 
-import pandas.compat as compat
 from pandas import DataFrame
 from pandas import read_csv, concat
 from pandas.io.common import urlencode
-from pandas.compat import StringIO, bytes_to_str
+from pandas_datareader.compat import bytes_to_str, string_types, binary_type, \
+    StringIO
 
 from pandas_datareader._utils import (RemoteDataError, SymbolWarning,
                                       _sanitize_dates, _init_session)
@@ -99,7 +99,7 @@
             service = self.__class__.__name__
             raise IOError("{} request returned no data; check URL for invalid "
                           "inputs: {}".format(service, self.url))
-        if isinstance(text, compat.binary_type):
+        if isinstance(text, binary_type):
             out.write(bytes_to_str(text))
         else:
             out.write(text)
@@ -205,7 +205,7 @@
     def read(self):
         """Read data"""
         # If a single symbol, (e.g., 'GOOG')
-        if isinstance(self.symbols, (compat.string_types, int)):
+        if isinstance(self.symbols, (string_types, int)):
             df = self._read_one_data(self.url,
                                      params=self._get_params(self.symbols))
         # Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/compat/__init__.py new/pandas-datareader-0.7.4/pandas_datareader/compat/__init__.py
--- old/pandas-datareader-0.7.0/pandas_datareader/compat/__init__.py    2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/compat/__init__.py    2019-07-29 09:58:48.000000000 +0200
@@ -1,11 +1,8 @@
 # flake8: noqa
-import sys
-from distutils.version import LooseVersion
-from io import BytesIO
-
 import pandas as pd
-import pandas.compat as compat
 import pandas.io.common as com
+import sys
+from distutils.version import LooseVersion
 
 PY3 = sys.version_info >= (3, 0)
 
@@ -45,8 +42,39 @@
 else:
     from pandas.core.common import is_list_like
 
-
-if compat.PY3:
+if PY3:
     from urllib.error import HTTPError
+    from functools import reduce
+
+    string_types = str,
+    binary_type = bytes
+    from io import StringIO
+
+    def str_to_bytes(s, encoding=None):
+        return s.encode(encoding or 'ascii')
+
+
+    def bytes_to_str(b, encoding=None):
+        return b.decode(encoding or 'utf-8')
 else:
     from urllib2 import HTTPError
+    from cStringIO import StringIO
+    reduce = reduce
+    binary_type = str
+    string_types = basestring,
+
+
+    def bytes_to_str(b, encoding=None):
+        return b
+
+
+    def str_to_bytes(s, encoding=None):
+        return s
+
+
+def lmap(*args, **kwargs):
+    return list(map(*args, **kwargs))
+
+
+def lrange(*args, **kwargs):
+    return list(range(*args, **kwargs))
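
The compat/__init__.py hunk above is the heart of the fix: the Python 2/3 helpers that used to come from pandas.compat (string_types, binary_type, StringIO, bytes_to_str, str_to_bytes, reduce, lmap, lrange) are now vendored inside pandas_datareader itself. A brief usage sketch, assuming pandas-datareader 0.7.4 is installed; the CSV payload is fabricated:

    # Illustrative only: exercise the vendored helpers added in the hunk above.
    from pandas_datareader.compat import (
        StringIO, bytes_to_str, lrange, string_types,
    )

    raw = b'Date,Close\n2019-07-29,42.0\n'     # made-up payload
    text = bytes_to_str(raw)                   # bytes -> str on Python 3, pass-through on Python 2
    assert isinstance('GOOG', string_types)    # the single-symbol check used throughout the readers
    buf = StringIO(text)                       # file-like wrapper handed to pandas.read_csv
    print(buf.readline().strip())              # Date,Close
    print(lrange(3))                           # [0, 1, 2]
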
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/enigma.py new/pandas-datareader-0.7.4/pandas_datareader/enigma.py
--- old/pandas-datareader-0.7.0/pandas_datareader/enigma.py     2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/enigma.py     2019-07-29 09:57:13.000000000 +0200
@@ -1,11 +1,10 @@
 import os
 import time
 
-from pandas.compat import StringIO
-import pandas.compat as compat
 import pandas as pd
 
-from pandas_datareader.base import _BaseReader
+from pandas_datareader.base import _BaseReader, string_types
+from pandas_datareader.compat import StringIO
 
 
 class EnigmaReader(_BaseReader):
@@ -65,7 +64,7 @@
             self._api_key = api_key
 
         self._dataset_id = dataset_id
-        if not isinstance(self._dataset_id, compat.string_types):
+        if not isinstance(self._dataset_id, string_types):
             raise ValueError(
                 "The Enigma dataset_id must be a string (ex: "
                 "'bedaf052-5fcd-4758-8d27-048ce8746c6a')")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/eurostat.py new/pandas-datareader-0.7.4/pandas_datareader/eurostat.py
--- old/pandas-datareader-0.7.0/pandas_datareader/eurostat.py   2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/eurostat.py   2019-07-29 08:56:18.000000000 +0200
@@ -1,9 +1,9 @@
 from __future__ import unicode_literals
 
 import pandas as pd
-import pandas.compat as compat
 
 from pandas_datareader.io.sdmx import read_sdmx, _read_sdmx_dsd
+from pandas_datareader.compat import string_types
 from pandas_datareader.base import _BaseReader
 
 
@@ -15,7 +15,7 @@
     @property
     def url(self):
         """API URL"""
-        if not isinstance(self.symbols, compat.string_types):
+        if not isinstance(self.symbols, string_types):
             raise ValueError('data name must be string')
 
         q = '{0}/data/{1}/?startperiod={2}&endperiod={3}'
@@ -25,7 +25,7 @@
     @property
     def dsd_url(self):
         """API DSD URL"""
-        if not isinstance(self.symbols, compat.string_types):
+        if not isinstance(self.symbols, string_types):
             raise ValueError('data name must be string')
 
         return '{0}/datastructure/ESTAT/DSD_{1}'.format(
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/famafrench.py new/pandas-datareader-0.7.4/pandas_datareader/famafrench.py
--- old/pandas-datareader-0.7.0/pandas_datareader/famafrench.py 2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/famafrench.py 2019-07-29 09:57:13.000000000 +0200
@@ -1,10 +1,11 @@
 import datetime as dt
 import re
 import tempfile
+
 from zipfile import ZipFile
 
 from pandas import read_csv, to_datetime
-from pandas.compat import lmap, StringIO
+from pandas_datareader.compat import lmap, StringIO
 
 from pandas_datareader.base import _BaseReader
 
@@ -106,7 +107,7 @@
 
         datasets, table_desc = {}, []
         for i, src in enumerate(tables):
-            match = re.search('^\s*,', src, re.M)  # the table starts there
+            match = re.search(r'^\s*,', src, re.M)  # the table starts there
             start = 0 if not match else match.start()
 
             df = read_csv(StringIO('Date' + src[start:]), **params)
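
Besides moving lmap and StringIO to the vendored shim, the famafrench.py hunk only turns the header-detection pattern into a raw string. '\s' is not a recognized Python string escape, so Python 3.6+ warns about it when the module is compiled; r'^\s*,' yields the identical regex without the warning. A self-contained illustration (the sample text is fabricated, not real Fama/French data):

    import re

    # r'^\s*,' and '^\s*,' denote the same regex; only the raw string avoids the
    # "invalid escape sequence '\s'" DeprecationWarning on Python 3.6+.
    pattern = re.compile(r'^\s*,', re.M)

    sample = 'Fama/French Factors\n  ,Mkt-RF,SMB,HML,RF\n192607,2.96,-2.30,-2.87,0.22'
    match = pattern.search(sample)
    start = 0 if not match else match.start()   # same start-of-table logic as in the hunk
    print(sample[start:].splitlines()[0])       # "  ,Mkt-RF,SMB,HML,RF"
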
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/google/quotes.py new/pandas-datareader-0.7.4/pandas_datareader/google/quotes.py
--- old/pandas-datareader-0.7.0/pandas_datareader/google/quotes.py      2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/google/quotes.py      2019-07-29 08:56:18.000000000 +0200
@@ -1,10 +1,11 @@
+import json
+import numpy as np
 import pandas as pd
+import re
 from dateutil.parser import parse
-import numpy as np
 
 from pandas_datareader.base import _BaseReader
-import json
-import re
+from pandas_datareader.compat import string_types
 
 
 class GoogleQuotesReader(_BaseReader):
@@ -20,7 +21,7 @@
     @property
     def params(self):
         """Parameters to use in API calls"""
-        if isinstance(self.symbols, pd.compat.string_types):
+        if isinstance(self.symbols, string_types):
             sym_list = self.symbols
         else:
             sym_list = ','.join(self.symbols)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/io/jsdmx.py new/pandas-datareader-0.7.4/pandas_datareader/io/jsdmx.py
--- old/pandas-datareader-0.7.0/pandas_datareader/io/jsdmx.py   2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/io/jsdmx.py   2019-07-29 08:56:18.000000000 +0200
@@ -1,13 +1,13 @@
 # pylint: disable-msg=E1101,W0613,W0603
 
 from __future__ import unicode_literals
+from collections import OrderedDict
 
 import itertools
 import sys
 
 import numpy as np
 import pandas as pd
-import pandas.compat as compat
 
 from pandas_datareader.io.util import _read_content
 
@@ -38,7 +38,7 @@
     if isinstance(jdata, dict):
         data = jdata
     else:
-        data = json.loads(jdata, object_pairs_hook=compat.OrderedDict)
+        data = json.loads(jdata, object_pairs_hook=OrderedDict)
 
     structure = data['structure']
     index = _parse_dimensions(structure['dimensions']['observation'])
@@ -56,9 +56,9 @@
 
 def _get_indexer(index):
     if index.nlevels == 1:
-        return [str(i) for i in compat.range(len(index))]
+        return [str(i) for i in range(len(index))]
     else:
-        it = itertools.product(*[compat.range(
+        it = itertools.product(*[range(
             len(level)) for level in index.levels])
         return [':'.join(map(str, i)) for i in it]
 
@@ -68,7 +68,7 @@
     series = dataset['series']
 
     values = []
-    # for s_key, s_value in compat.iteritems(series):
+    # for s_key, s_value in iteritems(series):
     for s_key in _get_indexer(columns):
         try:
             observations = series[s_key]['observations']
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/io/sdmx.py new/pandas-datareader-0.7.4/pandas_datareader/io/sdmx.py
--- old/pandas-datareader-0.7.0/pandas_datareader/io/sdmx.py    2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/io/sdmx.py    2019-07-29 08:56:18.000000000 +0200
@@ -3,12 +3,12 @@
 import collections
 import time
 import zipfile
+from io import BytesIO
 
 import pandas as pd
-import pandas.compat as compat
 
 from pandas_datareader.io.util import _read_content
-from pandas_datareader.compat import HTTPError
+from pandas_datareader.compat import HTTPError, str_to_bytes
 
 
 _STRUCTURE = '{http://www.sdmx.org/resources/sdmxml/schemas/v2_1/structure}'
@@ -235,8 +235,8 @@
     """ Unzipp data contains SDMX-XML """
     data = _read_content(path_or_buf)
 
-    zp = compat.BytesIO()
-    zp.write(compat.str_to_bytes(data))
+    zp = BytesIO()
+    zp.write(str_to_bytes(data))
     f = zipfile.ZipFile(zp)
     files = f.namelist()
     assert len(files) == 1
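
The io/sdmx.py hunk above replaces the removed compat.BytesIO and compat.str_to_bytes with the stdlib io.BytesIO and the vendored helper when unpacking a zipped SDMX response. A hedged, stand-alone sketch of that unzip pattern; the archive below is built in memory instead of being downloaded from an SDMX endpoint:

    import zipfile
    from io import BytesIO

    # Fabricated stand-in for a downloaded single-file SDMX zip payload.
    archive = BytesIO()
    with zipfile.ZipFile(archive, 'w') as zf:
        zf.writestr('data.xml', '<root>example</root>')

    # The reader's pattern: load the payload into a fresh BytesIO, open it as a
    # zip archive, and expect exactly one member inside.
    zp = BytesIO()
    zp.write(archive.getvalue())              # the real code writes str_to_bytes(data)
    f = zipfile.ZipFile(zp)
    files = f.namelist()
    assert len(files) == 1
    print(f.read(files[0]).decode('utf-8'))   # <root>example</root>
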
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/io/util.py new/pandas-datareader-0.7.4/pandas_datareader/io/util.py
--- old/pandas-datareader-0.7.0/pandas_datareader/io/util.py    2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/io/util.py    2019-07-29 08:56:18.000000000 +0200
@@ -2,8 +2,7 @@
 
 import os
 
-import pandas.compat as compat
-from pandas_datareader.compat import get_filepath_or_buffer
+from pandas_datareader.compat import get_filepath_or_buffer, string_types
 
 
 def _read_content(path_or_buf):
@@ -13,7 +12,7 @@
 
     filepath_or_buffer = get_filepath_or_buffer(path_or_buf)[0]
 
-    if isinstance(filepath_or_buffer, compat.string_types):
+    if isinstance(filepath_or_buffer, string_types):
         try:
             exists = os.path.exists(filepath_or_buffer)
         except (TypeError, ValueError):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/moex.py new/pandas-datareader-0.7.4/pandas_datareader/moex.py
--- old/pandas-datareader-0.7.0/pandas_datareader/moex.py       2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/moex.py       2019-07-29 09:57:13.000000000 +0200
@@ -3,10 +3,9 @@
 import datetime as dt
 
 import pandas as pd
-from pandas.compat import StringIO
 
 from pandas_datareader.base import _DailyBaseReader
-from pandas_datareader.compat import is_list_like
+from pandas_datareader.compat import is_list_like, StringIO
 
 
 class MoexReader(_DailyBaseReader):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/nasdaq_trader.py new/pandas-datareader-0.7.4/pandas_datareader/nasdaq_trader.py
--- old/pandas-datareader-0.7.0/pandas_datareader/nasdaq_trader.py      2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/nasdaq_trader.py      2019-07-29 09:58:48.000000000 +0200
@@ -1,7 +1,8 @@
 from ftplib import FTP, all_errors
+
 from pandas import read_csv
 from pandas_datareader._utils import RemoteDataError
-from pandas.compat import StringIO
+from pandas_datareader.compat import StringIO
 
 import time
 import warnings
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/oecd.py new/pandas-datareader-0.7.4/pandas_datareader/oecd.py
--- old/pandas-datareader-0.7.0/pandas_datareader/oecd.py       2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/oecd.py       2019-07-29 08:56:18.000000000 +0200
@@ -1,8 +1,8 @@
 import pandas as pd
-import pandas.compat as compat
 
 from pandas_datareader.io import read_jsdmx
 from pandas_datareader.base import _BaseReader
+from pandas_datareader.compat import string_types
 
 
 class OECDReader(_BaseReader):
@@ -15,7 +15,7 @@
         """API URL"""
         url = 'http://stats.oecd.org/SDMX-JSON/data'
 
-        if not isinstance(self.symbols, compat.string_types):
+        if not isinstance(self.symbols, string_types):
             raise ValueError('data name must be string')
 
         # API: https://data.oecd.org/api/sdmx-json-documentation/
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/wb.py new/pandas-datareader-0.7.4/pandas_datareader/wb.py
--- old/pandas-datareader-0.7.0/pandas_datareader/wb.py 2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/wb.py 2019-07-29 08:56:18.000000000 +0200
@@ -2,7 +2,7 @@
 
 import warnings
 
-from pandas.compat import reduce, lrange, string_types
+from pandas_datareader.compat import reduce, lrange, string_types
 import pandas as pd
 import numpy as np
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/yahoo/fx.py new/pandas-datareader-0.7.4/pandas_datareader/yahoo/fx.py
--- old/pandas-datareader-0.7.0/pandas_datareader/yahoo/fx.py   2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/yahoo/fx.py   2019-07-29 08:56:18.000000000 +0200
@@ -3,8 +3,8 @@
 import warnings
 from pandas import (DataFrame, Series, to_datetime, concat)
 from pandas_datareader.yahoo.daily import YahooDailyReader
-import pandas.compat as compat
 from pandas_datareader._utils import (RemoteDataError, SymbolWarning)
+from pandas_datareader.compat import string_types
 
 
 class YahooFXReader(YahooDailyReader):
@@ -55,7 +55,7 @@
         """Read data"""
         try:
             # If a single symbol, (e.g., 'GOOG')
-            if isinstance(self.symbols, (compat.string_types, int)):
+            if isinstance(self.symbols, (string_types, int)):
                 df = self._read_one_data(self.symbols)
 
             # Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader/yahoo/quotes.py new/pandas-datareader-0.7.4/pandas_datareader/yahoo/quotes.py
--- old/pandas-datareader-0.7.0/pandas_datareader/yahoo/quotes.py       2018-09-12 12:34:47.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader/yahoo/quotes.py       2019-07-29 08:56:18.000000000 +0200
@@ -1,12 +1,9 @@
 import json
 from collections import OrderedDict
-
-import pandas.compat as compat
 from pandas import DataFrame
 
-
 from pandas_datareader.base import _BaseReader
-
+from pandas_datareader.compat import string_types
 
 _DEFAULT_PARAMS = {
     'lang': 'en-US',
@@ -24,7 +21,7 @@
         return 'https://query1.finance.yahoo.com/v7/finance/quote'
 
     def read(self):
-        if isinstance(self.symbols, compat.string_types):
+        if isinstance(self.symbols, string_types):
             return self._read_one_data(self.url, self.params(self.symbols))
         else:
             data = OrderedDict()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/pandas-datareader-0.7.0/pandas_datareader.egg-info/PKG-INFO new/pandas-datareader-0.7.4/pandas_datareader.egg-info/PKG-INFO
--- old/pandas-datareader-0.7.0/pandas_datareader.egg-info/PKG-INFO     2018-09-12 12:38:50.000000000 +0200
+++ new/pandas-datareader-0.7.4/pandas_datareader.egg-info/PKG-INFO     2019-07-29 10:23:33.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: pandas-datareader
-Version: 0.7.0
+Version: 0.7.4
 Summary: Data readers extracted from the pandas codebase,should be compatible with recent pandas versions
 Home-page: https://github.com/pydata/pandas-datareader
 Author: The PyData Development Team

