Hello community,

Here is the log from the commit of package python-tablib for openSUSE:Factory
checked in at 2019-03-26 22:34:27
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-tablib (Old)
 and      /work/SRC/openSUSE:Factory/.python-tablib.new.25356 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-tablib"

Tue Mar 26 22:34:27 2019 rev:2 rq:688774 version:0.13.0

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-tablib/python-tablib.changes      
2019-03-19 09:58:14.836106460 +0100
+++ /work/SRC/openSUSE:Factory/.python-tablib.new.25356/python-tablib.changes   
2019-03-26 22:34:55.873667215 +0100
@@ -1,0 +2,8 @@
+Tue Mar 26 10:26:24 UTC 2019 - [email protected]
+
+- version update to 0.13.0
+  * no upstream changelog (HISTORY.rst outdated), see git log
+- deleted patches
+  - openpyxl-usage-fix.patch (upstreamed)
+
+-------------------------------------------------------------------

Old:
----
  openpyxl-usage-fix.patch
  tablib-0.12.1.tar.gz

New:
----
  tablib-0.13.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-tablib.spec ++++++
--- /var/tmp/diff_new_pack.9LdKco/_old  2019-03-26 22:34:57.453666534 +0100
+++ /var/tmp/diff_new_pack.9LdKco/_new  2019-03-26 22:34:57.477666523 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package python-tablib
 #
-# Copyright (c) 2017 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -12,25 +12,23 @@
 # license that conforms to the Open Source Definition (Version 1.9)
 # published by the Open Source Initiative.
 
-# Please submit bugfixes or comments via http://bugs.opensuse.org/
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
 #
 
 
+%{?!python_module:%define python_module() python-%{**} python3-%{**}}
 %if 0%{?rhel}
 # I get syntax errors in the brp-python-bytecompile step...
 %define _python_bytecompile_errors_terminate_build 0
 %endif
-
-%{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-tablib
-Version:        0.12.1
+Version:        0.13.0
 Release:        0
 Summary:        Format agnostic tabular data library (XLS, JSON, YAML, CSV)
 License:        MIT
 Group:          Development/Languages/Python
-Url:            http://python-tablib.org
+URL:            http://python-tablib.org
 Source:         
https://files.pythonhosted.org/packages/source/t/tablib/tablib-%{version}.tar.gz
-Patch0:         openpyxl-usage-fix.patch
 BuildRequires:  %{python_module PyYAML >= 3.12}
 BuildRequires:  %{python_module odfpy >= 1.3.5}
 BuildRequires:  %{python_module openpyxl >= 2.4.8}
@@ -42,6 +40,7 @@
 BuildRequires:  %{python_module xml}
 BuildRequires:  fdupes
 BuildRequires:  python-rpm-macros
+BuildRequires:  python2-backports.csv
 Requires:       python-PyYAML >= 3.12
 Requires:       python-odfpy >= 1.3.5
 Requires:       python-openpyxl >= 2.4.8
@@ -51,7 +50,9 @@
 Requires:       python-xml
 Suggests:       python-pandas >= 0.20.3
 BuildArch:      noarch
-
+%ifpython2
+Requires:       python2-backports.csv
+%endif
 %python_subpackages
 
 %description
@@ -68,7 +69,6 @@
 
 %prep
 %setup -q -n tablib-%{version}
-%patch0 -p1
 # Remove shebang lines from non-executable scripts:
 find tablib -name "*.py" | xargs sed -i '1 { /^#!/ d }'
 

++++++ tablib-0.12.1.tar.gz -> tablib-0.13.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/AUTHORS new/tablib-0.13.0/AUTHORS
--- old/tablib-0.12.1/AUTHORS   2017-08-27 09:23:48.000000000 +0200
+++ new/tablib-0.13.0/AUTHORS   2019-03-08 13:15:56.000000000 +0100
@@ -34,3 +34,5 @@
 - Mathias Loesch
 - Tushar Makkar
 - Andrii Soldatenko
+- Bruno Soares
+- Tsuyoshi Hombashi
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/HISTORY.rst 
new/tablib-0.13.0/HISTORY.rst
--- old/tablib-0.12.1/HISTORY.rst       2017-08-27 09:23:48.000000000 +0200
+++ new/tablib-0.13.0/HISTORY.rst       2019-03-08 13:15:56.000000000 +0100
@@ -254,4 +254,3 @@
 * Export Support for XLS, JSON, YAML, and CSV.
 * DataBook Export for XLS, JSON, and YAML.
 * Python Dict Property Support.
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/NOTICE new/tablib-0.13.0/NOTICE
--- old/tablib-0.12.1/NOTICE    2017-08-27 09:23:48.000000000 +0200
+++ new/tablib-0.13.0/NOTICE    2019-03-08 13:15:56.000000000 +0100
@@ -1,32 +1,6 @@
-Tablib includes some vendorized python libraries: ordereddict, markup.
+Tablib includes some vendorized Python libraries: markup.
 
 Markup License
 ==============
 
 Markup is in the public domain.
-
-
-OrderedDict License
-===================
-
-Copyright (c) 2009 Raymond Hettinger
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation files
-(the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of the Software,
-and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-    The above copyright notice and this permission notice shall be
-    included in all copies or substantial portions of the Software.
-
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-    OTHER DEALINGS IN THE SOFTWARE.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/PKG-INFO new/tablib-0.13.0/PKG-INFO
--- old/tablib-0.12.1/PKG-INFO  2017-09-01 21:37:16.000000000 +0200
+++ new/tablib-0.13.0/PKG-INFO  2019-03-08 13:17:26.000000000 +0100
@@ -1,6 +1,6 @@
-Metadata-Version: 1.1
+Metadata-Version: 2.1
 Name: tablib
-Version: 0.12.1
+Version: 0.13.0
 Summary: Format agnostic tabular data library (XLS, JSON, YAML, CSV)
 Home-page: http://python-tablib.org
 Author: Kenneth Reitz
@@ -14,11 +14,11 @@
         
         ::
         
-               _____         ______  ___________ ______
-               __  /_______ ____  /_ ___  /___(_)___  /_
-               _  __/_  __ `/__  __ \__  / __  / __  __ \
-               / /_  / /_/ / _  /_/ /_  /  _  /  _  /_/ /
-               \__/  \__,_/  /_.___/ /_/   /_/   /_.___/
+            _____         ______  ___________ ______
+            __  /_______ ____  /_ ___  /___(_)___  /_
+            _  __/_  __ `/__  __ \__  / __  / __  __ \
+            / /_  / /_/ / _  /_/ /_  /  _  /  _  /_/ /
+            \__/  \__,_/  /_.___/ /_/   /_/   /_.___/
         
         
         
@@ -31,21 +31,31 @@
         - YAML (Sets + Books)
         - Pandas DataFrames (Sets)
         - HTML (Sets)
+        - Jira (Sets)
         - TSV (Sets)
-        - OSD (Sets)
+        - ODS (Sets)
         - CSV (Sets)
         - DBF (Sets)
         
         Note that tablib *purposefully* excludes XML support. It always will. 
(Note: This is a joke. Pull requests are welcome.)
         
+        If you're interested in financially supporting Kenneth Reitz open 
source, consider `visiting this link <https://cash.me/$KennethReitz>`_. Your 
support helps tremendously with sustainability of motivation, as Open Source is 
no longer part of my day job.
+        
         Overview
         --------
         
         `tablib.Dataset()`
-               A Dataset is a table of tabular data. It may or may not have a 
header row. They can be build and manipulated as raw Python datatypes (Lists of 
tuples|dictionaries). Datasets can be imported from JSON, YAML, DBF, and CSV; 
they can be exported to XLSX, XLS, ODS, JSON, YAML, DBF, CSV, TSV, and HTML.
+            A Dataset is a table of tabular data.
+            It may or may not have a header row.
+            They can be build and manipulated as raw Python datatypes (Lists 
of tuples|dictionaries).
+            Datasets can be imported from JSON, YAML, DBF, and CSV;
+            they can be exported to XLSX, XLS, ODS, JSON, YAML, DBF, CSV, TSV, 
and HTML.
         
         `tablib.Databook()`
-               A Databook is a set of Datasets. The most common form of a 
Databook is an Excel file with multiple spreadsheets. Databooks can be imported 
from JSON and YAML; they can be exported to XLSX, XLS, ODS, JSON, and YAML.
+            A Databook is a set of Datasets.
+            The most common form of a Databook is an Excel file with multiple 
spreadsheets.
+            Databooks can be imported from JSON and YAML;
+            they can be exported to XLSX, XLS, ODS, JSON, and YAML.
         
         Usage
         -----
@@ -140,10 +150,10 @@
         
             >>> with open('people.dbf', 'wb') as f:
             ...     f.write(data.export('dbf'))
-            
+        
         Pandas DataFrame!
         +++++++++++++++++
-        :: 
+        ::
         
             >>> print(data.export('df')):
                   first_name last_name  age
@@ -158,7 +168,7 @@
         
         To install tablib, simply: ::
         
-               $ pip install tablib
+               $ pip install tablib[pandas]
         
         Make sure to check out `Tablib on PyPi 
<https://pypi.python.org/pypi/tablib/>`_!
         
@@ -434,7 +444,6 @@
         * DataBook Export for XLS, JSON, and YAML.
         * Python Dict Property Support.
         
-        
 Platform: UNKNOWN
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
@@ -442,7 +451,7 @@
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3.3
 Classifier: Programming Language :: Python :: 3.4
 Classifier: Programming Language :: Python :: 3.5
 Classifier: Programming Language :: Python :: 3.6
+Provides-Extra: pandas
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/README.rst new/tablib-0.13.0/README.rst
--- old/tablib-0.12.1/README.rst        2017-09-01 21:36:16.000000000 +0200
+++ new/tablib-0.13.0/README.rst        2019-03-08 13:15:56.000000000 +0100
@@ -6,11 +6,11 @@
 
 ::
 
-       _____         ______  ___________ ______
-       __  /_______ ____  /_ ___  /___(_)___  /_
-       _  __/_  __ `/__  __ \__  / __  / __  __ \
-       / /_  / /_/ / _  /_/ /_  /  _  /  _  /_/ /
-       \__/  \__,_/  /_.___/ /_/   /_/   /_.___/
+    _____         ______  ___________ ______
+    __  /_______ ____  /_ ___  /___(_)___  /_
+    _  __/_  __ `/__  __ \__  / __  / __  __ \
+    / /_  / /_/ / _  /_/ /_  /  _  /  _  /_/ /
+    \__/  \__,_/  /_.___/ /_/   /_/   /_.___/
 
 
 
@@ -23,21 +23,31 @@
 - YAML (Sets + Books)
 - Pandas DataFrames (Sets)
 - HTML (Sets)
+- Jira (Sets)
 - TSV (Sets)
-- OSD (Sets)
+- ODS (Sets)
 - CSV (Sets)
 - DBF (Sets)
 
 Note that tablib *purposefully* excludes XML support. It always will. (Note: 
This is a joke. Pull requests are welcome.)
 
+If you're interested in financially supporting Kenneth Reitz open source, 
consider `visiting this link <https://cash.me/$KennethReitz>`_. Your support 
helps tremendously with sustainability of motivation, as Open Source is no 
longer part of my day job.
+
 Overview
 --------
 
 `tablib.Dataset()`
-       A Dataset is a table of tabular data. It may or may not have a header 
row. They can be build and manipulated as raw Python datatypes (Lists of 
tuples|dictionaries). Datasets can be imported from JSON, YAML, DBF, and CSV; 
they can be exported to XLSX, XLS, ODS, JSON, YAML, DBF, CSV, TSV, and HTML.
+    A Dataset is a table of tabular data.
+    It may or may not have a header row.
+    They can be build and manipulated as raw Python datatypes (Lists of 
tuples|dictionaries).
+    Datasets can be imported from JSON, YAML, DBF, and CSV;
+    they can be exported to XLSX, XLS, ODS, JSON, YAML, DBF, CSV, TSV, and 
HTML.
 
 `tablib.Databook()`
-       A Databook is a set of Datasets. The most common form of a Databook is 
an Excel file with multiple spreadsheets. Databooks can be imported from JSON 
and YAML; they can be exported to XLSX, XLS, ODS, JSON, and YAML.
+    A Databook is a set of Datasets.
+    The most common form of a Databook is an Excel file with multiple 
spreadsheets.
+    Databooks can be imported from JSON and YAML;
+    they can be exported to XLSX, XLS, ODS, JSON, and YAML.
 
 Usage
 -----
@@ -132,10 +142,10 @@
 
     >>> with open('people.dbf', 'wb') as f:
     ...     f.write(data.export('dbf'))
-    
+
 Pandas DataFrame!
 +++++++++++++++++
-:: 
+::
 
     >>> print(data.export('df')):
           first_name last_name  age
@@ -150,7 +160,7 @@
 
 To install tablib, simply: ::
 
-       $ pip install tablib
+       $ pip install tablib[pandas]
 
 Make sure to check out `Tablib on PyPi 
<https://pypi.python.org/pypi/tablib/>`_!
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/setup.py new/tablib-0.13.0/setup.py
--- old/tablib-0.12.1/setup.py  2017-09-01 21:36:16.000000000 +0200
+++ new/tablib-0.13.0/setup.py  2019-03-08 13:15:56.000000000 +0100
@@ -14,15 +14,6 @@
     os.system("python setup.py sdist upload")
     sys.exit()
 
-if sys.argv[-1] == 'speedups':
-    try:
-        __import__('pip')
-    except ImportError:
-        print('Pip required.')
-        sys.exit(1)
-
-    os.system('pip install ujson')
-    sys.exit()
 
 if sys.argv[-1] == 'test':
     try:
@@ -43,8 +34,8 @@
 
 install = [
     'odfpy',
-    'openpyxl',
-    'unicodecsv',
+    'openpyxl>=2.4.0',
+    'backports.csv',
     'xlrd',
     'xlwt',
     'pyyaml',
@@ -73,7 +64,6 @@
         'License :: OSI Approved :: MIT License',
         'Programming Language :: Python',
         'Programming Language :: Python :: 2.7',
-        'Programming Language :: Python :: 3.3',
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/__init__.py 
new/tablib-0.13.0/tablib/__init__.py
--- old/tablib-0.12.1/tablib/__init__.py        2017-08-27 09:23:48.000000000 
+0200
+++ new/tablib-0.13.0/tablib/__init__.py        2019-03-08 13:15:56.000000000 
+0100
@@ -5,4 +5,3 @@
     InvalidDatasetType, InvalidDimensions, UnsupportedFormat,
     __version__
 )
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/compat.py 
new/tablib-0.13.0/tablib/compat.py
--- old/tablib-0.12.1/tablib/compat.py  2017-08-27 09:23:48.000000000 +0200
+++ new/tablib-0.13.0/tablib/compat.py  2019-03-08 13:15:56.000000000 +0100
@@ -13,35 +13,25 @@
 is_py3 = (sys.version_info[0] > 2)
 
 
-
-try:
-    from collections import OrderedDict
-except ImportError:
-    from tablib.packages.ordereddict import OrderedDict
-
-
 if is_py3:
     from io import BytesIO
+    from io import StringIO
     from tablib.packages import markup3 as markup
-    import tablib.packages.dbfpy3 as dbfpy
-
+    from statistics import median
+    from itertools import zip_longest as izip_longest
     import csv
-    from io import StringIO
-    # py3 mappings
+    import tablib.packages.dbfpy3 as dbfpy
 
-    ifilter = filter
     unicode = str
-    bytes = bytes
-    basestring = str
     xrange = range
 
 else:
     from cStringIO import StringIO as BytesIO
-    from cStringIO import StringIO
+    from StringIO import StringIO
     from tablib.packages import markup
-    from itertools import ifilter
-
-    import unicodecsv as csv
+    from tablib.packages.statistics import median
+    from itertools import izip_longest
+    from backports import csv
     import tablib.packages.dbfpy as dbfpy
 
     unicode = unicode
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/core.py 
new/tablib-0.13.0/tablib/core.py
--- old/tablib-0.12.1/tablib/core.py    2017-09-01 21:36:56.000000000 +0200
+++ new/tablib-0.13.0/tablib/core.py    2019-03-08 13:16:57.000000000 +0100
@@ -9,16 +9,17 @@
     :license: MIT, see LICENSE for more details.
 """
 
+from collections import OrderedDict
 from copy import copy
 from operator import itemgetter
 
 from tablib import formats
 
-from tablib.compat import OrderedDict, unicode
+from tablib.compat import unicode
 
 
 __title__ = 'tablib'
-__version__ = '0.12.1'
+__version__ = '0.13.0'
 __build__ = 0x001201
 __author__ = 'Kenneth Reitz'
 __license__ = 'MIT'
@@ -178,7 +179,7 @@
 
 
     def __getitem__(self, key):
-        if isinstance(key, str) or isinstance(key, unicode):
+        if isinstance(key, (str, unicode)):
             if key in self.headers:
                 pos = self.headers.index(key) # get 'key' index from each data
                 return [row[pos] for row in self._data]
@@ -197,7 +198,7 @@
 
 
     def __delitem__(self, key):
-        if isinstance(key, str) or isinstance(key, unicode):
+        if isinstance(key, (str, unicode)):
 
             if key in self.headers:
 
@@ -526,9 +527,9 @@
 
         Import assumes (for now) that headers exist.
 
-        .. admonition:: Binary Warning
+        .. admonition:: Binary Warning for Python 2
 
-             :class:`Dataset.csv` uses \\r\\n line endings by default, so make
+             :class:`Dataset.csv` uses \\r\\n line endings by default so, in 
Python 2, make
              sure to write in binary mode::
 
                  with open('output.csv', 'wb') as f:
@@ -536,6 +537,18 @@
 
              If you do not do this, and you export the file on Windows, your
              CSV file will open in Excel with a blank line between each row.
+
+        .. admonition:: Line endings for Python 3
+
+             :class:`Dataset.csv` uses \\r\\n line endings by default so, in 
Python 3, make
+             sure to include newline='' otherwise you will get a blank line 
between each row
+             when you open the file in Excel::
+
+                 with open('output.csv', 'w', newline='') as f:
+                     f.write(data.csv)
+
+             If you do not do this, and you export the file on Windows, your
+             CSV file will open in Excel with a blank line between each row.
         """
         pass
 
@@ -631,7 +644,6 @@
         """
         pass
 
-
     @property
     def latex():
         """A LaTeX booktabs representation of the :class:`Dataset` object. If a
@@ -641,6 +653,13 @@
         """
         pass
 
+    @property
+    def jira():
+        """A Jira table representation of the :class:`Dataset` object.
+
+        .. note:: This method can be used for export only.
+        """
+        pass
 
     # ----
     # Rows
@@ -843,7 +862,7 @@
            against each cell value.
         """
 
-        if isinstance(col, str):
+        if isinstance(col, unicode):
             if col in self.headers:
                 col = self.headers.index(col) # get 'key' index from each data
             else:
@@ -876,7 +895,7 @@
         sorted.
         """
 
-        if isinstance(col, str) or isinstance(col, unicode):
+        if isinstance(col, (str, unicode)):
 
             if not self.headers:
                 raise HeadersNeeded
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/__init__.py 
new/tablib-0.13.0/tablib/formats/__init__.py
--- old/tablib-0.12.1/tablib/formats/__init__.py        2017-08-27 
09:23:48.000000000 +0200
+++ new/tablib-0.13.0/tablib/formats/__init__.py        2019-03-08 
13:15:56.000000000 +0100
@@ -14,5 +14,7 @@
 from . import _dbf as dbf
 from . import _latex as latex
 from . import _df as df
+from . import _rst as rst
+from . import _jira as jira
 
-available = (json, xls, yaml, csv, dbf, tsv, html, latex, xlsx, ods, df)
+available = (json, xls, yaml, csv, dbf, tsv, html, jira, latex, xlsx, ods, df, 
rst)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/_csv.py 
new/tablib-0.13.0/tablib/formats/_csv.py
--- old/tablib-0.12.1/tablib/formats/_csv.py    2017-08-27 09:23:48.000000000 
+0200
+++ new/tablib-0.13.0/tablib/formats/_csv.py    2019-03-08 13:15:56.000000000 
+0100
@@ -3,15 +3,14 @@
 """ Tablib - *SV Support.
 """
 
-from tablib.compat import is_py3, csv, StringIO
+from tablib.compat import csv, StringIO, unicode
 
 
 title = 'csv'
 extensions = ('csv',)
 
 
-DEFAULT_ENCODING = 'utf-8'
-DEFAULT_DELIMITER = ','
+DEFAULT_DELIMITER = unicode(',')
 
 
 def export_set(dataset, **kwargs):
@@ -19,8 +18,6 @@
     stream = StringIO()
 
     kwargs.setdefault('delimiter', DEFAULT_DELIMITER)
-    if not is_py3:
-        kwargs.setdefault('encoding', DEFAULT_ENCODING)
 
     _csv = csv.writer(stream, **kwargs)
 
@@ -36,15 +33,13 @@
     dset.wipe()
 
     kwargs.setdefault('delimiter', DEFAULT_DELIMITER)
-    if not is_py3:
-        kwargs.setdefault('encoding', DEFAULT_ENCODING)
 
     rows = csv.reader(StringIO(in_stream), **kwargs)
     for i, row in enumerate(rows):
 
         if (i == 0) and (headers):
             dset.headers = row
-        else:
+        elif row:
             dset.append(row)
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/_dbf.py 
new/tablib-0.13.0/tablib/formats/_dbf.py
--- old/tablib-0.12.1/tablib/formats/_dbf.py    2017-08-27 09:23:48.000000000 
+0200
+++ new/tablib-0.13.0/tablib/formats/_dbf.py    2019-03-08 13:15:56.000000000 
+0100
@@ -89,6 +89,3 @@
         # When unpacking a string argument with less than 8 chars, 
struct.error is
         # raised.
         return False
-
-
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/_jira.py 
new/tablib-0.13.0/tablib/formats/_jira.py
--- old/tablib-0.12.1/tablib/formats/_jira.py   1970-01-01 01:00:00.000000000 
+0100
+++ new/tablib-0.13.0/tablib/formats/_jira.py   2019-03-08 13:15:56.000000000 
+0100
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+
+"""Tablib - Jira table export support.
+
+   Generates a Jira table from the dataset.
+"""
+from tablib.compat import unicode
+
+title = 'jira'
+
+
+def export_set(dataset):
+    """Formats the dataset according to the Jira table syntax:
+
+    ||heading 1||heading 2||heading 3||
+    |col A1|col A2|col A3|
+    |col B1|col B2|col B3|
+
+    :param dataset: dataset to serialize
+    :type dataset: tablib.core.Dataset
+    """
+
+    header = _get_header(dataset.headers) if dataset.headers else ''
+    body = _get_body(dataset)
+    return '%s\n%s' % (header, body) if header else body
+
+
+def _get_body(dataset):
+    return '\n'.join([_serialize_row(row) for row in dataset])
+
+
+def _get_header(headers):
+    return _serialize_row(headers, delimiter='||')
+
+
+def _serialize_row(row, delimiter='|'):
+    return '%s%s%s' % (delimiter,
+                       delimiter.join([unicode(item) if item else ' ' for item 
in row]),
+                       delimiter)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/_json.py 
new/tablib-0.13.0/tablib/formats/_json.py
--- old/tablib-0.12.1/tablib/formats/_json.py   2017-08-27 09:23:48.000000000 
+0200
+++ new/tablib-0.13.0/tablib/formats/_json.py   2019-03-08 13:15:56.000000000 
+0100
@@ -3,36 +3,33 @@
 """ Tablib - JSON Support
 """
 import decimal
+import json
+from uuid import UUID
 
 import tablib
 
-try:
-    import ujson as json
-except ImportError:
-    import json
 
 title = 'json'
 extensions = ('json', 'jsn')
 
 
-def date_handler(obj):
-    if isinstance(obj, decimal.Decimal):
+def serialize_objects_handler(obj):
+    if isinstance(obj, (decimal.Decimal, UUID)):
         return str(obj)
     elif hasattr(obj, 'isoformat'):
         return obj.isoformat()
     else:
         return obj
-    # return obj.isoformat() if hasattr(obj, 'isoformat') else obj
 
 
 def export_set(dataset):
     """Returns JSON representation of Dataset."""
-    return json.dumps(dataset.dict, default=date_handler)
+    return json.dumps(dataset.dict, default=serialize_objects_handler)
 
 
 def export_book(databook):
     """Returns JSON representation of Databook."""
-    return json.dumps(databook._package(), default=date_handler)
+    return json.dumps(databook._package(), default=serialize_objects_handler)
 
 
 def import_set(dset, in_stream):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/_rst.py 
new/tablib-0.13.0/tablib/formats/_rst.py
--- old/tablib-0.12.1/tablib/formats/_rst.py    1970-01-01 01:00:00.000000000 
+0100
+++ new/tablib-0.13.0/tablib/formats/_rst.py    2019-03-08 13:15:56.000000000 
+0100
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+
+""" Tablib - reStructuredText Support
+"""
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from textwrap import TextWrapper
+
+from tablib.compat import (
+    median,
+    unicode,
+    izip_longest,
+)
+
+
+title = 'rst'
+extensions = ('rst',)
+
+
+MAX_TABLE_WIDTH = 80  # Roughly. It may be wider to avoid breaking words.
+
+
+JUSTIFY_LEFT = 'left'
+JUSTIFY_CENTER = 'center'
+JUSTIFY_RIGHT = 'right'
+JUSTIFY_VALUES = (JUSTIFY_LEFT, JUSTIFY_CENTER, JUSTIFY_RIGHT)
+
+
+def to_unicode(value):
+    if isinstance(value, bytes):
+        return value.decode('utf-8')
+    return unicode(value)
+
+
+def _max_word_len(text):
+    """
+    Return the length of the longest word in `text`.
+
+
+    >>> _max_word_len('Python Module for Tabular Datasets')
+    8
+
+    """
+    return max((len(word) for word in text.split()))
+
+
+def _get_column_string_lengths(dataset):
+    """
+    Returns a list of string lengths of each column, and a list of
+    maximum word lengths.
+    """
+    if dataset.headers:
+        column_lengths = [[len(h)] for h in dataset.headers]
+        word_lens = [_max_word_len(h) for h in dataset.headers]
+    else:
+        column_lengths = [[] for _ in range(dataset.width)]
+        word_lens = [0 for _ in range(dataset.width)]
+    for row in dataset.dict:
+        values = iter(row.values() if hasattr(row, 'values') else row)
+        for i, val in enumerate(values):
+            text = to_unicode(val)
+            column_lengths[i].append(len(text))
+            word_lens[i] = max(word_lens[i], _max_word_len(text))
+    return column_lengths, word_lens
+
+
+def _row_to_lines(values, widths, wrapper, sep='|', justify=JUSTIFY_LEFT):
+    """
+    Returns a table row of wrapped values as a list of lines
+    """
+    if justify not in JUSTIFY_VALUES:
+        raise ValueError('Value of "justify" must be one of "{}"'.format(
+            '", "'.join(JUSTIFY_VALUES)
+        ))
+    if justify == JUSTIFY_LEFT:
+        just = lambda text, width: text.ljust(width)
+    elif justify == JUSTIFY_CENTER:
+        just = lambda text, width: text.center(width)
+    else:
+        just = lambda text, width: text.rjust(width)
+    lpad = sep + ' ' if sep else ''
+    rpad = ' ' + sep if sep else ''
+    pad = ' ' + sep + ' '
+    cells = []
+    for value, width in zip(values, widths):
+        wrapper.width = width
+        text = to_unicode(value)
+        cell = wrapper.wrap(text)
+        cells.append(cell)
+    lines = izip_longest(*cells, fillvalue='')
+    lines = (
+        (just(cell_line, widths[i]) for i, cell_line in enumerate(line))
+        for line in lines
+    )
+    lines = [''.join((lpad, pad.join(line), rpad)) for line in lines]
+    return lines
+
+
+def _get_column_widths(dataset, max_table_width=MAX_TABLE_WIDTH, pad_len=3):
+    """
+    Returns a list of column widths proportional to the median length
+    of the text in their cells.
+    """
+    str_lens, word_lens = _get_column_string_lengths(dataset)
+    median_lens = [int(median(lens)) for lens in str_lens]
+    total = sum(median_lens)
+    if total > max_table_width - (pad_len * len(median_lens)):
+        column_widths = (max_table_width * l // total for l in median_lens)
+    else:
+        column_widths = (l for l in median_lens)
+    # Allow for separator and padding:
+    column_widths = (w - pad_len if w > pad_len else w for w in column_widths)
+    # Rather widen table than break words:
+    column_widths = [max(w, l) for w, l in zip(column_widths, word_lens)]
+    return column_widths
+
+
+def export_set_as_simple_table(dataset, column_widths=None):
+    """
+    Returns reStructuredText grid table representation of dataset.
+    """
+    lines = []
+    wrapper = TextWrapper()
+    if column_widths is None:
+        column_widths = _get_column_widths(dataset, pad_len=2)
+    border = '  '.join(['=' * w for w in column_widths])
+
+    lines.append(border)
+    if dataset.headers:
+        lines.extend(_row_to_lines(
+            dataset.headers,
+            column_widths,
+            wrapper,
+            sep='',
+            justify=JUSTIFY_CENTER,
+        ))
+        lines.append(border)
+    for row in dataset.dict:
+        values = iter(row.values() if hasattr(row, 'values') else row)
+        lines.extend(_row_to_lines(values, column_widths, wrapper, ''))
+    lines.append(border)
+    return '\n'.join(lines)
+
+
+def export_set_as_grid_table(dataset, column_widths=None):
+    """
+    Returns reStructuredText grid table representation of dataset.
+
+
+    >>> from tablib import Dataset
+    >>> from tablib.formats import rst
+    >>> bits = ((0, 0), (1, 0), (0, 1), (1, 1))
+    >>> data = Dataset()
+    >>> data.headers = ['A', 'B', 'A and B']
+    >>> for a, b in bits:
+    ...     data.append([bool(a), bool(b), bool(a * b)])
+    >>> print(rst.export_set(data, force_grid=True))
+    +-------+-------+-------+
+    |   A   |   B   | A and |
+    |       |       |   B   |
+    +=======+=======+=======+
+    | False | False | False |
+    +-------+-------+-------+
+    | True  | False | False |
+    +-------+-------+-------+
+    | False | True  | False |
+    +-------+-------+-------+
+    | True  | True  | True  |
+    +-------+-------+-------+
+
+    """
+    lines = []
+    wrapper = TextWrapper()
+    if column_widths is None:
+        column_widths = _get_column_widths(dataset)
+    header_sep = '+=' + '=+='.join(['=' * w for w in column_widths]) + '=+'
+    row_sep = '+-' + '-+-'.join(['-' * w for w in column_widths]) + '-+'
+
+    lines.append(row_sep)
+    if dataset.headers:
+        lines.extend(_row_to_lines(
+            dataset.headers,
+            column_widths,
+            wrapper,
+            justify=JUSTIFY_CENTER,
+        ))
+        lines.append(header_sep)
+    for row in dataset.dict:
+        values = iter(row.values() if hasattr(row, 'values') else row)
+        lines.extend(_row_to_lines(values, column_widths, wrapper))
+        lines.append(row_sep)
+    return '\n'.join(lines)
+
+
+def _use_simple_table(head0, col0, width0):
+    """
+    Use a simple table if the text in the first column is never wrapped
+
+
+    >>> _use_simple_table('menu', ['egg', 'bacon'], 10)
+    True
+    >>> _use_simple_table(None, ['lobster thermidor', 'spam'], 10)
+    False
+
+    """
+    if head0 is not None:
+        head0 = to_unicode(head0)
+        if len(head0) > width0:
+            return False
+    for cell in col0:
+        cell = to_unicode(cell)
+        if len(cell) > width0:
+            return False
+    return True
+
+
+def export_set(dataset, **kwargs):
+    """
+    Returns reStructuredText table representation of dataset.
+
+    Returns a simple table if the text in the first column is never
+    wrapped, otherwise returns a grid table.
+
+
+    >>> from tablib import Dataset
+    >>> bits = ((0, 0), (1, 0), (0, 1), (1, 1))
+    >>> data = Dataset()
+    >>> data.headers = ['A', 'B', 'A and B']
+    >>> for a, b in bits:
+    ...     data.append([bool(a), bool(b), bool(a * b)])
+    >>> table = data.rst
+    >>> table.split('\\n') == [
+    ...     '=====  =====  =====',
+    ...     '  A      B    A and',
+    ...     '                B  ',
+    ...     '=====  =====  =====',
+    ...     'False  False  False',
+    ...     'True   False  False',
+    ...     'False  True   False',
+    ...     'True   True   True ',
+    ...     '=====  =====  =====',
+    ... ]
+    True
+
+    """
+    if not dataset.dict:
+        return ''
+    force_grid = kwargs.get('force_grid', False)
+    max_table_width = kwargs.get('max_table_width', MAX_TABLE_WIDTH)
+    column_widths = _get_column_widths(dataset, max_table_width)
+
+    use_simple_table = _use_simple_table(
+        dataset.headers[0] if dataset.headers else None,
+        dataset.get_col(0),
+        column_widths[0],
+    )
+    if use_simple_table and not force_grid:
+        return export_set_as_simple_table(dataset, column_widths)
+    else:
+        return export_set_as_grid_table(dataset, column_widths)
+
+
+def export_book(databook):
+    """
+    reStructuredText representation of a Databook.
+
+    Tables are separated by a blank line. All tables use the grid
+    format.
+    """
+    return '\n\n'.join(export_set(dataset, force_grid=True)
+                       for dataset in databook._datasets)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/_tsv.py 
new/tablib-0.13.0/tablib/formats/_tsv.py
--- old/tablib-0.12.1/tablib/formats/_tsv.py    2017-08-27 09:23:48.000000000 
+0200
+++ new/tablib-0.13.0/tablib/formats/_tsv.py    2019-03-08 13:15:56.000000000 
+0100
@@ -3,6 +3,7 @@
 """ Tablib - TSV (Tab Separated Values) Support.
 """
 
+from tablib.compat import unicode
 from tablib.formats._csv import (
     export_set as export_set_wrapper,
     import_set as import_set_wrapper,
@@ -12,8 +13,7 @@
 title = 'tsv'
 extensions = ('tsv',)
 
-DEFAULT_ENCODING = 'utf-8'
-DELIMITER = '\t'
+DELIMITER = unicode('\t')
 
 def export_set(dataset):
     """Returns TSV representation of Dataset."""
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/_xls.py 
new/tablib-0.13.0/tablib/formats/_xls.py
--- old/tablib-0.12.1/tablib/formats/_xls.py    2017-08-27 09:23:48.000000000 
+0200
+++ new/tablib-0.13.0/tablib/formats/_xls.py    2019-03-08 13:15:56.000000000 
+0100
@@ -25,7 +25,7 @@
         xlrd.open_workbook(file_contents=stream)
         return True
     except (TypeError, XLRDError):
-        pass 
+        pass
     try:
         xlrd.open_workbook(file_contents=stream.read())
         return True
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/formats/_xlsx.py 
new/tablib-0.13.0/tablib/formats/_xlsx.py
--- old/tablib-0.12.1/tablib/formats/_xlsx.py   2017-08-27 09:23:48.000000000 
+0200
+++ new/tablib-0.13.0/tablib/formats/_xlsx.py   2019-03-08 13:15:56.000000000 
+0100
@@ -52,7 +52,7 @@
 
     wb = Workbook()
     for sheet in wb.worksheets:
-        wb.remove_sheet(sheet)
+        wb.remove(sheet)
     for i, dset in enumerate(databook._datasets):
         ws = wb.create_sheet()
         ws.title = dset.title if dset.title else 'Sheet%s' % (i)
@@ -71,7 +71,7 @@
     dset.wipe()
 
     xls_book = openpyxl.reader.excel.load_workbook(BytesIO(in_stream))
-    sheet = xls_book.get_active_sheet()
+    sheet = xls_book.active
 
     dset.title = sheet.title
 
@@ -119,7 +119,7 @@
         row_number = i + 1
         for j, col in enumerate(row):
             col_idx = get_column_letter(j + 1)
-            cell = ws.cell('%s%s' % (col_idx, row_number))
+            cell = ws['%s%s' % (col_idx, row_number)]
 
             # bold headers
             if (row_number == 1) and dataset.headers:
@@ -129,7 +129,7 @@
                 if freeze_panes:
                     #  Export Freeze only after first Line
                     ws.freeze_panes = 'A2'
-                    
+
             # bold separators
             elif len(row) < dataset.width:
                 cell.value = unicode('%s' % col, errors='ignore')
@@ -145,5 +145,3 @@
                         cell.value = unicode('%s' % col, errors='ignore')
                 except TypeError:
                     cell.value = unicode(col)
-
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/packages/dbfpy3/record.py 
new/tablib-0.13.0/tablib/packages/dbfpy3/record.py
--- old/tablib-0.12.1/tablib/packages/dbfpy3/record.py  2017-08-27 
09:23:48.000000000 +0200
+++ new/tablib-0.13.0/tablib/packages/dbfpy3/record.py  2019-03-08 
13:15:56.000000000 +0100
@@ -220,7 +220,7 @@
     def toString(self):
         """Return string packed record values."""
 #        for (_def, _dat) in zip(self.dbf.header.fields, self.fieldData):
-#            
+#
 
         return "".join([" *"[self.deleted]] + [
            _def.encodeValue(_dat)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/packages/markup3.py 
new/tablib-0.13.0/tablib/packages/markup3.py
--- old/tablib-0.12.1/tablib/packages/markup3.py        2017-08-27 
09:23:48.000000000 +0200
+++ new/tablib-0.13.0/tablib/packages/markup3.py        2019-03-08 
13:15:56.000000000 +0100
@@ -33,7 +33,7 @@
             self.tag = tag.lower( )
         else:
             self.tag = tag.upper( )
-    
+
     def __call__( self, *args, **kwargs ):
         if len( args ) > 1:
             raise ArgumentError( self.tag )
@@ -42,14 +42,14 @@
         if self.parent is not None and self.parent.class_ is not None:
             if 'class_' not in kwargs:
                 kwargs['class_'] = self.parent.class_
-            
+
         if self.parent is None and len( args ) == 1:
             x = [ self.render( self.tag, False, myarg, mydict ) for myarg, 
mydict in _argsdicts( args, kwargs ) ]
             return '\n'.join( x )
         elif self.parent is None and len( args ) == 0:
             x = [ self.render( self.tag, True, myarg, mydict ) for myarg, 
mydict in _argsdicts( args, kwargs ) ]
             return '\n'.join( x )
-            
+
         if self.tag in self.parent.twotags:
             for myarg, mydict in _argsdicts( args, kwargs ):
                 self.render( self.tag, False, myarg, mydict )
@@ -63,7 +63,7 @@
             raise DeprecationError( self.tag )
         else:
             raise InvalidElementError( self.tag, self.parent.mode )
-    
+
     def render( self, tag, single, between, kwargs ):
         """Append the actual tags to content."""
 
@@ -89,7 +89,7 @@
             self.parent.content.append( out )
         else:
             return out
-    
+
     def close( self ):
         """Append a closing tag unless element has only opening tag."""
 
@@ -128,11 +128,11 @@
                                 these two keyword arguments may be used to 
select
                                 the set of valid elements in 'xml' mode
                                 invalid elements will raise appropriate 
exceptions
-        
+
         separator --            string to place between added elements, 
defaults to newline
-        
+
         class_ --               a class that will be added to every element if 
defined"""
-        
+
         valid_onetags = [ "AREA", "BASE", "BR", "COL", "FRAME", "HR", "IMG", 
"INPUT", "LINK", "META", "PARAM" ]
         valid_twotags = [ "A", "ABBR", "ACRONYM", "ADDRESS", "B", "BDO", 
"BIG", "BLOCKQUOTE", "BODY", "BUTTON",
                 "CAPTION", "CITE", "CODE", "COLGROUP", "DD", "DEL", "DFN", 
"DIV", "DL", "DT", "EM", "FIELDSET",
@@ -163,7 +163,7 @@
             self.deptags += list(map( str.lower, self.deptags ))
             self.mode = 'strict_html'
         elif mode == 'loose_html':
-            self.onetags = valid_onetags + deprecated_onetags 
+            self.onetags = valid_onetags + deprecated_onetags
             self.onetags += list(map( str.lower, self.onetags ))
             self.twotags = valid_twotags + deprecated_twotags
             self.twotags += list(map( str.lower, self.twotags ))
@@ -187,12 +187,12 @@
         return element( attr, case=self.case, parent=self )
 
     def __str__( self ):
-        
+
         if self._full and ( self.mode == 'strict_html' or self.mode == 
'loose_html' ):
             end = [ '</body>', '</html>' ]
         else:
             end = [ ]
-        
+
         return self.separator.join( self.header + self.content + self.footer + 
end )
 
     def __call__( self, escape=False ):
@@ -232,7 +232,7 @@
 
         lang --     language, usually a two character string, will appear
                     as <html lang='en'> in html mode (ignored in xml mode)
-        
+
         css --      Cascading Style Sheet filename as a string or a list of
                     strings for multiple css files (ignored in xml mode)
 
@@ -306,7 +306,7 @@
     def css( self, filelist ):
         """This convenience function is only useful for html.
         It adds css stylesheet(s) to the document via the <link> element."""
-      
+
         if isinstance( filelist, str ):
             self.link( href=filelist, rel='stylesheet', type='text/css', 
media='all' )
         else:
@@ -339,10 +339,10 @@
     """An instance of oneliner returns a string corresponding to one element.
     This class can be used to write 'oneliners' that return a string
     immediately so there is no need to instantiate the page class."""
-    
+
     def __init__( self, case='lower' ):
         self.case = case
-    
+
     def __getattr__( self, attr ):
         if attr.startswith("__") and attr.endswith("__"):
             raise AttributeError(attr)
@@ -353,9 +353,9 @@
 
 def _argsdicts( args, mydict ):
     """A utility generator that pads argument list and dictionary values, will 
only be called with len( args ) = 0, 1."""
-    
+
     if len( args ) == 0:
-        args = None, 
+        args = None,
     elif len( args ) == 1:
         args = _totuple( args[0] )
     else:
@@ -418,7 +418,7 @@
 
 def unescape( text ):
     """Inverse of escape."""
-    
+
     if isinstance( text, str ):
         if '&amp;' in text:
             text = text.replace( '&amp;', '&' )
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/packages/ordereddict.py 
new/tablib-0.13.0/tablib/packages/ordereddict.py
--- old/tablib-0.12.1/tablib/packages/ordereddict.py    2017-08-27 
09:23:48.000000000 +0200
+++ new/tablib-0.13.0/tablib/packages/ordereddict.py    1970-01-01 
01:00:00.000000000 +0100
@@ -1,127 +0,0 @@
-# Copyright (c) 2009 Raymond Hettinger
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation files
-# (the "Software"), to deal in the Software without restriction,
-# including without limitation the rights to use, copy, modify, merge,
-# publish, distribute, sublicense, and/or sell copies of the Software,
-# and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
-#
-#     The above copyright notice and this permission notice shall be
-#     included in all copies or substantial portions of the Software.
-#
-#     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-#     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-#     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-#     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-#     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-#     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-#     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-#     OTHER DEALINGS IN THE SOFTWARE.
-
-from UserDict import DictMixin
-
-class OrderedDict(dict, DictMixin):
-
-    def __init__(self, *args, **kwds):
-        if len(args) > 1:
-            raise TypeError('expected at most 1 arguments, got %d' % len(args))
-        try:
-            self.__end
-        except AttributeError:
-            self.clear()
-        self.update(*args, **kwds)
-
-    def clear(self):
-        self.__end = end = []
-        end += [None, end, end]         # sentinel node for doubly linked list
-        self.__map = {}                 # key --> [key, prev, next]
-        dict.clear(self)
-
-    def __setitem__(self, key, value):
-        if key not in self:
-            end = self.__end
-            curr = end[1]
-            curr[2] = end[1] = self.__map[key] = [key, curr, end]
-        dict.__setitem__(self, key, value)
-
-    def __delitem__(self, key):
-        dict.__delitem__(self, key)
-        key, prev, next = self.__map.pop(key)
-        prev[2] = next
-        next[1] = prev
-
-    def __iter__(self):
-        end = self.__end
-        curr = end[2]
-        while curr is not end:
-            yield curr[0]
-            curr = curr[2]
-
-    def __reversed__(self):
-        end = self.__end
-        curr = end[1]
-        while curr is not end:
-            yield curr[0]
-            curr = curr[1]
-
-    def popitem(self, last=True):
-        if not self:
-            raise KeyError('dictionary is empty')
-        if last:
-            key = next(reversed(self))
-        else:
-            key = next(iter(self))
-        value = self.pop(key)
-        return key, value
-
-    def __reduce__(self):
-        items = [[k, self[k]] for k in self]
-        tmp = self.__map, self.__end
-        del self.__map, self.__end
-        inst_dict = vars(self).copy()
-        self.__map, self.__end = tmp
-        if inst_dict:
-            return (self.__class__, (items,), inst_dict)
-        return self.__class__, (items,)
-
-    def keys(self):
-        return list(self)
-
-    setdefault = DictMixin.setdefault
-    update = DictMixin.update
-    pop = DictMixin.pop
-    values = DictMixin.values
-    items = DictMixin.items
-    iterkeys = DictMixin.iterkeys
-    itervalues = DictMixin.itervalues
-    iteritems = DictMixin.iteritems
-
-    def __repr__(self):
-        if not self:
-            return '%s()' % (self.__class__.__name__,)
-        return '%s(%r)' % (self.__class__.__name__, list(self.items()))
-
-    def copy(self):
-        return self.__class__(self)
-
-    @classmethod
-    def fromkeys(cls, iterable, value=None):
-        d = cls()
-        for key in iterable:
-            d[key] = value
-        return d
-
-    def __eq__(self, other):
-        if isinstance(other, OrderedDict):
-            if len(self) != len(other):
-                return False
-            for p, q in  zip(list(self.items()), list(other.items())):
-                if p != q:
-                    return False
-            return True
-        return dict.__eq__(self, other)
-
-    def __ne__(self, other):
-        return not self == other
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib/packages/statistics.py 
new/tablib-0.13.0/tablib/packages/statistics.py
--- old/tablib-0.12.1/tablib/packages/statistics.py     1970-01-01 
01:00:00.000000000 +0100
+++ new/tablib-0.13.0/tablib/packages/statistics.py     2019-03-08 
13:15:56.000000000 +0100
@@ -0,0 +1,24 @@
+from __future__ import division
+
+
+def median(data):
+    """
+    Return the median (middle value) of numeric data, using the common
+    "mean of middle two" method. If data is empty, ValueError is raised.
+
+    Mimics the behaviour of Python3's statistics.median
+
+    >>> median([1, 3, 5])
+    3
+    >>> median([1, 3, 5, 7])
+    4.0
+
+    """
+    data = sorted(data)
+    n = len(data)
+    if not n:
+        raise ValueError("No median for empty data")
+    i = n // 2
+    if n % 2:
+        return data[i]
+    return (data[i - 1] + data[i]) / 2
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib.egg-info/PKG-INFO 
new/tablib-0.13.0/tablib.egg-info/PKG-INFO
--- old/tablib-0.12.1/tablib.egg-info/PKG-INFO  2017-09-01 21:37:16.000000000 
+0200
+++ new/tablib-0.13.0/tablib.egg-info/PKG-INFO  2019-03-08 13:17:26.000000000 
+0100
@@ -1,6 +1,6 @@
-Metadata-Version: 1.1
+Metadata-Version: 2.1
 Name: tablib
-Version: 0.12.1
+Version: 0.13.0
 Summary: Format agnostic tabular data library (XLS, JSON, YAML, CSV)
 Home-page: http://python-tablib.org
 Author: Kenneth Reitz
@@ -14,11 +14,11 @@
         
         ::
         
-               _____         ______  ___________ ______
-               __  /_______ ____  /_ ___  /___(_)___  /_
-               _  __/_  __ `/__  __ \__  / __  / __  __ \
-               / /_  / /_/ / _  /_/ /_  /  _  /  _  /_/ /
-               \__/  \__,_/  /_.___/ /_/   /_/   /_.___/
+            _____         ______  ___________ ______
+            __  /_______ ____  /_ ___  /___(_)___  /_
+            _  __/_  __ `/__  __ \__  / __  / __  __ \
+            / /_  / /_/ / _  /_/ /_  /  _  /  _  /_/ /
+            \__/  \__,_/  /_.___/ /_/   /_/   /_.___/
         
         
         
@@ -31,21 +31,31 @@
         - YAML (Sets + Books)
         - Pandas DataFrames (Sets)
         - HTML (Sets)
+        - Jira (Sets)
         - TSV (Sets)
-        - OSD (Sets)
+        - ODS (Sets)
         - CSV (Sets)
         - DBF (Sets)
         
         Note that tablib *purposefully* excludes XML support. It always will. 
(Note: This is a joke. Pull requests are welcome.)
         
+        If you're interested in financially supporting Kenneth Reitz open 
source, consider `visiting this link <https://cash.me/$KennethReitz>`_. Your 
support helps tremendously with sustainability of motivation, as Open Source is 
no longer part of my day job.
+        
         Overview
         --------
         
         `tablib.Dataset()`
-               A Dataset is a table of tabular data. It may or may not have a 
header row. They can be build and manipulated as raw Python datatypes (Lists of 
tuples|dictionaries). Datasets can be imported from JSON, YAML, DBF, and CSV; 
they can be exported to XLSX, XLS, ODS, JSON, YAML, DBF, CSV, TSV, and HTML.
+            A Dataset is a table of tabular data.
+            It may or may not have a header row.
+            They can be build and manipulated as raw Python datatypes (Lists 
of tuples|dictionaries).
+            Datasets can be imported from JSON, YAML, DBF, and CSV;
+            they can be exported to XLSX, XLS, ODS, JSON, YAML, DBF, CSV, TSV, 
and HTML.
         
         `tablib.Databook()`
-               A Databook is a set of Datasets. The most common form of a 
Databook is an Excel file with multiple spreadsheets. Databooks can be imported 
from JSON and YAML; they can be exported to XLSX, XLS, ODS, JSON, and YAML.
+            A Databook is a set of Datasets.
+            The most common form of a Databook is an Excel file with multiple 
spreadsheets.
+            Databooks can be imported from JSON and YAML;
+            they can be exported to XLSX, XLS, ODS, JSON, and YAML.
         
         Usage
         -----
@@ -140,10 +150,10 @@
         
             >>> with open('people.dbf', 'wb') as f:
             ...     f.write(data.export('dbf'))
-            
+        
         Pandas DataFrame!
         +++++++++++++++++
-        :: 
+        ::
         
             >>> print(data.export('df')):
                   first_name last_name  age
@@ -158,7 +168,7 @@
         
         To install tablib, simply: ::
         
-               $ pip install tablib
+               $ pip install tablib[pandas]
         
         Make sure to check out `Tablib on PyPi 
<https://pypi.python.org/pypi/tablib/>`_!
         
@@ -434,7 +444,6 @@
         * DataBook Export for XLS, JSON, and YAML.
         * Python Dict Property Support.
         
-        
 Platform: UNKNOWN
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
@@ -442,7 +451,7 @@
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3.3
 Classifier: Programming Language :: Python :: 3.4
 Classifier: Programming Language :: Python :: 3.5
 Classifier: Programming Language :: Python :: 3.6
+Provides-Extra: pandas
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib.egg-info/SOURCES.txt 
new/tablib-0.13.0/tablib.egg-info/SOURCES.txt
--- old/tablib-0.12.1/tablib.egg-info/SOURCES.txt       2017-09-01 
21:37:16.000000000 +0200
+++ new/tablib-0.13.0/tablib.egg-info/SOURCES.txt       2019-03-08 
13:17:26.000000000 +0100
@@ -19,9 +19,11 @@
 tablib/formats/_dbf.py
 tablib/formats/_df.py
 tablib/formats/_html.py
+tablib/formats/_jira.py
 tablib/formats/_json.py
 tablib/formats/_latex.py
 tablib/formats/_ods.py
+tablib/formats/_rst.py
 tablib/formats/_tsv.py
 tablib/formats/_xls.py
 tablib/formats/_xlsx.py
@@ -29,7 +31,7 @@
 tablib/packages/__init__.py
 tablib/packages/markup.py
 tablib/packages/markup3.py
-tablib/packages/ordereddict.py
+tablib/packages/statistics.py
 tablib/packages/dbfpy/__init__.py
 tablib/packages/dbfpy/dbf.py
 tablib/packages/dbfpy/dbfnew.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/tablib.egg-info/requires.txt 
new/tablib-0.13.0/tablib.egg-info/requires.txt
--- old/tablib-0.12.1/tablib.egg-info/requires.txt      2017-09-01 
21:37:16.000000000 +0200
+++ new/tablib-0.13.0/tablib.egg-info/requires.txt      2019-03-08 
13:17:26.000000000 +0100
@@ -1,6 +1,6 @@
 odfpy
-openpyxl
-unicodecsv
+openpyxl>=2.4.0
+backports.csv
 xlrd
 xlwt
 pyyaml
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/tablib-0.12.1/test_tablib.py 
new/tablib-0.13.0/test_tablib.py
--- old/tablib-0.12.1/test_tablib.py    2017-08-27 09:27:51.000000000 +0200
+++ new/tablib-0.13.0/test_tablib.py    2019-03-08 13:15:56.000000000 +0100
@@ -1,16 +1,19 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 """Tests for Tablib."""
+from __future__ import unicode_literals
 
+import datetime
+import doctest
 import json
-import unittest
 import sys
-
-import datetime
+import unittest
+from uuid import uuid4
 
 import tablib
 from tablib.compat import markup, unicode, is_py3
 from tablib.core import Row
+from tablib.formats import csv as csv_format
 
 
 class TablibTestCase(unittest.TestCase):
@@ -226,6 +229,22 @@
         # Delete from invalid index
         self.assertRaises(IndexError, self.founders.__delitem__, 3)
 
+    def test_json_export(self):
+        """Verify exporting dataset object as JSON"""
+
+        address_id = uuid4()
+        headers = self.headers + ('address_id',)
+        founders = tablib.Dataset(headers=headers, title='Founders')
+        founders.append(('John', 'Adams', 90, address_id))
+        founders_json = founders.export('json')
+
+        expected_json = (
+            '[{"first_name": "John", "last_name": "Adams", "gpa": 90, '
+            '"address_id": "%s"}]' % str(address_id)
+        )
+
+        self.assertEqual(founders_json, expected_json)
+
     def test_csv_export(self):
         """Verify exporting dataset object as CSV."""
 
@@ -298,6 +317,23 @@
 
         self.assertEqual(html, d.html)
 
+    def test_jira_export(self):
+
+        expected = """||first_name||last_name||gpa||
+|John|Adams|90|
+|George|Washington|67|
+|Thomas|Jefferson|50|"""
+        self.assertEqual(expected, self.founders.jira)
+
+    def test_jira_export_no_headers(self):
+        self.assertEqual('|a|b|c|', tablib.Dataset(['a', 'b', 'c']).jira)
+
+    def test_jira_export_none_and_empty_values(self):
+        self.assertEqual('| | |c|', tablib.Dataset(['', None, 'c']).jira)
+
+    def test_jira_export_empty_dataset(self):
+        self.assertTrue(tablib.Dataset().jira is not None)
+
     def test_latex_export(self):
         """LaTeX export"""
 
@@ -381,8 +417,10 @@
         data.xlsx
         data.ods
         data.html
+        data.jira
         data.latex
         data.df
+        data.rst
 
     def test_datetime_append(self):
         """Passes in a single datetime and a single date and exports."""
@@ -402,7 +440,9 @@
         data.xlsx
         data.ods
         data.html
+        data.jira
         data.latex
+        data.rst
 
     def test_book_export_no_exceptions(self):
         """Test that various exports don't error out."""
@@ -416,6 +456,7 @@
         book.xlsx
         book.ods
         book.html
+        data.rst
 
     def test_json_import_set(self):
         """Generate and import JSON set serialization."""
@@ -531,6 +572,15 @@
 
         self.assertEqual(_csv, data.csv)
 
+    def test_csv_import_set_with_unicode_str(self):
+        """Import CSV set with non-ascii characters in unicode literal"""
+        csv_text = (
+            
"id,givenname,surname,loginname,email,pref_firstname,pref_lastname\n"
+            "13765,Ævar,Arnfjörð,testing,[email protected],Ævar,Arnfjörð"
+        )
+        data.csv = csv_text
+        self.assertEqual(data.width, 7)
+
     def test_tsv_import_set(self):
         """Generate and import TSV set serialization."""
         data.append(self.john)
@@ -961,6 +1011,24 @@
         self.founders.append(('First\nSecond', 'Name', 42))
         self.founders.export('xlsx')
 
+    def test_rst_force_grid(self):
+        data.append(self.john)
+        data.append(self.george)
+        data.headers = self.headers
+
+        simple = tablib.formats._rst.export_set(data)
+        grid = tablib.formats._rst.export_set(data, force_grid=True)
+        self.assertNotEqual(simple, grid)
+        self.assertNotIn('+', simple)
+        self.assertIn('+', grid)
+
+
+class DocTests(unittest.TestCase):
+
+    def test_rst_formatter_doctests(self):
+        results = doctest.testmod(tablib.formats._rst)
+        self.assertEqual(results.failed, 0)
+
 
 if __name__ == '__main__':
     unittest.main()


Reply via email to