Hello community,
here is the log from the commit of package python-backports.csv for
openSUSE:Factory checked in at 2019-03-11 13:51:43
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-backports.csv (Old)
and /work/SRC/openSUSE:Factory/.python-backports.csv.new.28833 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-backports.csv"
Mon Mar 11 13:51:43 2019 rev:2 rq:683740 version:1.0.7
Changes:
--------
--- /work/SRC/openSUSE:Factory/python-backports.csv/python-backports.csv.changes 2019-02-26 22:18:58.622138473 +0100
+++ /work/SRC/openSUSE:Factory/.python-backports.csv.new.28833/python-backports.csv.changes 2019-03-11 13:51:46.845251609 +0100
@@ -1,0 +2,7 @@
+Mon Mar 11 09:49:35 UTC 2019 - Tomáš Chvátal <[email protected]>
+
+- Update to 1.0.7:
+ * Ship tests
+- Run tests
+
+-------------------------------------------------------------------
Old:
----
backports.csv-1.0.6.tar.gz
New:
----
backports.csv-1.0.7.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-backports.csv.spec ++++++
--- /var/tmp/diff_new_pack.8NkzaZ/_old 2019-03-11 13:51:47.565251138 +0100
+++ /var/tmp/diff_new_pack.8NkzaZ/_new 2019-03-11 13:51:47.565251138 +0100
@@ -13,21 +13,23 @@
# published by the Open Source Initiative.
# Please submit bugfixes or comments via https://bugs.opensuse.org/
+#
+
%define skip_python3 1
%{?!python_module:%define python_module() python-%{**} python3-%{**}}
Name: python-backports.csv
-Version: 1.0.6
+Version: 1.0.7
Release: 0
-License: Python-2.0
Summary: Backport of Python 3 csv module
-Url: https://github.com/ryanhiebert/backports.csv
+License: Python-2.0
Group: Development/Languages/Python
+Url: https://github.com/ryanhiebert/backports.csv
Source: https://files.pythonhosted.org/packages/source/b/backports.csv/backports.csv-%{version}.tar.gz
-BuildRequires: python-rpm-macros
BuildRequires: %{python_module setuptools}
BuildRequires: fdupes
+BuildRequires: python-rpm-macros
BuildArch: noarch
%python_subpackages
@@ -51,6 +53,9 @@
%python_install
%python_expand %fdupes %{buildroot}%{$python_sitelib}
+%check
+%python_exec setup.py test
+
%files %{python_files}
%doc README.rst HISTORY.rst
%license LICENSE.rst
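
Note: the new %check section runs the upstream test suite once per built Python flavor via "%python_exec setup.py test". A rough hand-run equivalent is sketched below; it assumes you are at the root of an unpacked backports.csv-1.0.7 sdist (where tests.py now ships) and that plain unittest discovery is sufficient, since the exact "setup.py test" wiring is not visible in this diff.

    # Sketch only: run the shipped tests with plain unittest from the sdist root.
    import sys
    import unittest

    suite = unittest.defaultTestLoader.discover('.', pattern='tests.py')
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(0 if result.wasSuccessful() else 1)
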
++++++ backports.csv-1.0.6.tar.gz -> backports.csv-1.0.7.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/backports.csv-1.0.6/HISTORY.rst new/backports.csv-1.0.7/HISTORY.rst
--- old/backports.csv-1.0.6/HISTORY.rst 2018-05-22 16:15:16.000000000 +0200
+++ new/backports.csv-1.0.7/HISTORY.rst 2019-03-11 04:05:17.000000000 +0100
@@ -1,3 +1,9 @@
+1.0.7 (2019-03-10)
+++++++++++++++++++
+
+* Add tests to ``MANIFEST.in``.
+ - thanks to @jayvdb for the pull request
+
1.0.6 (2018-05-22)
++++++++++++++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/backports.csv-1.0.6/MANIFEST.in new/backports.csv-1.0.7/MANIFEST.in
--- old/backports.csv-1.0.6/MANIFEST.in 2018-05-22 16:15:16.000000000 +0200
+++ new/backports.csv-1.0.7/MANIFEST.in 2019-03-11 04:05:17.000000000 +0100
@@ -1 +1,2 @@
include *.rst
+include tox.ini tests.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/backports.csv-1.0.6/PKG-INFO new/backports.csv-1.0.7/PKG-INFO
--- old/backports.csv-1.0.6/PKG-INFO 2018-05-22 16:15:38.000000000 +0200
+++ new/backports.csv-1.0.7/PKG-INFO 2019-03-11 04:05:31.000000000 +0100
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: backports.csv
-Version: 1.0.6
+Version: 1.0.7
Summary: Backport of Python 3 csv module
Home-page: https://github.com/ryanhiebert/backports.csv
Author: Ryan Hiebert
@@ -76,6 +76,12 @@
since the csv module does its own (universal) newline handling.
+ 1.0.7 (2019-03-10)
+ ++++++++++++++++++
+
+ * Add tests to ``MANIFEST.in``.
+ - thanks to @jayvdb for the pull request
+
1.0.6 (2018-05-22)
++++++++++++++++++
@@ -130,3 +136,4 @@
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/backports.csv-1.0.6/setup.py new/backports.csv-1.0.7/setup.py
--- old/backports.csv-1.0.6/setup.py 2018-05-22 16:15:16.000000000 +0200
+++ new/backports.csv-1.0.7/setup.py 2019-03-11 04:05:17.000000000 +0100
@@ -11,7 +11,7 @@
author='Ryan Hiebert',
author_email='[email protected]',
url='https://github.com/ryanhiebert/backports.csv',
- version='1.0.6',
+ version='1.0.7',
py_modules=['backports.csv'],
namespace_packages=['backports'],
classifiers=[
@@ -24,6 +24,7 @@
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
],
package_dir={'': 'src'},
)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/backports.csv-1.0.6/src/backports.csv.egg-info/PKG-INFO new/backports.csv-1.0.7/src/backports.csv.egg-info/PKG-INFO
--- old/backports.csv-1.0.6/src/backports.csv.egg-info/PKG-INFO 2018-05-22 16:15:38.000000000 +0200
+++ new/backports.csv-1.0.7/src/backports.csv.egg-info/PKG-INFO 2019-03-11 04:05:31.000000000 +0100
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: backports.csv
-Version: 1.0.6
+Version: 1.0.7
Summary: Backport of Python 3 csv module
Home-page: https://github.com/ryanhiebert/backports.csv
Author: Ryan Hiebert
@@ -76,6 +76,12 @@
since the csv module does its own (universal) newline handling.
+ 1.0.7 (2019-03-10)
+ ++++++++++++++++++
+
+ * Add tests to ``MANIFEST.in``.
+ - thanks to @jayvdb for the pull request
+
1.0.6 (2018-05-22)
++++++++++++++++++
@@ -130,3 +136,4 @@
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/backports.csv-1.0.6/src/backports.csv.egg-info/SOURCES.txt new/backports.csv-1.0.7/src/backports.csv.egg-info/SOURCES.txt
--- old/backports.csv-1.0.6/src/backports.csv.egg-info/SOURCES.txt 2018-05-22 16:15:38.000000000 +0200
+++ new/backports.csv-1.0.7/src/backports.csv.egg-info/SOURCES.txt 2019-03-11 04:05:31.000000000 +0100
@@ -4,6 +4,8 @@
README.rst
setup.cfg
setup.py
+tests.py
+tox.ini
src/backports/__init__.py
src/backports/csv.py
src/backports.csv.egg-info/PKG-INFO
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/backports.csv-1.0.6/tests.py new/backports.csv-1.0.7/tests.py
--- old/backports.csv-1.0.6/tests.py 1970-01-01 01:00:00.000000000 +0100
+++ new/backports.csv-1.0.7/tests.py 2019-03-11 04:05:17.000000000 +0100
@@ -0,0 +1,1111 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2001,2002 Python Software Foundation
+# csv package unit tests
+from __future__ import absolute_import, unicode_literals
+
+import io
+import sys
+import os
+import unittest
+from io import BytesIO, StringIO, TextIOWrapper
+import gc
+
+try:
+ from backports import csv
+except ImportError:
+ import csv
+
+PY3 = sys.version_info[0] == 3
+text_type = str if PY3 else unicode
+
+
+def TemporaryFile(mode='w+b', newline=None, encoding=None):
+ """
+ A Python 2/3 compatible wrapper for TemporaryFile.
+
+ It has only the functionality needed for this test module.
+ """
+ assert mode == 'w+', 'Only w+ mode is supported'
+ file_like = BytesIO()
+ return TextIOWrapper(file_like, newline=newline, encoding=encoding)
+
+
+class TestCase(unittest.TestCase):
+ def assertIn(self, item, container):
+ self.assertTrue(item in container,
+ '{0} not in {1}'.format(repr(item), repr(container)))
+
+ def assertNotIn(self, item, container):
+ self.assertFalse(item in container,
+ '{0} in {1}'.format(repr(item), repr(container)))
+
+ def assertRaisesGetException(self, exc, func, *args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ self.fail('Error not raised')
+ except exc as inst:
+ return inst
+
+
+class Test_Csv(TestCase):
+ """
+ Test the underlying C csv parser in ways that are not appropriate
+ from the high level interface. Further tests of this nature are done
+ in TestDialectRegistry.
+ """
+ def _test_arg_valid(self, ctor, arg):
+ self.assertRaises(TypeError, ctor)
+ self.assertRaises(TypeError, ctor, None)
+ self.assertRaises(TypeError, ctor, arg, bad_attr = 0)
+ self.assertRaises(TypeError, ctor, arg, delimiter = 0)
+ self.assertRaises(TypeError, ctor, arg, delimiter = 'XX')
+ self.assertRaises(csv.Error, ctor, arg, 'foo')
+ self.assertRaises(TypeError, ctor, arg, delimiter=None)
+ self.assertRaises(TypeError, ctor, arg, delimiter=1)
+ self.assertRaises(TypeError, ctor, arg, quotechar=1)
+ self.assertRaises(TypeError, ctor, arg, lineterminator=None)
+ self.assertRaises(TypeError, ctor, arg, lineterminator=1)
+ self.assertRaises(TypeError, ctor, arg, quoting=None)
+ self.assertRaises(TypeError, ctor, arg,
+ quoting=csv.QUOTE_ALL, quotechar='')
+ self.assertRaises(TypeError, ctor, arg,
+ quoting=csv.QUOTE_ALL, quotechar=None)
+
+ def test_reader_arg_valid(self):
+ self._test_arg_valid(csv.reader, [])
+
+ def test_writer_arg_valid(self):
+ self._test_arg_valid(csv.writer, StringIO())
+
+ def _test_default_attrs(self, ctor, *args):
+ obj = ctor(*args)
+ # Check defaults
+ self.assertEqual(obj.dialect.delimiter, ',')
+ self.assertEqual(obj.dialect.doublequote, True)
+ self.assertEqual(obj.dialect.escapechar, None)
+ self.assertEqual(obj.dialect.lineterminator, "\r\n")
+ self.assertEqual(obj.dialect.quotechar, '"')
+ self.assertEqual(obj.dialect.quoting, csv.QUOTE_MINIMAL)
+ self.assertEqual(obj.dialect.skipinitialspace, False)
+ self.assertEqual(obj.dialect.strict, False)
+ # Try deleting or changing attributes (they are read-only)
+ self.assertRaises(AttributeError, delattr, obj.dialect, 'delimiter')
+        self.assertRaises(AttributeError, setattr, obj.dialect, 'delimiter', ':')
+ self.assertRaises(AttributeError, delattr, obj.dialect, 'quoting')
+ self.assertRaises(AttributeError, setattr, obj.dialect,
+ 'quoting', None)
+
+ def test_reader_attrs(self):
+ self._test_default_attrs(csv.reader, [])
+
+ def test_writer_attrs(self):
+ self._test_default_attrs(csv.writer, StringIO())
+
+ def _test_kw_attrs(self, ctor, *args):
+ # Now try with alternate options
+ kwargs = dict(delimiter=':', doublequote=False, escapechar='\\',
+ lineterminator='\r', quotechar='*',
+ quoting=csv.QUOTE_NONE, skipinitialspace=True,
+ strict=True)
+ obj = ctor(*args, **kwargs)
+ self.assertEqual(obj.dialect.delimiter, ':')
+ self.assertEqual(obj.dialect.doublequote, False)
+ self.assertEqual(obj.dialect.escapechar, '\\')
+ self.assertEqual(obj.dialect.lineterminator, "\r")
+ self.assertEqual(obj.dialect.quotechar, '*')
+ self.assertEqual(obj.dialect.quoting, csv.QUOTE_NONE)
+ self.assertEqual(obj.dialect.skipinitialspace, True)
+ self.assertEqual(obj.dialect.strict, True)
+
+ def test_reader_kw_attrs(self):
+ self._test_kw_attrs(csv.reader, [])
+
+ def test_writer_kw_attrs(self):
+ self._test_kw_attrs(csv.writer, StringIO())
+
+ def _test_dialect_attrs(self, ctor, *args):
+ # Now try with dialect-derived options
+ class dialect:
+ delimiter='-'
+ doublequote=False
+ escapechar='^'
+ lineterminator='$'
+ quotechar='#'
+ quoting=csv.QUOTE_ALL
+ skipinitialspace=True
+ strict=False
+ args = args + (dialect,)
+ obj = ctor(*args)
+ self.assertEqual(obj.dialect.delimiter, '-')
+ self.assertEqual(obj.dialect.doublequote, False)
+ self.assertEqual(obj.dialect.escapechar, '^')
+ self.assertEqual(obj.dialect.lineterminator, "$")
+ self.assertEqual(obj.dialect.quotechar, '#')
+ self.assertEqual(obj.dialect.quoting, csv.QUOTE_ALL)
+ self.assertEqual(obj.dialect.skipinitialspace, True)
+ self.assertEqual(obj.dialect.strict, False)
+
+ def test_reader_dialect_attrs(self):
+ self._test_dialect_attrs(csv.reader, [])
+
+ def test_writer_dialect_attrs(self):
+ self._test_dialect_attrs(csv.writer, StringIO())
+
+
+ def _write_test(self, fields, expect, **kwargs):
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj, **kwargs)
+ writer.writerow(fields)
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(),
+ expect + writer.dialect.lineterminator)
+
+ def _write_error_test(self, exc, fields, **kwargs):
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj, **kwargs)
+ self.assertRaises(exc, writer.writerow, fields)
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), '')
+
+ def test_write_arg_valid(self):
+ self._write_error_test(csv.Error, None)
+ self._write_test((), '')
+ self._write_test([None], '""')
+ self._write_error_test(csv.Error, [None], quoting = csv.QUOTE_NONE)
+ # Check that exceptions are passed up the chain
+ class BadList:
+ def __len__(self):
+ return 10;
+ def __getitem__(self, i):
+ if i > 2:
+ raise OSError
+ self._write_error_test(OSError, BadList())
+ class BadItem:
+ def __str__(self):
+ raise OSError
+ self._write_error_test(OSError, [BadItem()])
+
+ def test_write_bigfield(self):
+ # This exercises the buffer realloc functionality
+ bigstring = 'X' * 50000
+ self._write_test([bigstring,bigstring], '%s,%s' % \
+ (bigstring, bigstring))
+
+ def test_write_quoting(self):
+ self._write_test(['a',1,'p,q'], 'a,1,"p,q"')
+ self._write_error_test(csv.Error, ['a',1,'p,q'],
+ quoting = csv.QUOTE_NONE)
+ self._write_test(['a',1,'p,q'], 'a,1,"p,q"',
+ quoting = csv.QUOTE_MINIMAL)
+ self._write_test(['a',1,'p,q'], '"a",1,"p,q"',
+ quoting = csv.QUOTE_NONNUMERIC)
+ self._write_test(['a',1,'p,q'], '"a","1","p,q"',
+ quoting = csv.QUOTE_ALL)
+ self._write_test(['a\nb',1], '"a\nb","1"',
+ quoting = csv.QUOTE_ALL)
+
+ def test_write_escape(self):
+ self._write_test(['a',1,'p,q'], 'a,1,"p,q"',
+ escapechar='\\')
+ self._write_error_test(csv.Error, ['a',1,'p,"q"'],
+ escapechar=None, doublequote=False)
+ self._write_test(['a',1,'p,"q"'], 'a,1,"p,\\"q\\""',
+ escapechar='\\', doublequote = False)
+ self._write_test(['"'], '""""',
+ escapechar='\\', quoting = csv.QUOTE_MINIMAL)
+ self._write_test(['"'], '\\"',
+ escapechar='\\', quoting = csv.QUOTE_MINIMAL,
+ doublequote = False)
+ self._write_test(['"'], '\\"',
+ escapechar='\\', quoting = csv.QUOTE_NONE)
+ self._write_test(['a',1,'p,q'], 'a,1,p\\,q',
+ escapechar='\\', quoting = csv.QUOTE_NONE)
+
+ def test_write_iterable(self):
+ self._write_test(iter(['a', 1, 'p,q']), 'a,1,"p,q"')
+ self._write_test(iter(['a', 1, None]), 'a,1,')
+ self._write_test(iter([]), '')
+ self._write_test(iter([None]), '""')
+ self._write_error_test(csv.Error, iter([None]), quoting=csv.QUOTE_NONE)
+ self._write_test(iter([None, None]), ',')
+
+ def test_writerows(self):
+ class BrokenFile:
+ def write(self, buf):
+ raise OSError
+ writer = csv.writer(BrokenFile())
+ self.assertRaises(OSError, writer.writerows, [['a']])
+
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj)
+ self.assertRaises(TypeError, writer.writerows, None)
+ writer.writerows([['a','b'],['c','d']])
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), "a,b\r\nc,d\r\n")
+
+ def _read_test(self, input, expect, **kwargs):
+ reader = csv.reader(input, **kwargs)
+ result = list(reader)
+ self.assertEqual(result, expect)
+
+ def test_read_oddinputs(self):
+ self._read_test([], [])
+ self._read_test([''], [[]])
+ self.assertRaises(csv.Error, self._read_test,
+ ['"ab"c'], None, strict = 1)
+ # cannot handle null bytes for the moment
+ self.assertRaises(csv.Error, self._read_test,
+ ['ab\0c'], None, strict = 1)
+ self._read_test(['"ab"c'], [['abc']], doublequote = 0)
+
+ self.assertRaises(csv.Error, self._read_test,
+ [b'ab\0c'], None)
+
+
+ def test_read_eol(self):
+ self._read_test(['a,b'], [['a','b']])
+ self._read_test(['a,b\n'], [['a','b']])
+ self._read_test(['a,b\r\n'], [['a','b']])
+ self._read_test(['a,b\r'], [['a','b']])
+ self.assertRaises(csv.Error, self._read_test, ['a,b\rc,d'], [])
+ self.assertRaises(csv.Error, self._read_test, ['a,b\nc,d'], [])
+ self.assertRaises(csv.Error, self._read_test, ['a,b\r\nc,d'], [])
+
+ def test_read_eof(self):
+ self._read_test(['a,"'], [['a', '']])
+ self._read_test(['"a'], [['a']])
+ self._read_test(['^'], [['\n']], escapechar='^')
+ self.assertRaises(csv.Error, self._read_test, ['a,"'], [], strict=True)
+ self.assertRaises(csv.Error, self._read_test, ['"a'], [], strict=True)
+ self.assertRaises(csv.Error, self._read_test,
+ ['^'], [], escapechar='^', strict=True)
+
+ def test_read_escape(self):
+ self._read_test(['a,\\b,c'], [['a', 'b', 'c']], escapechar='\\')
+ self._read_test(['a,b\\,c'], [['a', 'b,c']], escapechar='\\')
+ self._read_test(['a,"b\\,c"'], [['a', 'b,c']], escapechar='\\')
+ self._read_test(['a,"b,\\c"'], [['a', 'b,c']], escapechar='\\')
+ self._read_test(['a,"b,c\\""'], [['a', 'b,c"']], escapechar='\\')
+ self._read_test(['a,"b,c"\\'], [['a', 'b,c\\']], escapechar='\\')
+
+ def test_read_quoting(self):
+ self._read_test(['1,",3,",5'], [['1', ',3,', '5']])
+ self._read_test(['1,",3,",5'], [['1', '"', '3', '"', '5']],
+ quotechar=None, escapechar='\\')
+ self._read_test(['1,",3,",5'], [['1', '"', '3', '"', '5']],
+ quoting=csv.QUOTE_NONE, escapechar='\\')
+ # will this fail where locale uses comma for decimals?
+ self._read_test([',3,"5",7.3, 9'], [['', 3, '5', 7.3, 9]],
+ quoting=csv.QUOTE_NONNUMERIC)
+ self._read_test(['"a\nb", 7'], [['a\nb', ' 7']])
+ self.assertRaises(ValueError, self._read_test,
+ ['abc,3'], [[]],
+ quoting=csv.QUOTE_NONNUMERIC)
+
+ def test_read_bigfield(self):
+ # This exercises the buffer realloc functionality and field size
+ # limits.
+ limit = csv.field_size_limit()
+ try:
+ size = 50000
+ bigstring = 'X' * size
+ bigline = '%s,%s' % (bigstring, bigstring)
+ self._read_test([bigline], [[bigstring, bigstring]])
+ csv.field_size_limit(size)
+ self._read_test([bigline], [[bigstring, bigstring]])
+ self.assertEqual(csv.field_size_limit(), size)
+ csv.field_size_limit(size-1)
+ self.assertRaises(csv.Error, self._read_test, [bigline], [])
+ self.assertRaises(TypeError, csv.field_size_limit, None)
+ self.assertRaises(TypeError, csv.field_size_limit, 1, None)
+ finally:
+ csv.field_size_limit(limit)
+
+ def test_read_linenum(self):
+ r = csv.reader(['line,1', 'line,2', 'line,3'])
+ self.assertEqual(r.line_num, 0)
+ next(r)
+ self.assertEqual(r.line_num, 1)
+ next(r)
+ self.assertEqual(r.line_num, 2)
+ next(r)
+ self.assertEqual(r.line_num, 3)
+ self.assertRaises(StopIteration, next, r)
+ self.assertEqual(r.line_num, 3)
+
+    def test_roundtrip_quoted_newlines(self):
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj)
+ self.assertRaises(TypeError, writer.writerows, None)
+ rows = [['a\nb','b'],['c','x\r\nd']]
+ writer.writerows(rows)
+ fileobj.seek(0)
+ for i, row in enumerate(csv.reader(fileobj)):
+ self.assertEqual(row, rows[i])
+
+ def test_roundtrip_escaped_unquoted_newlines(self):
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj,quoting=csv.QUOTE_NONE,escapechar="\\")
+ rows = [['a\nb','b'],['c','x\r\nd']]
+ writer.writerows(rows)
+ fileobj.seek(0)
+            for i, row in enumerate(csv.reader(fileobj,quoting=csv.QUOTE_NONE,escapechar="\\")):
+ self.assertEqual(row,rows[i])
+
+class TestDialectRegistry(TestCase):
+ def test_registry_badargs(self):
+ self.assertRaises(TypeError, csv.list_dialects, None)
+ self.assertRaises(TypeError, csv.get_dialect)
+ self.assertRaises(csv.Error, csv.get_dialect, None)
+ self.assertRaises(csv.Error, csv.get_dialect, "nonesuch")
+ self.assertRaises(TypeError, csv.unregister_dialect)
+ self.assertRaises(csv.Error, csv.unregister_dialect, None)
+ self.assertRaises(csv.Error, csv.unregister_dialect, "nonesuch")
+ self.assertRaises(TypeError, csv.register_dialect, None)
+ self.assertRaises(TypeError, csv.register_dialect, None, None)
+ self.assertRaises(TypeError, csv.register_dialect, "nonesuch", 0, 0)
+ self.assertRaises(TypeError, csv.register_dialect, "nonesuch",
+ badargument=None)
+ self.assertRaises(TypeError, csv.register_dialect, "nonesuch",
+ quoting=None)
+ self.assertRaises(TypeError, csv.register_dialect, [])
+
+ def test_registry(self):
+ class myexceltsv(csv.excel):
+ delimiter = "\t"
+ name = "myexceltsv"
+ expected_dialects = csv.list_dialects() + [name]
+ expected_dialects.sort()
+ csv.register_dialect(name, myexceltsv)
+ try:
+ self.assertEqual(csv.get_dialect(name).delimiter, '\t')
+ got_dialects = sorted(csv.list_dialects())
+ self.assertEqual(expected_dialects, got_dialects)
+ finally:
+ csv.unregister_dialect(name)
+
+ def test_register_kwargs(self):
+ name = 'fedcba'
+ csv.register_dialect(name, delimiter=';')
+ try:
+ self.assertEqual(csv.get_dialect(name).delimiter, ';')
+            self.assertEqual([['X', 'Y', 'Z']], list(csv.reader(['X;Y;Z'], name)))
+ finally:
+ csv.unregister_dialect(name)
+
+ def test_incomplete_dialect(self):
+ class myexceltsv(csv.Dialect):
+ delimiter = "\t"
+ self.assertRaises(csv.Error, myexceltsv)
+
+ def test_space_dialect(self):
+ class space(csv.excel):
+ delimiter = " "
+ quoting = csv.QUOTE_NONE
+ escapechar = "\\"
+
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("abc def\nc1ccccc1 benzene\n")
+ fileobj.seek(0)
+ reader = csv.reader(fileobj, dialect=space())
+ self.assertEqual(next(reader), ["abc", "def"])
+ self.assertEqual(next(reader), ["c1ccccc1", "benzene"])
+
+ def compare_dialect_123(self, expected, *writeargs, **kwwriteargs):
+
+ with TemporaryFile("w+", newline='', encoding="utf-8") as fileobj:
+
+ writer = csv.writer(fileobj, *writeargs, **kwwriteargs)
+ writer.writerow([1,2,3])
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), expected)
+
+ def test_dialect_apply(self):
+ class testA(csv.excel):
+ delimiter = "\t"
+ class testB(csv.excel):
+ delimiter = ":"
+ class testC(csv.excel):
+ delimiter = "|"
+ class testUni(csv.excel):
+ delimiter = "\u039B"
+
+ csv.register_dialect('testC', testC)
+ try:
+ self.compare_dialect_123("1,2,3\r\n")
+ self.compare_dialect_123("1\t2\t3\r\n", testA)
+ self.compare_dialect_123("1:2:3\r\n", dialect=testB())
+ self.compare_dialect_123("1|2|3\r\n", dialect='testC')
+ self.compare_dialect_123("1;2;3\r\n", dialect=testA,
+ delimiter=';')
+ self.compare_dialect_123("1\u039B2\u039B3\r\n",
+ dialect=testUni)
+
+ finally:
+ csv.unregister_dialect('testC')
+
+ def test_bad_dialect(self):
+ # Unknown parameter
+ self.assertRaises(TypeError, csv.reader, [], bad_attr = 0)
+ # Bad values
+ self.assertRaises(TypeError, csv.reader, [], delimiter = None)
+ self.assertRaises(TypeError, csv.reader, [], quoting = -1)
+ self.assertRaises(TypeError, csv.reader, [], quoting = 100)
+
+class TestCsvBase(TestCase):
+ def readerAssertEqual(self, input, expected_result):
+ with TemporaryFile("w+", newline='') as fileobj:
+ fileobj.write(input)
+ fileobj.seek(0)
+ reader = csv.reader(fileobj, dialect = self.dialect)
+ fields = list(reader)
+ self.assertEqual(fields, expected_result)
+
+ def writerAssertEqual(self, input, expected_result):
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj, dialect = self.dialect)
+ writer.writerows(input)
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), expected_result)
+
+class TestDialectExcel(TestCsvBase):
+ dialect = 'excel'
+
+ def test_single(self):
+ self.readerAssertEqual('abc', [['abc']])
+
+ def test_simple(self):
+ self.readerAssertEqual('1,2,3,4,5', [['1','2','3','4','5']])
+
+ def test_blankline(self):
+ self.readerAssertEqual('', [])
+
+ def test_empty_fields(self):
+ self.readerAssertEqual(',', [['', '']])
+
+ def test_singlequoted(self):
+ self.readerAssertEqual('""', [['']])
+
+ def test_singlequoted_left_empty(self):
+ self.readerAssertEqual('"",', [['','']])
+
+ def test_singlequoted_right_empty(self):
+ self.readerAssertEqual(',""', [['','']])
+
+ def test_single_quoted_quote(self):
+ self.readerAssertEqual('""""', [['"']])
+
+ def test_quoted_quotes(self):
+ self.readerAssertEqual('""""""', [['""']])
+
+ def test_inline_quote(self):
+ self.readerAssertEqual('a""b', [['a""b']])
+
+ def test_inline_quotes(self):
+ self.readerAssertEqual('a"b"c', [['a"b"c']])
+
+ def test_quotes_and_more(self):
+ # Excel would never write a field containing '"a"b', but when
+ # reading one, it will return 'ab'.
+ self.readerAssertEqual('"a"b', [['ab']])
+
+ def test_lone_quote(self):
+ self.readerAssertEqual('a"b', [['a"b']])
+
+ def test_quote_and_quote(self):
+ # Excel would never write a field containing '"a" "b"', but when
+ # reading one, it will return 'a "b"'.
+ self.readerAssertEqual('"a" "b"', [['a "b"']])
+
+ def test_space_and_quote(self):
+ self.readerAssertEqual(' "a"', [[' "a"']])
+
+ def test_quoted(self):
+ self.readerAssertEqual('1,2,3,"I think, therefore I am",5,6',
+ [['1', '2', '3',
+ 'I think, therefore I am',
+ '5', '6']])
+
+ def test_quoted_quote(self):
+        self.readerAssertEqual('1,2,3,"""I see,"" said the blind man","as he picked up his hammer and saw"',
+ [['1', '2', '3',
+ '"I see," said the blind man',
+ 'as he picked up his hammer and saw']])
+
+ def test_quoted_nl(self):
+ input = '''\
+1,2,3,"""I see,""
+said the blind man","as he picked up his
+hammer and saw"
+9,8,7,6'''
+ self.readerAssertEqual(input,
+ [['1', '2', '3',
+ '"I see,"\nsaid the blind man',
+ 'as he picked up his\nhammer and saw'],
+ ['9','8','7','6']])
+
+ def test_dubious_quote(self):
+ self.readerAssertEqual('12,12,1",', [['12', '12', '1"', '']])
+
+ def test_null(self):
+ self.writerAssertEqual([], '')
+
+ def test_single_writer(self):
+ self.writerAssertEqual([['abc']], 'abc\r\n')
+
+ def test_simple_writer(self):
+ self.writerAssertEqual([[1, 2, 'abc', 3, 4]], '1,2,abc,3,4\r\n')
+
+ def test_quotes(self):
+        self.writerAssertEqual([[1, 2, 'a"bc"', 3, 4]], '1,2,"a""bc""",3,4\r\n')
+
+ def test_quote_fieldsep(self):
+ self.writerAssertEqual([['abc,def']], '"abc,def"\r\n')
+
+ def test_newlines(self):
+ self.writerAssertEqual([[1, 2, 'a\nbc', 3, 4]], '1,2,"a\nbc",3,4\r\n')
+
+class EscapedExcel(csv.excel):
+ quoting = csv.QUOTE_NONE
+ escapechar = '\\'
+
+class TestEscapedExcel(TestCsvBase):
+ dialect = EscapedExcel()
+
+ def test_escape_fieldsep(self):
+ self.writerAssertEqual([['abc,def']], 'abc\\,def\r\n')
+
+ def test_read_escape_fieldsep(self):
+ self.readerAssertEqual('abc\\,def\r\n', [['abc,def']])
+
+class TestDialectUnix(TestCsvBase):
+ dialect = 'unix'
+
+ def test_simple_writer(self):
+        self.writerAssertEqual([[1, 'abc def', 'abc']], '"1","abc def","abc"\n')
+
+ def test_simple_reader(self):
+ self.readerAssertEqual('"1","abc def","abc"\n', [['1', 'abc def',
'abc']])
+
+class QuotedEscapedExcel(csv.excel):
+ quoting = csv.QUOTE_NONNUMERIC
+ escapechar = '\\'
+
+class TestQuotedEscapedExcel(TestCsvBase):
+ dialect = QuotedEscapedExcel()
+
+ def test_write_escape_fieldsep(self):
+ self.writerAssertEqual([['abc,def']], '"abc,def"\r\n')
+
+ def test_read_escape_fieldsep(self):
+ self.readerAssertEqual('"abc\\,def"\r\n', [['abc,def']])
+
+class TestDictFields(TestCase):
+ ### "long" means the row is longer than the number of fieldnames
+ ### "short" means there are fewer elements in the row than fieldnames
+ def test_write_simple_dict(self):
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
+ writer.writeheader()
+ fileobj.seek(0)
+ self.assertEqual(fileobj.readline(), "f1,f2,f3\r\n")
+ writer.writerow({"f1": 10, "f3": "abc"})
+ fileobj.seek(0)
+ fileobj.readline() # header
+ self.assertEqual(fileobj.read(), "10,,abc\r\n")
+
+ def test_write_multiple_dict_rows(self):
+ fileobj = StringIO()
+ writer = csv.DictWriter(fileobj, fieldnames=["f1", "f2", "f3"])
+ writer.writeheader()
+ self.assertEqual(fileobj.getvalue(), "f1,f2,f3\r\n")
+ writer.writerows([{"f1": 1, "f2": "abc", "f3": "f"},
+ {"f1": 2, "f2": 5, "f3": "xyz"}])
+ self.assertEqual(fileobj.getvalue(),
+ "f1,f2,f3\r\n1,abc,f\r\n2,5,xyz\r\n")
+
+ def test_write_no_fields(self):
+ fileobj = StringIO()
+ self.assertRaises(TypeError, csv.DictWriter, fileobj)
+
+ def test_write_fields_not_in_fieldnames(self):
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
+ # Of special note is the non-string key (issue 19449)
+ exception = str(self.assertRaisesGetException(
+ ValueError, writer.writerow,
+ {"f4": 10, "f2": "spam", 1: "abc"}),
+ )
+ self.assertIn("fieldnames", exception)
+ self.assertIn("'f4'", exception)
+ self.assertNotIn("'f2'", exception)
+ self.assertIn("1", exception)
+
+ def test_read_dict_fields(self):
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("1,2,abc\r\n")
+ fileobj.seek(0)
+ reader = csv.DictReader(fileobj,
+ fieldnames=["f1", "f2", "f3"])
+ self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
+
+ def test_read_dict_no_fieldnames(self):
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
+ fileobj.seek(0)
+ reader = csv.DictReader(fileobj)
+ self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
+ self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
+
+ # Two test cases to make sure existing ways of implicitly setting
+ # fieldnames continue to work. Both arise from discussion in issue3436.
+ def test_read_dict_fieldnames_from_file(self):
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
+ fileobj.seek(0)
+ reader = csv.DictReader(fileobj,
+ fieldnames=next(csv.reader(fileobj)))
+ self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
+ self.assertEqual(next(reader), {"f1": '1', "f2": '2', "f3": 'abc'})
+
+ def test_read_dict_fieldnames_chain(self):
+ import itertools
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("f1,f2,f3\r\n1,2,abc\r\n")
+ fileobj.seek(0)
+ reader = csv.DictReader(fileobj)
+ first = next(reader)
+ for row in itertools.chain([first], reader):
+ self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
+ self.assertEqual(row, {"f1": '1', "f2": '2', "f3": 'abc'})
+
+ def test_read_long(self):
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("1,2,abc,4,5,6\r\n")
+ fileobj.seek(0)
+ reader = csv.DictReader(fileobj,
+ fieldnames=["f1", "f2"])
+ self.assertEqual(next(reader), {"f1": '1', "f2": '2',
+ None: ["abc", "4", "5", "6"]})
+
+ def test_read_long_with_rest(self):
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("1,2,abc,4,5,6\r\n")
+ fileobj.seek(0)
+ reader = csv.DictReader(fileobj,
+ fieldnames=["f1", "f2"], restkey="_rest")
+ self.assertEqual(next(reader), {"f1": '1', "f2": '2',
+ "_rest": ["abc", "4", "5", "6"]})
+
+ def test_read_long_with_rest_no_fieldnames(self):
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("f1,f2\r\n1,2,abc,4,5,6\r\n")
+ fileobj.seek(0)
+ reader = csv.DictReader(fileobj, restkey="_rest")
+ self.assertEqual(reader.fieldnames, ["f1", "f2"])
+ self.assertEqual(next(reader), {"f1": '1', "f2": '2',
+ "_rest": ["abc", "4", "5", "6"]})
+
+ def test_read_short(self):
+ with TemporaryFile("w+") as fileobj:
+ fileobj.write("1,2,abc,4,5,6\r\n1,2,abc\r\n")
+ fileobj.seek(0)
+ reader = csv.DictReader(fileobj,
+ fieldnames="1 2 3 4 5 6".split(),
+ restval="DEFAULT")
+ self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+ "4": '4', "5": '5', "6": '6'})
+ self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+ "4": 'DEFAULT', "5": 'DEFAULT',
+ "6": 'DEFAULT'})
+
+ def test_read_multi(self):
+ sample = [
+ '2147483648,43.0e12,17,abc,def\r\n',
+ '147483648,43.0e2,17,abc,def\r\n',
+ '47483648,43.0,170,abc,def\r\n'
+ ]
+
+ reader = csv.DictReader(sample,
+ fieldnames="i1 float i2 s1 s2".split())
+ self.assertEqual(next(reader), {"i1": '2147483648',
+ "float": '43.0e12',
+ "i2": '17',
+ "s1": 'abc',
+ "s2": 'def'})
+
+ def test_read_with_blanks(self):
+ reader = csv.DictReader(["1,2,abc,4,5,6\r\n","\r\n",
+ "1,2,abc,4,5,6\r\n"],
+ fieldnames="1 2 3 4 5 6".split())
+ self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+ "4": '4', "5": '5', "6": '6'})
+ self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+ "4": '4', "5": '5', "6": '6'})
+
+ def test_read_semi_sep(self):
+ reader = csv.DictReader(["1;2;abc;4;5;6\r\n"],
+ fieldnames="1 2 3 4 5 6".split(),
+ delimiter=';')
+ self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+ "4": '4', "5": '5', "6": '6'})
+
+class TestArrayWrites(TestCase):
+ def test_int_write(self):
+ import array
+ contents = [(20-i) for i in range(20)]
+ a = array.array(str('i'), contents)
+
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj, dialect="excel")
+ writer.writerow(a)
+ expected = ",".join([str(i) for i in a])+"\r\n"
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), expected)
+
+ def test_double_write(self):
+ import array
+ contents = [(20-i)*0.1 for i in range(20)]
+ a = array.array(str('d'), contents)
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj, dialect="excel")
+ writer.writerow(a)
+ expected = ",".join([str(i) for i in a])+"\r\n"
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), expected)
+
+ def test_float_write(self):
+ import array
+ contents = [(20-i)*0.1 for i in range(20)]
+ a = array.array(str('f'), contents)
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj, dialect="excel")
+ writer.writerow(a)
+ expected = ",".join([str(i) for i in a])+"\r\n"
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), expected)
+
+ def test_char_write(self):
+ import array, string
+ a = array.array(str('u'), text_type(string.ascii_letters))
+
+ with TemporaryFile("w+", newline='') as fileobj:
+ writer = csv.writer(fileobj, dialect="excel")
+ writer.writerow(a)
+ expected = ",".join(a)+"\r\n"
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), expected)
+
+class TestDialectValidity(TestCase):
+ def test_quoting(self):
+ class mydialect(csv.Dialect):
+ delimiter = ";"
+ escapechar = '\\'
+ doublequote = False
+ skipinitialspace = True
+ lineterminator = '\r\n'
+ quoting = csv.QUOTE_NONE
+ d = mydialect()
+ self.assertEqual(d.quoting, csv.QUOTE_NONE)
+
+ mydialect.quoting = None
+ self.assertRaises(csv.Error, mydialect)
+
+ mydialect.doublequote = True
+ mydialect.quoting = csv.QUOTE_ALL
+ mydialect.quotechar = '"'
+ d = mydialect()
+ self.assertEqual(d.quoting, csv.QUOTE_ALL)
+ self.assertEqual(d.quotechar, '"')
+ self.assertTrue(d.doublequote)
+
+ mydialect.quotechar = "''"
+ exception = self.assertRaisesGetException(csv.Error, mydialect)
+ self.assertEqual(str(exception),
+ str('"quotechar" must be a 1-character string'))
+
+ mydialect.quotechar = 4
+ exception = self.assertRaisesGetException(csv.Error, mydialect)
+ self.assertEqual(str(exception),
+ str('"quotechar" must be string, not int'))
+
+ def test_delimiter(self):
+ class mydialect(csv.Dialect):
+ delimiter = ";"
+ escapechar = '\\'
+ doublequote = False
+ skipinitialspace = True
+ lineterminator = '\r\n'
+ quoting = csv.QUOTE_NONE
+ d = mydialect()
+ self.assertEqual(d.delimiter, ";")
+
+ mydialect.delimiter = ":::"
+ exception = self.assertRaisesGetException(csv.Error, mydialect)
+ self.assertEqual(str(exception),
+ str('"delimiter" must be a 1-character string'))
+
+ mydialect.delimiter = ""
+ exception = self.assertRaisesGetException(csv.Error, mydialect)
+ self.assertEqual(str(exception),
+ str('"delimiter" must be a 1-character string'))
+
+ mydialect.delimiter = b","
+ exception = self.assertRaisesGetException(csv.Error, mydialect)
+ self.assertEqual(str(exception),
+ str('"delimiter" must be string, not bytes'))
+
+ mydialect.delimiter = 4
+ exception = self.assertRaisesGetException(csv.Error, mydialect)
+ self.assertEqual(str(exception),
+ str('"delimiter" must be string, not int'))
+
+ def test_lineterminator(self):
+ class mydialect(csv.Dialect):
+ delimiter = ";"
+ escapechar = '\\'
+ doublequote = False
+ skipinitialspace = True
+ lineterminator = '\r\n'
+ quoting = csv.QUOTE_NONE
+ d = mydialect()
+ self.assertEqual(d.lineterminator, '\r\n')
+
+ mydialect.lineterminator = ":::"
+ d = mydialect()
+ self.assertEqual(d.lineterminator, ":::")
+
+ mydialect.lineterminator = 4
+ exception = self.assertRaisesGetException(csv.Error, mydialect)
+ self.assertEqual(str(exception),
+ str('"lineterminator" must be a string'))
+
+ def do_invalid_chars(self, field_name):
+ def create_invalid(field_name, value):
+ class mydialect(csv.Dialect):
+ pass
+ setattr(mydialect, field_name, value)
+ d = mydialect()
+
+ self.assertRaises(csv.Error, create_invalid, field_name, "")
+ self.assertRaises(csv.Error, create_invalid, field_name, "abc")
+ self.assertRaises(csv.Error, create_invalid, field_name, b'x')
+ self.assertRaises(csv.Error, create_invalid, field_name, 5)
+
+ def test_invalid_chars_delimiter(self):
+ self.do_invalid_chars("delimiter")
+
+ def test_invalid_chars_escapechar(self):
+ self.do_invalid_chars("escapechar")
+
+ def test_invalid_chars_quotechar(self):
+ self.do_invalid_chars("quotechar")
+
+class TestSniffer(TestCase):
+ sample1 = """\
+Harry's, Arlington Heights, IL, 2/1/03, Kimi Hayes
+Shark City, Glendale Heights, IL, 12/28/02, Prezence
+Tommy's Place, Blue Island, IL, 12/28/02, Blue Sunday/White Crow
+Stonecutters Seafood and Chop House, Lemont, IL, 12/19/02, Week Back
+"""
+ sample2 = """\
+'Harry''s':'Arlington Heights':'IL':'2/1/03':'Kimi Hayes'
+'Shark City':'Glendale Heights':'IL':'12/28/02':'Prezence'
+'Tommy''s Place':'Blue Island':'IL':'12/28/02':'Blue Sunday/White Crow'
+'Stonecutters ''Seafood'' and Chop House':'Lemont':'IL':'12/19/02':'Week Back'
+"""
+ header1 = '''\
+"venue","city","state","date","performers"
+'''
+ sample3 = '''\
+05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
+05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
+05/05/03?05/05/03?05/05/03?05/05/03?05/05/03?05/05/03
+'''
+
+ sample4 = '''\
+2147483648;43.0e12;17;abc;def
+147483648;43.0e2;17;abc;def
+47483648;43.0;170;abc;def
+'''
+
+ sample5 = "aaa\tbbb\r\nAAA\t\r\nBBB\t\r\n"
+ sample6 = "a|b|c\r\nd|e|f\r\n"
+ sample7 = "'a'|'b'|'c'\r\n'd'|e|f\r\n"
+
+# Issue 18155: Use a delimiter that is a special char to regex:
+
+ header2 = '''\
+"venue"+"city"+"state"+"date"+"performers"
+'''
+ sample8 = """\
+Harry's+ Arlington Heights+ IL+ 2/1/03+ Kimi Hayes
+Shark City+ Glendale Heights+ IL+ 12/28/02+ Prezence
+Tommy's Place+ Blue Island+ IL+ 12/28/02+ Blue Sunday/White Crow
+Stonecutters Seafood and Chop House+ Lemont+ IL+ 12/19/02+ Week Back
+"""
+ sample9 = """\
+'Harry''s'+ Arlington Heights'+ 'IL'+ '2/1/03'+ 'Kimi Hayes'
+'Shark City'+ Glendale Heights'+' IL'+ '12/28/02'+ 'Prezence'
+'Tommy''s Place'+ Blue Island'+ 'IL'+ '12/28/02'+ 'Blue Sunday/White Crow'
+'Stonecutters ''Seafood'' and Chop House'+ 'Lemont'+ 'IL'+ '12/19/02'+ 'Week Back'
+"""
+
+ def test_has_header(self):
+ sniffer = csv.Sniffer()
+ self.assertEqual(sniffer.has_header(self.sample1), False)
+ self.assertEqual(sniffer.has_header(self.header1 + self.sample1),
+ True)
+
+ def test_has_header_regex_special_delimiter(self):
+ sniffer = csv.Sniffer()
+ self.assertEqual(sniffer.has_header(self.sample8), False)
+ self.assertEqual(sniffer.has_header(self.header2 + self.sample8),
+ True)
+
+ def test_sniff(self):
+ sniffer = csv.Sniffer()
+ dialect = sniffer.sniff(self.sample1)
+ self.assertEqual(dialect.delimiter, ",")
+ self.assertEqual(dialect.quotechar, '"')
+ self.assertEqual(dialect.skipinitialspace, True)
+
+ dialect = sniffer.sniff(self.sample2)
+ self.assertEqual(dialect.delimiter, ":")
+ self.assertEqual(dialect.quotechar, "'")
+ self.assertEqual(dialect.skipinitialspace, False)
+
+ def test_delimiters(self):
+ sniffer = csv.Sniffer()
+ dialect = sniffer.sniff(self.sample3)
+ # given that all three lines in sample3 are equal,
+ # I think that any character could have been 'guessed' as the
+ # delimiter, depending on dictionary order
+ self.assertIn(dialect.delimiter, self.sample3)
+ dialect = sniffer.sniff(self.sample3, delimiters="?,")
+ self.assertEqual(dialect.delimiter, "?")
+ dialect = sniffer.sniff(self.sample3, delimiters="/,")
+ self.assertEqual(dialect.delimiter, "/")
+ dialect = sniffer.sniff(self.sample4)
+ self.assertEqual(dialect.delimiter, ";")
+ dialect = sniffer.sniff(self.sample5)
+ self.assertEqual(dialect.delimiter, "\t")
+ dialect = sniffer.sniff(self.sample6)
+ self.assertEqual(dialect.delimiter, "|")
+ dialect = sniffer.sniff(self.sample7)
+ self.assertEqual(dialect.delimiter, "|")
+ self.assertEqual(dialect.quotechar, "'")
+ dialect = sniffer.sniff(self.sample8)
+ self.assertEqual(dialect.delimiter, '+')
+ dialect = sniffer.sniff(self.sample9)
+ self.assertEqual(dialect.delimiter, '+')
+ self.assertEqual(dialect.quotechar, "'")
+
+ def test_doublequote(self):
+ sniffer = csv.Sniffer()
+ dialect = sniffer.sniff(self.header1)
+ self.assertFalse(dialect.doublequote)
+ dialect = sniffer.sniff(self.header2)
+ self.assertFalse(dialect.doublequote)
+ dialect = sniffer.sniff(self.sample2)
+ self.assertTrue(dialect.doublequote)
+ dialect = sniffer.sniff(self.sample8)
+ self.assertFalse(dialect.doublequote)
+ dialect = sniffer.sniff(self.sample9)
+ self.assertTrue(dialect.doublequote)
+
+
+class TestUnicode(TestCase):
+
+ names = ["Martin von Löwis",
+ "Marc André Lemburg",
+ "Guido van Rossum",
+ "François Pinard"]
+
+ def test_unicode_read(self):
+ import io
+ with TemporaryFile("w+", newline='', encoding="utf-8") as fileobj:
+ fileobj.write(",".join(self.names) + "\r\n")
+ fileobj.seek(0)
+ reader = csv.reader(fileobj)
+ self.assertEqual(list(reader), [self.names])
+
+
+ def test_unicode_write(self):
+ import io
+ with TemporaryFile("w+", newline='', encoding="utf-8") as fileobj:
+ writer = csv.writer(fileobj)
+ writer.writerow(self.names)
+ expected = ",".join(self.names)+"\r\n"
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), expected)
+
+
+class TestRegression(TestCase):
+ """Tests of bugs not covered by the standard tests."""
+
+ def test_quote_nonnumeric_decimal(self):
+ """Decimals should not be quoted with non-numeric quoting."""
+ import decimal
+ with TemporaryFile('w+', newline='', encoding='utf-8') as fileobj:
+ writer = csv.writer(fileobj, quoting=csv.QUOTE_NONNUMERIC)
+ writer.writerow([10, 10.0, decimal.Decimal('10.0'), '10.0'])
+ expected = '10,10.0,10.0,"10.0"\r\n'
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), expected)
+
+ def test_writerow_return(self):
+ """writerow should return the return value from calling write."""
+ with TemporaryFile('w+', newline='', encoding='utf-8') as fileobj:
+ writer = csv.writer(fileobj)
+ self.assertEqual(writer.writerow([10, 10.0, 'Piña Colada']), 21)
+
+ def test_quote_none_quotechar_none(self):
+ """A QUOTE_NONE dialect should not error if quotechar is None."""
+ class CustomDialect(csv.Dialect):
+ delimiter = '\t'
+ skipinitialspace = False
+ lineterminator = '\n'
+ escapechar = None
+ quoting = csv.QUOTE_NONE
+
+ csv.writer(io.StringIO(), CustomDialect)
+
+ def test_quote_none_quotechar_undefined(self):
+ """A QUOTE_NONE dialect should not error if quotechar is undefined."""
+ class CustomDialect(csv.Dialect):
+ delimiter = '\t'
+ skipinitialspace = False
+ lineterminator = '\n'
+ quoting = csv.QUOTE_NONE
+
+ csv.writer(io.StringIO(), CustomDialect)
+
+ def test_quote_all_quotechar_none(self):
+ """A QUOTE_ALL dialect should error if quotechar is None."""
+ class CustomDialect(csv.Dialect):
+ delimiter = '\t'
+ skipinitialspace = False
+ lineterminator = '\n'
+ quotechar = None
+ quoting = csv.QUOTE_ALL
+
+ exception = self.assertRaisesGetException(
+ TypeError, csv.writer, io.StringIO(), CustomDialect)
+ assert exception.args[0] == 'quotechar must be set if quoting enabled'
+
+ def test_quote_all_quotechar_unset(self):
+ """A QUOTE_ALL dialect should error if quotechar is unset."""
+ class CustomDialect(csv.Dialect):
+ delimiter = '\t'
+ skipinitialspace = False
+ lineterminator = '\n'
+ quoting = csv.QUOTE_ALL
+
+ exception = self.assertRaisesGetException(
+ TypeError, csv.writer, io.StringIO(), CustomDialect)
+ assert exception.args[0] == 'quotechar must be set if quoting enabled'
+
+
+if __name__ == '__main__':
+ unittest.main()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/backports.csv-1.0.6/tox.ini new/backports.csv-1.0.7/tox.ini
--- old/backports.csv-1.0.6/tox.ini 1970-01-01 01:00:00.000000000 +0100
+++ new/backports.csv-1.0.7/tox.ini 2019-03-11 04:05:17.000000000 +0100
@@ -0,0 +1,13 @@
+[tox]
+envlist = py{26,27,33,34,35,36,37,py}, stdlib
+
+[testenv]
+deps = pytest
+commands = {posargs:pytest}
+
+[testenv:stdlib]
+basepython = python3.7
+skip_install = True
+
+[pytest]
+testpaths = tests.py
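
Note: the tox configuration above targets py26 through py37 plus pypy, and adds a "stdlib" environment with skip_install = True, so in that environment the backport is not installed and tests.py falls back to the standard-library csv module via its try/except import. Outside of tox, each environment boils down to roughly the following sketch (assuming pytest is available; the [pytest] section already points testpaths at tests.py):

    # Sketch only: roughly what "commands = {posargs:pytest}" runs per environment.
    import sys

    import pytest

    sys.exit(pytest.main(["tests.py"]))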