Hello community,

here is the log from the commit of package python-feedparser for 
openSUSE:Factory checked in at 2020-09-28 14:30:03
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-feedparser (Old)
 and      /work/SRC/openSUSE:Factory/.python-feedparser.new.4249 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-feedparser"

Mon Sep 28 14:30:03 2020 rev:27 rq:838247 version:6.0.1

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-feedparser/python-feedparser.changes      
2020-09-25 16:31:40.563897928 +0200
+++ 
/work/SRC/openSUSE:Factory/.python-feedparser.new.4249/python-feedparser.changes
    2020-09-28 14:30:52.854242157 +0200
@@ -1,0 +2,7 @@
+Mon Sep 28 11:20:26 UTC 2020 - Dirk Mueller <dmuel...@suse.com>
+
+- update to 6.0.1:
+  * Remove all Python 2 compatibility code (#228)
+  * Add *python_requires* to ``setup.py`` (#231)
+
+-------------------------------------------------------------------

Old:
----
  feedparser-6.0.0.tar.gz

New:
----
  feedparser-6.0.1.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-feedparser.spec ++++++
--- /var/tmp/diff_new_pack.nr3ztG/_old  2020-09-28 14:30:53.558242768 +0200
+++ /var/tmp/diff_new_pack.nr3ztG/_new  2020-09-28 14:30:53.562242771 +0200
@@ -16,10 +16,10 @@
 #
 
 
-%define skip_python2 1
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
+%define skip_python2 1
 Name:           python-feedparser
-Version:        6.0.0
+Version:        6.0.1
 Release:        0
 Summary:        Universal Feed Parser Module for Python
 License:        BSD-2-Clause

++++++ feedparser-6.0.0.tar.gz -> feedparser-6.0.1.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/NEWS new/feedparser-6.0.1/NEWS
--- old/feedparser-6.0.0/NEWS   2020-09-12 21:27:17.000000000 +0200
+++ new/feedparser-6.0.1/NEWS   2020-09-15 13:50:57.000000000 +0200
@@ -1,5 +1,9 @@
 coming in the next release:
 
+6.0.1 - 15 September 2020
+    * Remove all Python 2 compatibility code (#228)
+    * Add *python_requires* to ``setup.py`` (#231)
+
 6.0.0 - 12 September 2020
     * Support Python 3.6, 3.7, 3.8 and 3.9
     * Drop support for Python 2.4 through 2.7, and Python 3.0 through 3.5 
(#169)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/PKG-INFO 
new/feedparser-6.0.1/PKG-INFO
--- old/feedparser-6.0.0/PKG-INFO       2020-09-12 21:27:28.846880000 +0200
+++ new/feedparser-6.0.1/PKG-INFO       2020-09-15 13:51:18.969905000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: feedparser
-Version: 6.0.0
+Version: 6.0.1
 Summary: Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 
0.3, and Atom 1.0 feeds
 Home-page: https://github.com/kurtmckee/feedparser
 Author: Kurt McKee
@@ -75,4 +75,5 @@
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Topic :: Text Processing :: Markup :: XML
+Requires-Python: >=3.6
 Description-Content-Type: text/x-rst
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/__init__.py 
new/feedparser-6.0.1/feedparser/__init__.py
--- old/feedparser-6.0.0/feedparser/__init__.py 2020-09-12 21:27:17.000000000 
+0200
+++ new/feedparser-6.0.1/feedparser/__init__.py 2020-09-15 13:50:57.000000000 
+0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE."""
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 from .api import parse
 from .datetimes import registerDateHandler
 from .exceptions import *
@@ -35,7 +32,7 @@
 
 __author__ = 'Kurt McKee <contac...@kurtmckee.org>'
 __license__ = 'BSD 2-clause'
-__version__ = '6.0.0'
+__version__ = '6.0.1'
 
 # HTTP "User-Agent" header to send to servers when downloading feeds.
 # If you are embedding feedparser in a larger application, you should
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/api.py 
new/feedparser-6.0.1/feedparser/api.py
--- old/feedparser-6.0.0/feedparser/api.py      2020-09-01 05:42:10.000000000 
+0200
+++ new/feedparser-6.0.1/feedparser/api.py      2020-09-15 13:49:58.000000000 
+0200
@@ -26,29 +26,10 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
+import io
+import urllib.parse
 import xml.sax
 
-try:
-    from io import BytesIO as _StringIO
-except ImportError:
-    # Python 2.7
-    try:
-        from cStringIO import StringIO as _StringIO
-    except ImportError:
-        from StringIO import StringIO as _StringIO
-
-try:
-    import urllib.parse
-except ImportError:
-    from urlparse import urlparse
-
-    class urllib(object):
-        class parse(object):
-            urlparse = staticmethod(urlparse)
-
 from .datetimes import registerDateHandler, _parse_date
 from .encodings import convert_to_utf8
 from .exceptions import *
@@ -63,14 +44,6 @@
 from .urls import convert_to_idn, make_safe_absolute_uri
 from .util import FeedParserDict
 
-bytes_ = type(b'')
-unicode_ = type('')
-try:
-    unichr
-    basestring
-except NameError:
-    unichr = chr
-    basestring = str
 
 # List of preferred XML parsers, by SAX driver name.  These will be tried 
first,
 # but if they're not installed, Python will keep searching through its own list
@@ -130,13 +103,13 @@
     if request_headers is supplied it is a dictionary of HTTP request headers
     that will override the values generated by FeedParser.
 
-    :return: A :class:`StringIO.StringIO` or :class:`io.BytesIO`.
+    :return: A bytes object.
     """
 
     if hasattr(url_file_stream_or_string, 'read'):
         return url_file_stream_or_string.read()
 
-    if isinstance(url_file_stream_or_string, basestring) \
+    if isinstance(url_file_stream_or_string, str) \
        and urllib.parse.urlparse(url_file_stream_or_string)[0] in ('http', 
'https', 'ftp', 'file', 'feed'):
         return http.get(url_file_stream_or_string, etag, modified, agent, 
referrer, handlers, request_headers, result)
 
@@ -145,7 +118,7 @@
         with open(url_file_stream_or_string, 'rb') as f:
             data = f.read()
     except (IOError, UnicodeEncodeError, TypeError, ValueError):
-        # if url_file_stream_or_string is a unicode object that
+        # if url_file_stream_or_string is a str object that
         # cannot be converted to the encoding returned by
         # sys.getfilesystemencoding(), a UnicodeEncodeError
         # will be thrown
@@ -157,19 +130,19 @@
         return data
 
     # treat url_file_stream_or_string as string
-    if not isinstance(url_file_stream_or_string, bytes_):
+    if not isinstance(url_file_stream_or_string, bytes):
         return url_file_stream_or_string.encode('utf-8')
     return url_file_stream_or_string
 
 
 LooseFeedParser = type(
-    str('LooseFeedParser'),  # `str()` call required for Python 2.7
+    'LooseFeedParser',
     (_LooseFeedParser, _FeedParserMixin, _BaseHTMLProcessor, object),
     {},
 )
 
 StrictFeedParser = type(
-    str('StrictFeedParser'),  # `str()` call required for Python 2.7
+    'StrictFeedParser',
     (_StrictFeedParser, _FeedParserMixin, xml.sax.handler.ContentHandler, 
object),
     {},
 )
@@ -257,7 +230,7 @@
     baseuri = make_safe_absolute_uri(href, contentloc) or 
make_safe_absolute_uri(contentloc) or href
 
     baselang = result['headers'].get('content-language', None)
-    if isinstance(baselang, bytes_) and baselang is not None:
+    if isinstance(baselang, bytes) and baselang is not None:
         baselang = baselang.decode('utf-8', 'ignore')
 
     if not _XML_AVAILABLE:
@@ -277,14 +250,14 @@
         saxparser.setContentHandler(feedparser)
         saxparser.setErrorHandler(feedparser)
         source = xml.sax.xmlreader.InputSource()
-        source.setByteStream(_StringIO(data))
+        source.setByteStream(io.BytesIO(data))
         try:
             saxparser.parse(source)
         except xml.sax.SAXException as e:
             result['bozo'] = 1
             result['bozo_exception'] = feedparser.exc or e
             use_strict_parser = 0
-    if not use_strict_parser and _SGML_AVAILABLE:
+    if not use_strict_parser:
         feedparser = LooseFeedParser(baseuri, baselang, 'utf-8', entities)
         feedparser.resolve_relative_uris = resolve_relative_uris
         feedparser.sanitize_html = sanitize_html
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/__init__.py 
new/feedparser-6.0.1/feedparser/datetimes/__init__.py
--- old/feedparser-6.0.0/feedparser/datetimes/__init__.py       2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/__init__.py       2020-09-15 
13:49:58.000000000 +0200
@@ -25,8 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-
 from .asctime import _parse_date_asctime
 from .greek import _parse_date_greek
 from .hungarian import _parse_date_hungarian
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/asctime.py 
new/feedparser-6.0.1/feedparser/datetimes/asctime.py
--- old/feedparser-6.0.0/feedparser/datetimes/asctime.py        2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/asctime.py        2020-09-15 
13:49:58.000000000 +0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 from .rfc822 import _parse_date_rfc822
 
 _months = [
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/greek.py 
new/feedparser-6.0.1/feedparser/datetimes/greek.py
--- old/feedparser-6.0.0/feedparser/datetimes/greek.py  2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/greek.py  2020-09-15 
13:49:58.000000000 +0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import re
 
 from .rfc822 import _parse_date_rfc822
@@ -56,13 +53,13 @@
 }
 
 _greek_wdays = {
-   '\u039a\u03c5\u03c1': 'Sun', # caf5f1 in iso-8859-7
-   '\u0394\u03b5\u03c5': 'Mon', # c4e5f5 in iso-8859-7
-   '\u03a4\u03c1\u03b9': 'Tue', # d4f1e9 in iso-8859-7
-   '\u03a4\u03b5\u03c4': 'Wed', # d4e5f4 in iso-8859-7
-   '\u03a0\u03b5\u03bc': 'Thu', # d0e5ec in iso-8859-7
-   '\u03a0\u03b1\u03c1': 'Fri', # d0e1f1 in iso-8859-7
-   '\u03a3\u03b1\u03b2': 'Sat', # d3e1e2 in iso-8859-7
+   '\u039a\u03c5\u03c1': 'Sun',  # caf5f1 in iso-8859-7
+   '\u0394\u03b5\u03c5': 'Mon',  # c4e5f5 in iso-8859-7
+   '\u03a4\u03c1\u03b9': 'Tue',  # d4f1e9 in iso-8859-7
+   '\u03a4\u03b5\u03c4': 'Wed',  # d4e5f4 in iso-8859-7
+   '\u03a0\u03b5\u03bc': 'Thu',  # d0e5ec in iso-8859-7
+   '\u03a0\u03b1\u03c1': 'Fri',  # d0e1f1 in iso-8859-7
+   '\u03a3\u03b1\u03b2': 'Sat',  # d3e1e2 in iso-8859-7
 }
 
 _greek_date_format_re = 
re.compile(r'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/hungarian.py 
new/feedparser-6.0.1/feedparser/datetimes/hungarian.py
--- old/feedparser-6.0.0/feedparser/datetimes/hungarian.py      2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/hungarian.py      2020-09-15 
13:49:58.000000000 +0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import re
 
 from .w3dtf import _parse_date_w3dtf
@@ -48,7 +45,7 @@
     'december':      '12',
 }
 
-_hungarian_date_format_re = 
re.compile(r'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
+_hungarian_date_format_re = 
re.compile(r'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})([+-](\d{,2}:\d{2}))')
 
 
 def _parse_date_hungarian(date_string):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/iso8601.py 
new/feedparser-6.0.1/feedparser/datetimes/iso8601.py
--- old/feedparser-6.0.0/feedparser/datetimes/iso8601.py        2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/iso8601.py        2020-09-15 
13:49:58.000000000 +0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import re
 import time
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/korean.py 
new/feedparser-6.0.1/feedparser/datetimes/korean.py
--- old/feedparser-6.0.0/feedparser/datetimes/korean.py 2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/korean.py 2020-09-15 
13:49:58.000000000 +0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import re
 
 from .w3dtf import _parse_date_w3dtf
@@ -55,8 +52,8 @@
     if not m:
         return
     w3dtfdate = 
'%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
-                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
-                 'hour': m.group(4), 'minute': m.group(5), 'second': 
m.group(6),\
+                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
+                 'hour': m.group(4), 'minute': m.group(5), 'second': 
m.group(6),
                  'zonediff': '+09:00'}
     return _parse_date_w3dtf(w3dtfdate)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/perforce.py 
new/feedparser-6.0.1/feedparser/datetimes/perforce.py
--- old/feedparser-6.0.0/feedparser/datetimes/perforce.py       2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/perforce.py       2020-09-15 
13:49:58.000000000 +0200
@@ -25,14 +25,7 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-try:
-    import rfc822
-except ImportError:
-    from email import _parseaddr as rfc822
-
+import email._parseaddr
 import re
 import time
 
@@ -48,6 +41,6 @@
     dow, year, month, day, hour, minute, second, tz = m.groups()
     months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 
'Oct', 'Nov', 'Dec']
     new_date_string = "%s, %s %s %s %s:%s:%s %s" % (dow, day, 
months[int(month) - 1], year, hour, minute, second, tz)
-    tm = rfc822.parsedate_tz(new_date_string)
+    tm = email._parseaddr.parsedate_tz(new_date_string)
     if tm:
-        return time.gmtime(rfc822.mktime_tz(tm))
+        return time.gmtime(email._parseaddr.mktime_tz(tm))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/rfc822.py 
new/feedparser-6.0.1/feedparser/datetimes/rfc822.py
--- old/feedparser-6.0.0/feedparser/datetimes/rfc822.py 2020-09-12 
21:27:17.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/rfc822.py 2020-09-15 
13:49:58.000000000 +0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import datetime
 
 timezone_names = {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/datetimes/w3dtf.py 
new/feedparser-6.0.1/feedparser/datetimes/w3dtf.py
--- old/feedparser-6.0.0/feedparser/datetimes/w3dtf.py  2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/datetimes/w3dtf.py  2020-09-15 
13:49:58.000000000 +0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import datetime
 
 timezonenames = {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/encodings.py 
new/feedparser-6.0.1/feedparser/encodings.py
--- old/feedparser-6.0.0/feedparser/encodings.py        2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/encodings.py        2020-09-15 
13:49:58.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import cgi
 import codecs
 import re
@@ -43,12 +40,7 @@
     lazy_chardet_encoding = None
 else:
     def lazy_chardet_encoding(data):
-        chardet_encoding = chardet.detect(data)['encoding']
-        if not chardet_encoding:
-            chardet_encoding = ''
-        if isinstance(chardet_encoding, bytes_):
-            chardet_encoding = chardet_encoding.encode('ascii', 'ignore')
-        return chardet_encoding
+        return chardet.detect(data)['encoding'] or ''
 
 from .exceptions import (
     CharacterEncodingOverride,
@@ -56,8 +48,6 @@
     NonXMLContentType,
 )
 
-bytes_ = type(b'')
-unicode_ = type('')
 
 # Each marker represents some of the characters of the opening XML
 # processing instruction ('<?xm') in the specified encoding.
@@ -193,7 +183,7 @@
     http_content_type = http_headers.get('content-type') or ''
     http_content_type, params = cgi.parse_header(http_content_type)
     http_encoding = params.get('charset', '').replace("'", "")
-    if isinstance(http_encoding, bytes_):
+    if isinstance(http_encoding, bytes):
         http_encoding = http_encoding.decode('utf-8', 'ignore')
 
     acceptable_content_type = 0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/exceptions.py 
new/feedparser-6.0.1/feedparser/exceptions.py
--- old/feedparser-6.0.0/feedparser/exceptions.py       2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/exceptions.py       2020-09-15 
13:49:58.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 __all__ = [
     'ThingsNobodyCaresAboutButMe',
     'CharacterEncodingOverride',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/html.py 
new/feedparser-6.0.1/feedparser/html.py
--- old/feedparser-6.0.0/feedparser/html.py     2020-09-01 05:42:10.000000000 
+0200
+++ new/feedparser-6.0.1/feedparser/html.py     2020-09-15 13:49:58.000000000 
+0200
@@ -25,18 +25,9 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
+import html.entities
 import re
 
-try:
-    from html.entities import name2codepoint
-except ImportError:
-    # Python 2
-    # noinspection PyUnresolvedReferences
-    from htmlentitydefs import name2codepoint
-
 from .sgml import *
 
 _cp1252 = {
@@ -251,7 +242,7 @@
 
         # Called for each entity reference, e.g. '&copy;' will extract 'copy'
         # Reconstruct the original entity reference.
-        if ref in name2codepoint or ref == 'apos':
+        if ref in html.entities.name2codepoint or ref == 'apos':
             self.pieces.append('&%s;' % ref)
         else:
             self.pieces.append('&amp;%s' % ref)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/http.py 
new/feedparser-6.0.1/feedparser/http.py
--- old/feedparser-6.0.0/feedparser/http.py     2020-09-12 21:27:17.000000000 
+0200
+++ new/feedparser-6.0.1/feedparser/http.py     2020-09-15 13:49:58.000000000 
+0200
@@ -25,29 +25,19 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
+import base64
 import datetime
 import gzip
+import io
 import re
 import struct
-import zlib
-
-import base64
-from io import BytesIO as _StringIO
 import urllib.parse
 import urllib.request
+import zlib
 
 from .datetimes import _parse_date
 from .urls import convert_to_idn
 
-try:
-    basestring
-except NameError:
-    basestring = str
-
-bytes_ = type(b'')
 
 # HTTP "Accept" header to send to servers when downloading feeds.  If you don't
 # want to send an Accept header, set this to None.
@@ -102,7 +92,7 @@
     request.add_header('User-Agent', agent)
     if etag:
         request.add_header('If-None-Match', etag)
-    if isinstance(modified, basestring):
+    if isinstance(modified, str):
         modified = _parse_date(modified)
     elif isinstance(modified, datetime.datetime):
         modified = modified.utctimetuple()
@@ -158,7 +148,7 @@
             auth = 
base64.standard_b64encode(f'{url_pieces.username}:{url_pieces.password}').strip()
 
     # iri support
-    if not isinstance(url, bytes_):
+    if not isinstance(url, bytes):
         url = convert_to_idn(url)
 
     # try to open with urllib2 (to use optional headers)
@@ -175,7 +165,7 @@
     # if feed is gzip-compressed, decompress it
     if data and 'gzip' in result['headers'].get('content-encoding', ''):
         try:
-            data = gzip.GzipFile(fileobj=_StringIO(data)).read()
+            data = gzip.GzipFile(fileobj=io.BytesIO(data)).read()
         except (EOFError, IOError, struct.error) as e:
             # IOError can occur if the gzip header is bad.
             # struct.error can occur if the data is damaged.
@@ -200,7 +190,7 @@
     # save HTTP headers
     if 'etag' in result['headers']:
         etag = result['headers'].get('etag', '')
-        if isinstance(etag, bytes_):
+        if isinstance(etag, bytes):
             etag = etag.decode('utf-8', 'ignore')
         if etag:
             result['etag'] = etag
@@ -209,7 +199,7 @@
         if modified:
             result['modified'] = modified
             result['modified_parsed'] = _parse_date(modified)
-    if isinstance(f.url, bytes_):
+    if isinstance(f.url, bytes):
         result['href'] = f.url.decode('utf-8', 'ignore')
     else:
         result['href'] = f.url
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/mixin.py 
new/feedparser-6.0.1/feedparser/mixin.py
--- old/feedparser-6.0.0/feedparser/mixin.py    2020-09-12 21:27:17.000000000 
+0200
+++ new/feedparser-6.0.1/feedparser/mixin.py    2020-09-15 13:49:58.000000000 
+0200
@@ -25,21 +25,12 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import base64
 import binascii
 import copy
+import html.entities
 import re
-from xml.sax.saxutils import escape as _xmlescape
-
-try:
-    from html.entities import name2codepoint, entitydefs
-except ImportError:
-    # Python 2
-    # noinspection PyUnresolvedReferences
-    from htmlentitydefs import name2codepoint, entitydefs
+import xml.sax.saxutils
 
 from .html import _cp1252
 from .namespaces import _base, cc, dc, georss, itunes, mediarss, psc
@@ -48,15 +39,6 @@
 from .urls import _urljoin, make_safe_absolute_uri, resolve_relative_uris
 
 
-bytes_ = type(b'')
-try:
-    # Python 2
-    # noinspection PyUnresolvedReferences,PyShadowingBuiltins
-    chr = unichr
-except NameError:
-    pass
-
-
 class _FeedParserMixin(
         _base.Namespace,
         cc.Namespace,
@@ -237,7 +219,7 @@
         # track xml:base and xml:lang
         attrs_d = dict(attrs)
         baseuri = attrs_d.get('xml:base', attrs_d.get('base')) or self.baseuri
-        if isinstance(baseuri, bytes_):
+        if isinstance(baseuri, bytes):
             baseuri = baseuri.decode(self.encoding, 'ignore')
         # ensure that self.baseuri is always an absolute URI that
         # uses a whitelisted URI scheme (e.g. not `javscript:`)
@@ -388,11 +370,11 @@
                 return self.handle_entityref(text)
         else:
             try:
-                name2codepoint[ref]
+                html.entities.name2codepoint[ref]
             except KeyError:
                 text = '&%s;' % ref
             else:
-                text = chr(name2codepoint[ref]).encode('utf-8')
+                text = chr(html.entities.name2codepoint[ref]).encode('utf-8')
         self.elementstack[-1][2].append(text)
 
     def handle_data(self, text, escape=1):
@@ -401,7 +383,7 @@
         if not self.elementstack:
             return
         if escape and self.contentparams.get('type') == 
'application/xhtml+xml':
-            text = _xmlescape(text)
+            text = xml.sax.saxutils.escape(text)
         self.elementstack[-1][2].append(text)
 
     def handle_comment(self, text):
@@ -423,7 +405,7 @@
                 # CDATA block began but didn't finish
                 k = len(self.rawdata)
                 return k
-            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
+            self.handle_data(xml.sax.saxutils.escape(self.rawdata[i+9:k]), 0)
             return k+3
         else:
             k = self.rawdata.find('>', i)
@@ -473,7 +455,7 @@
     @staticmethod
     def strattrs(attrs):
         return ''.join(
-            ' %s="%s"' % (t[0], _xmlescape(t[1], {'"': '&quot;'}))
+            ' %s="%s"' % (t[0], xml.sax.saxutils.escape(t[1], {'"': '&quot;'}))
             for t in attrs
         )
 
@@ -490,7 +472,7 @@
 
         # Ensure each piece is a str for Python 3
         for (i, v) in enumerate(pieces):
-            if isinstance(v, bytes_):
+            if isinstance(v, bytes):
                 pieces[i] = v.decode('utf-8')
 
         if self.version == 'atom10' and self.contentparams.get('type', 'text') 
== 'application/xhtml+xml':
@@ -566,19 +548,19 @@
             if element in self.can_contain_dangerous_markup:
                 output = _sanitize_html(output, self.encoding, 
self.contentparams.get('type', 'text/html'))
 
-        if self.encoding and isinstance(output, bytes_):
+        if self.encoding and isinstance(output, bytes):
             output = output.decode(self.encoding, 'ignore')
 
         # address common error where people take data that is already
         # utf-8, presume that it is iso-8859-1, and re-encode it.
-        if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and not 
isinstance(output, bytes_):
+        if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and not 
isinstance(output, bytes):
             try:
                 output = output.encode('iso-8859-1').decode('utf-8')
             except (UnicodeEncodeError, UnicodeDecodeError):
                 pass
 
         # map win-1252 extensions to the proper code points
-        if not isinstance(output, bytes_):
+        if not isinstance(output, bytes):
             output = output.translate(_cp1252)
 
         # categories/tags/keywords/whatever are handled in _end_category or
@@ -670,7 +652,7 @@
             return False
 
         # all entities must have been defined as valid HTML entities
-        if any((e for e in re.findall(r'&(\w+);', s) if e not in entitydefs)):
+        if any((e for e in re.findall(r'&(\w+);', s) if e not in 
html.entities.entitydefs)):
             return False
 
         return True
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/namespaces/_base.py 
new/feedparser-6.0.1/feedparser/namespaces/_base.py
--- old/feedparser-6.0.0/feedparser/namespaces/_base.py 2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/namespaces/_base.py 2020-09-15 
13:49:58.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import copy
 
 from ..datetimes import _parse_date
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/namespaces/admin.py 
new/feedparser-6.0.1/feedparser/namespaces/admin.py
--- old/feedparser-6.0.0/feedparser/namespaces/admin.py 2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/namespaces/admin.py 2020-09-15 
13:49:58.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 from ..util import FeedParserDict
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/namespaces/cc.py 
new/feedparser-6.0.1/feedparser/namespaces/cc.py
--- old/feedparser-6.0.0/feedparser/namespaces/cc.py    2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/namespaces/cc.py    2020-09-15 
13:49:58.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 from ..util import FeedParserDict
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/namespaces/dc.py 
new/feedparser-6.0.1/feedparser/namespaces/dc.py
--- old/feedparser-6.0.0/feedparser/namespaces/dc.py    2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/namespaces/dc.py    2020-09-15 
13:49:58.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 from ..datetimes import _parse_date
 from ..util import FeedParserDict
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/namespaces/georss.py 
new/feedparser-6.0.1/feedparser/namespaces/georss.py
--- old/feedparser-6.0.0/feedparser/namespaces/georss.py        2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/namespaces/georss.py        2020-09-15 
13:49:58.000000000 +0200
@@ -26,8 +26,7 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
+# Required for Python 3.6 compatibility.
 from __future__ import generator_stop
 
 from ..util import FeedParserDict
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/namespaces/itunes.py 
new/feedparser-6.0.1/feedparser/namespaces/itunes.py
--- old/feedparser-6.0.0/feedparser/namespaces/itunes.py        2020-09-12 
21:27:17.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/namespaces/itunes.py        2020-09-15 
13:49:58.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 from ..util import FeedParserDict
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/namespaces/mediarss.py 
new/feedparser-6.0.1/feedparser/namespaces/mediarss.py
--- old/feedparser-6.0.0/feedparser/namespaces/mediarss.py      2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/namespaces/mediarss.py      2020-09-15 
13:49:58.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 from ..util import FeedParserDict
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/namespaces/psc.py 
new/feedparser-6.0.1/feedparser/namespaces/psc.py
--- old/feedparser-6.0.0/feedparser/namespaces/psc.py   2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/namespaces/psc.py   2020-09-15 
13:49:59.000000000 +0200
@@ -26,9 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import datetime
 import re
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/parsers/loose.py 
new/feedparser-6.0.1/feedparser/parsers/loose.py
--- old/feedparser-6.0.0/feedparser/parsers/loose.py    2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/parsers/loose.py    2020-09-15 
13:49:59.000000000 +0200
@@ -26,10 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-
 class _LooseFeedParser(object):
     contentparams = None
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/parsers/strict.py 
new/feedparser-6.0.1/feedparser/parsers/strict.py
--- old/feedparser-6.0.0/feedparser/parsers/strict.py   2020-09-01 
05:42:10.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/parsers/strict.py   2020-09-15 
13:49:59.000000000 +0200
@@ -26,8 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import, unicode_literals
-
 from ..exceptions import UndeclaredNamespace
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/sanitizer.py 
new/feedparser-6.0.1/feedparser/sanitizer.py
--- old/feedparser-6.0.0/feedparser/sanitizer.py        2020-09-12 
21:27:17.000000000 +0200
+++ new/feedparser-6.0.1/feedparser/sanitizer.py        2020-09-15 
13:49:59.000000000 +0200
@@ -25,13 +25,9 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import re
 
 from .html import _BaseHTMLProcessor
-from .sgml import _SGML_AVAILABLE
 from .urls import make_safe_absolute_uri
 
 
@@ -882,8 +878,6 @@
 
 
 def _sanitize_html(html_source, encoding, _type):
-    if not _SGML_AVAILABLE:
-        return html_source
     p = _HTMLSanitizer(encoding, _type)
     html_source = html_source.replace('<![CDATA[', '&lt;![CDATA[')
     p.feed(html_source)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/sgml.py 
new/feedparser-6.0.1/feedparser/sgml.py
--- old/feedparser-6.0.0/feedparser/sgml.py     2020-09-01 05:42:10.000000000 
+0200
+++ new/feedparser-6.0.1/feedparser/sgml.py     2020-09-15 13:49:59.000000000 
+0200
@@ -25,12 +25,11 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-
 import re
 
+import sgmllib
+
 __all__ = [
-    '_SGML_AVAILABLE',
     'sgmllib',
     'charref',
     'tagfind',
@@ -44,93 +43,56 @@
     'endbracket',
 ]
 
-# sgmllib is not available by default in Python 3; if the end user doesn't have
-# it available then we'll lose illformed XML parsing and content sanitizing
-try:
-    import sgmllib
-except ImportError:
-    # This is probably Python 3, which doesn't include sgmllib anymore
-    _SGML_AVAILABLE = 0
-
-    # Mock sgmllib enough to allow subclassing later on
-    class sgmllib(object):
-        SGMLParseError = EnvironmentError
-
-        class SGMLParser(object):
-            lasttag = None
-            rawdata = None
-
-            def close(self):
-                pass
-
-            def feed(self, data):
-                pass
-
-            def goahead(self, i):
-                pass
-
-            def parse_declaration(self, i):
-                pass
-
-            def parse_starttag(self, i):
-                pass
-
-            def reset(self):
-                pass
-
-else:
-    _SGML_AVAILABLE = 1
-
-    # sgmllib defines a number of module-level regular expressions that are
-    # insufficient for the XML parsing feedparser needs. Rather than modify
-    # the variables directly in sgmllib, they're defined here using the same
-    # names, and the compiled code objects of several sgmllib.SGMLParser
-    # methods are copied into _BaseHTMLProcessor so that they execute in
-    # feedparser's scope instead of sgmllib's scope.
-    charref = re.compile(r'&#(\d+|[xX][0-9a-fA-F]+);')
-    tagfind = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*')
-    attrfind = re.compile(
-        r"""\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*"""
-        r"""('[^']*'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$()_#=~'"@]*))?"""
-    )
-
-    # Unfortunately, these must be copied over to prevent NameError exceptions
-    entityref = sgmllib.entityref
-    incomplete = sgmllib.incomplete
-    interesting = sgmllib.interesting
-    shorttag = sgmllib.shorttag
-    shorttagopen = sgmllib.shorttagopen
-    starttagopen = sgmllib.starttagopen
-
-
-    class _EndBracketRegEx:
-        def __init__(self):
-            # Overriding the built-in sgmllib.endbracket regex allows the
-            # parser to find angle brackets embedded in element attributes.
-            self.endbracket = re.compile(
-                r'('
-                r"""[^'"<>]"""
-                r"""|"[^"]*"(?=>|/|\s|\w+=)"""
-                r"""|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])"""
-                r"""|.*?(?=[<>]"""
-                r')'
-            )
-
-        def search(self, target, index=0):
-            match = self.endbracket.match(target, index)
-            if match is not None:
-                # Returning a new object in the calling thread's context
-                # resolves a thread-safety.
-                return EndBracketMatch(match)
-            return None
-
-
-    class EndBracketMatch:
-        def __init__(self, match):
-            self.match = match
+# sgmllib defines a number of module-level regular expressions that are
+# insufficient for the XML parsing feedparser needs. Rather than modify
+# the variables directly in sgmllib, they're defined here using the same
+# names, and the compiled code objects of several sgmllib.SGMLParser
+# methods are copied into _BaseHTMLProcessor so that they execute in
+# feedparser's scope instead of sgmllib's scope.
+charref = re.compile(r'&#(\d+|[xX][0-9a-fA-F]+);')
+tagfind = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*')
+attrfind = re.compile(
+    r"""\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*"""
+    r"""('[^']*'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$()_#=~'"@]*))?"""
+)
+
+# Unfortunately, these must be copied over to prevent NameError exceptions
+entityref = sgmllib.entityref
+incomplete = sgmllib.incomplete
+interesting = sgmllib.interesting
+shorttag = sgmllib.shorttag
+shorttagopen = sgmllib.shorttagopen
+starttagopen = sgmllib.starttagopen
+
+
+class _EndBracketRegEx:
+    def __init__(self):
+        # Overriding the built-in sgmllib.endbracket regex allows the
+        # parser to find angle brackets embedded in element attributes.
+        self.endbracket = re.compile(
+            r'('
+            r"""[^'"<>]"""
+            r"""|"[^"]*"(?=>|/|\s|\w+=)"""
+            r"""|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])"""
+            r"""|.*?(?=[<>]"""
+            r')'
+        )
+
+    def search(self, target, index=0):
+        match = self.endbracket.match(target, index)
+        if match is not None:
+            # Returning a new object in the calling thread's context
+            # resolves a thread-safety.
+            return EndBracketMatch(match)
+        return None
+
+
+class EndBracketMatch:
+    def __init__(self, match):
+        self.match = match
 
-        def start(self, n):
-            return self.match.end(n)
+    def start(self, n):
+        return self.match.end(n)
 
 
-    endbracket = _EndBracketRegEx()
+endbracket = _EndBracketRegEx()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/urls.py 
new/feedparser-6.0.1/feedparser/urls.py
--- old/feedparser-6.0.0/feedparser/urls.py     2020-09-01 05:42:10.000000000 
+0200
+++ new/feedparser-6.0.1/feedparser/urls.py     2020-09-15 13:49:59.000000000 
+0200
@@ -25,15 +25,8 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import re
-
-try:
-    import urllib.parse as urlparse
-except ImportError:
-    import urlparse as urlparse
+import urllib.parse
 
 from .html import _BaseHTMLProcessor
 
@@ -59,7 +52,7 @@
 def _urljoin(base, uri):
     uri = _urifixer.sub(r'\1\3', uri)
     try:
-        uri = urlparse.urljoin(base, uri)
+        uri = urllib.parse.urljoin(base, uri)
     except ValueError:
         uri = ''
     return uri
@@ -70,7 +63,7 @@
     # this function should only be called with a unicode string
     # strategy: if the host cannot be encoded in ascii, then
     # it'll be necessary to encode it in idn form
-    parts = list(urlparse.urlsplit(url))
+    parts = list(urllib.parse.urlsplit(url))
     try:
         parts[1].encode('ascii')
     except UnicodeEncodeError:
@@ -85,7 +78,7 @@
         parts[1] = '.'.join(newhost)
         if port:
             parts[1] += ':' + port
-        return urlparse.urlunsplit(parts)
+        return urllib.parse.urlunsplit(parts)
     else:
         return url
 
@@ -98,7 +91,7 @@
         return rel or ''
     if not rel:
         try:
-            scheme = urlparse.urlparse(base)[0]
+            scheme = urllib.parse.urlparse(base)[0]
         except ValueError:
             return ''
         if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser/util.py 
new/feedparser-6.0.1/feedparser/util.py
--- old/feedparser-6.0.0/feedparser/util.py     2020-09-01 05:42:10.000000000 
+0200
+++ new/feedparser-6.0.1/feedparser/util.py     2020-09-15 13:49:59.000000000 
+0200
@@ -25,9 +25,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 import warnings
 
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/feedparser.egg-info/PKG-INFO 
new/feedparser-6.0.1/feedparser.egg-info/PKG-INFO
--- old/feedparser-6.0.0/feedparser.egg-info/PKG-INFO   2020-09-12 
21:27:23.000000000 +0200
+++ new/feedparser-6.0.1/feedparser.egg-info/PKG-INFO   2020-09-15 
13:51:13.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: feedparser
-Version: 6.0.0
+Version: 6.0.1
 Summary: Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 
0.3, and Atom 1.0 feeds
 Home-page: https://github.com/kurtmckee/feedparser
 Author: Kurt McKee
@@ -75,4 +75,5 @@
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Topic :: Text Processing :: Markup :: XML
+Requires-Python: >=3.6
 Description-Content-Type: text/x-rst
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/setup.py 
new/feedparser-6.0.1/setup.py
--- old/feedparser-6.0.0/setup.py       2020-09-12 21:27:17.000000000 +0200
+++ new/feedparser-6.0.1/setup.py       2020-09-15 13:49:59.000000000 +0200
@@ -59,6 +59,7 @@
     platforms=['POSIX', 'Windows'],
     packages=['feedparser', 'feedparser.datetimes', 'feedparser.namespaces', 
'feedparser.parsers'],
     install_requires=['sgmllib3k'],
+    python_requires='>=3.6', 
     keywords=['atom', 'cdf', 'feed', 'parser', 'rdf', 'rss'],
     classifiers=[
         'Development Status :: 5 - Production/Stable',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/feedparser-6.0.0/tests/runtests.py 
new/feedparser-6.0.1/tests/runtests.py
--- old/feedparser-6.0.0/tests/runtests.py      2020-09-12 21:27:17.000000000 
+0200
+++ new/feedparser-6.0.1/tests/runtests.py      2020-09-15 13:49:59.000000000 
+0200
@@ -23,15 +23,13 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
 __author__ = "Kurt McKee <contac...@kurtmckee.org>"
 __license__ = "BSD 2-clause"
 
 import codecs
 import datetime
 import glob
+import http.server
 import io
 import os
 import posixpath
@@ -46,13 +44,6 @@
 import xml.sax
 import zlib
 
-# Account for stdlib differences between Python 2 and Python 3.
-try:
-    from BaseHTTPServer import HTTPServer
-    from SimpleHTTPServer import SimpleHTTPRequestHandler
-except ImportError:
-    from http.server import HTTPServer, SimpleHTTPRequestHandler
-
 import feedparser
 import feedparser.api
 import feedparser.datetimes
@@ -76,7 +67,7 @@
 _HOST = '127.0.0.1'  # also not really configurable
 
 
-class FeedParserTestRequestHandler(SimpleHTTPRequestHandler):
+class FeedParserTestRequestHandler(http.server.SimpleHTTPRequestHandler):
     headers_re = re.compile(br"^Header:\s+([^:]+):(.+)$", re.MULTILINE)
 
     def send_head(self):
@@ -93,7 +84,7 @@
             self.send_response(304)
             self.send_header('Content-type', 'text/xml')
             self.end_headers()
-            return feedparser.api._StringIO(b'')
+            return io.BytesIO(b'')
         path = self.translate_path(self.path)
         # the compression tests' filenames determine the header sent
         if self.path.startswith('/tests/compression'):
@@ -142,7 +133,7 @@
         self.httpd = None
 
     def run(self):
-        self.httpd = HTTPServer((_HOST, _PORT), FeedParserTestRequestHandler)
+        self.httpd = http.server.HTTPServer((_HOST, _PORT), 
FeedParserTestRequestHandler)
         self.ready.set()
         while self.requests:
             self.httpd.handle_request()
@@ -382,7 +373,7 @@
     """Ensure that `_open_resource()` interprets its arguments as URIs, 
file-like objects, or in-memory feeds as expected"""
 
     def test_fileobj(self):
-        r = feedparser.api._open_resource(feedparser.api._StringIO(b''), '', 
'', '', '', [], {}, {})
+        r = feedparser.api._open_resource(io.BytesIO(b''), '', '', '', '', [], 
{}, {})
         self.assertEqual(r, b'')
 
     def test_feed(self):
@@ -755,7 +746,7 @@
             doc = b"<feed>&illformed_charref</feed>"
             # Importing lxml.etree currently causes libxml2 to
             # throw SAXException instead of SAXParseException.
-            feedparser.parse(feedparser.api._StringIO(doc))
+            feedparser.parse(io.BytesIO(doc))
         self.assertTrue(True)
 
 


Reply via email to