Hello community,

Here is the log from the commit of package python-podcastparser for
openSUSE:Factory, checked in at 2019-05-22 11:14:45.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-podcastparser (Old)
 and      /work/SRC/openSUSE:Factory/.python-podcastparser.new.5148 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-podcastparser"

Wed May 22 11:14:45 2019 rev:2 rq:704279 version:0.6.4

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-podcastparser/python-podcastparser.changes       2016-12-12 12:46:27.952203512 +0100
+++ /work/SRC/openSUSE:Factory/.python-podcastparser.new.5148/python-podcastparser.changes     2019-05-22 11:14:52.138546645 +0200
@@ -1,0 +2,8 @@
+Mon May 20 14:27:09 UTC 2019 - [email protected]
+
+- version update to 0.6.4
+  * no upstream changelog
+- convert to single spec
+- run the testsuite
+
+-------------------------------------------------------------------

Old:
----
  podcastparser-0.6.0.tar.gz

New:
----
  podcastparser-0.6.4.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-podcastparser.spec ++++++
--- /var/tmp/diff_new_pack.wgKuqT/_old  2019-05-22 11:14:53.146546459 +0200
+++ /var/tmp/diff_new_pack.wgKuqT/_new  2019-05-22 11:14:53.150546459 +0200
@@ -1,7 +1,7 @@
 #
 # spec file for package python-podcastparser
 #
-# Copyright (c) 2016 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -12,52 +12,47 @@
 # license that conforms to the Open Source Definition (Version 1.9)
 # published by the Open Source Initiative.
 
-# Please submit bugfixes or comments via http://bugs.opensuse.org/
+# Please submit bugfixes or comments via https://bugs.opensuse.org/
 #
 
 
-%global pkgname podcastparser
-%if 0%{?suse_version} && 0%{?suse_version} <= 1110
-%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-%else
-BuildArch:      noarch
-%endif
-Name:           python-%{pkgname}
-Version:        0.6.0
+%{?!python_module:%define python_module() python-%{**} python3-%{**}}
+Name:           python-podcastparser
+Version:        0.6.4
 Release:        0
 Summary:        Simple, fast and efficient podcast parser
 License:        ISC
 Group:          Development/Libraries/Python
-Url:            http://gpodder.org/%{pkgname}/
-Source:         http://gpodder.org/%{pkgname}/%{pkgname}-%{version}.tar.gz
-BuildRequires:  python-devel
-BuildRequires:  python-nose
-BuildRequires:  python-setuptools
-BuildRequires:  python-xml
+URL:            https://github.com/gpodder/podcastparser
+Source:         https://files.pythonhosted.org/packages/source/p/podcastparser/podcastparser-%{version}.tar.gz
+BuildRequires:  %{python_module nose}
+BuildRequires:  %{python_module setuptools}
+BuildRequires:  %{python_module xml}
+BuildRequires:  fdupes
 Requires:       python-xml
-BuildRoot:      %{_tmppath}/%{name}-%{version}-build
+BuildArch:      noarch
+%python_subpackages
 
 %description
 The podcast parser project is a library from the gPodder project to provide an
 easy and reliable way of parsing RSS- and Atom-based podcast feeds in Python.
 
 %prep
-%setup -q -n %{pkgname}-%{version}
+%setup -q -n podcastparser-%{version}
 
 %build
-python setup.py build
-
-%if 0%{?suse_version} > 1010
-%check
-python test_%{pkgname}.py
-%endif
+%python_build
 
 %install
-python setup.py install --prefix=%{_prefix} --root=%{buildroot}
+%python_install
+%python_expand %fdupes %{buildroot}%{$python_sitelib}
+
+%check
+%python_expand nosetests-%{$python_bin_suffix}
 
-%files
-%defattr(-,root,root)
-%doc LICENSE README.md
+%files %{python_files}
+%license LICENSE
+%doc README.md
 %{python_sitelib}/*
 
 %changelog

++++++ podcastparser-0.6.0.tar.gz -> podcastparser-0.6.4.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/LICENSE new/podcastparser-0.6.4/LICENSE
--- old/podcastparser-0.6.0/LICENSE     2014-11-30 21:48:54.000000000 +0100
+++ new/podcastparser-0.6.4/LICENSE     2018-08-19 18:16:16.000000000 +0200
@@ -1,5 +1,5 @@
 
-Copyright (c) 2012, 2013, 2014, Thomas Perl <[email protected]>
+Copyright (c) 2012, 2013, 2014, 2018, Thomas Perl <[email protected]>
 
 Permission to use, copy, modify, and/or distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/PKG-INFO new/podcastparser-0.6.4/PKG-INFO
--- old/podcastparser-0.6.0/PKG-INFO    2015-05-24 20:40:07.000000000 +0200
+++ new/podcastparser-0.6.4/PKG-INFO    2018-08-19 18:16:34.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: podcastparser
-Version: 0.6.0
+Version: 0.6.4
 Summary:  Simplified, fast RSS parser 
 Home-page: http://gpodder.org/podcastparser/
 Author: Thomas Perl
@@ -12,6 +12,9 @@
         The podcast parser project is a library from the gPodder project to provide an
         easy and reliable way of parsing RSS- and Atom-based podcast feeds in Python.
         
-        * Web: http://gpodder.org/podcastparser/
+        
+        ## Automated Tests
+        
+        To run the unit tests you need [`nose`](http://nose.readthedocs.io/en/latest/).  If you have `nose` installed, use the `nosetests` command in the repository's root directory to run the tests.
         
 Platform: UNKNOWN
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/README.md new/podcastparser-0.6.4/README.md
--- old/podcastparser-0.6.0/README.md   2014-11-30 21:48:54.000000000 +0100
+++ new/podcastparser-0.6.4/README.md   2018-08-19 18:16:16.000000000 +0200
@@ -4,4 +4,7 @@
 The podcast parser project is a library from the gPodder project to provide an
 easy and reliable way of parsing RSS- and Atom-based podcast feeds in Python.
 
-* Web: http://gpodder.org/podcastparser/
+
+## Automated Tests
+
+To run the unit tests you need [`nose`](http://nose.readthedocs.io/en/latest/).  If you have `nose` installed, use the `nosetests` command in the repository's root directory to run the tests.
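
For orientation, here is a minimal usage sketch of the library being packaged; the feed URL is only a placeholder, and the parse(url, stream) call and the returned dict keys match what the test suite further down exercises:

    import urllib.request
    import podcastparser

    # Placeholder feed URL; parse() takes the feed URL plus an open stream
    # and returns a dict describing the feed.
    feedurl = 'http://example.com/feed.xml'
    parsed = podcastparser.parse(feedurl, urllib.request.urlopen(feedurl))

    print(parsed['title'])
    for episode in parsed['episodes']:
        print(episode['title'], [e['url'] for e in episode['enclosures']])
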
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/doc/index.rst new/podcastparser-0.6.4/doc/index.rst
--- old/podcastparser-0.6.0/doc/index.rst       2014-11-30 21:48:54.000000000 +0100
+++ new/podcastparser-0.6.4/doc/index.rst       2018-08-19 18:16:16.000000000 +0200
@@ -192,6 +192,49 @@
 .. automodule:: podcastparser
    :members:
 
+Unsupported Namespaces
+======================
+
+This is a list of podcast-related XML namespaces that are not yet
+supported by podcastparser, but might be in the future.
+
+Chapter Marks
+-------------
+
+- `rawvoice RSS`_: Rating, Frequency, Poster, WebM, MP4, Metamark (kind of chapter-like markers)
+- `IGOR`_: Chapter Marks
+
+.. _rawvoice RSS: http://www.rawvoice.com/rawvoiceRssModule/
+.. _IGOR: http://emonk.net/IGOR
+
+Others
+------
+
+- `libSYN RSS Extensions`_: contactPhone, contactEmail, contactTwitter, contactWebsite, wallpaper, pdf, background
+- `Comment API`_: Comments to a given item (readable via RSS)
+- `MVCB`_: Error Reports To Field (usually a mailto: link)
+- `Syndication Module`_: Update period, frequency and base (for skipping updates)
+- `Creative Commons RSS`_: Creative commons license for the content
+- `Pheedo`_: Original link to website and original link to enclosure (without going through pheedo redirect)
+- `WGS84`_: Geo-Coordinates per item
+- `Conversations Network`_: Intro duration in milliseconds (for skipping the intro), ratings
+- `purl DC Elements`_: dc:creator (author / creator of the podcast, possibly with e-mail address)
+- `Tristana`_: tristana:self (canonical URL to feed)
+- `Blip`_: Show name, show page, picture, username, language, rating, thumbnail_src, license
+
+.. _libSYN RSS Extensions: http://libsyn.com/rss-extension
+.. _Comment API: http://www.wellformedweb.org/CommentAPI/
+.. _MVCB: http://webns.net/mvcb/
+.. _Syndication Module: http://web.resource.org/rss/1.0/modules/syndication/
+.. _Creative Commons RSS: http://backend.userland.com/creativeCommonsRssModule
+.. _Pheedo: http://www.pheedo.com/namespace/pheedo
+.. _WGS84: http://www.w3.org/2003/01/geo/wgs84_pos#
+.. _Conversations Network: http://conversationsnetwork.org/rssNamespace-1.0/
+.. _purl DC Elements: http://purl.org/dc/elements/1.1/
+.. _Tristana: http://www.tristana.org
+.. _Blip: http://blip.tv/dtd/blip/1.0
+
+
 Indices and tables
 ==================
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/podcastparser.py new/podcastparser-0.6.4/podcastparser.py
--- old/podcastparser-0.6.0/podcastparser.py    2015-05-24 20:37:39.000000000 +0200
+++ new/podcastparser-0.6.4/podcastparser.py    2018-08-19 18:16:16.000000000 +0200
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 #
 # Podcastparser: A simple, fast and efficient podcast parser
-# Copyright (c) 2012, 2013, 2014, Thomas Perl <[email protected]>
+# Copyright (c) 2012, 2013, 2014, 2018, Thomas Perl <[email protected]>
 #
 # Permission to use, copy, modify, and/or distribute this software for any
 # purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
 
 # Will be parsed by setup.py to determine package metadata
 __author__ = 'Thomas Perl <[email protected]>'
-__version__ = '0.6.0'
+__version__ = '0.6.4'
 __website__ = 'http://gpodder.org/podcastparser/'
 __license__ = 'ISC License'
 
@@ -48,7 +48,17 @@
 
 try:
     # Python 2
-    from rfc822 import mktime_tz, parsedate_tz
+    from rfc822 import parsedate_tz
+    import calendar
+    # This is taken from Python 3's email._parseaddr, since it handles
+    # pre-epoch dates better than what Python 2 does (time.mktime())
+    def mktime_tz(data):
+        if data[9] is None:
+            # No zone info, so localtime is better assumption than GMT
+            return time.mktime(data[:8] + (-1,))
+        else:
+            t = calendar.timegm(data)
+            return t - data[9]
 except ImportError:
     # Python 3
     from email.utils import mktime_tz, parsedate_tz
@@ -60,9 +70,10 @@
 class Target(object):
     WANT_TEXT = False
 
-    def __init__(self, key=None, filter_func=lambda x: x.strip()):
+    def __init__(self, key=None, filter_func=lambda x: x.strip(), overwrite=True):
         self.key = key
         self.filter_func = filter_func
+        self.overwrite = overwrite
 
     def start(self, handler, attrs):
         pass
@@ -73,7 +84,7 @@
 
 class RSS(Target):
     def start(self, handler, attrs):
-        if 'xml:base' in attrs:
+        if 'xml:base' in attrs.keys():
             handler.set_base(attrs.get('xml:base'))
 
 
@@ -119,6 +130,8 @@
     WANT_TEXT = True
 
     def end(self, handler, text):
+        if not self.overwrite and handler.get_episode_attr(self.key):
+            return
         handler.set_episode_attr(self.key, self.filter_func(text))
 
 
@@ -164,7 +177,7 @@
         if url is None:
             return
 
-        url = parse_url(urlparse.urljoin(handler.base, url))
+        url = parse_url(urlparse.urljoin(handler.base, url.lstrip()))
         file_size = parse_length(attrs.get(self.file_size_attribute))
         mime_type = parse_type(attrs.get('type'))
 
@@ -517,6 +530,15 @@
     >>> parse_pubdate('Fri, 21 Nov 1997 09:55:06 -0600')
     880127706
 
+    >>> parse_pubdate('2003-12-13T00:00:00+02:00')
+    1071266400
+
+    >>> parse_pubdate('2003-12-13T18:30:02Z')
+    1071340202
+
+    >>> parse_pubdate('Mon, 02 May 1960 09:05:01 +0100')
+    -305049299
+
     >>> parse_pubdate('')
     0
 
@@ -528,13 +550,29 @@
 
     parsed = parsedate_tz(text)
     if parsed is not None:
-        return int(mktime_tz(parsed))
-
-    # TODO: Fully RFC 3339-compliant parsing (w/ timezone)
+        try:
+            pubtimeseconds = int(mktime_tz(parsed))
+            return pubtimeseconds
+        except(OverflowError,ValueError):
+            logger.warn('bad pubdate %s is before epoch or after end of time (2038)',parsed)
+            return 0
+        
     try:
         parsed = time.strptime(text[:19], '%Y-%m-%dT%H:%M:%S')
         if parsed is not None:
-            return int(time.mktime(parsed))
+            m = re.match(r'^(?:Z|([+-])([0-9]{2})[:]([0-9]{2}))$', text[19:])
+            if m:
+                parsed = list(iter(parsed))
+                if m.group(1):
+                    offset = 3600 * int(m.group(2)) + 60 * int(m.group(3))
+                    if m.group(1) == '-':
+                        offset = 0 - offset
+                else:
+                    offset = 0
+                parsed.append(offset)
+                return int(mktime_tz(tuple(parsed)))
+            else:
+                return int(time.mktime(parsed))
     except Exception:
         pass
 
@@ -560,6 +598,7 @@
     'rss/channel/item/link': EpisodeAttrRelativeLink('link'),
     'rss/channel/item/description': EpisodeAttr('description', squash_whitespace),
     'rss/channel/item/itunes:summary': EpisodeAttr('description', squash_whitespace),
+    'rss/channel/item/media:description': EpisodeAttr('description', squash_whitespace),
     'rss/channel/item/itunes:subtitle': EpisodeAttr('subtitle', squash_whitespace),
     'rss/channel/item/content:encoded': EpisodeAttr('description_html'),
     'rss/channel/item/itunes:duration': EpisodeAttr('total_time', parse_time),
@@ -582,11 +621,27 @@
     'atom:feed/atom:entry/atom:title': EpisodeAttr('title', squash_whitespace),
     'atom:feed/atom:entry/atom:link': AtomLink(),
     'atom:feed/atom:entry/atom:content': AtomContent(),
+    'atom:feed/atom:entry/content:encoded': EpisodeAttr('description_html'),
     'atom:feed/atom:entry/atom:published': EpisodeAttr('published', parse_pubdate),
+    'atom:feed/atom:entry/atom:updated': EpisodeAttr('published', parse_pubdate, overwrite=False),
+    'atom:feed/atom:entry/media:group/media:description': EpisodeAttr('description', squash_whitespace),
     'atom:feed/atom:entry/psc:chapters': PodloveChapters(),
     'atom:feed/atom:entry/psc:chapters/psc:chapter': PodloveChapter(),
 }
 
+# Derive valid root elements from the supported MAPPINGs
+VALID_ROOTS = set(path.split('/')[0] for path in MAPPING.keys())
+
+
+class FeedParseError(sax.SAXParseException, ValueError):
+    """
+    Exception raised when asked to parse an invalid feed
+    
+    This exception allows users of this library to catch exceptions
+    without having to import the XML parsing library themselves.
+    """
+    pass
+
 
 class PodcastHandler(sax.handler.ContentHandler):
     def __init__(self, url, max_episodes):
@@ -635,6 +690,13 @@
         if len(entry['chapters']) == 0:
             del entry['chapters']
 
+        # Ensures `description` does not contain HTML
+        if 'description' in entry and is_html(entry['description']):
+            if 'description_html' not in entry:
+                entry['description_html'] = entry['description']
+            entry['description'] = ''
+
+        # Sets `description` to stripped `description_html` when absent
         if 'description_html' in entry and not entry['description']:
             entry['description'] = remove_html_tags(entry['description_html'])
 
@@ -673,7 +735,14 @@
 
     def startElement(self, name, attrs):
         self.namespace = Namespace(attrs, self.namespace)
-        self.path_stack.append(self.namespace.map(name))
+        name = self.namespace.map(name)
+        if not self.path_stack and name not in VALID_ROOTS:
+            raise FeedParseError(
+                msg='Unsupported feed type: {}'.format(name),
+                exception=None,
+                locator=self._locator,
+            )
+        self.path_stack.append(name)
 
         target = MAPPING.get('/'.join(self.path_stack))
         if target is not None:
@@ -707,7 +776,10 @@
     :returns: a dict with the parsed contents of the feed
     """
     handler = PodcastHandler(url, max_episodes)
-    sax.parse(stream, handler)
+    try:
+        sax.parse(stream, handler)
+    except sax.SAXParseException as e:
+        raise FeedParseError(e.getMessage(), e.getException(), e._locator)
     return handler.data
 
 
@@ -781,6 +853,12 @@
     # urlunsplit might return "a slighty different, but equivalent URL"
     return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
 
+def is_html(text):
+    """
+    Tests whether the given string contains HTML encoded data
+    """
+    html_test = re.compile(r'<[a-z][\s\S]*>', re.IGNORECASE)
+    return bool(html_test.search(text))
 
 def remove_html_tags(html):
     """
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/test_podcastparser.py new/podcastparser-0.6.4/test_podcastparser.py
--- old/podcastparser-0.6.0/test_podcastparser.py       2015-03-22 09:35:09.000000000 +0100
+++ new/podcastparser-0.6.4/test_podcastparser.py       2018-08-19 18:16:16.000000000 +0200
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 #
 # test_podcastparser: Test Runner for the podcastparser (2012-12-29)
-# Copyright (c) 2012, 2013, 2014, Thomas Perl <[email protected]>
+# Copyright (c) 2012, 2013, 2014, 2018, Thomas Perl <[email protected]>
 # Copyright (c) 2013, Stefan Kögl <[email protected]>
 #
 # Permission to use, copy, modify, and/or distribute this software for any
@@ -21,8 +21,16 @@
 import os
 import glob
 import json
+try:
+    # Python 2
+    from StringIO import StringIO
+except ImportError:
+    # Python 3
+    from io import StringIO
+
 
 from nose.tools import assert_equal
+from nose.tools import assert_raises
 
 import podcastparser
 
@@ -39,7 +47,10 @@
             params = json.load(open(param_filename))
 
         expected = json.load(open(json_filename))
-        parsed = podcastparser.parse('file://' + rss_filename,
+        normalized_rss_filename = rss_filename
+        if os.sep == '\\':
+            normalized_rss_filename = normalized_rss_filename.replace(os.sep, '/')
+        parsed = podcastparser.parse('file://' + normalized_rss_filename,
                                      open(rss_filename), **params)
 
         assert_equal.__self__.maxDiff = None
@@ -47,3 +58,16 @@
 
     for rss_filename in glob.glob(os.path.join('tests', 'data', '*.rss')):
         yield test_parse_rss, rss_filename
+
+def test_invalid_roots():
+    def test_fail_parse(feed):
+        with assert_raises(podcastparser.FeedParseError):
+            podcastparser.parse('file://example.com/feed.xml', StringIO(feed))
+
+    feeds = [
+        '<html><body/></html>',
+        '<foo xmlns="http://example.com/foo.xml"><bar/></foo>',
+        '<baz:foo xmlns:baz="http://example.com/baz.xml"><baz:bar/></baz:foo>',
+    ]
+    for feed in feeds:
+        yield test_fail_parse, feed
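
The FeedParseError introduced in podcastparser.py and exercised by test_invalid_roots() above lets callers reject non-feed input without importing xml.sax themselves; a minimal sketch along the same lines as the test (the URL argument is only a placeholder base for relative links):

    import io
    import podcastparser

    # An HTML page is not an RSS/Atom feed, so parse() raises FeedParseError
    # instead of a raw SAX exception.
    try:
        podcastparser.parse('http://example.com/feed.xml',
                            io.StringIO('<html><body/></html>'))
    except podcastparser.FeedParseError as error:
        print('not a podcast feed:', error)
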
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/atom_content_encoded.json new/podcastparser-0.6.4/tests/data/atom_content_encoded.json
--- old/podcastparser-0.6.0/tests/data/atom_content_encoded.json        1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/atom_content_encoded.json        2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,10 @@
+{"episodes": [{"description": "Hello",
+               "description_html": "<h1>Hello</h1>",
+               "enclosures": [],
+               "guid": "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
+               "link": "http://example.org/2003/12/13/atom03",
+               "payment_url": null,
+               "published": 1071340202,
+               "title": "Atom-Powered Robots Run Amok",
+               "total_time": 0}],
+"title": "Example Feed"}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/atom_content_encoded.rss new/podcastparser-0.6.4/tests/data/atom_content_encoded.rss
--- old/podcastparser-0.6.0/tests/data/atom_content_encoded.rss 1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/atom_content_encoded.rss 2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom" xmlns:content="http://purl.org/rss/1.0/modules/content/">
+
+  <title>Example Feed</title>
+  <link href="http://example.org/"/>
+  <updated>2003-12-13T18:30:02Z</updated>
+  <author>
+    <name>John Doe</name>
+  </author>
+  <id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
+
+  <entry>
+    <title>Atom-Powered Robots Run Amok</title>
+    <link href="http://example.org/2003/12/13/atom03"/>
+    <id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
+    <updated>2003-12-13T18:30:02Z</updated>
+    <summary>Some text.</summary>
+    <content:encoded><![CDATA[<h1>Hello</h1>]]></content:encoded>
+  </entry>
+
+</feed>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/atom_published_updated.json new/podcastparser-0.6.4/tests/data/atom_published_updated.json
--- old/podcastparser-0.6.0/tests/data/atom_published_updated.json      1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/atom_published_updated.json      2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,9 @@
+{"episodes": [{"description": "",
+               "enclosures": [],
+               "guid": "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
+               "link": "http://example.org/2003/12/13/atom03",
+               "payment_url": null,
+               "published": 1071340202,
+               "title": "Atom-Powered Robots Run Amok",
+               "total_time": 0}],
+"title": "Example Feed"}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/atom_published_updated.rss new/podcastparser-0.6.4/tests/data/atom_published_updated.rss
--- old/podcastparser-0.6.0/tests/data/atom_published_updated.rss       1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/atom_published_updated.rss       2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom">
+
+  <title>Example Feed</title>
+  <link href="http://example.org/"/>
+  <updated>2003-12-13T18:30:02Z</updated>
+  <author>
+    <name>John Doe</name>
+  </author>
+  <id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
+
+  <entry>
+    <title>Atom-Powered Robots Run Amok</title>
+    <link href="http://example.org/2003/12/13/atom03"/>
+    <id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
+    <published>2003-12-13T18:30:02Z</published>
+    <updated>2017-12-13T18:30:02Z</updated>
+    <summary>Some text.</summary>
+  </entry>
+
+</feed>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/atom_updated.json new/podcastparser-0.6.4/tests/data/atom_updated.json
--- old/podcastparser-0.6.0/tests/data/atom_updated.json        1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/atom_updated.json        2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,9 @@
+{"episodes": [{"description": "",
+               "enclosures": [],
+               "guid": "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a",
+               "link": "http://example.org/2003/12/13/atom03",
+               "payment_url": null,
+               "published": 1071340202,
+               "title": "Atom-Powered Robots Run Amok",
+               "total_time": 0}],
+"title": "Example Feed"}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/atom_updated.rss new/podcastparser-0.6.4/tests/data/atom_updated.rss
--- old/podcastparser-0.6.0/tests/data/atom_updated.rss 1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/atom_updated.rss 2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom">
+
+  <title>Example Feed</title>
+  <link href="http://example.org/"/>
+  <updated>2003-12-13T18:30:02Z</updated>
+  <author>
+    <name>John Doe</name>
+  </author>
+  <id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
+
+  <entry>
+    <title>Atom-Powered Robots Run Amok</title>
+    <link href="http://example.org/2003/12/13/atom03"/>
+    <id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
+    <updated>2003-12-13T18:30:02Z</updated>
+    <summary>Some text.</summary>
+  </entry>
+
+</feed>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/html_in_description.json new/podcastparser-0.6.4/tests/data/html_in_description.json
--- old/podcastparser-0.6.0/tests/data/html_in_description.json 1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/html_in_description.json 2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,22 @@
+{
+    "title": "HTML Podcast",
+    "episodes": [
+        {
+            "title": "Ep 1",
+            "description": "This is a test",
+            "description_html": "<h1>This is a <em>test</em></h1>",
+            "published": 0,
+            "guid": "http://example.org/example.opus",
+            "link": "",
+            "total_time": 0,
+            "payment_url": null,
+            "enclosures": [
+                {
+                    "file_size": -1,
+                    "url": "http://example.org/example.opus",
+                    "mime_type": "application/octet-stream"
+                }
+            ]
+        }
+    ]
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/html_in_description.rss new/podcastparser-0.6.4/tests/data/html_in_description.rss
--- old/podcastparser-0.6.0/tests/data/html_in_description.rss  1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/html_in_description.rss  2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,12 @@
+<rss>
+    <channel>
+    <title>HTML Podcast</title>
+    <item>
+        <title>Ep 1</title>
+        <enclosure url="http://example.org/example.opus"/>
+        <description>
+            <![CDATA[ <h1>This is a <em>test</em></h1> ]]>
+        </description>
+    </item>
+    </channel>
+</rss>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/html_in_description_rss_both.json new/podcastparser-0.6.4/tests/data/html_in_description_rss_both.json
--- old/podcastparser-0.6.0/tests/data/html_in_description_rss_both.json        1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/html_in_description_rss_both.json        2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,22 @@
+{
+    "title": "HTML Podcast with Text Description",
+    "episodes": [
+        {
+            "title": "Ep 1",
+            "description": "This is also a test",
+            "description_html": "<h1>This is also a <em>test</em></h1>",
+            "published": 0,
+            "guid": "http://example.org/example.opus",
+            "link": "",
+            "total_time": 0,
+            "payment_url": null,
+            "enclosures": [
+                {
+                    "file_size": -1,
+                    "url": "http://example.org/example.opus",
+                    "mime_type": "application/octet-stream"
+                }
+            ]
+        }
+    ]
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/html_in_description_rss_both.rss new/podcastparser-0.6.4/tests/data/html_in_description_rss_both.rss
--- old/podcastparser-0.6.0/tests/data/html_in_description_rss_both.rss 1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/html_in_description_rss_both.rss 2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,13 @@
+<rss>
+    <channel>
+    <title>HTML Podcast with Text Description</title>
+    <item>
+        <title>Ep 1</title>
+        <enclosure url="http://example.org/example.opus"/>
+        <content:encoded xmlns:content="http://purl.org/rss/1.0/modules/content/">
+            <![CDATA[ <h1>This is also a <em>test</em></h1> ]]>
+        </content:encoded>
+        <description><![CDATA[ <h1>This is also a <em>test</em></h1> ]]></description>
+    </item>
+    </channel>
+</rss>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/html_in_description_rss_both_different.json new/podcastparser-0.6.4/tests/data/html_in_description_rss_both_different.json
--- old/podcastparser-0.6.0/tests/data/html_in_description_rss_both_different.json      1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/html_in_description_rss_both_different.json      2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,22 @@
+{
+    "title": "HTML Podcast with Text Description",
+    "episodes": [
+        {
+            "title": "Ep 1",
+            "description": "This is also a test",
+            "description_html": "<h1>This is also a <em>test</em></h1>",
+            "published": 0,
+            "guid": "http://example.org/example.opus",
+            "link": "",
+            "total_time": 0,
+            "payment_url": null,
+            "enclosures": [
+                {
+                    "file_size": -1,
+                    "url": "http://example.org/example.opus",
+                    "mime_type": "application/octet-stream"
+                }
+            ]
+        }
+    ]
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/html_in_description_rss_both_different.rss new/podcastparser-0.6.4/tests/data/html_in_description_rss_both_different.rss
--- old/podcastparser-0.6.0/tests/data/html_in_description_rss_both_different.rss       1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/html_in_description_rss_both_different.rss       2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,13 @@
+<rss>
+    <channel>
+    <title>HTML Podcast with Text Description</title>
+    <item>
+        <title>Ep 1</title>
+        <enclosure url="http://example.org/example.opus"/>
+        <content:encoded xmlns:content="http://purl.org/rss/1.0/modules/content/">
+            <![CDATA[ <h1>This is also a <em>test</em></h1> ]]>
+        </content:encoded>
+        <description><![CDATA[ <h1>This text will be discarded</h1> ]]></description>
+    </item>
+    </channel>
+</rss>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/leading_space_url.json new/podcastparser-0.6.4/tests/data/leading_space_url.json
--- old/podcastparser-0.6.0/tests/data/leading_space_url.json   1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/leading_space_url.json   2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,21 @@
+{
+  "title": "leading_space_url",
+  "episodes": [
+    {
+      "total_time": 0,
+      "description": "",
+      "payment_url": null,
+      "link": "",
+      "published": 0,
+      "title": "Episode",
+      "guid": "http://example.com/example.mp3",
+      "enclosures": [
+          {
+              "url": "http://example.com/example.mp3",
+              "mime_type": "application/octet-stream",
+              "file_size": -1
+          }
+      ]
+    }
+  ]
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/podcastparser-0.6.0/tests/data/leading_space_url.rss new/podcastparser-0.6.4/tests/data/leading_space_url.rss
--- old/podcastparser-0.6.0/tests/data/leading_space_url.rss    1970-01-01 01:00:00.000000000 +0100
+++ new/podcastparser-0.6.4/tests/data/leading_space_url.rss    2018-08-19 18:16:16.000000000 +0200
@@ -0,0 +1,8 @@
+<rss>
+    <channel>
+    <item>
+        <title>Episode</title>
+        <enclosure url=" http://example.com/example.mp3"/>
+    </item>
+    </channel>
+</rss>
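
The leading_space_url fixture above corresponds to the url.lstrip() change in the enclosure handling of podcastparser.py; a rough sketch of the observable effect, using the same feed snippet and the episode/enclosure keys from the JSON fixture:

    import io
    import podcastparser

    feed = ('<rss><channel><item><title>Episode</title>'
            '<enclosure url=" http://example.com/example.mp3"/>'
            '</item></channel></rss>')
    parsed = podcastparser.parse('http://example.com/feed.xml', io.StringIO(feed))

    # The leading space in the enclosure URL is stripped before the URL is resolved.
    print(parsed['episodes'][0]['enclosures'][0]['url'])
    # -> http://example.com/example.mp3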

