Hello community,

Here is the log from the commit of package youtube-dl for openSUSE:Factory 
checked in at 2018-11-26 10:31:06
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/youtube-dl (Old)
 and      /work/SRC/openSUSE:Factory/.youtube-dl.new.19453 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "youtube-dl"

Mon Nov 26 10:31:06 2018 rev:88 rq:651431 version:2018.11.23

Changes:
--------
--- /work/SRC/openSUSE:Factory/youtube-dl/python-youtube-dl.changes     
2018-11-12 09:45:42.484801875 +0100
+++ /work/SRC/openSUSE:Factory/.youtube-dl.new.19453/python-youtube-dl.changes  
2018-11-26 10:31:50.496920009 +0100
@@ -1,0 +2,28 @@
+Fri Nov 23 13:34:30 UTC 2018 - [email protected]
+
+- Update to new upstream release 2018.11.23
+  * [mixcloud] Fallback to hardcoded decryption key
+  * [nbc:news] Fix article extraction
+  * [foxsports] Fix extraction
+  * [ciscolive] Add support for ciscolive.cisco.com
+  * [nzz] Relax kaltura regex
+  * [kaltura] Limit requested MediaEntry fields
+  * [americastestkitchen] Add support for zype embeds
+  * [nova:embed] Fix extraction
+
+-------------------------------------------------------------------
+Sun Nov 18 01:35:33 UTC 2018 - [email protected]
+
+- Update to new upstream release 2018.11.18
+  * [wwe] Add support for wwe.com
+  * [vk] Detect geo restriction
+  * [openload] Use original host during extraction
+  * [atvat] Fix extraction
+  * [rte] Add support for new API endpoint
+  * [tnaflixnetwork:embed] Fix extraction
+  * [picarto] Use API and add token support
+  * [zype] Add support for player.zype.com
+  * [vivo] Fix extraction
+  * [ruutu] Update API endpoint
+
+-------------------------------------------------------------------
youtube-dl.changes: same change

Old:
----
  youtube-dl-2018.11.07.tar.gz
  youtube-dl-2018.11.07.tar.gz.sig

New:
----
  youtube-dl-2018.11.23.tar.gz
  youtube-dl-2018.11.23.tar.gz.sig

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.MO6Oxm/_old  2018-11-26 10:31:54.816914947 +0100
+++ /var/tmp/diff_new_pack.MO6Oxm/_new  2018-11-26 10:31:54.820914943 +0100
@@ -19,7 +19,7 @@
 %define modname youtube-dl
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-youtube-dl
-Version:        2018.11.07
+Version:        2018.11.23
 Release:        0
 Summary:        A python module for downloading from video sites for offline 
watching
 License:        SUSE-Public-Domain AND CC-BY-SA-3.0

++++++ youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.MO6Oxm/_old  2018-11-26 10:31:54.844914914 +0100
+++ /var/tmp/diff_new_pack.MO6Oxm/_new  2018-11-26 10:31:54.844914914 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           youtube-dl
-Version:        2018.11.07
+Version:        2018.11.23
 Release:        0
 Summary:        A tool for downloading from video sites for offline watching
 License:        SUSE-Public-Domain AND CC-BY-SA-3.0
@@ -42,23 +42,22 @@
 rm -f youtube-dl
 
 %build
-perl -i -pe '
-        s{^PREFIX\ \?=\ %_prefix/local}{PREFIX ?= %_prefix};
-        s{^BINDIR\ \?=\ \$\(PREFIX\)/bin}{BINDIR\ \?=\ %_bindir};
-        s{^MANDIR\ \?=\ \$\(PREFIX\)/man}{MANDIR\ \?=\ %_mandir};
-        s{^SHAREDIR\ \?=\ \$\(PREFIX\)/share}{SHAREDIR\ \?=\ %_datadir};' 
Makefile
 make %{?_smp_mflags}
 
 %install
-%make_install
+install -D -m 755 youtube-dl %buildroot/%_bindir/youtube-dl
+install -D -m 644 youtube-dl.bash-completion 
%buildroot/%_datadir/bash-completion/completions/youtube-dl
+install -D -m 644 youtube-dl.zsh 
%buildroot/%_datadir/zsh/site-functions/_youtube-dl
+install -D -m 644 youtube-dl.fish 
%buildroot/%_datadir/fish/completions/youtube-dl.fish
+install -D -m 644 youtube-dl.1 %buildroot/%_mandir/man1/youtube-dl.1
 
 %files
 %license LICENSE
 %doc README.txt
 %_bindir/youtube-dl
-%_mandir/man1/*
-%config %_sysconfdir/bash_completion.d/
-%config %_sysconfdir/fish/
-%config %_datadir/zsh/
+%_mandir/man1/youtube-dl.1*
+%_datadir/fish/
+%_datadir/zsh/
+%_datadir/bash-completion/
 
 %changelog

++++++ youtube-dl-2018.11.07.tar.gz -> youtube-dl-2018.11.23.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/ChangeLog new/youtube-dl/ChangeLog
--- old/youtube-dl/ChangeLog    2018-11-06 19:38:22.000000000 +0100
+++ new/youtube-dl/ChangeLog    2018-11-22 18:16:43.000000000 +0100
@@ -1,3 +1,40 @@
+version 2018.11.23
+
+Core
++ [setup.py] Add more relevant classifiers
+
+Extractors
+* [mixcloud] Fallback to hardcoded decryption key (#18016)
+* [nbc:news] Fix article extraction (#16194)
+* [foxsports] Fix extraction (#17543)
+* [loc] Relax regular expression and improve formats extraction
++ [ciscolive] Add support for ciscolive.cisco.com (#17984)
+* [nzz] Relax kaltura regex (#18228)
+* [sixplay] Fix formats extraction
+* [bitchute] Improve title extraction
+* [kaltura] Limit requested MediaEntry fields
++ [americastestkitchen] Add support for zype embeds (#18225)
++ [pornhub] Add pornhub.net alias
+* [nova:embed] Fix extraction (#18222)
+
+
+version 2018.11.18
+
+Extractors
++ [wwe] Extract subtitles
++ [wwe] Add support for playlists (#14781)
++ [wwe] Add support for wwe.com (#14781, #17450)
+* [vk] Detect geo restriction (#17767)
+* [openload] Use original host during extraction (#18211)
+* [atvat] Fix extraction (#18041)
++ [rte] Add support for new API endpoint (#18206)
+* [tnaflixnetwork:embed] Fix extraction (#18205)
+* [picarto] Use API and add token support (#16518)
++ [zype] Add support for player.zype.com (#18143)
+* [vivo] Fix extraction (#18139)
+* [ruutu] Update API endpoint (#18138)
+
+
 version 2018.11.07
 
 Extractors
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/docs/supportedsites.md 
new/youtube-dl/docs/supportedsites.md
--- old/youtube-dl/docs/supportedsites.md       2018-11-06 19:38:25.000000000 
+0100
+++ new/youtube-dl/docs/supportedsites.md       2018-11-22 18:16:45.000000000 
+0100
@@ -163,6 +163,8 @@
  - **chirbit**
  - **chirbit:profile**
  - **Cinchcast**
+ - **CiscoLiveSearch**
+ - **CiscoLiveSession**
  - **CJSW**
  - **cliphunter**
  - **Clippit**
@@ -1080,6 +1082,7 @@
  - **wrzuta.pl:playlist**
  - **WSJ**: Wall Street Journal
  - **WSJArticle**
+ - **WWE**
  - **XBef**
  - **XboxClips**
  - **XFileShare**: XFileShare based sites: DaClips, FileHoot, GorillaVid, 
MovPod, PowerWatch, Rapidvideo.ws, TheVideoBee, Vidto, Streamin.To, XVIDSTAGE, 
Vid ABC, VidBom, vidlo, RapidVideo.TV, FastVideo.me
@@ -1139,3 +1142,4 @@
  - **ZDF**
  - **ZDFChannel**
  - **zingmp3**: mp3.zing.vn
+ - **Zype**
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/setup.py new/youtube-dl/setup.py
--- old/youtube-dl/setup.py     2018-11-06 19:36:15.000000000 +0100
+++ new/youtube-dl/setup.py     2018-11-21 23:55:12.000000000 +0100
@@ -124,6 +124,8 @@
         'Development Status :: 5 - Production/Stable',
         'Environment :: Console',
         'License :: Public Domain',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
         'Programming Language :: Python :: 2.6',
         'Programming Language :: Python :: 2.7',
         'Programming Language :: Python :: 3',
@@ -132,6 +134,13 @@
         'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: Implementation',
+        'Programming Language :: Python :: Implementation :: CPython',
+        'Programming Language :: Python :: Implementation :: IronPython',
+        'Programming Language :: Python :: Implementation :: Jython',
+        'Programming Language :: Python :: Implementation :: PyPy',
     ],
 
     cmdclass={'build_lazy_extractors': build_lazy_extractors},
Binary files old/youtube-dl/youtube-dl and new/youtube-dl/youtube-dl differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/youtube-dl/youtube_dl/extractor/americastestkitchen.py 
new/youtube-dl/youtube_dl/extractor/americastestkitchen.py
--- old/youtube-dl/youtube_dl/extractor/americastestkitchen.py  2018-11-06 
19:36:16.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/americastestkitchen.py  2018-11-21 
23:55:12.000000000 +0100
@@ -43,10 +43,6 @@
 
         webpage = self._download_webpage(url, video_id)
 
-        partner_id = self._search_regex(
-            
r'src=["\'](?:https?:)?//(?:[^/]+\.)kaltura\.com/(?:[^/]+/)*(?:p|partner_id)/(\d+)',
-            webpage, 'kaltura partner id')
-
         video_data = self._parse_json(
             self._search_regex(
                 r'window\.__INITIAL_STATE__\s*=\s*({.+?})\s*;\s*</script>',
@@ -58,7 +54,18 @@
             (lambda x: x['episodeDetail']['content']['data'],
              lambda x: x['videoDetail']['content']['data']), dict)
         ep_meta = ep_data.get('full_video', {})
-        external_id = ep_data.get('external_id') or ep_meta['external_id']
+
+        zype_id = ep_meta.get('zype_id')
+        if zype_id:
+            embed_url = 
'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ'
 % zype_id
+            ie_key = 'Zype'
+        else:
+            partner_id = self._search_regex(
+                
r'src=["\'](?:https?:)?//(?:[^/]+\.)kaltura\.com/(?:[^/]+/)*(?:p|partner_id)/(\d+)',
+                webpage, 'kaltura partner id')
+            external_id = ep_data.get('external_id') or ep_meta['external_id']
+            embed_url = 'kaltura:%s:%s' % (partner_id, external_id)
+            ie_key = 'Kaltura'
 
         title = ep_data.get('title') or ep_meta.get('title')
         description = clean_html(ep_meta.get('episode_description') or 
ep_data.get(
@@ -72,8 +79,8 @@
 
         return {
             '_type': 'url_transparent',
-            'url': 'kaltura:%s:%s' % (partner_id, external_id),
-            'ie_key': 'Kaltura',
+            'url': embed_url,
+            'ie_key': ie_key,
             'title': title,
             'description': description,
             'thumbnail': thumbnail,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/atvat.py 
new/youtube-dl/youtube_dl/extractor/atvat.py
--- old/youtube-dl/youtube_dl/extractor/atvat.py        2018-11-06 
19:36:16.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/atvat.py        2018-11-21 
23:55:00.000000000 +0100
@@ -28,8 +28,10 @@
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         video_data = self._parse_json(unescapeHTML(self._search_regex(
-            r'class="[^"]*jsb_video/FlashPlayer[^"]*"[^>]+data-jsb="([^"]+)"',
-            webpage, 'player data')), display_id)['config']['initial_video']
+            [r'flashPlayerOptions\s*=\s*(["\'])(?P<json>(?:(?!\1).)+)\1',
+             
r'class="[^"]*jsb_video/FlashPlayer[^"]*"[^>]+data-jsb="(?P<json>[^"]+)"'],
+            webpage, 'player data', group='json')),
+            display_id)['config']['initial_video']
 
         video_id = video_data['id']
         video_title = video_data['title']
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/bitchute.py 
new/youtube-dl/youtube_dl/extractor/bitchute.py
--- old/youtube-dl/youtube_dl/extractor/bitchute.py     2018-11-06 
19:36:16.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/bitchute.py     2018-11-21 
23:55:12.000000000 +0100
@@ -37,7 +37,7 @@
                 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.57 Safari/537.36',
             })
 
-        title = self._search_regex(
+        title = self._html_search_regex(
             (r'<[^>]+\bid=["\']video-title[^>]+>([^<]+)', r'<title>([^<]+)'),
             webpage, 'title', default=None) or self._html_search_meta(
             'description', webpage, 'title',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/ciscolive.py 
new/youtube-dl/youtube_dl/extractor/ciscolive.py
--- old/youtube-dl/youtube_dl/extractor/ciscolive.py    1970-01-01 
01:00:00.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/ciscolive.py    2018-11-21 
23:55:12.000000000 +0100
@@ -0,0 +1,142 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import itertools
+
+from .common import InfoExtractor
+from ..compat import (
+    compat_parse_qs,
+    compat_urllib_parse_urlparse,
+)
+from ..utils import (
+    clean_html,
+    float_or_none,
+    int_or_none,
+    try_get,
+    urlencode_postdata,
+)
+
+
+class CiscoLiveBaseIE(InfoExtractor):
+    # These appear to be constant across all Cisco Live presentations
+    # and are not tied to any user session or event
+    RAINFOCUS_API_URL = 'https://events.rainfocus.com/api/%s'
+    RAINFOCUS_API_PROFILE_ID = 'Na3vqYdAlJFSxhYTYQGuMbpafMqftalz'
+    RAINFOCUS_WIDGET_ID = 'n6l4Lo05R8fiy3RpUBm447dZN8uNWoye'
+    BRIGHTCOVE_URL_TEMPLATE = 
'http://players.brightcove.net/5647924234001/SyK2FdqjM_default/index.html?videoId=%s'
+
+    HEADERS = {
+        'Origin': 'https://ciscolive.cisco.com',
+        'rfApiProfileId': RAINFOCUS_API_PROFILE_ID,
+        'rfWidgetId': RAINFOCUS_WIDGET_ID,
+    }
+
+    def _call_api(self, ep, rf_id, query, referrer, note=None):
+        headers = self.HEADERS.copy()
+        headers['Referer'] = referrer
+        return self._download_json(
+            self.RAINFOCUS_API_URL % ep, rf_id, note=note,
+            data=urlencode_postdata(query), headers=headers)
+
+    def _parse_rf_item(self, rf_item):
+        event_name = rf_item.get('eventName')
+        title = rf_item['title']
+        description = clean_html(rf_item.get('abstract'))
+        presenter_name = try_get(rf_item, lambda x: 
x['participants'][0]['fullName'])
+        bc_id = rf_item['videos'][0]['url']
+        bc_url = self.BRIGHTCOVE_URL_TEMPLATE % bc_id
+        duration = float_or_none(try_get(rf_item, lambda x: 
x['times'][0]['length']))
+        location = try_get(rf_item, lambda x: x['times'][0]['room'])
+
+        if duration:
+            duration = duration * 60
+
+        return {
+            '_type': 'url_transparent',
+            'url': bc_url,
+            'ie_key': 'BrightcoveNew',
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'creator': presenter_name,
+            'location': location,
+            'series': event_name,
+        }
+
+
+class CiscoLiveSessionIE(CiscoLiveBaseIE):
+    _VALID_URL = 
r'https?://ciscolive\.cisco\.com/on-demand-library/\??[^#]*#/session/(?P<id>[^/?&]+)'
+    _TEST = {
+        'url': 
'https://ciscolive.cisco.com/on-demand-library/?#/session/1423353499155001FoSs',
+        'md5': 'c98acf395ed9c9f766941c70f5352e22',
+        'info_dict': {
+            'id': '5803694304001',
+            'ext': 'mp4',
+            'title': '13 Smart Automations to Monitor Your Cisco IOS Network',
+            'description': 'md5:ec4a436019e09a918dec17714803f7cc',
+            'timestamp': 1530305395,
+            'upload_date': '20180629',
+            'uploader_id': '5647924234001',
+            'location': '16B Mezz.',
+        },
+    }
+
+    def _real_extract(self, url):
+        rf_id = self._match_id(url)
+        rf_result = self._call_api('session', rf_id, {'id': rf_id}, url)
+        return self._parse_rf_item(rf_result['items'][0])
+
+
+class CiscoLiveSearchIE(CiscoLiveBaseIE):
+    _VALID_URL = r'https?://ciscolive\.cisco\.com/on-demand-library/'
+    _TESTS = [{
+        'url': 
'https://ciscolive.cisco.com/on-demand-library/?search.event=ciscoliveus2018&search.technicallevel=scpsSkillLevel_aintroductory&search.focus=scpsSessionFocus_designAndDeployment#/',
+        'info_dict': {
+            'title': 'Search query',
+        },
+        'playlist_count': 5,
+    }, {
+        'url': 
'https://ciscolive.cisco.com/on-demand-library/?search.technology=scpsTechnology_applicationDevelopment&search.technology=scpsTechnology_ipv6&search.focus=scpsSessionFocus_troubleshootingTroubleshooting#/',
+        'only_matching': True,
+    }]
+
+    @classmethod
+    def suitable(cls, url):
+        return False if CiscoLiveSessionIE.suitable(url) else 
super(CiscoLiveSearchIE, cls).suitable(url)
+
+    @staticmethod
+    def _check_bc_id_exists(rf_item):
+        return int_or_none(try_get(rf_item, lambda x: x['videos'][0]['url'])) 
is not None
+
+    def _entries(self, query, url):
+        query['size'] = 50
+        query['from'] = 0
+        for page_num in itertools.count(1):
+            results = self._call_api(
+                'search', None, query, url,
+                'Downloading search JSON page %d' % page_num)
+            sl = try_get(results, lambda x: x['sectionList'][0], dict)
+            if sl:
+                results = sl
+            items = results.get('items')
+            if not items or not isinstance(items, list):
+                break
+            for item in items:
+                if not isinstance(item, dict):
+                    continue
+                if not self._check_bc_id_exists(item):
+                    continue
+                yield self._parse_rf_item(item)
+            size = int_or_none(results.get('size'))
+            if size is not None:
+                query['size'] = size
+            total = int_or_none(results.get('total'))
+            if total is not None and query['from'] + query['size'] > total:
+                break
+            query['from'] += query['size']
+
+    def _real_extract(self, url):
+        query = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
+        query['type'] = 'session'
+        return self.playlist_result(
+            self._entries(query, url), playlist_title='Search query')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/extractors.py 
new/youtube-dl/youtube_dl/extractor/extractors.py
--- old/youtube-dl/youtube_dl/extractor/extractors.py   2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/extractors.py   2018-11-21 
23:55:12.000000000 +0100
@@ -194,6 +194,10 @@
     ChirbitProfileIE,
 )
 from .cinchcast import CinchcastIE
+from .ciscolive import (
+    CiscoLiveSessionIE,
+    CiscoLiveSearchIE,
+)
 from .cjsw import CJSWIE
 from .cliphunter import CliphunterIE
 from .clippit import ClippitIE
@@ -1386,6 +1390,7 @@
     WSJIE,
     WSJArticleIE,
 )
+from .wwe import WWEIE
 from .xbef import XBefIE
 from .xboxclips import XboxClipsIE
 from .xfileshare import XFileShareIE
@@ -1478,3 +1483,4 @@
 )
 from .zdf import ZDFIE, ZDFChannelIE
 from .zingmp3 import ZingMp3IE
+from .zype import ZypeIE
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/foxsports.py 
new/youtube-dl/youtube_dl/extractor/foxsports.py
--- old/youtube-dl/youtube_dl/extractor/foxsports.py    2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/foxsports.py    2018-11-21 
23:55:12.000000000 +0100
@@ -1,43 +1,33 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import (
-    smuggle_url,
-    update_url_query,
-)
 
 
 class FoxSportsIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?foxsports\.com/(?:[^/]+/)*(?P<id>[^/]+)'
+    _VALID_URL = 
r'https?://(?:www\.)?foxsports\.com/(?:[^/]+/)*video/(?P<id>\d+)'
 
     _TEST = {
         'url': 'http://www.foxsports.com/tennessee/video/432609859715',
         'md5': 'b49050e955bebe32c301972e4012ac17',
         'info_dict': {
-            'id': 'bwduI3X_TgUB',
+            'id': '432609859715',
             'ext': 'mp4',
             'title': 'Courtney Lee on going up 2-0 in series vs. Blazers',
             'description': 'Courtney Lee talks about Memphis being focused.',
-            'upload_date': '20150423',
-            'timestamp': 1429761109,
+            # TODO: fix timestamp
+            'upload_date': '19700101',  # '20150423',
+            # 'timestamp': 1429761109,
             'uploader': 'NEWA-FNG-FOXSPORTS',
         },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
         'add_ie': ['ThePlatform'],
     }
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        webpage = self._download_webpage(url, video_id)
-
-        config = self._parse_json(
-            self._html_search_regex(
-                
r"""class="[^"]*(?:fs-player|platformPlayer-wrapper)[^"]*".+?data-player-config='([^']+)'""",
-                webpage, 'data player config'),
-            video_id)
-
-        return self.url_result(smuggle_url(update_url_query(
-            config['releaseURL'], {
-                'mbr': 'true',
-                'switch': 'http',
-            }), {'force_smil_url': True}))
+        return self.url_result(
+            'https://feed.theplatform.com/f/BKQ29B/foxsports-all?byId=' + 
video_id, 'ThePlatformFeed')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/generic.py 
new/youtube-dl/youtube_dl/extractor/generic.py
--- old/youtube-dl/youtube_dl/extractor/generic.py      2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/generic.py      2018-11-21 
23:55:01.000000000 +0100
@@ -114,6 +114,7 @@
 from .foxnews import FoxNewsIE
 from .viqeo import ViqeoIE
 from .expressen import ExpressenIE
+from .zype import ZypeIE
 
 
 class GenericIE(InfoExtractor):
@@ -2071,6 +2072,20 @@
             'playlist_count': 6,
         },
         {
+            # Zype embed
+            'url': 
'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
+            'info_dict': {
+                'id': '5b400b834b32992a310622b9',
+                'ext': 'mp4',
+                'title': 'Smoky Barbecue Favorites',
+                'thumbnail': r're:^https?://.*\.jpe?g',
+            },
+            'add_ie': [ZypeIE.ie_key()],
+            'params': {
+                'skip_download': True,
+            },
+        },
+        {
             # videojs embed
             'url': 'https://video.sibnet.ru/shell.php?videoid=3422904',
             'info_dict': {
@@ -3129,6 +3144,11 @@
             return self.playlist_from_matches(
                 expressen_urls, video_id, video_title, ie=ExpressenIE.ie_key())
 
+        zype_urls = ZypeIE._extract_urls(webpage)
+        if zype_urls:
+            return self.playlist_from_matches(
+                zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
+
         # Look for HTML5 media
         entries = self._parse_html5_media_entries(url, webpage, video_id, 
m3u8_id='hls')
         if entries:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/kaltura.py 
new/youtube-dl/youtube_dl/extractor/kaltura.py
--- old/youtube-dl/youtube_dl/extractor/kaltura.py      2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/kaltura.py      2018-11-21 
23:55:12.000000000 +0100
@@ -192,6 +192,8 @@
                 'entryId': video_id,
                 'service': 'baseentry',
                 'ks': '{1:result:ks}',
+                'responseProfile:fields': 
'createdAt,dataUrl,duration,name,plays,thumbnailUrl,userId',
+                'responseProfile:type': 1,
             },
             {
                 'action': 'getbyentryid',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/libraryofcongress.py 
new/youtube-dl/youtube_dl/extractor/libraryofcongress.py
--- old/youtube-dl/youtube_dl/extractor/libraryofcongress.py    2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/libraryofcongress.py    2018-11-21 
23:55:12.000000000 +0100
@@ -16,16 +16,15 @@
 class LibraryOfCongressIE(InfoExtractor):
     IE_NAME = 'loc'
     IE_DESC = 'Library of Congress'
-    _VALID_URL = 
r'https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9]+)'
+    _VALID_URL = 
r'https?://(?:www\.)?loc\.gov/(?:item/|today/cyberlc/feature_wdesc\.php\?.*\brec=)(?P<id>[0-9a-z_.]+)'
     _TESTS = [{
         # embedded via <div class="media-player"
         'url': 'http://loc.gov/item/90716351/',
-        'md5': '353917ff7f0255aa6d4b80a034833de8',
+        'md5': '6ec0ae8f07f86731b1b2ff70f046210a',
         'info_dict': {
             'id': '90716351',
             'ext': 'mp4',
             'title': "Pa's trip to Mars",
-            'thumbnail': r're:^https?://.*\.jpg$',
             'duration': 0,
             'view_count': int,
         },
@@ -57,6 +56,12 @@
         'params': {
             'skip_download': True,
         },
+    }, {
+        'url': 'https://www.loc.gov/item/ihas.200197114/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.loc.gov/item/afc1981005_afs20503/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -67,12 +72,13 @@
             (r'id=(["\'])media-player-(?P<id>.+?)\1',
              r'<video[^>]+id=(["\'])uuid-(?P<id>.+?)\1',
              r'<video[^>]+data-uuid=(["\'])(?P<id>.+?)\1',
-             r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1'),
+             r'mediaObjectId\s*:\s*(["\'])(?P<id>.+?)\1',
+             r'data-tab="share-media-(?P<id>[0-9A-F]{32})"'),
             webpage, 'media id', group='id')
 
         data = self._download_json(
             'https://media.loc.gov/services/v1/media?id=%s&context=json' % 
media_id,
-            video_id)['mediaObject']
+            media_id)['mediaObject']
 
         derivative = data['derivatives'][0]
         media_url = derivative['derivativeUrl']
@@ -89,25 +95,29 @@
         if ext not in ('mp4', 'mp3'):
             media_url += '.mp4' if is_video else '.mp3'
 
-        if 'vod/mp4:' in media_url:
-            formats = [{
-                'url': media_url.replace('vod/mp4:', 'hls-vod/media/') + 
'.m3u8',
+        formats = []
+        if '/vod/mp4:' in media_url:
+            formats.append({
+                'url': media_url.replace('/vod/mp4:', '/hls-vod/media/') + 
'.m3u8',
                 'format_id': 'hls',
                 'ext': 'mp4',
                 'protocol': 'm3u8_native',
                 'quality': 1,
-            }]
-        elif 'vod/mp3:' in media_url:
-            formats = [{
-                'url': media_url.replace('vod/mp3:', ''),
-                'vcodec': 'none',
-            }]
+            })
+        http_format = {
+            'url': re.sub(r'(://[^/]+/)(?:[^/]+/)*(?:mp4|mp3):', r'\1', 
media_url),
+            'format_id': 'http',
+            'quality': 1,
+        }
+        if not is_video:
+            http_format['vcodec'] = 'none'
+        formats.append(http_format)
 
         download_urls = set()
         for m in re.finditer(
                 
r'<option[^>]+value=(["\'])(?P<url>.+?)\1[^>]+data-file-download=[^>]+>\s*(?P<id>.+?)(?:(?:&nbsp;|\s+)\((?P<size>.+?)\))?\s*<',
 webpage):
             format_id = m.group('id').lower()
-            if format_id == 'gif':
+            if format_id in ('gif', 'jpeg'):
                 continue
             download_url = m.group('url')
             if download_url in download_urls:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/mixcloud.py 
new/youtube-dl/youtube_dl/extractor/mixcloud.py
--- old/youtube-dl/youtube_dl/extractor/mixcloud.py     2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/mixcloud.py     2018-11-21 
23:55:12.000000000 +0100
@@ -161,11 +161,17 @@
             stream_info = info_json['streamInfo']
             formats = []
 
+            def decrypt_url(f_url):
+                for k in (key, 
'IFYOUWANTTHEARTISTSTOGETPAIDDONOTDOWNLOADFROMMIXCLOUD'):
+                    decrypted_url = self._decrypt_xor_cipher(k, f_url)
+                    if re.search(r'^https?://[0-9a-z.]+/[0-9A-Za-z/.?=&_-]+$', 
decrypted_url):
+                        return decrypted_url
+
             for url_key in ('url', 'hlsUrl', 'dashUrl'):
                 format_url = stream_info.get(url_key)
                 if not format_url:
                     continue
-                decrypted = self._decrypt_xor_cipher(key, 
compat_b64decode(format_url))
+                decrypted = decrypt_url(compat_b64decode(format_url))
                 if not decrypted:
                     continue
                 if url_key == 'hlsUrl':
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/nbc.py 
new/youtube-dl/youtube_dl/extractor/nbc.py
--- old/youtube-dl/youtube_dl/extractor/nbc.py  2018-11-06 19:36:17.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/nbc.py  2018-11-21 23:55:12.000000000 
+0100
@@ -9,10 +9,8 @@
 from .adobepass import AdobePassIE
 from ..compat import compat_urllib_parse_unquote
 from ..utils import (
-    find_xpath_attr,
     smuggle_url,
     try_get,
-    unescapeHTML,
     update_url_query,
     int_or_none,
 )
@@ -269,27 +267,14 @@
 
 
 class NBCNewsIE(ThePlatformIE):
-    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/
-        (?:video/.+?/(?P<id>\d+)|
-        ([^/]+/)*(?:.*-)?(?P<mpx_id>[^/?]+))
-        '''
+    _VALID_URL = 
r'(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/([^/]+/)*(?:.*-)?(?P<id>[^/?]+)'
 
     _TESTS = [
         {
-            'url': 'http://www.nbcnews.com/video/nbc-news/52753292',
-            'md5': '47abaac93c6eaf9ad37ee6c4463a5179',
-            'info_dict': {
-                'id': '52753292',
-                'ext': 'flv',
-                'title': 'Crew emerges after four-month Mars food study',
-                'description': 'md5:24e632ffac72b35f8b67a12d1b6ddfc1',
-            },
-        },
-        {
             'url': 
'http://www.nbcnews.com/watch/nbcnews-com/how-twitter-reacted-to-the-snowden-interview-269389891880',
             'md5': 'af1adfa51312291a017720403826bb64',
             'info_dict': {
-                'id': 'p_tweet_snow_140529',
+                'id': '269389891880',
                 'ext': 'mp4',
                 'title': 'How Twitter Reacted To The Snowden Interview',
                 'description': 'md5:65a0bd5d76fe114f3c2727aa3a81fe64',
@@ -313,7 +298,7 @@
             'url': 
'http://www.nbcnews.com/nightly-news/video/nightly-news-with-brian-williams-full-broadcast-february-4-394064451844',
             'md5': '73135a2e0ef819107bbb55a5a9b2a802',
             'info_dict': {
-                'id': 'nn_netcast_150204',
+                'id': '394064451844',
                 'ext': 'mp4',
                 'title': 'Nightly News with Brian Williams Full Broadcast 
(February 4)',
                 'description': 'md5:1c10c1eccbe84a26e5debb4381e2d3c5',
@@ -326,7 +311,7 @@
             'url': 
'http://www.nbcnews.com/business/autos/volkswagen-11-million-vehicles-could-have-suspect-software-emissions-scandal-n431456',
             'md5': 'a49e173825e5fcd15c13fc297fced39d',
             'info_dict': {
-                'id': 'x_lon_vwhorn_150922',
+                'id': '529953347624',
                 'ext': 'mp4',
                 'title': 'Volkswagen U.S. Chief:\xa0 We Have Totally Screwed 
Up',
                 'description': 'md5:c8be487b2d80ff0594c005add88d8351',
@@ -339,7 +324,7 @@
             'url': 
'http://www.today.com/video/see-the-aurora-borealis-from-space-in-stunning-new-nasa-video-669831235788',
             'md5': '118d7ca3f0bea6534f119c68ef539f71',
             'info_dict': {
-                'id': 'tdy_al_space_160420',
+                'id': '669831235788',
                 'ext': 'mp4',
                 'title': 'See the aurora borealis from space in stunning new 
NASA video',
                 'description': 'md5:74752b7358afb99939c5f8bb2d1d04b1',
@@ -352,7 +337,7 @@
             'url': 
'http://www.msnbc.com/all-in-with-chris-hayes/watch/the-chaotic-gop-immigration-vote-314487875924',
             'md5': '6d236bf4f3dddc226633ce6e2c3f814d',
             'info_dict': {
-                'id': 'n_hayes_Aimm_140801_272214',
+                'id': '314487875924',
                 'ext': 'mp4',
                 'title': 'The chaotic GOP immigration vote',
                 'description': 'The Republican House votes on a border bill 
that has no chance of getting through the Senate or signed by the President and 
is drawing criticism from all sides.',
@@ -374,60 +359,22 @@
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        if video_id is not None:
-            all_info = 
self._download_xml('http://www.nbcnews.com/id/%s/displaymode/1219' % video_id, 
video_id)
-            info = all_info.find('video')
-
-            return {
-                'id': video_id,
-                'title': info.find('headline').text,
-                'ext': 'flv',
-                'url': find_xpath_attr(info, 'media', 'type', 
'flashVideo').text,
-                'description': info.find('caption').text,
-                'thumbnail': find_xpath_attr(info, 'media', 'type', 
'thumbnail').text,
-            }
-        else:
-            # "feature" and "nightly-news" pages use theplatform.com
-            video_id = mobj.group('mpx_id')
+        video_id = self._match_id(url)
+        if not video_id.isdigit():
             webpage = self._download_webpage(url, video_id)
 
-            filter_param = 'byId'
-            bootstrap_json = self._search_regex(
-                
[r'(?m)(?:var\s+(?:bootstrapJson|playlistData)|NEWS\.videoObj)\s*=\s*({.+});?\s*$',
-                 r'videoObj\s*:\s*({.+})', r'data-video="([^"]+)"',
-                 r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);'],
-                webpage, 'bootstrap json', default=None)
-            if bootstrap_json:
-                bootstrap = self._parse_json(
-                    bootstrap_json, video_id, transform_source=unescapeHTML)
-
-                info = None
-                if 'results' in bootstrap:
-                    info = bootstrap['results'][0]['video']
-                elif 'video' in bootstrap:
-                    info = bootstrap['video']
-                elif 'msnbcVideoInfo' in bootstrap:
-                    info = bootstrap['msnbcVideoInfo']['meta']
-                elif 'msnbcThePlatform' in bootstrap:
-                    info = 
bootstrap['msnbcThePlatform']['videoPlayer']['video']
-                else:
-                    info = bootstrap
-
-                if 'guid' in info:
-                    video_id = info['guid']
-                    filter_param = 'byGuid'
-                elif 'mpxId' in info:
-                    video_id = info['mpxId']
-
-            return {
-                '_type': 'url_transparent',
-                'id': video_id,
-                # http://feed.theplatform.com/f/2E2eJC/nbcnews also works
-                'url': 
update_url_query('http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews', 
{filter_param: video_id}),
-                'ie_key': 'ThePlatformFeed',
-            }
+            data = self._parse_json(self._search_regex(
+                r'window\.__data\s*=\s*({.+});', webpage,
+                'bootstrap json'), video_id)
+            video_id = 
data['article']['content'][0]['primaryMedia']['video']['mpxMetadata']['id']
+
+        return {
+            '_type': 'url_transparent',
+            'id': video_id,
+            # http://feed.theplatform.com/f/2E2eJC/nbcnews also works
+            'url': 
update_url_query('http://feed.theplatform.com/f/2E2eJC/nnd_NBCNews', {'byId': 
video_id}),
+            'ie_key': 'ThePlatformFeed',
+        }
 
 
 class NBCOlympicsIE(InfoExtractor):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/nova.py 
new/youtube-dl/youtube_dl/extractor/nova.py
--- old/youtube-dl/youtube_dl/extractor/nova.py 2018-11-06 19:36:17.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/nova.py 2018-11-21 23:55:12.000000000 
+0100
@@ -35,7 +35,7 @@
 
         bitrates = self._parse_json(
             self._search_regex(
-                r'(?s)bitrates\s*=\s*({.+?})\s*;', webpage, 'formats'),
+                r'(?s)(?:src|bitrates)\s*=\s*({.+?})\s*;', webpage, 'formats'),
             video_id, transform_source=js_to_json)
 
         QUALITIES = ('lq', 'mq', 'hq', 'hd')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/nzz.py 
new/youtube-dl/youtube_dl/extractor/nzz.py
--- old/youtube-dl/youtube_dl/extractor/nzz.py  2018-11-06 19:36:17.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/nzz.py  2018-11-21 23:55:12.000000000 
+0100
@@ -11,20 +11,27 @@
 
 class NZZIE(InfoExtractor):
     _VALID_URL = 
r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
-    _TEST = {
+    _TESTS = [{
         'url': 
'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
         'info_dict': {
             'id': '9153',
         },
         'playlist_mincount': 6,
-    }
+    }, {
+        'url': 
'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',
+        'info_dict': {
+            'id': '1368112',
+        },
+        'playlist_count': 1,
+    }]
 
     def _real_extract(self, url):
         page_id = self._match_id(url)
         webpage = self._download_webpage(url, page_id)
 
         entries = []
-        for player_element in 
re.findall(r'(<[^>]+class="kalturaPlayer"[^>]*>)', webpage):
+        for player_element in re.findall(
+                r'(<[^>]+class="kalturaPlayer[^"]*"[^>]*>)', webpage):
             player_params = extract_attributes(player_element)
             if player_params.get('data-type') not in 
('kaltura_singleArticle',):
                 self.report_warning('Unsupported player type')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/openload.py 
new/youtube-dl/youtube_dl/extractor/openload.py
--- old/youtube-dl/youtube_dl/extractor/openload.py     2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/openload.py     2018-11-21 
23:55:02.000000000 +0100
@@ -243,7 +243,18 @@
 
 
 class OpenloadIE(InfoExtractor):
-    _VALID_URL = 
r'https?://(?:www\.)?(?:openload\.(?:co|io|link)|oload\.(?:tv|stream|site|xyz|win|download|cloud|cc|icu|fun))/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?P<host>
+                            (?:www\.)?
+                            (?:
+                                openload\.(?:co|io|link)|
+                                
oload\.(?:tv|stream|site|xyz|win|download|cloud|cc|icu|fun)
+                            )
+                        )/
+                        (?:f|embed)/
+                        (?P<id>[a-zA-Z0-9-_]+)
+                    '''
 
     _TESTS = [{
         'url': 'https://openload.co/f/kUEfGclsU9o',
@@ -334,8 +345,11 @@
             webpage)
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        url_pattern = 'https://openload.co/%%s/%s/' % video_id
+        mobj = re.match(self._VALID_URL, url)
+        host = mobj.group('host')
+        video_id = mobj.group('id')
+
+        url_pattern = 'https://%s/%%s/%s/' % (host, video_id)
         headers = {
             'User-Agent': self._USER_AGENT,
         }
@@ -368,7 +382,7 @@
                            r'>\s*([\w~-]+~[a-f0-9:]+~[\w~-]+)'), webpage,
                           'stream URL'))
 
-        video_url = 'https://openload.co/stream/%s?mime=true' % decoded_id
+        video_url = 'https://%s/stream/%s?mime=true' % (host, decoded_id)
 
         title = self._og_search_title(webpage, default=None) or 
self._search_regex(
             r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
@@ -379,7 +393,7 @@
         entry = entries[0] if entries else {}
         subtitles = entry.get('subtitles')
 
-        info_dict = {
+        return {
             'id': video_id,
             'title': title,
             'thumbnail': entry.get('thumbnail') or 
self._og_search_thumbnail(webpage, default=None),
@@ -388,4 +402,3 @@
             'subtitles': subtitles,
             'http_headers': headers,
         }
-        return info_dict
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/picarto.py 
new/youtube-dl/youtube_dl/extractor/picarto.py
--- old/youtube-dl/youtube_dl/extractor/picarto.py      2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/picarto.py      2018-11-21 
23:55:02.000000000 +0100
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
 import time
 
 from .common import InfoExtractor
@@ -15,7 +16,7 @@
 
 
 class PicartoIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www.)?picarto\.tv/(?P<id>[a-zA-Z0-9]+)'
+    _VALID_URL = 
r'https?://(?:www.)?picarto\.tv/(?P<id>[a-zA-Z0-9]+)(?:/(?P<token>[a-zA-Z0-9]+))?'
     _TEST = {
         'url': 'https://picarto.tv/Setz',
         'info_dict': {
@@ -33,20 +34,14 @@
         return False if PicartoVodIE.suitable(url) else super(PicartoIE, 
cls).suitable(url)
 
     def _real_extract(self, url):
-        channel_id = self._match_id(url)
-        stream_page = self._download_webpage(url, channel_id)
+        mobj = re.match(self._VALID_URL, url)
+        channel_id = mobj.group('id')
 
-        if '>This channel does not exist' in stream_page:
-            raise ExtractorError(
-                'Channel %s does not exist' % channel_id, expected=True)
-
-        player = self._parse_json(
-            self._search_regex(
-                r'(?s)playerSettings\[\d+\]\s*=\s*(\{.+?\}\s*\n)', stream_page,
-                'player settings'),
-            channel_id, transform_source=js_to_json)
+        metadata = self._download_json(
+            'https://api.picarto.tv/v1/channel/name/' + channel_id,
+            channel_id)
 
-        if player.get('online') is False:
+        if metadata.get('online') is False:
             raise ExtractorError('Stream is offline', expected=True)
 
         cdn_data = self._download_json(
@@ -54,20 +49,13 @@
             data=urlencode_postdata({'loadbalancinginfo': channel_id}),
             note='Downloading load balancing info')
 
-        def get_event(key):
-            return try_get(player, lambda x: x['event'][key], compat_str) or ''
-
+        token = mobj.group('token') or 'public'
         params = {
-            'token': player.get('token') or '',
-            'ticket': get_event('ticket'),
             'con': int(time.time() * 1000),
-            'type': get_event('ticket'),
-            'scope': get_event('scope'),
+            'token': token,
         }
 
         prefered_edge = cdn_data.get('preferedEdge')
-        default_tech = player.get('defaultTech')
-
         formats = []
 
         for edge in cdn_data['edges']:
@@ -81,8 +69,6 @@
                 preference = 0
                 if edge_id == prefered_edge:
                     preference += 1
-                if tech_type == default_tech:
-                    preference += 1
                 format_id = []
                 if edge_id:
                     format_id.append(edge_id)
@@ -109,7 +95,7 @@
                     continue
         self._sort_formats(formats)
 
-        mature = player.get('mature')
+        mature = metadata.get('adult')
         if mature is None:
             age_limit = None
         else:
@@ -117,9 +103,11 @@
 
         return {
             'id': channel_id,
-            'title': self._live_title(channel_id),
+            'title': self._live_title(metadata.get('title') or channel_id),
             'is_live': True,
-            'thumbnail': player.get('vodThumb'),
+            'thumbnail': try_get(metadata, lambda x: x['thumbnails']['web']),
+            'channel': channel_id,
+            'channel_url': 'https://picarto.tv/%s' % channel_id,
             'age_limit': age_limit,
             'formats': formats,
         }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/pornhub.py 
new/youtube-dl/youtube_dl/extractor/pornhub.py
--- old/youtube-dl/youtube_dl/extractor/pornhub.py      2018-11-06 
19:36:17.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/pornhub.py      2018-11-21 
23:55:12.000000000 +0100
@@ -27,7 +27,7 @@
     _VALID_URL = r'''(?x)
                     https?://
                         (?:
-                            
(?:[^/]+\.)?pornhub\.com/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
+                            
(?:[^/]+\.)?pornhub\.(?:com|net)/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
                             (?:www\.)?thumbzilla\.com/video/
                         )
                         (?P<id>[\da-z]+)
@@ -121,6 +121,9 @@
     }, {
         'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
         'only_matching': True,
+    }, {
+        'url': 'https://www.pornhub.net/view_video.php?viewkey=203640933',
+        'only_matching': True,
     }]
 
     @staticmethod
@@ -340,7 +343,7 @@
 
 
 class PornHubPlaylistIE(PornHubPlaylistBaseIE):
-    _VALID_URL = r'https?://(?:[^/]+\.)?pornhub\.com/playlist/(?P<id>\d+)'
+    _VALID_URL = 
r'https?://(?:[^/]+\.)?pornhub\.(?:com|net)/playlist/(?P<id>\d+)'
     _TESTS = [{
         'url': 'http://www.pornhub.com/playlist/4667351',
         'info_dict': {
@@ -355,7 +358,7 @@
 
 
 class PornHubUserVideosIE(PornHubPlaylistBaseIE):
-    _VALID_URL = 
r'https?://(?:[^/]+\.)?pornhub\.com/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
+    _VALID_URL = 
r'https?://(?:[^/]+\.)?pornhub\.(?:com|net)/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/]+)/videos'
     _TESTS = [{
         'url': 'http://www.pornhub.com/users/zoe_ph/videos/public',
         'info_dict': {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/rte.py 
new/youtube-dl/youtube_dl/extractor/rte.py
--- old/youtube-dl/youtube_dl/extractor/rte.py  2018-11-06 19:36:18.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/rte.py  2018-11-21 23:55:02.000000000 
+0100
@@ -8,7 +8,10 @@
 from ..utils import (
     float_or_none,
     parse_iso8601,
+    str_or_none,
+    try_get,
     unescapeHTML,
+    url_or_none,
     ExtractorError,
 )
 
@@ -17,65 +20,87 @@
     def _real_extract(self, url):
         item_id = self._match_id(url)
 
-        try:
-            json_string = self._download_json(
-                
'http://www.rte.ie/rteavgen/getplaylist/?type=web&format=json&id=' + item_id,
-                item_id)
-        except ExtractorError as ee:
-            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404:
-                error_info = self._parse_json(ee.cause.read().decode(), 
item_id, fatal=False)
-                if error_info:
-                    raise ExtractorError(
-                        '%s said: %s' % (self.IE_NAME, error_info['message']),
-                        expected=True)
-            raise
-
-        # NB the string values in the JSON are stored using XML escaping(!)
-        show = json_string['shows'][0]
-        title = unescapeHTML(show['title'])
-        description = unescapeHTML(show.get('description'))
-        thumbnail = show.get('thumbnail')
-        duration = float_or_none(show.get('duration'), 1000)
-        timestamp = parse_iso8601(show.get('published'))
-
-        mg = show['media:group'][0]
-
+        info_dict = {}
         formats = []
 
-        if mg.get('url'):
-            m = 
re.match(r'(?P<url>rtmpe?://[^/]+)/(?P<app>.+)/(?P<playpath>mp4:.*)', mg['url'])
-            if m:
-                m = m.groupdict()
-                formats.append({
-                    'url': m['url'] + '/' + m['app'],
-                    'app': m['app'],
-                    'play_path': m['playpath'],
-                    'player_url': url,
-                    'ext': 'flv',
-                    'format_id': 'rtmp',
-                })
-
-        if mg.get('hls_server') and mg.get('hls_url'):
-            formats.extend(self._extract_m3u8_formats(
-                mg['hls_server'] + mg['hls_url'], item_id, 'mp4',
-                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
-
-        if mg.get('hds_server') and mg.get('hds_url'):
-            formats.extend(self._extract_f4m_formats(
-                mg['hds_server'] + mg['hds_url'], item_id,
-                f4m_id='hds', fatal=False))
+        ENDPOINTS = (
+            
'https://feeds.rasset.ie/rteavgen/player/playlist?type=iptv&format=json&showId=',
+            'http://www.rte.ie/rteavgen/getplaylist/?type=web&format=json&id=',
+        )
+
+        for num, ep_url in enumerate(ENDPOINTS, start=1):
+            try:
+                data = self._download_json(ep_url + item_id, item_id)
+            except ExtractorError as ee:
+                if num < len(ENDPOINTS) or formats:
+                    continue
+                if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 
404:
+                    error_info = self._parse_json(ee.cause.read().decode(), 
item_id, fatal=False)
+                    if error_info:
+                        raise ExtractorError(
+                            '%s said: %s' % (self.IE_NAME, 
error_info['message']),
+                            expected=True)
+                raise
+
+            # NB the string values in the JSON are stored using XML escaping(!)
+            show = try_get(data, lambda x: x['shows'][0], dict)
+            if not show:
+                continue
+
+            if not info_dict:
+                title = unescapeHTML(show['title'])
+                description = unescapeHTML(show.get('description'))
+                thumbnail = show.get('thumbnail')
+                duration = float_or_none(show.get('duration'), 1000)
+                timestamp = parse_iso8601(show.get('published'))
+                info_dict = {
+                    'id': item_id,
+                    'title': title,
+                    'description': description,
+                    'thumbnail': thumbnail,
+                    'timestamp': timestamp,
+                    'duration': duration,
+                }
+
+            mg = try_get(show, lambda x: x['media:group'][0], dict)
+            if not mg:
+                continue
+
+            if mg.get('url'):
+                m = 
re.match(r'(?P<url>rtmpe?://[^/]+)/(?P<app>.+)/(?P<playpath>mp4:.*)', mg['url'])
+                if m:
+                    m = m.groupdict()
+                    formats.append({
+                        'url': m['url'] + '/' + m['app'],
+                        'app': m['app'],
+                        'play_path': m['playpath'],
+                        'player_url': url,
+                        'ext': 'flv',
+                        'format_id': 'rtmp',
+                    })
+
+            if mg.get('hls_server') and mg.get('hls_url'):
+                formats.extend(self._extract_m3u8_formats(
+                    mg['hls_server'] + mg['hls_url'], item_id, 'mp4',
+                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))
+
+            if mg.get('hds_server') and mg.get('hds_url'):
+                formats.extend(self._extract_f4m_formats(
+                    mg['hds_server'] + mg['hds_url'], item_id,
+                    f4m_id='hds', fatal=False))
+
+            mg_rte_server = str_or_none(mg.get('rte:server'))
+            mg_url = str_or_none(mg.get('url'))
+            if mg_rte_server and mg_url:
+                hds_url = url_or_none(mg_rte_server + mg_url)
+                if hds_url:
+                    formats.extend(self._extract_f4m_formats(
+                        hds_url, item_id, f4m_id='hds', fatal=False))
 
         self._sort_formats(formats)
 
-        return {
-            'id': item_id,
-            'title': title,
-            'description': description,
-            'thumbnail': thumbnail,
-            'timestamp': timestamp,
-            'duration': duration,
-            'formats': formats,
-        }
+        info_dict['formats'] = formats
+        return info_dict
 
 
 class RteIE(RteBaseIE):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/ruutu.py 
new/youtube-dl/youtube_dl/extractor/ruutu.py
--- old/youtube-dl/youtube_dl/extractor/ruutu.py        2018-11-06 
19:36:18.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/ruutu.py        2018-11-21 
23:55:02.000000000 +0100
@@ -65,7 +65,8 @@
         video_id = self._match_id(url)
 
         video_xml = self._download_xml(
-            'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, 
video_id)
+            'https://gatling.nelonenmedia.fi/media-xml-cache', video_id,
+            query={'id': video_id})
 
         formats = []
         processed_urls = []
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/shared.py 
new/youtube-dl/youtube_dl/extractor/shared.py
--- old/youtube-dl/youtube_dl/extractor/shared.py       2018-11-06 
19:36:18.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/shared.py       2018-11-21 
23:55:02.000000000 +0100
@@ -5,6 +5,7 @@
 from ..utils import (
     ExtractorError,
     int_or_none,
+    url_or_none,
     urlencode_postdata,
 )
 
@@ -86,9 +87,16 @@
     }
 
     def _extract_video_url(self, webpage, video_id, *args):
+        def decode_url(encoded_url):
+            return compat_b64decode(encoded_url).decode('utf-8')
+
+        stream_url = url_or_none(decode_url(self._search_regex(
+            r'data-stream\s*=\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
+            'stream url', default=None, group='url')))
+        if stream_url:
+            return stream_url
         return self._parse_json(
             self._search_regex(
                 r'InitializeStream\s*\(\s*(["\'])(?P<url>(?:(?!\1).)+)\1',
                 webpage, 'stream', group='url'),
-            video_id,
-            transform_source=lambda x: compat_b64decode(x).decode('utf-8'))[0]
+            video_id, transform_source=decode_url)[0]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/sixplay.py 
new/youtube-dl/youtube_dl/extractor/sixplay.py
--- old/youtube-dl/youtube_dl/extractor/sixplay.py      2018-11-06 
19:36:18.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/sixplay.py      2018-11-21 
23:55:12.000000000 +0100
@@ -64,7 +64,7 @@
         for asset in clip_data['assets']:
             asset_url = asset.get('full_physical_path')
             protocol = asset.get('protocol')
-            if not asset_url or protocol == 'primetime' or asset_url in urls:
+            if not asset_url or protocol == 'primetime' or asset.get('type') 
== 'usp_hlsfp_h264' or asset_url in urls:
                 continue
             urls.append(asset_url)
             container = asset.get('video_container')
@@ -81,19 +81,17 @@
                         if not urlh:
                             continue
                         asset_url = urlh.geturl()
-                    asset_url = re.sub(r'/([^/]+)\.ism/[^/]*\.m3u8', 
r'/\1.ism/\1.m3u8', asset_url)
-                    formats.extend(self._extract_m3u8_formats(
-                        asset_url, video_id, 'mp4', 'm3u8_native',
-                        m3u8_id='hls', fatal=False))
-                    formats.extend(self._extract_f4m_formats(
-                        asset_url.replace('.m3u8', '.f4m'),
-                        video_id, f4m_id='hds', fatal=False))
-                    formats.extend(self._extract_mpd_formats(
-                        asset_url.replace('.m3u8', '.mpd'),
-                        video_id, mpd_id='dash', fatal=False))
-                    formats.extend(self._extract_ism_formats(
-                        re.sub(r'/[^/]+\.m3u8', '/Manifest', asset_url),
-                        video_id, ism_id='mss', fatal=False))
+                    for i in range(3, 0, -1):
+                        asset_url = asset_url = asset_url.replace('_sd1/', 
'_sd%d/' % i)
+                        m3u8_formats = self._extract_m3u8_formats(
+                            asset_url, video_id, 'mp4', 'm3u8_native',
+                            m3u8_id='hls', fatal=False)
+                        formats.extend(m3u8_formats)
+                        formats.extend(self._extract_mpd_formats(
+                            asset_url.replace('.m3u8', '.mpd'),
+                            video_id, mpd_id='dash', fatal=False))
+                        if m3u8_formats:
+                            break
                 else:
                     formats.extend(self._extract_m3u8_formats(
                         asset_url, video_id, 'mp4', 'm3u8_native',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/theplatform.py 
new/youtube-dl/youtube_dl/extractor/theplatform.py
--- old/youtube-dl/youtube_dl/extractor/theplatform.py  2018-11-06 
19:36:18.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/theplatform.py  2018-11-21 
23:55:12.000000000 +0100
@@ -343,7 +343,7 @@
     def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, 
custom_fields=None, asset_types_query={}, account_id=None):
         real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, 
feed_id, filter_query)
         entry = self._download_json(real_url, video_id)['entries'][0]
-        main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % 
(provider_id, account_id, entry['guid']) if account_id else None
+        main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % 
(provider_id, account_id, entry['guid']) if account_id else 
entry.get('plmedia$publicUrl')
 
         formats = []
         subtitles = {}
@@ -356,7 +356,8 @@
             if first_video_id is None:
                 first_video_id = cur_video_id
                 duration = float_or_none(item.get('plfile$duration'))
-            for asset_type in item['plfile$assetTypes']:
+            file_asset_types = item.get('plfile$assetTypes') or 
compat_parse_qs(compat_urllib_parse_urlparse(smil_url).query)['assetTypes']
+            for asset_type in file_asset_types:
                 if asset_type in asset_types:
                     continue
                 asset_types.append(asset_type)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/tnaflix.py 
new/youtube-dl/youtube_dl/extractor/tnaflix.py
--- old/youtube-dl/youtube_dl/extractor/tnaflix.py      2018-11-06 
19:36:18.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/tnaflix.py      2018-11-21 
23:55:02.000000000 +0100
@@ -18,8 +18,9 @@
 class TNAFlixNetworkBaseIE(InfoExtractor):
     # May be overridden in descendants if necessary
     _CONFIG_REGEX = [
-        r'flashvars\.config\s*=\s*escape\("([^"]+)"',
-        r'<input[^>]+name="config\d?" value="([^"]+)"',
+        r'flashvars\.config\s*=\s*escape\("(?P<url>[^"]+)"',
+        r'<input[^>]+name="config\d?" value="(?P<url>[^"]+)"',
+        r'config\s*=\s*(["\'])(?P<url>(?:https?:)?//(?:(?!\1).)+)\1',
     ]
     _HOST = 'tna'
     _VKEY_SUFFIX = ''
@@ -85,7 +86,8 @@
         webpage = self._download_webpage(url, display_id)
 
         cfg_url = self._proto_relative_url(self._html_search_regex(
-            self._CONFIG_REGEX, webpage, 'flashvars.config', default=None), 
'http:')
+            self._CONFIG_REGEX, webpage, 'flashvars.config', default=None,
+            group='url'), 'http:')
 
         if not cfg_url:
             inputs = self._hidden_inputs(webpage)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/vk.py 
new/youtube-dl/youtube_dl/extractor/vk.py
--- old/youtube-dl/youtube_dl/extractor/vk.py   2018-11-06 19:36:18.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/vk.py   2018-11-21 23:55:03.000000000 
+0100
@@ -293,8 +293,12 @@
             # This video is no longer available, because its author has been 
blocked.
             'url': 'https://vk.com/video-10639516_456240611',
             'only_matching': True,
-        }
-    ]
+        },
+        {
+            # The video is not available in your region.
+            'url': 'https://vk.com/video-51812607_171445436',
+            'only_matching': True,
+        }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -354,6 +358,9 @@
 
             r'<!>This video is no longer available, because it has been 
deleted.':
             'Video %s is no longer available, because it has been deleted.',
+
+            r'<!>The video .+? is not available in your region.':
+            'Video %s is not available in your region.',
         }
 
         for error_re, error_msg in ERRORS.items():
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/wwe.py 
new/youtube-dl/youtube_dl/extractor/wwe.py
--- old/youtube-dl/youtube_dl/extractor/wwe.py  1970-01-01 01:00:00.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/wwe.py  2018-11-21 23:55:03.000000000 
+0100
@@ -0,0 +1,140 @@
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    try_get,
+    unescapeHTML,
+    url_or_none,
+    urljoin,
+)
+
+
+class WWEBaseIE(InfoExtractor):
+    _SUBTITLE_LANGS = {
+        'English': 'en',
+        'Deutsch': 'de',
+    }
+
+    def _extract_entry(self, data, url, video_id=None):
+        video_id = compat_str(video_id or data['nid'])
+        title = data['title']
+
+        formats = self._extract_m3u8_formats(
+            data['file'], video_id, 'mp4', entry_protocol='m3u8_native',
+            m3u8_id='hls')
+
+        description = data.get('description')
+        thumbnail = urljoin(url, data.get('image'))
+        series = data.get('show_name')
+        episode = data.get('episode_name')
+
+        subtitles = {}
+        tracks = data.get('tracks')
+        if isinstance(tracks, list):
+            for track in tracks:
+                if not isinstance(track, dict):
+                    continue
+                if track.get('kind') != 'captions':
+                    continue
+                track_file = url_or_none(track.get('file'))
+                if not track_file:
+                    continue
+                label = track.get('label')
+                lang = self._SUBTITLE_LANGS.get(label, label) or 'en'
+                subtitles.setdefault(lang, []).append({
+                    'url': track_file,
+                })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'thumbnail': thumbnail,
+            'series': series,
+            'episode': episode,
+            'formats': formats,
+            'subtitles': subtitles,
+        }
+
+
+class WWEIE(WWEBaseIE):
+    _VALID_URL = 
r'https?://(?:[^/]+\.)?wwe\.com/(?:[^/]+/)*videos/(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 
'https://www.wwe.com/videos/daniel-bryan-vs-andrade-cien-almas-smackdown-live-sept-4-2018',
+        'md5': '92811c6a14bfc206f7a6a9c5d9140184',
+        'info_dict': {
+            'id': '40048199',
+            'ext': 'mp4',
+            'title': 'Daniel Bryan vs. Andrade "Cien" Almas: SmackDown LIVE, 
Sept. 4, 2018',
+            'description': 'md5:2d7424dbc6755c61a0e649d2a8677f67',
+            'thumbnail': r're:^https?://.*\.jpg$',
+        }
+    }, {
+        'url': 
'https://de.wwe.com/videos/gran-metalik-vs-tony-nese-wwe-205-live-sept-4-2018',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        landing = self._parse_json(
+            self._html_search_regex(
+                r'(?s)Drupal\.settings\s*,\s*({.+?})\s*\)\s*;',
+                webpage, 'drupal settings'),
+            display_id)['WWEVideoLanding']
+
+        data = landing['initialVideo']['playlist'][0]
+        video_id = landing.get('initialVideoId')
+
+        info = self._extract_entry(data, url, video_id)
+        info['display_id'] = display_id
+        return info
+
+
+class WWEPlaylistIE(WWEBaseIE):
+    _VALID_URL = r'https?://(?:[^/]+\.)?wwe\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'
+    _TESTS = [{
+        'url': 'https://www.wwe.com/shows/raw/2018-11-12',
+        'info_dict': {
+            'id': '2018-11-12',
+        },
+        'playlist_mincount': 11,
+    }, {
+        'url': 'http://www.wwe.com/article/walk-the-prank-wwe-edition',
+        'only_matching': True,
+    }, {
+        'url': 
'https://www.wwe.com/shows/wwenxt/article/matt-riddle-interview',
+        'only_matching': True,
+    }]
+
+    @classmethod
+    def suitable(cls, url):
+        return False if WWEIE.suitable(url) else super(WWEPlaylistIE, 
cls).suitable(url)
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        entries = []
+        for mobj in re.finditer(
+                r'data-video\s*=\s*(["\'])(?P<data>{.+?})\1', webpage):
+            video = self._parse_json(
+                mobj.group('data'), display_id, transform_source=unescapeHTML,
+                fatal=False)
+            if not video:
+                continue
+            data = try_get(video, lambda x: x['playlist'][0], dict)
+            if not data:
+                continue
+            try:
+                entry = self._extract_entry(data, url)
+            except Exception:
+                continue
+            entry['extractor_key'] = WWEIE.ie_key()
+            entries.append(entry)
+
+        return self.playlist_result(entries, display_id)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/zype.py 
new/youtube-dl/youtube_dl/extractor/zype.py
--- old/youtube-dl/youtube_dl/extractor/zype.py 1970-01-01 01:00:00.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/zype.py 2018-11-21 23:55:03.000000000 
+0100
@@ -0,0 +1,57 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+
+
+class ZypeIE(InfoExtractor):
+    _VALID_URL = 
r'https?://player\.zype\.com/embed/(?P<id>[\da-fA-F]+)\.js\?.*?api_key=[^&]+'
+    _TEST = {
+        'url': 
'https://player.zype.com/embed/5b400b834b32992a310622b9.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ&autoplay=false&controls=true&da=false',
+        'md5': 'eaee31d474c76a955bdaba02a505c595',
+        'info_dict': {
+            'id': '5b400b834b32992a310622b9',
+            'ext': 'mp4',
+            'title': 'Smoky Barbecue Favorites',
+            'thumbnail': r're:^https?://.*\.jpe?g',
+        },
+    }
+
+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url')
+            for mobj in re.finditer(
+                
r'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//player\.zype\.com/embed/[\da-fA-F]+\.js\?.*?api_key=.+?)\1',
+                webpage)]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._search_regex(
+            r'video_title\s*[:=]\s*(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
+            'title', group='value')
+
+        m3u8_url = self._search_regex(
+            r'(["\'])(?P<url>(?:(?!\1).)+\.m3u8(?:(?!\1).)*)\1', webpage,
+            'm3u8 url', group='url')
+
+        formats = self._extract_m3u8_formats(
+            m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
+            m3u8_id='hls')
+        self._sort_formats(formats)
+
+        thumbnail = self._search_regex(
+            r'poster\s*[:=]\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage, 
'thumbnail',
+            default=False, group='url')
+
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/version.py 
new/youtube-dl/youtube_dl/version.py
--- old/youtube-dl/youtube_dl/version.py        2018-11-06 19:38:22.000000000 
+0100
+++ new/youtube-dl/youtube_dl/version.py        2018-11-22 18:16:43.000000000 
+0100
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2018.11.07'
+__version__ = '2018.11.23'


Reply via email to