Hello community,

here is the log from the commit of package youtube-dl for openSUSE:Factory 
checked in at 2020-11-19 11:59:51
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/youtube-dl (Old)
 and      /work/SRC/openSUSE:Factory/.youtube-dl.new.5913 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "youtube-dl"

Thu Nov 19 11:59:51 2020 rev:142 rq:849349 version:2020.11.19

Changes:
--------
--- /work/SRC/openSUSE:Factory/youtube-dl/python-youtube-dl.changes     2020-11-17 21:26:44.901464750 +0100
+++ /work/SRC/openSUSE:Factory/.youtube-dl.new.5913/python-youtube-dl.changes   2020-11-23 10:54:52.894865794 +0100
@@ -1,0 +2,13 @@
+Wed Nov 18 22:44:58 UTC 2020 - Jan Engelhardt <jeng...@inai.de>
+
+- Update to release 2020.11.19
+  * arte: Extract m3u8 formats
+  * mgtv: fix format extraction
+
+-------------------------------------------------------------------
+Tue Nov 17 23:36:26 UTC 2020 - Jan Engelhardt <jeng...@inai.de>
+
+- Update to release 2020.11.18
+  * francetv: Add fallback video URL extraction
+
+-------------------------------------------------------------------
youtube-dl.changes: same change

Old:
----
  youtube-dl-2020.11.17.tar.gz
  youtube-dl-2020.11.17.tar.gz.sig

New:
----
  youtube-dl-2020.11.19.tar.gz
  youtube-dl-2020.11.19.tar.gz.sig

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.DXRebu/_old  2020-11-23 10:54:53.946866846 +0100
+++ /var/tmp/diff_new_pack.DXRebu/_new  2020-11-23 10:54:53.946866846 +0100
@@ -19,7 +19,7 @@
 %define modname youtube-dl
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-youtube-dl
-Version:        2020.11.17
+Version:        2020.11.19
 Release:        0
 Summary:        A Python module for downloading from video sites for offline watching
 License:        SUSE-Public-Domain AND CC-BY-SA-3.0

++++++ youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.DXRebu/_old  2020-11-23 10:54:53.974866873 +0100
+++ /var/tmp/diff_new_pack.DXRebu/_new  2020-11-23 10:54:53.978866878 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           youtube-dl
-Version:        2020.11.17
+Version:        2020.11.19
 Release:        0
 Summary:        A tool for downloading from video sites for offline watching
 License:        SUSE-Public-Domain AND CC-BY-SA-3.0

++++++ youtube-dl-2020.11.17.tar.gz -> youtube-dl-2020.11.19.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/ChangeLog new/youtube-dl/ChangeLog
--- old/youtube-dl/ChangeLog    2020-11-16 21:59:50.000000000 +0100
+++ new/youtube-dl/ChangeLog    2020-11-18 23:22:24.000000000 +0100
@@ -1,3 +1,39 @@
+version 2020.11.19
+
+Core
+* [extractor/common] Output error for invalid URLs in _is_valid_url (#21400,
+  #24151, #25617, #25618, #25586, #26068, #27072)
+
+Extractors
+* [youporn] Fix upload date extraction
+* [youporn] Make comment count optional (#26986)
+* [arte] Rework extractors
+    * Reimplement embed and playlist extractors to delegate to the single
+      entrypoint artetv extractor
+    * Improve embeds detection (#27057)
++ [arte] Extract m3u8 formats (#27061)
+* [mgtv] Fix format extraction (#26415)
++ [lbry] Add support for odysee.com (#26806)
+* [francetv] Improve info extraction
++ [francetv] Add fallback video URL extraction (#27047)
+
+
+version 2020.11.18
+
+Extractors
+* [spiegel] Fix extraction (#24206, #24767)
+* [youtube] Improve extraction
+    + Add support for --no-playlist (#27009)
+    * Improve playlist and mix extraction (#26390, #26509, #26534, #27011)
+    + Extract playlist uploader data
+* [youtube:tab] Fix view count extraction (#27051)
+* [malltv] Fix extraction (#27035)
++ [bandcamp] Extract playlist description (#22684)
+* [urplay] Fix extraction (#26828)
+* [youtube:tab] Fix playlist title extraction (#27015)
+* [youtube] Fix chapters extraction (#26005)
+
+
 version 2020.11.17
 
 Core
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/docs/supportedsites.md new/youtube-dl/docs/supportedsites.md
--- old/youtube-dl/docs/supportedsites.md       2020-11-16 21:59:54.000000000 +0100
+++ new/youtube-dl/docs/supportedsites.md       2020-11-18 23:22:27.000000000 +0100
@@ -58,9 +58,9 @@
  - **ARD:mediathek**
  - **ARDBetaMediathek**
  - **Arkena**
- - **arte.tv:+7**
- - **arte.tv:embed**
- - **arte.tv:playlist**
+ - **ArteTV**
+ - **ArteTVEmbed**
+ - **ArteTVPlaylist**
  - **AsianCrush**
  - **AsianCrushPlaylist**
  - **AtresPlayer**
@@ -824,8 +824,6 @@
  - **SpankBangPlaylist**
  - **Spankwire**
  - **Spiegel**
- - **Spiegel:Article**: Articles on spiegel.de
- - **Spiegeltv**
  - **sport.francetvinfo.fr**
  - **Sport5**
  - **SportBox**
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/test/test_all_urls.py new/youtube-dl/test/test_all_urls.py
--- old/youtube-dl/test/test_all_urls.py        2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/test/test_all_urls.py        2020-11-18 23:21:54.000000000 +0100
@@ -31,16 +31,17 @@
 
     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
+        assertTab = lambda url: self.assertMatch(url, ['youtube:tab'])
         assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
         assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q')  # 585
         assertPlaylist('PL63F0C78739B09958')
-        # assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
+        assertTab('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
         assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        # assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
+        assertTab('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
+        assertTab('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')  # 668
         self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
         # Top tracks
-        # assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')
+        assertTab('https://www.youtube.com/playlist?list=MCUS.20142101')
 
     def test_youtube_matching(self):
         self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
Binary files old/youtube-dl/youtube-dl and new/youtube-dl/youtube-dl differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/arte.py new/youtube-dl/youtube_dl/extractor/arte.py
--- old/youtube-dl/youtube_dl/extractor/arte.py 2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/arte.py 2020-11-18 23:22:06.000000000 +0100
@@ -4,23 +4,57 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
+from ..compat import (
+    compat_str,
+    compat_urlparse,
+)
 from ..utils import (
     ExtractorError,
     int_or_none,
     qualities,
     try_get,
     unified_strdate,
+    url_or_none,
 )
 
-# There are different sources of video in arte.tv, the extraction process
-# is different for each one. The videos usually expire in 7 days, so we can't
-# add tests.
-
 
 class ArteTVBaseIE(InfoExtractor):
-    def _extract_from_json_url(self, json_url, video_id, lang, title=None):
-        info = self._download_json(json_url, video_id)
+    _ARTE_LANGUAGES = 'fr|de|en|es|it|pl'
+    _API_BASE = 'https://api.arte.tv/api/player/v1'
+
+
+class ArteTVIE(ArteTVBaseIE):
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:
+                            (?:www\.)?arte\.tv/(?P<lang>%(langs)s)/videos|
+                            
api\.arte\.tv/api/player/v\d+/config/(?P<lang_2>%(langs)s)
+                        )
+                        /(?P<id>\d{6}-\d{3}-[AF])
+                    ''' % {'langs': ArteTVBaseIE._ARTE_LANGUAGES}
+    _TESTS = [{
+        'url': 
'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
+        'info_dict': {
+            'id': '088501-000-A',
+            'ext': 'mp4',
+            'title': 'Mexico: Stealing Petrol to Survive',
+            'upload_date': '20190628',
+        },
+    }, {
+        'url': 
'https://www.arte.tv/pl/videos/100103-000-A/usa-dyskryminacja-na-porodowce/',
+        'only_matching': True,
+    }, {
+        'url': 'https://api.arte.tv/api/player/v2/config/de/100605-013-A',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        lang = mobj.group('lang') or mobj.group('lang_2')
+
+        info = self._download_json(
+            '%s/config/%s/%s' % (self._API_BASE, lang, video_id), video_id)
         player_info = info['videoJsonPlayer']
 
         vsr = try_get(player_info, lambda x: x['VSR'], dict)
@@ -37,18 +71,11 @@
         if not upload_date_str:
             upload_date_str = (player_info.get('VRA') or 
player_info.get('VDA') or '').split(' ')[0]
 
-        title = (player_info.get('VTI') or title or player_info['VID']).strip()
+        title = (player_info.get('VTI') or player_info['VID']).strip()
         subtitle = player_info.get('VSU', '').strip()
         if subtitle:
             title += ' - %s' % subtitle
 
-        info_dict = {
-            'id': player_info['VID'],
-            'title': title,
-            'description': player_info.get('VDE'),
-            'upload_date': unified_strdate(upload_date_str),
-            'thumbnail': player_info.get('programImage') or 
player_info.get('VTU', {}).get('IUR'),
-        }
         qfunc = qualities(['MQ', 'HQ', 'EQ', 'SQ'])
 
         LANGS = {
@@ -65,6 +92,10 @@
         formats = []
         for format_id, format_dict in vsr.items():
             f = dict(format_dict)
+            format_url = url_or_none(f.get('url'))
+            streamer = f.get('streamer')
+            if not format_url and not streamer:
+                continue
             versionCode = f.get('versionCode')
             l = re.escape(langcode)
 
@@ -107,6 +138,16 @@
             else:
                 lang_pref = -1
 
+            media_type = f.get('mediaType')
+            if media_type == 'hls':
+                m3u8_formats = self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id=format_id, fatal=False)
+                for m3u8_format in m3u8_formats:
+                    m3u8_format['language_preference'] = lang_pref
+                formats.extend(m3u8_formats)
+                continue
+
             format = {
                 'format_id': format_id,
                 'preference': -10 if f.get('videoFormat') == 'M3U8' else None,
@@ -118,7 +159,7 @@
                 'quality': qfunc(f.get('quality')),
             }
 
-            if f.get('mediaType') == 'rtmp':
+            if media_type == 'rtmp':
                 format['url'] = f['streamer']
                 format['play_path'] = 'mp4:' + f['url']
                 format['ext'] = 'flv'
@@ -127,56 +168,50 @@
 
             formats.append(format)
 
-        self._check_formats(formats, video_id)
         self._sort_formats(formats)
 
-        info_dict['formats'] = formats
-        return info_dict
-
+        return {
+            'id': player_info.get('VID') or video_id,
+            'title': title,
+            'description': player_info.get('VDE'),
+            'upload_date': unified_strdate(upload_date_str),
+            'thumbnail': player_info.get('programImage') or 
player_info.get('VTU', {}).get('IUR'),
+            'formats': formats,
+        }
 
-class ArteTVPlus7IE(ArteTVBaseIE):
-    IE_NAME = 'arte.tv:+7'
-    _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>fr|de|en|es|it|pl)/videos/(?P<id>\d{6}-\d{3}-[AF])'
 
+class ArteTVEmbedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+'
     _TESTS = [{
-        'url': 
'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
+        'url': 
'https://www.arte.tv/player/v5/index.php?json_url=https%3A%2F%2Fapi.arte.tv%2Fapi%2Fplayer%2Fv2%2Fconfig%2Fde%2F100605-013-A&lang=de&autoplay=true&mute=0100605-013-A',
         'info_dict': {
-            'id': '088501-000-A',
+            'id': '100605-013-A',
             'ext': 'mp4',
-            'title': 'Mexico: Stealing Petrol to Survive',
-            'upload_date': '20190628',
+            'title': 'United we Stream November Lockdown Edition #13',
+            'description': 'md5:be40b667f45189632b78c1425c7c2ce1',
+            'upload_date': '20201116',
         },
+    }, {
+        'url': 
'https://www.arte.tv/player/v3/index.php?json_url=https://api.arte.tv/api/player/v2/config/de/100605-013-A',
+        'only_matching': True,
     }]
 
-    def _real_extract(self, url):
-        lang, video_id = re.match(self._VALID_URL, url).groups()
-        return self._extract_from_json_url(
-            'https://api.arte.tv/api/player/v1/config/%s/%s' % (lang, 
video_id),
-            video_id, lang)
-
-
-class ArteTVEmbedIE(ArteTVPlus7IE):
-    IE_NAME = 'arte.tv:embed'
-    _VALID_URL = r'''(?x)
-        https://www\.arte\.tv
-        /player/v3/index\.php\?json_url=
-        (?P<json_url>
-            https?://api\.arte\.tv/api/player/v1/config/
-            (?P<lang>[^/]+)/(?P<id>\d{6}-\d{3}-[AF])
-        )
-    '''
-
-    _TESTS = []
+    @staticmethod
+    def _extract_urls(webpage):
+        return [url for _, url in re.findall(
+            
r'<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1',
+            webpage)]
 
     def _real_extract(self, url):
-        json_url, lang, video_id = re.match(self._VALID_URL, url).groups()
-        return self._extract_from_json_url(json_url, video_id, lang)
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        json_url = qs['json_url'][0]
+        video_id = ArteTVIE._match_id(json_url)
+        return self.url_result(
+            json_url, ie=ArteTVIE.ie_key(), video_id=video_id)
 
 
 class ArteTVPlaylistIE(ArteTVBaseIE):
-    IE_NAME = 'arte.tv:playlist'
-    _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>fr|de|en|es|it|pl)/videos/(?P<id>RC-\d{6})'
-
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>%s)/videos/(?P<id>RC-\d{6})' % ArteTVBaseIE._ARTE_LANGUAGES
     _TESTS = [{
         'url': 'https://www.arte.tv/en/videos/RC-016954/earn-a-living/',
         'info_dict': {
@@ -185,17 +220,35 @@
             'description': 'md5:d322c55011514b3a7241f7fb80d494c2',
         },
         'playlist_mincount': 6,
+    }, {
+        'url': 'https://www.arte.tv/pl/videos/RC-014123/arte-reportage/',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         lang, playlist_id = re.match(self._VALID_URL, url).groups()
         collection = self._download_json(
-            
'https://api.arte.tv/api/player/v1/collectionData/%s/%s?source=videos'
-            % (lang, playlist_id), playlist_id)
+            '%s/collectionData/%s/%s?source=videos'
+            % (self._API_BASE, lang, playlist_id), playlist_id)
+        entries = []
+        for video in collection['videos']:
+            if not isinstance(video, dict):
+                continue
+            video_url = url_or_none(video.get('url')) or 
url_or_none(video.get('jsonUrl'))
+            if not video_url:
+                continue
+            video_id = video.get('programId')
+            entries.append({
+                '_type': 'url_transparent',
+                'url': video_url,
+                'id': video_id,
+                'title': video.get('title'),
+                'alt_title': video.get('subtitle'),
+                'thumbnail': url_or_none(try_get(video, lambda x: 
x['mainImage']['url'], compat_str)),
+                'duration': int_or_none(video.get('durationSeconds')),
+                'view_count': int_or_none(video.get('views')),
+                'ie_key': ArteTVIE.ie_key(),
+            })
         title = collection.get('title')
         description = collection.get('shortDescription') or 
collection.get('teaserText')
-        entries = [
-            self._extract_from_json_url(
-                video['jsonUrl'], video.get('programId') or playlist_id, lang)
-            for video in collection['videos'] if video.get('jsonUrl')]
         return self.playlist_result(entries, playlist_id, title, description)
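
(A minimal standalone illustration of the reworked arte embed handling above, using the stdlib urllib.parse instead of youtube-dl's compat wrappers; the embed URL is the diff's own test URL, trimmed of the trailing player parameters.)

import re
from urllib.parse import parse_qs, urlparse

embed_url = ('https://www.arte.tv/player/v5/index.php?json_url='
             'https%3A%2F%2Fapi.arte.tv%2Fapi%2Fplayer%2Fv2%2Fconfig%2Fde%2F100605-013-A')
# the player config URL is carried verbatim in the json_url query parameter
json_url = parse_qs(urlparse(embed_url).query)['json_url'][0]
# same programme-id pattern as ArteTVIE._VALID_URL: \d{6}-\d{3}-[AF]
video_id = re.search(r'\d{6}-\d{3}-[AF]', json_url).group(0)
print(json_url)  # https://api.arte.tv/api/player/v2/config/de/100605-013-A
print(video_id)  # 100605-013-A
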
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/bandcamp.py new/youtube-dl/youtube_dl/extractor/bandcamp.py
--- old/youtube-dl/youtube_dl/extractor/bandcamp.py     2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/bandcamp.py     2020-11-18 23:21:55.000000000 +0100
@@ -270,6 +270,7 @@
             'title': '"Entropy" EP',
             'uploader_id': 'jstrecords',
             'id': 'entropy-ep',
+            'description': 'md5:0ff22959c943622972596062f2f366a5',
         },
         'playlist_mincount': 3,
     }, {
@@ -279,6 +280,7 @@
             'id': 'we-are-the-plague',
             'title': 'WE ARE THE PLAGUE',
             'uploader_id': 'insulters',
+            'description': 'md5:b3cf845ee41b2b1141dc7bde9237255f',
         },
         'playlist_count': 2,
     }]
@@ -305,11 +307,14 @@
             for t in track_info
             if t.get('duration')]
 
+        current = tralbum.get('current') or {}
+
         return {
             '_type': 'playlist',
             'uploader_id': uploader_id,
             'id': playlist_id,
-            'title': try_get(tralbum, lambda x: x['current']['title'], 
compat_str),
+            'title': current.get('title'),
+            'description': current.get('about'),
             'entries': entries,
         }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/common.py new/youtube-dl/youtube_dl/extractor/common.py
--- old/youtube-dl/youtube_dl/extractor/common.py       2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/common.py       2020-11-18 23:22:06.000000000 +0100
@@ -1456,9 +1456,10 @@
         try:
             self._request_webpage(url, video_id, 'Checking %s URL' % item, 
headers=headers)
             return True
-        except ExtractorError:
+        except ExtractorError as e:
             self.to_screen(
-                '%s: %s URL is invalid, skipping' % (video_id, item))
+                '%s: %s URL is invalid, skipping: %s'
+                % (video_id, item, error_to_compat_str(e.cause)))
             return False
 
     def http_scheme(self):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/extractors.py new/youtube-dl/youtube_dl/extractor/extractors.py
--- old/youtube-dl/youtube_dl/extractor/extractors.py   2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/extractors.py   2020-11-18 23:22:06.000000000 +0100
@@ -58,7 +58,7 @@
     ARDMediathekIE,
 )
 from .arte import (
-    ArteTVPlus7IE,
+    ArteTVIE,
     ArteTVEmbedIE,
     ArteTVPlaylistIE,
 )
@@ -1054,8 +1054,7 @@
     SpankBangPlaylistIE,
 )
 from .spankwire import SpankwireIE
-from .spiegel import SpiegelIE, SpiegelArticleIE
-from .spiegeltv import SpiegeltvIE
+from .spiegel import SpiegelIE
 from .spike import (
     BellatorIE,
     ParamountNetworkIE,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/francetv.py new/youtube-dl/youtube_dl/extractor/francetv.py
--- old/youtube-dl/youtube_dl/extractor/francetv.py     2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/francetv.py     2020-11-18 23:22:06.000000000 +0100
@@ -17,6 +17,7 @@
     parse_duration,
     try_get,
     url_or_none,
+    urljoin,
 )
 from .dailymotion import DailymotionIE
 
@@ -128,18 +129,38 @@
 
         is_live = None
 
-        formats = []
-        for video in info['videos']:
-            if video['statut'] != 'ONLINE':
+        videos = []
+
+        for video in (info.get('videos') or []):
+            if video.get('statut') != 'ONLINE':
                 continue
-            video_url = video['url']
+            if not video.get('url'):
+                continue
+            videos.append(video)
+
+        if not videos:
+            for device_type in ['desktop', 'mobile']:
+                fallback_info = self._download_json(
+                    
'https://player.webservices.francetelevisions.fr/v1/videos/%s' % video_id,
+                    video_id, 'Downloading fallback %s video JSON' % 
device_type, query={
+                        'device_type': device_type,
+                        'browser': 'chrome',
+                    }, fatal=False)
+
+                if fallback_info and fallback_info.get('video'):
+                    videos.append(fallback_info['video'])
+
+        formats = []
+        for video in videos:
+            video_url = video.get('url')
             if not video_url:
                 continue
             if is_live is None:
                 is_live = (try_get(
-                    video, lambda x: x['plages_ouverture'][0]['direct'],
-                    bool) is True) or '/live.francetv.fr/' in video_url
-            format_id = video['format']
+                    video, lambda x: x['plages_ouverture'][0]['direct'], bool) is True
+                    or video.get('is_live') is True
+                    or '/live.francetv.fr/' in video_url)
+            format_id = video.get('format')
             ext = determine_ext(video_url)
             if ext == 'f4m':
                 if georestricted:
@@ -154,6 +175,9 @@
                     sign(video_url, format_id), video_id, 'mp4',
                     entry_protocol='m3u8_native', m3u8_id=format_id,
                     fatal=False))
+            elif ext == 'mpd':
+                formats.extend(self._extract_mpd_formats(
+                    sign(video_url, format_id), video_id, mpd_id=format_id, 
fatal=False))
             elif video_url.startswith('rtmp'):
                 formats.append({
                     'url': video_url,
@@ -166,6 +190,7 @@
                         'url': video_url,
                         'format_id': format_id,
                     })
+
         self._sort_formats(formats)
 
         title = info['titre']
@@ -185,10 +210,10 @@
         return {
             'id': video_id,
             'title': self._live_title(title) if is_live else title,
-            'description': clean_html(info['synopsis']),
-            'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
-            'duration': int_or_none(info.get('real_duration')) or parse_duration(info['duree']),
-            'timestamp': int_or_none(info['diffusion']['timestamp']),
+            'description': clean_html(info.get('synopsis')),
+            'thumbnail': urljoin('http://pluzz.francetv.fr', info.get('image')),
+            'duration': int_or_none(info.get('real_duration')) or parse_duration(info.get('duree')),
+            'timestamp': int_or_none(try_get(info, lambda x: x['diffusion']['timestamp'])),
             'is_live': is_live,
             'formats': formats,
             'subtitles': subtitles,
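
(Sketch of the fallback added above: when the legacy pluzz info lists no usable videos, the public player webservice is queried once per device type. Standalone version with stdlib urllib instead of the extractor's _download_json helper; the video id below is a placeholder, not a real catalogue id.)

import json
from urllib.parse import urlencode
from urllib.request import urlopen

video_id = 'NI_123456'  # placeholder id
videos = []
for device_type in ('desktop', 'mobile'):
    query = urlencode({'device_type': device_type, 'browser': 'chrome'})
    url = ('https://player.webservices.francetelevisions.fr/v1/videos/%s?%s'
           % (video_id, query))
    try:
        fallback_info = json.load(urlopen(url))
    except Exception:
        continue  # the extractor passes fatal=False for the same reason
    if fallback_info.get('video'):
        videos.append(fallback_info['video'])
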
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/generic.py new/youtube-dl/youtube_dl/extractor/generic.py
--- old/youtube-dl/youtube_dl/extractor/generic.py      2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/generic.py      2020-11-18 23:22:06.000000000 +0100
@@ -91,6 +91,7 @@
 from .videa import VideaIE
 from .twentymin import TwentyMinutenIE
 from .ustream import UstreamIE
+from .arte import ArteTVEmbedIE
 from .videopress import VideoPressIE
 from .rutube import RutubeIE
 from .limelight import LimelightBaseIE
@@ -2760,11 +2761,9 @@
             return self.url_result(ustream_url, UstreamIE.ie_key())
 
         # Look for embedded arte.tv player
-        mobj = re.search(
-            r'<(?:script|iframe) 
[^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
-            webpage)
-        if mobj is not None:
-            return self.url_result(mobj.group('url'), 'ArteTVEmbed')
+        arte_urls = ArteTVEmbedIE._extract_urls(webpage)
+        if arte_urls:
+            return self.playlist_from_matches(arte_urls, video_id, video_title)
 
         # Look for embedded francetv player
         mobj = re.search(
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/lbry.py new/youtube-dl/youtube_dl/extractor/lbry.py
--- old/youtube-dl/youtube_dl/extractor/lbry.py 2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/lbry.py 2020-11-18 23:22:06.000000000 +0100
@@ -16,7 +16,7 @@
 
 class LBRYIE(InfoExtractor):
     IE_NAME = 'lbry.tv'
-    _VALID_URL = r'https?://(?:www\.)?lbry\.tv/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
+    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
     _TESTS = [{
         # Video
         'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
@@ -41,6 +41,9 @@
             'timestamp': 1591312601,
             'upload_date': '20200604',
         }
+    }, {
+        'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
+        'only_matching': True,
     }]
 
     def _call_api_proxy(self, method, display_id, params):
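
(The lbry.py change is just the widened _VALID_URL; a quick standalone check, with the regex and both URLs taken verbatim from the diff, that odysee.com links are now matched too.)

import re

_VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
for url in (
        'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
        'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e'):
    # both URLs are claimed by the extractor and yield the claim id
    print(re.match(_VALID_URL, url).group('id'))
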
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/malltv.py new/youtube-dl/youtube_dl/extractor/malltv.py
--- old/youtube-dl/youtube_dl/extractor/malltv.py       2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/malltv.py       2020-11-18 23:21:56.000000000 +0100
@@ -1,10 +1,16 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from ..utils import merge_dicts
+from ..utils import (
+    clean_html,
+    dict_get,
+    float_or_none,
+    int_or_none,
+    merge_dicts,
+    parse_duration,
+    try_get,
+)
 
 
 class MallTVIE(InfoExtractor):
@@ -17,7 +23,7 @@
             'display_id': 
'18-miliard-pro-neziskovky-opravdu-jsou-sportovci-nebo-clovek-v-tisni-pijavice',
             'ext': 'mp4',
             'title': '18 miliard pro neziskovky. Opravdu jsou sportovci nebo 
Člověk v tísni pijavice?',
-            'description': 'md5:25fc0ec42a72ba602b602c683fa29deb',
+            'description': 'md5:db7d5744a4bd4043d9d98324aa72ab35',
             'duration': 216,
             'timestamp': 1538870400,
             'upload_date': '20181007',
@@ -37,20 +43,46 @@
         webpage = self._download_webpage(
             url, display_id, headers=self.geo_verification_headers())
 
-        SOURCE_RE = 
r'(<source[^>]+\bsrc=(?:(["\'])(?:(?!\2).)+|[^\s]+)/(?P<id>[\da-z]+)/index)\b'
+        video = self._parse_json(self._search_regex(
+            r'videoObject\s*=\s*JSON\.parse\(JSON\.stringify\(({.+?})\)\);',
+            webpage, 'video object'), display_id)
+        video_source = video['VideoSource']
         video_id = self._search_regex(
-            SOURCE_RE, webpage, 'video id', group='id')
+            r'/([\da-z]+)/index\b', video_source, 'video id')
 
-        media = self._parse_html5_media_entries(
-            url, re.sub(SOURCE_RE, r'\1.m3u8', webpage), video_id,
-            m3u8_id='hls', m3u8_entry_protocol='m3u8_native')[0]
+        formats = self._extract_m3u8_formats(
+            video_source + '.m3u8', video_id, 'mp4', 'm3u8_native')
+        self._sort_formats(formats)
+
+        subtitles = {}
+        for s in (video.get('Subtitles') or {}):
+            s_url = s.get('Url')
+            if not s_url:
+                continue
+            subtitles.setdefault(s.get('Language') or 'cz', []).append({
+                'url': s_url,
+            })
+
+        entity_counts = video.get('EntityCounts') or {}
+
+        def get_count(k):
+            v = entity_counts.get(k + 's') or {}
+            return int_or_none(dict_get(v, ('Count', 'StrCount')))
 
         info = self._search_json_ld(webpage, video_id, default={})
 
-        return merge_dicts(media, info, {
+        return merge_dicts({
             'id': video_id,
             'display_id': display_id,
-            'title': self._og_search_title(webpage, default=None) or 
display_id,
-            'description': self._og_search_description(webpage, default=None),
-            'thumbnail': self._og_search_thumbnail(webpage, default=None),
-        })
+            'title': video.get('Title'),
+            'description': clean_html(video.get('Description')),
+            'thumbnail': video.get('ThumbnailUrl'),
+            'formats': formats,
+            'subtitles': subtitles,
+            'duration': int_or_none(video.get('DurationSeconds')) or 
parse_duration(video.get('Duration')),
+            'view_count': get_count('View'),
+            'like_count': get_count('Like'),
+            'dislike_count': get_count('Dislike'),
+            'average_rating': float_or_none(try_get(video, lambda x: 
x['EntityRating']['AvarageRate'])),
+            'comment_count': get_count('Comment'),
+        }, info)
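
(Sketch of the new mall.tv flow above: the page's videoObject JSON is parsed instead of scraping <source> tags. The two regexes are the ones from the diff; the page snippet and CDN path are made up for illustration.)

import json
import re

webpage = ('<script>var videoObject = JSON.parse(JSON.stringify('
           '{"VideoSource": "https://cdn.example.com/ab12cd/index", "Title": "Demo"}));</script>')
video = json.loads(re.search(
    r'videoObject\s*=\s*JSON\.parse\(JSON\.stringify\(({.+?})\)\);',
    webpage).group(1))
video_source = video['VideoSource']
video_id = re.search(r'/([\da-z]+)/index\b', video_source).group(1)
print(video_id)                  # ab12cd
print(video_source + '.m3u8')    # the URL handed to _extract_m3u8_formats
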
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/mgtv.py new/youtube-dl/youtube_dl/extractor/mgtv.py
--- old/youtube-dl/youtube_dl/extractor/mgtv.py 2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/mgtv.py 2020-11-18 23:22:06.000000000 +0100
@@ -17,9 +17,8 @@
 
 
 class MGTVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html'
+    _VALID_URL = r'https?://(?:w(?:ww)?\.)?mgtv\.com/(v|b)/(?:[^/]+/)*(?P<id>\d+)\.html'
     IE_DESC = '芒果TV'
-    _GEO_COUNTRIES = ['CN']
 
     _TESTS = [{
         'url': 'http://www.mgtv.com/v/1/290525/f/3116640.html',
@@ -34,14 +33,18 @@
     }, {
         'url': 'http://www.mgtv.com/b/301817/3826653.html',
         'only_matching': True,
+    }, {
+        'url': 'https://w.mgtv.com/b/301817/3826653.html',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        tk2 = base64.urlsafe_b64encode(b'did=%s|pno=1030|ver=0.3.0301|clit=%d' % (compat_str(uuid.uuid4()).encode(), time.time()))[::-1]
         try:
             api_data = self._download_json(
                 'https://pcweb.api.mgtv.com/player/video', video_id, query={
-                    'tk2': base64.urlsafe_b64encode(b'did=%s|pno=1030|ver=0.3.0301|clit=%d' % (compat_str(uuid.uuid4()).encode(), time.time()))[::-1],
+                    'tk2': tk2,
                     'video_id': video_id,
                 }, headers=self.geo_verification_headers())['data']
         except ExtractorError as e:
@@ -56,6 +59,7 @@
         stream_data = self._download_json(
             'https://pcweb.api.mgtv.com/player/getSource', video_id, query={
                 'pm2': api_data['atc']['pm2'],
+                'tk2': tk2,
                 'video_id': video_id,
             }, headers=self.geo_verification_headers())['data']
         stream_domain = stream_data['stream_domain'][0]
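
(The core of the mgtv fix above: the tk2 token is now computed once and sent with the getSource request as well as the video request. The token expression, reproduced standalone with compat_str replaced by str:)

import base64
import time
import uuid

# did/pno/ver/clit string, urlsafe-base64 encoded and then reversed
tk2 = base64.urlsafe_b64encode(
    b'did=%s|pno=1030|ver=0.3.0301|clit=%d'
    % (str(uuid.uuid4()).encode(), time.time()))[::-1]
# the same tk2 value is passed to both /player/video and /player/getSource
print(tk2)
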
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/spiegel.py new/youtube-dl/youtube_dl/extractor/spiegel.py
--- old/youtube-dl/youtube_dl/extractor/spiegel.py      2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/spiegel.py      2020-11-18 23:21:57.000000000 +0100
@@ -1,159 +1,54 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
-from .nexx import (
-    NexxIE,
-    NexxEmbedIE,
-)
-from .spiegeltv import SpiegeltvIE
-from ..compat import compat_urlparse
-from ..utils import (
-    parse_duration,
-    strip_or_none,
-    unified_timestamp,
-)
+from .jwplatform import JWPlatformIE
 
 
 class SpiegelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$'
+    _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
+    _VALID_URL = r'https?://(?:www\.)?(?:spiegel|manager-magazin)\.de(?:/[^/]+)+/[^/]*-(?P<id>[0-9]+|%s)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$' % _UUID_RE
     _TESTS = [{
         'url': 
'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
-        'md5': 'b57399839d055fccfeb9a0455c439868',
+        'md5': '50c7948883ec85a3e431a0a44b7ad1d6',
         'info_dict': {
-            'id': '563747',
+            'id': 'II0BUyxY',
+            'display_id': '1259285',
             'ext': 'mp4',
-            'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder 
aktiv',
+            'title': 'Vulkan Tungurahua in Ecuador ist wieder aktiv - DER 
SPIEGEL - Wissenschaft',
             'description': 'md5:8029d8310232196eb235d27575a8b9f4',
-            'duration': 49,
+            'duration': 48.0,
             'upload_date': '20130311',
-            'timestamp': 1362994320,
+            'timestamp': 1362997920,
         },
     }, {
         'url': 
'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html',
-        'md5': '5b6c2f4add9d62912ed5fc78a1faed80',
-        'info_dict': {
-            'id': '580988',
-            'ext': 'mp4',
-            'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die 
Fehlgriffe des Titelverteidigers',
-            'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
-            'duration': 983,
-            'upload_date': '20131115',
-            'timestamp': 1384546642,
-        },
+        'only_matching': True,
     }, {
-        'url': 
'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
-        'md5': '97b91083a672d72976faa8433430afb9',
-        'info_dict': {
-            'id': '601883',
-            'ext': 'mp4',
-            'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen 
Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier 
kommen seine Antworten auf die besten sechs Fragen.',
-            'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die 
Tageszeiten mit?"',
-            'upload_date': '20140904',
-            'timestamp': 1409834160,
-        }
+        'url': 
'https://www.spiegel.de/video/eifel-zoo-aufregung-um-ausgebrochene-raubtiere-video-99018031.html',
+        'only_matching': True,
     }, {
-        'url': 
'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html',
+        'url': 
'https://www.spiegel.de/panorama/urteile-im-goldmuenzenprozess-haftstrafen-fuer-clanmitglieder-a-aae8df48-43c1-4c61-867d-23f0a2d254b7',
         'only_matching': True,
     }, {
-        # nexx video
         'url': 
'http://www.spiegel.de/video/spiegel-tv-magazin-ueber-guellekrise-in-schleswig-holstein-video-99012776.html',
         'only_matching': True,
+    }, {
+        'url': 
'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        metadata_url = 'http://www.spiegel.de/video/metadata/video-%s.json' % 
video_id
-        handle = self._request_webpage(metadata_url, video_id)
-
-        # 302 to spiegel.tv, like 
http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
-        if SpiegeltvIE.suitable(handle.geturl()):
-            return self.url_result(handle.geturl(), 'Spiegeltv')
-
-        video_data = self._parse_json(self._webpage_read_content(
-            handle, metadata_url, video_id), video_id)
-        title = video_data['title']
-        nexx_id = video_data['nexxOmniaId']
-        domain_id = video_data.get('nexxOmniaDomain') or '748'
-
+        webpage = self._download_webpage(url, video_id)
+        media_id = self._html_search_regex(
+            r'(&#34;|["\'])mediaId\1\s*:\s*(&#34;|["\'])(?P<id>(?:(?!\2).)+)\2',
+            webpage, 'media id', group='id')
         return {
             '_type': 'url_transparent',
             'id': video_id,
-            'url': 'nexx:%s:%s' % (domain_id, nexx_id),
-            'title': title,
-            'description': strip_or_none(video_data.get('teaser')),
-            'duration': parse_duration(video_data.get('duration')),
-            'timestamp': unified_timestamp(video_data.get('datum')),
-            'ie_key': NexxIE.ie_key(),
+            'display_id': video_id,
+            'url': 'jwplatform:%s' % media_id,
+            'title': self._og_search_title(webpage, default=None),
+            'ie_key': JWPlatformIE.ie_key(),
         }
-
-
-class SpiegelArticleIE(InfoExtractor):
-    _VALID_URL = 
r'https?://(?:www\.)?spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
-    IE_NAME = 'Spiegel:Article'
-    IE_DESC = 'Articles on spiegel.de'
-    _TESTS = [{
-        'url': 
'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
-        'info_dict': {
-            'id': '1516455',
-            'ext': 'mp4',
-            'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
-            'description': 're:^Patrick Kämnitz gehört.{100,}',
-            'upload_date': '20140825',
-        },
-    }, {
-        'url': 
'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
-        'info_dict': {
-
-        },
-        'playlist_count': 6,
-    }, {
-        # Nexx iFrame embed
-        'url': 
'http://www.spiegel.de/sptv/spiegeltv/spiegel-tv-ueber-schnellste-katapult-achterbahn-der-welt-taron-a-1137884.html',
-        'info_dict': {
-            'id': '161464',
-            'ext': 'mp4',
-            'title': 'Nervenkitzel Achterbahn',
-            'alt_title': 'Karussellbauer in Deutschland',
-            'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc',
-            'release_year': 2005,
-            'creator': 'SPIEGEL TV',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'duration': 2761,
-            'timestamp': 1394021479,
-            'upload_date': '20140305',
-        },
-        'params': {
-            'format': 'bestvideo',
-            'skip_download': True,
-        },
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-
-        # Single video on top of the page
-        video_link = self._search_regex(
-            r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
-            'video page URL', default=None)
-        if video_link:
-            video_url = compat_urlparse.urljoin(
-                self.http_scheme() + '//spiegel.de/', video_link)
-            return self.url_result(video_url)
-
-        # Multiple embedded videos
-        embeds = re.findall(
-            r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
-            webpage)
-        entries = [
-            self.url_result(compat_urlparse.urljoin(
-                self.http_scheme() + '//spiegel.de/', embed_path))
-            for embed_path in embeds]
-        if embeds:
-            return self.playlist_result(entries)
-
-        return self.playlist_from_matches(
-            NexxEmbedIE._extract_urls(webpage), ie=NexxEmbedIE.ie_key())
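
(Sketch of the rewritten Spiegel flow above: the article page now only yields a JW Platform media id, which is delegated to JWPlatformIE. The regex is the one from the diff; the page fragment is hypothetical, reusing the id from the updated test.)

import re

webpage = '<script>..."mediaId":"II0BUyxY"...</script>'  # hypothetical fragment
media_id = re.search(
    r'(&#34;|["\'])mediaId\1\s*:\s*(&#34;|["\'])(?P<id>(?:(?!\2).)+)\2',
    webpage).group('id')
print('jwplatform:%s' % media_id)  # handed to JWPlatformIE via url_result
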
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/spiegeltv.py new/youtube-dl/youtube_dl/extractor/spiegeltv.py
--- old/youtube-dl/youtube_dl/extractor/spiegeltv.py    2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/spiegeltv.py    1970-01-01 01:00:00.000000000 +0100
@@ -1,17 +0,0 @@
-from __future__ import unicode_literals
-
-from .common import InfoExtractor
-from .nexx import NexxIE
-
-
-class SpiegeltvIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/videos/(?P<id>\d+)'
-    _TEST = {
-        'url': 'http://www.spiegel.tv/videos/161681-flug-mh370/',
-        'only_matching': True,
-    }
-
-    def _real_extract(self, url):
-        return self.url_result(
-            'https://api.nexx.cloud/v3/748/videos/byid/%s'
-            % self._match_id(url), ie=NexxIE.ie_key())
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/urplay.py new/youtube-dl/youtube_dl/extractor/urplay.py
--- old/youtube-dl/youtube_dl/extractor/urplay.py       2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/urplay.py       2020-11-18 23:21:58.000000000 +0100
@@ -2,7 +2,11 @@
 from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import unified_timestamp
+from ..utils import (
+    dict_get,
+    int_or_none,
+    unified_timestamp,
+)
 
 
 class URPlayIE(InfoExtractor):
@@ -15,8 +19,8 @@
             'ext': 'mp4',
             'title': 'UR Samtiden - Livet, universum och rymdens märkliga 
musik : Om vetenskap, kritiskt tänkande och motstånd',
             'description': 'md5:5344508a52aa78c1ced6c1b8b9e44e9a',
-            'timestamp': 1513512768,
-            'upload_date': '20171217',
+            'timestamp': 1513292400,
+            'upload_date': '20171214',
         },
     }, {
         'url': 'https://urskola.se/Produkter/190031-Tripp-Trapp-Trad-Sovkudde',
@@ -25,7 +29,7 @@
             'ext': 'mp4',
             'title': 'Tripp, Trapp, Träd : Sovkudde',
             'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1',
-            'timestamp': 1440093600,
+            'timestamp': 1440086400,
             'upload_date': '20150820',
         },
     }, {
@@ -35,37 +39,58 @@
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
+        url = url.replace('skola.se/Produkter', 'play.se/program')
         webpage = self._download_webpage(url, video_id)
-        urplayer_data = self._parse_json(self._search_regex(
-            r'urPlayer\.init\(({.+?})\);', webpage, 'urplayer data'), video_id)
-        host = self._download_json('http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect']
+        urplayer_data = self._parse_json(self._html_search_regex(
+            r'data-react-class="components/Player/Player"[^>]+data-react-props="({.+?})"',
+            webpage, 'urplayer data'), video_id)['currentProduct']
+        episode = urplayer_data['title']
+        raw_streaming_info = urplayer_data['streamingInfo']['raw']
+        host = self._download_json(
+            'http://streaming-loadbalancer.ur.se/loadbalancer.json',
+            video_id)['redirect']
 
         formats = []
-        for quality_attr, quality, preference in (('', 'sd', 0), ('_hd', 'hd', 
1)):
-            file_http = urplayer_data.get('file_http' + quality_attr) or 
urplayer_data.get('file_http_sub' + quality_attr)
+        for k, v in raw_streaming_info.items():
+            if not (k in ('sd', 'hd') and isinstance(v, dict)):
+                continue
+            file_http = v.get('location')
             if file_http:
                 formats.extend(self._extract_wowza_formats(
-                    'http://%s/%splaylist.m3u8' % (host, file_http), video_id, 
skip_protocols=['rtmp', 'rtsp']))
+                    'http://%s/%splaylist.m3u8' % (host, file_http),
+                    video_id, skip_protocols=['f4m', 'rtmp', 'rtsp']))
         self._sort_formats(formats)
 
-        subtitles = {}
-        for subtitle in urplayer_data.get('subtitles', []):
-            subtitle_url = subtitle.get('file')
-            kind = subtitle.get('kind')
-            if not subtitle_url or (kind and kind != 'captions'):
-                continue
-            subtitles.setdefault(subtitle.get('label', 'Svenska'), []).append({
-                'url': subtitle_url,
-            })
+        image = urplayer_data.get('image') or {}
+        thumbnails = []
+        for k, v in image.items():
+            t = {
+                'id': k,
+                'url': v,
+            }
+            wh = k.split('x')
+            if len(wh) == 2:
+                t.update({
+                    'width': int_or_none(wh[0]),
+                    'height': int_or_none(wh[1]),
+                })
+            thumbnails.append(t)
+
+        series = urplayer_data.get('series') or {}
+        series_title = dict_get(series, ('seriesTitle', 'title')) or 
dict_get(urplayer_data, ('seriesTitle', 'mainTitle'))
 
         return {
             'id': video_id,
-            'title': urplayer_data['title'],
-            'description': self._og_search_description(webpage),
-            'thumbnail': urplayer_data.get('image'),
-            'timestamp': 
unified_timestamp(self._html_search_meta(('uploadDate', 'schema:uploadDate'), 
webpage, 'timestamp')),
-            'series': urplayer_data.get('series_title'),
-            'subtitles': subtitles,
+            'title': '%s : %s' % (series_title, episode) if series_title else 
episode,
+            'description': urplayer_data.get('description'),
+            'thumbnails': thumbnails,
+            'timestamp': unified_timestamp(urplayer_data.get('publishedAt')),
+            'series': series_title,
             'formats': formats,
+            'duration': int_or_none(urplayer_data.get('duration')),
+            'categories': urplayer_data.get('categories'),
+            'tags': urplayer_data.get('keywords'),
+            'season': series.get('label'),
+            'episode': episode,
+            'episode_number': int_or_none(urplayer_data.get('episodeNumber')),
         }
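
(Illustration of the thumbnail handling in the rewritten urplay extractor above: the player data's image dict is keyed by "<width>x<height>". Sample values here are made up; the real code goes through int_or_none rather than int.)

image = {  # sample values; the real dict comes from urplayer_data.get('image')
    '640x360': 'https://assets.example/640.jpg',
    '1280x720': 'https://assets.example/1280.jpg',
}
thumbnails = []
for k, v in image.items():
    t = {'id': k, 'url': v}
    wh = k.split('x')
    if len(wh) == 2:
        # width/height are taken straight from the key
        t.update({'width': int(wh[0]), 'height': int(wh[1])})
    thumbnails.append(t)
print(thumbnails)
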
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/youporn.py new/youtube-dl/youtube_dl/extractor/youporn.py
--- old/youtube-dl/youtube_dl/extractor/youporn.py      2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/youporn.py      2020-11-18 23:22:06.000000000 +0100
@@ -29,7 +29,6 @@
             'upload_date': '20101217',
             'average_rating': int,
             'view_count': int,
-            'comment_count': int,
             'categories': list,
             'tags': list,
             'age_limit': 18,
@@ -48,7 +47,6 @@
             'upload_date': '20110418',
             'average_rating': int,
             'view_count': int,
-            'comment_count': int,
             'categories': list,
             'tags': list,
             'age_limit': 18,
@@ -156,7 +154,8 @@
             r'(?s)<div[^>]+class=["\']submitByLink["\'][^>]*>(.+?)</div>',
             webpage, 'uploader', fatal=False)
         upload_date = unified_strdate(self._html_search_regex(
-            [r'Date\s+[Aa]dded:\s*<span>([^<]+)',
+            [r'UPLOADED:\s*<span>([^<]+)',
+             r'Date\s+[Aa]dded:\s*<span>([^<]+)',
              
r'(?s)<div[^>]+class=["\']videoInfo(?:Date|Time)["\'][^>]*>(.+?)</div>'],
             webpage, 'upload date', fatal=False))
 
@@ -171,7 +170,7 @@
             webpage, 'view count', fatal=False, group='count'))
         comment_count = str_to_int(self._search_regex(
             r'>All [Cc]omments? \(([\d,.]+)\)',
-            webpage, 'comment count', fatal=False))
+            webpage, 'comment count', default=None))
 
         def extract_tag_box(regex, title):
             tag_box = self._search_regex(regex, webpage, title, default=None)
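
(The youporn fix above boils down to also accepting the newer "UPLOADED:" label for the upload date and making the comment count non-fatal. A quick sketch against a hypothetical markup snippet, using the two patterns from the diff:)

import re

webpage = '<div class="video-uploaded">UPLOADED: <span>December 17, 2010</span></div>'
for pattern in (
        r'UPLOADED:\s*<span>([^<]+)',
        r'Date\s+[Aa]dded:\s*<span>([^<]+)'):
    m = re.search(pattern, webpage)
    if m:
        print(m.group(1))  # 'December 17, 2010', later normalised by unified_strdate
        break
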
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/youtube.py new/youtube-dl/youtube_dl/extractor/youtube.py
--- old/youtube-dl/youtube_dl/extractor/youtube.py      2020-11-16 21:59:38.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/youtube.py      2020-11-18 23:21:58.000000000 +0100
@@ -45,6 +45,7 @@
     unescapeHTML,
     unified_strdate,
     unsmuggle_url,
+    update_url_query,
     uppercase_escape,
     url_or_none,
     urlencode_postdata,
@@ -65,7 +66,7 @@
     # If True it will raise an error if no login info is provided
     _LOGIN_REQUIRED = False
 
-    _PLAYLIST_ID_RE = r'(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}'
+    _PLAYLIST_ID_RE = r'(?:(?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)[0-9A-Za-z-_]{10,}|RDMM)'
 
     _YOUTUBE_CLIENT_HEADERS = {
         'x-youtube-client-name': '1',
@@ -975,10 +976,6 @@
             'only_matching': True,
         },
         {
-            'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
-            'only_matching': True,
-        },
-        {
             'url': 'https://invidio.us/watch?v=BaW_jenozKc',
             'only_matching': True,
         },
@@ -1465,21 +1462,11 @@
     def _extract_chapters_from_json(self, webpage, video_id, duration):
         if not webpage:
             return
-        player = self._parse_json(
-            self._search_regex(
-                r'RELATED_PLAYER_ARGS["\']\s*:\s*({.+})\s*,?\s*\n', webpage,
-                'player args', default='{}'),
-            video_id, fatal=False)
-        if not player or not isinstance(player, dict):
-            return
-        watch_next_response = player.get('watch_next_response')
-        if not isinstance(watch_next_response, compat_str):
-            return
-        response = self._parse_json(watch_next_response, video_id, fatal=False)
-        if not response or not isinstance(response, dict):
+        data = self._extract_yt_initial_data(video_id, webpage)
+        if not data or not isinstance(data, dict):
             return
         chapters_list = try_get(
-            response,
+            data,
             lambda x: x['playerOverlays']
                        ['playerOverlayRenderer']
                        ['decoratedPlayerBarRenderer']
@@ -2361,7 +2348,7 @@
 
 class YoutubeTabIE(YoutubeBaseInfoExtractor):
     IE_DESC = 'YouTube.com tab'
-    _VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|playlist\?.*?\blist=)(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:\w+\.)?(?:youtube(?:kids)?\.com|invidio\.us)/(?:(?:channel|c|user)/|(?:playlist|watch)\?.*?\blist=)(?P<id>[^/?#&]+)'
     IE_NAME = 'youtube:tab'
 
     _TESTS = [{
@@ -2371,6 +2358,7 @@
         'info_dict': {
             'id': 'UCqj7Cz7revf5maW9g5pgNcg',
             'title': 'Игорь Клейнер - Playlists',
+            'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
         },
     }, {
         # playlists, multipage, different order
@@ -2379,14 +2367,16 @@
         'info_dict': {
             'id': 'UCqj7Cz7revf5maW9g5pgNcg',
             'title': 'Игорь Клейнер - Playlists',
+            'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
         },
     }, {
         # playlists, singlepage
         'url': 'https://www.youtube.com/user/ThirstForScience/playlists',
         'playlist_mincount': 4,
         'info_dict': {
-            'id': 'ThirstForScience',
-            'title': 'ThirstForScience',
+            'id': 'UCAEtajcuhQ6an9WEzY9LEMQ',
+            'title': 'ThirstForScience - Playlists',
+            'description': 'md5:609399d937ea957b0f53cbffb747a14c',
         }
     }, {
         'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
@@ -2417,6 +2407,7 @@
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Home',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 2,
     }, {
@@ -2425,6 +2416,7 @@
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Videos',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 975,
     }, {
@@ -2433,6 +2425,7 @@
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Videos',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 199,
     }, {
@@ -2441,6 +2434,7 @@
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Playlists',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 17,
     }, {
@@ -2449,6 +2443,7 @@
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Community',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 18,
     }, {
@@ -2457,6 +2452,7 @@
         'info_dict': {
             'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
             'title': 'lex will - Channels',
+            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
         },
         'playlist_mincount': 138,
     }, {
@@ -2475,7 +2471,7 @@
             'title': '29C3: Not my department',
             'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
             'uploader': 'Christiaan008',
-            'uploader_id': 'ChRiStIaAn008',
+            'uploader_id': 'UCEPzS1rYsrkqzSLNp76nrcg',
         },
         'playlist_count': 96,
     }, {
@@ -2485,7 +2481,7 @@
             'title': 'Uploads from Cauchemar',
             'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
             'uploader': 'Cauchemar',
-            'uploader_id': 'Cauchemar89',
+            'uploader_id': 'UCBABnxM4Ar9ten8Mdjj1j0Q',
         },
         'playlist_mincount': 1123,
     }, {
@@ -2499,7 +2495,7 @@
             'title': 'Uploads from Interstellar Movie',
             'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
             'uploader': 'Interstellar Movie',
-            'uploader_id': 'InterstellarMovie1',
+            'uploader_id': 'UCXw-G3eDE9trcvY2sBMM_aA',
         },
         'playlist_mincount': 21,
     }, {
@@ -2508,13 +2504,43 @@
         'info_dict': {
             'title': 'Data Analysis with Dr Mike Pound',
             'id': 'PLzH6n4zXuckpfMu_4Ff8E7Z1behQks5ba',
-            'uploader_id': 'Computerphile',
+            'uploader_id': 'UC9-y-6csu5WGm29I7JiwpnA',
             'uploader': 'Computerphile',
         },
         'playlist_mincount': 11,
     }, {
         'url': 
'https://invidio.us/playlist?list=PLDIoUOhQQPlXr63I_vwF9GD8sAKh77dWU',
         'only_matching': True,
+    }, {
+        # Playlist URL that does not actually serve a playlist
+        'url': 
'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
+        'info_dict': {
+            'id': 'FqZTN594JQw',
+            'ext': 'webm',
+            'title': "Smiley's People 01 detective, Adventure Series, Action",
+            'uploader': 'STREEM',
+            'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
+            'uploader_url': 
r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
+            'upload_date': '20150526',
+            'license': 'Standard YouTube License',
+            'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
+            'categories': ['People & Blogs'],
+            'tags': list,
+            'view_count': int,
+            'like_count': int,
+            'dislike_count': int,
+        },
+        'params': {
+            'skip_download': True,
+        },
+        'skip': 'This video is not available.',
+        'add_ie': [YoutubeIE.ie_key()],
+    }, {
+        'url': 
'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.youtube.com/watch?v=MuAGGZNfUkU&list=RDMM',
+        'only_matching': True,
     }]
 
     @classmethod
@@ -2545,7 +2571,9 @@
     def _extract_video(self, renderer):
         video_id = renderer.get('videoId')
         title = try_get(
-            renderer, lambda x: x['title']['runs'][0]['text'], compat_str)
+            renderer,
+            (lambda x: x['title']['runs'][0]['text'],
+             lambda x: x['title']['simpleText']), compat_str)
         description = try_get(
             renderer, lambda x: x['descriptionSnippet']['runs'][0]['text'],
             compat_str)
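
Side note on the change just above: try_get accepts either a single accessor or a sequence of them, returning the first result of the expected type and swallowing lookup errors. A minimal self-contained sketch of that behaviour (not the actual youtube_dl.utils implementation, and using plain str instead of compat_str):

def try_get(src, getters, expected_type=None):
    # Accept a single callable or a sequence of callables.
    if not isinstance(getters, (list, tuple)):
        getters = [getters]
    for getter in getters:
        try:
            value = getter(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            continue  # this accessor did not apply, try the next one
        if expected_type is None or isinstance(value, expected_type):
            return value

# A renderer may carry its title either as rich-text runs or as simpleText.
renderer = {'title': {'simpleText': 'Some upload'}}
title = try_get(
    renderer,
    (lambda x: x['title']['runs'][0]['text'],
     lambda x: x['title']['simpleText']), str)
print(title)  # -> Some upload
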
@@ -2553,8 +2581,8 @@
             renderer, lambda x: x['lengthText']['simpleText'], compat_str))
         view_count_text = try_get(
             renderer, lambda x: x['viewCountText']['simpleText'], compat_str) or ''
-        view_count = int_or_none(self._search_regex(
-            r'^(\d+)', re.sub(r'\s', '', view_count_text),
+        view_count = str_to_int(self._search_regex(
+            r'^([\d,]+)', re.sub(r'\s', '', view_count_text),
             'view count', default=None))
         uploader = try_get(
             renderer, lambda x: x['ownerText']['runs'][0]['text'], compat_str)
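
The switch above from int_or_none with ^(\d+) to str_to_int with ^([\d,]+) matters for grouped counts such as '1,234,567 views'. A rough standalone sketch of the new behaviour (a simplified stand-in for the youtube_dl.utils helper, not the helper itself):

import re

def str_to_int(int_str):
    # Strip common grouping separators before converting.
    if int_str is None:
        return None
    return int(re.sub(r'[,\.\+]', '', int_str))

view_count_text = '1,234,567 views'
digits = re.search(r'^([\d,]+)', re.sub(r'\s', '', view_count_text))
print(str_to_int(digits.group(1)) if digits else None)  # -> 1234567
# With the old ^(\d+) capture the match would have stopped at the first
# comma and yielded 1 instead of 1234567.
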
@@ -2625,7 +2653,7 @@
         for content in video_list_renderer['contents']:
             if not isinstance(content, dict):
                 continue
-            renderer = content.get('playlistVideoRenderer')
+            renderer = content.get('playlistVideoRenderer') or content.get('playlistPanelVideoRenderer')
             if not isinstance(renderer, dict):
                 continue
             video_id = renderer.get('videoId')
@@ -2725,7 +2753,7 @@
 
     def _entries(self, tab, identity_token):
         continuation = None
-        slr_contents = tab['sectionListRenderer']['contents']
+        slr_contents = try_get(tab, lambda x: x['sectionListRenderer']['contents'], list) or []
         for slr_content in slr_contents:
             if not isinstance(slr_content, dict):
                 continue
@@ -2834,60 +2862,110 @@
         else:
             raise ExtractorError('Unable to find selected tab')
 
+    @staticmethod
+    def _extract_uploader(data):
+        uploader = {}
+        sidebar_renderer = try_get(
+            data, lambda x: x['sidebar']['playlistSidebarRenderer']['items'], list)
+        if sidebar_renderer:
+            for item in sidebar_renderer:
+                if not isinstance(item, dict):
+                    continue
+                renderer = item.get('playlistSidebarSecondaryInfoRenderer')
+                if not isinstance(renderer, dict):
+                    continue
+                owner = try_get(
+                    renderer, lambda x: x['videoOwner']['videoOwnerRenderer']['title']['runs'][0], dict)
+                if owner:
+                    uploader['uploader'] = owner.get('text')
+                    uploader['uploader_id'] = try_get(
+                        owner, lambda x: x['navigationEndpoint']['browseEndpoint']['browseId'], compat_str)
+                    uploader['uploader_url'] = urljoin(
+                        'https://www.youtube.com/',
+                        try_get(owner, lambda x: x['navigationEndpoint']['browseEndpoint']['canonicalBaseUrl'], compat_str))
+        return uploader
+
+    def _extract_from_tabs(self, item_id, webpage, data, tabs, identity_token):
+        selected_tab = self._extract_selected_tab(tabs)
+        renderer = try_get(
+            data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
+        if renderer:
+            channel_title = renderer.get('title') or item_id
+            tab_title = selected_tab.get('title')
+            title = channel_title or item_id
+            if tab_title:
+                title += ' - %s' % tab_title
+            description = renderer.get('description')
+            playlist_id = renderer.get('externalId')
+        renderer = try_get(
+            data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
+        if renderer:
+            title = renderer.get('title')
+            description = None
+            playlist_id = item_id
+        playlist = self.playlist_result(
+            self._entries(selected_tab['content'], identity_token),
+            playlist_id=playlist_id, playlist_title=title,
+            playlist_description=description)
+        playlist.update(self._extract_uploader(data))
+        return playlist
+
+    def _extract_from_playlist(self, item_id, data, playlist):
+        title = playlist.get('title') or try_get(
+            data, lambda x: x['titleText']['simpleText'], compat_str)
+        playlist_id = playlist.get('playlistId') or item_id
+        return self.playlist_result(
+            self._playlist_entries(playlist), playlist_id=playlist_id,
+            playlist_title=title)
+
     def _real_extract(self, url):
-        channel_id = self._match_id(url)
+        item_id = self._match_id(url)
         url = compat_urlparse.urlunparse(
             compat_urlparse.urlparse(url)._replace(netloc='www.youtube.com'))
-        webpage = self._download_webpage(url, channel_id)
-        data = self._extract_yt_initial_data(channel_id, webpage)
-        tabs = data['contents']['twoColumnBrowseResultsRenderer']['tabs']
-        selected_tab = self._extract_selected_tab(tabs)
-        channel_title = try_get(
-            data, lambda x: x['metadata']['channelMetadataRenderer']['title'],
-            compat_str)
-        channel_external_id = try_get(
-            data, lambda x: x['metadata']['channelMetadataRenderer']['externalId'],
-            compat_str)
-        tab_title = selected_tab.get('title')
-        title = channel_title or channel_id
-        if tab_title:
-            title += ' - %s' % tab_title
+        # Handle both video/playlist URLs
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        video_id = qs.get('v', [None])[0]
+        playlist_id = qs.get('list', [None])[0]
+        if video_id and playlist_id:
+            if self._downloader.params.get('noplaylist'):
+                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
+                return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
+        webpage = self._download_webpage(url, item_id)
         identity_token = self._search_regex(
             r'\bID_TOKEN["\']\s*:\s*["\'](.+?)["\']', webpage,
             'identity token', default=None)
-        return self.playlist_result(
-            self._entries(selected_tab['content'], identity_token),
-            playlist_id=channel_external_id or channel_id,
-            playlist_title=title)
+        data = self._extract_yt_initial_data(item_id, webpage)
+        tabs = try_get(
+            data, lambda x: x['contents']['twoColumnBrowseResultsRenderer']['tabs'], list)
+        if tabs:
+            return self._extract_from_tabs(item_id, webpage, data, tabs, identity_token)
+        playlist = try_get(
+            data, lambda x: x['contents']['twoColumnWatchNextResults']['playlist']['playlist'], dict)
+        if playlist:
+            return self._extract_from_playlist(item_id, data, playlist)
+        # Fall back to video extraction if no playlist-like page is recognized
+        if video_id:
+            return self.url_result(video_id, ie=YoutubeIE.ie_key(), video_id=video_id)
+        # Failed to recognize
+        raise ExtractorError('Unable to recognize tab page')
 
 
 class YoutubePlaylistIE(InfoExtractor):
     IE_DESC = 'YouTube.com playlists'
-    _VALID_URL = r"""(?x)(?:
+    _VALID_URL = r'''(?x)(?:
                         (?:https?://)?
                         (?:\w+\.)?
                         (?:
                             (?:
                                 youtube(?:kids)?\.com|
-                                invidio\.us
+                                invidio\.us|
+                                youtu\.be
                             )
-                            /
-                            (?:
-                               (?:course|view_play_list|my_playlists|artist|playlist|watch|embed/(?:videoseries|[0-9A-Za-z_-]{11}))
-                               \? (?:.*?[&;])*? (?:p|a|list)=
-                            |  p/
-                            )|
-                            youtu\.be/[0-9A-Za-z_-]{11}\?.*?\blist=
-                        )
-                        (
-                            (?:PL|LL|EC|UU|FL|RD|UL|TL|PU|OLAK5uy_)?[0-9A-Za-z-_]{10,}
-                            # Top tracks, they can also include dots
-                            |(?:MC)[\w\.]*
-                        )
-                        .*
-                     |
-                        (%(playlist_id)s)
-                     )""" % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
+                            /.*?\?.*?\blist=
+                        )?
+                        (?P<id>%(playlist_id)s)
+                     )''' % {'playlist_id': YoutubeBaseInfoExtractor._PLAYLIST_ID_RE}
     IE_NAME = 'youtube:playlist'
     _TESTS = [{
         'note': 'issue #673',
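
The reworked YoutubeTabIE._real_extract earlier in this hunk inspects the query string before choosing between video and playlist extraction. A rough standalone sketch of that decision, with noplaylist standing in for the downloader's --no-playlist option (illustration only, not the extractor code itself):

from urllib.parse import parse_qs, urlparse

def classify(url, noplaylist=False):
    qs = parse_qs(urlparse(url).query)
    video_id = qs.get('v', [None])[0]
    playlist_id = qs.get('list', [None])[0]
    if video_id and playlist_id and noplaylist:
        return 'video', video_id   # --no-playlist: keep just the video
    if playlist_id:
        return 'playlist', playlist_id
    return 'video', video_id

url = 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4'
print(classify(url))                   # ('playlist', 'PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4')
print(classify(url, noplaylist=True))  # ('video', 'FqZTN594JQw')
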
@@ -2896,7 +2974,7 @@
             'title': '[OLD]Team Fortress 2 (Class-based LP)',
             'id': 'PLBB231211A4F62143',
             'uploader': 'Wickydoo',
-            'uploader_id': 'Wickydoo',
+            'uploader_id': 'UCKSpbfbl5kRQpTdL7kMc-1Q',
         },
         'playlist_mincount': 29,
     }, {
@@ -2924,42 +3002,9 @@
             'title': '2018 Chinese New Singles (11/6 updated)',
             'id': 'PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
             'uploader': 'LBK',
-            'uploader_id': 'sdragonfang',
+            'uploader_id': 'UC21nz3_MesPLqtDqwdvnoxA',
         }
     }, {
-        'note': 'Embedded SWF player',
-        'url': 'https://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
-        'playlist_count': 4,
-        'info_dict': {
-            'title': 'JODA7',
-            'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
-        },
-        'skip': 'This playlist does not exist',
-    }, {
-        # Playlist URL that does not actually serve a playlist
-        'url': 'https://www.youtube.com/watch?v=FqZTN594JQw&list=PLMYEtVRpaqY00V9W81Cwmzp6N6vZqfUKD4',
-        'info_dict': {
-            'id': 'FqZTN594JQw',
-            'ext': 'webm',
-            'title': "Smiley's People 01 detective, Adventure Series, Action",
-            'uploader': 'STREEM',
-            'uploader_id': 'UCyPhqAZgwYWZfxElWVbVJng',
-            'uploader_url': r're:https?://(?:www\.)?youtube\.com/channel/UCyPhqAZgwYWZfxElWVbVJng',
-            'upload_date': '20150526',
-            'license': 'Standard YouTube License',
-            'description': 'md5:507cdcb5a49ac0da37a920ece610be80',
-            'categories': ['People & Blogs'],
-            'tags': list,
-            'view_count': int,
-            'like_count': int,
-            'dislike_count': int,
-        },
-        'params': {
-            'skip_download': True,
-        },
-        'skip': 'This video is not available.',
-        'add_ie': [YoutubeIE.ie_key()],
-    }, {
         'url': 'https://youtu.be/yeWKywCrFtk?list=PL2qgrgXsNUG5ig9cat4ohreBjYLAPC0J5',
         'info_dict': {
             'id': 'yeWKywCrFtk',
@@ -2989,9 +3034,6 @@
         # music album playlist
         'url': 'OLAK5uy_m4xAFdmMC5rX3Ji3g93pQe3hqLZw_9LhM',
         'only_matching': True,
-    }, {
-        'url': 'https://www.youtubekids.com/watch?v=Agk7R8I8o5U&list=PUZ6jURNr1WQZCNHF0ao-c0g',
-        'only_matching': True,
     }]
 
     @classmethod
@@ -3000,13 +3042,12 @@
             YoutubePlaylistIE, cls).suitable(url)
 
     def _real_extract(self, url):
-        # Extract playlist id
-        mobj = re.match(self._VALID_URL, url)
-        if mobj is None:
-            raise ExtractorError('Invalid URL: %s' % url)
-        playlist_id = mobj.group(1) or mobj.group(2)
+        playlist_id = self._match_id(url)
+        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
+        if not qs:
+            qs = {'list': playlist_id}
         return self.url_result(
-            'https://www.youtube.com/playlist?list=%s' % playlist_id,
+            update_url_query('https://www.youtube.com/playlist', qs),
             ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
 
 
@@ -3254,13 +3295,13 @@
 class YoutubeWatchLaterIE(InfoExtractor):
     IE_NAME = 'youtube:watchlater'
     IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
-    _VALID_URL = r'https?://(?:www\.)?youtube\.com/(?:feed/watch_later|(?:playlist|watch)\?(?:.+&)?list=WL)|:ytwatchlater'
+    _VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/watch_later|:ytwatchlater'
 
     _TESTS = [{
-        'url': 'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL',
+        'url': 'https://www.youtube.com/feed/watch_later',
         'only_matching': True,
     }, {
-        'url': 'https://www.youtube.com/feed/watch_later',
+        'url': ':ytwatchlater',
         'only_matching': True,
     }]
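
With the tightened watch-later pattern above, only the dedicated feed URL and the :ytwatchlater shorthand are matched by this extractor. A quick standalone check with plain re:

import re

VALID_URL = r'https?://(?:www\.)?youtube\.com/feed/watch_later|:ytwatchlater'
for candidate in (
        'https://www.youtube.com/feed/watch_later',
        ':ytwatchlater',
        'https://www.youtube.com/watch?v=bCNU9TrbiRk&index=1&list=WL'):
    print(candidate, bool(re.match(VALID_URL, candidate)))
# The first two match; the watch?list=WL form no longer belongs to this extractor.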
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/version.py new/youtube-dl/youtube_dl/version.py
--- old/youtube-dl/youtube_dl/version.py        2020-11-16 21:59:50.000000000 +0100
+++ new/youtube-dl/youtube_dl/version.py        2020-11-18 23:22:24.000000000 +0100
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2020.11.17'
+__version__ = '2020.11.19'
_______________________________________________
openSUSE Commits mailing list -- commit@lists.opensuse.org
To unsubscribe, email commit-le...@lists.opensuse.org
List Netiquette: https://en.opensuse.org/openSUSE:Mailing_list_netiquette
List Archives: https://lists.opensuse.org/archives/list/commit@lists.opensuse.org
