Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package youtube-dl for openSUSE:Factory 
checked in at 2021-04-01 14:17:59
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/youtube-dl (Old)
 and      /work/SRC/openSUSE:Factory/.youtube-dl.new.2401 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "youtube-dl"

Thu Apr  1 14:17:59 2021 rev:163 rq:882458 version:2021.04.01

Changes:
--------
--- /work/SRC/openSUSE:Factory/youtube-dl/python-youtube-dl.changes     
2021-03-28 11:56:27.952229457 +0200
+++ /work/SRC/openSUSE:Factory/.youtube-dl.new.2401/python-youtube-dl.changes   
2021-04-01 14:19:27.236167290 +0200
@@ -1,0 +2,10 @@
+Wed Mar 31 22:09:07 UTC 2021 - Jan Engelhardt <jeng...@inai.de>
+
+- Update to release 2021.04.01
+  * youtube: fix playlist/community continuation items extraction
+  * vimeo: fix unlisted video extraction
+  * youtube: fix video's channel extraction
+  * youtube: improve age-restricted video extraction
+  * youtube: setup CONSENT cookie when needed
+
+-------------------------------------------------------------------
youtube-dl.changes: same change

Old:
----
  youtube-dl-2021.03.25.tar.gz
  youtube-dl-2021.03.25.tar.gz.sig

New:
----
  youtube-dl-2021.04.01.tar.gz
  youtube-dl-2021.04.01.tar.gz.sig

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.aM0J0c/_old  2021-04-01 14:19:27.952168245 +0200
+++ /var/tmp/diff_new_pack.aM0J0c/_new  2021-04-01 14:19:27.956168251 +0200
@@ -19,7 +19,7 @@
 %define modname youtube-dl
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-youtube-dl
-Version:        2021.03.25
+Version:        2021.04.01
 Release:        0
 Summary:        A Python module for downloading from video sites for offline 
watching
 License:        CC-BY-SA-3.0 AND SUSE-Public-Domain

++++++ youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.aM0J0c/_old  2021-04-01 14:19:27.972168272 +0200
+++ /var/tmp/diff_new_pack.aM0J0c/_new  2021-04-01 14:19:27.976168277 +0200
@@ -17,7 +17,7 @@
 
 
 Name:           youtube-dl
-Version:        2021.03.25
+Version:        2021.04.01
 Release:        0
 Summary:        A tool for downloading from video sites for offline watching
 License:        CC-BY-SA-3.0 AND SUSE-Public-Domain

++++++ youtube-dl-2021.03.25.tar.gz -> youtube-dl-2021.04.01.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/ChangeLog new/youtube-dl/ChangeLog
--- old/youtube-dl/ChangeLog    2021-03-24 18:04:07.000000000 +0100
+++ new/youtube-dl/ChangeLog    2021-03-31 23:47:08.000000000 +0200
@@ -1,3 +1,26 @@
+version 2021.04.01
+
+Extractors
+* [youtube] Setup CONSENT cookie when needed (#28604)
+* [vimeo] Fix password protected review extraction (#27591)
+* [youtube] Improve age-restricted video extraction (#28578)
+
+
+version 2021.03.31
+
+Extractors
+* [vlive] Fix inkey request (#28589)
+* [francetvinfo] Improve video id extraction (#28584)
++ [instagram] Extract duration (#28469)
+* [instagram] Improve title extraction (#28469)
++ [sbs] Add support for ondemand watch URLs (#28566)
+* [youtube] Fix video's channel extraction (#28562)
+* [picarto] Fix live stream extraction (#28532)
+* [vimeo] Fix unlisted video extraction (#28414)
+* [youtube:tab] Fix playlist/community continuation items extraction (#28266)
+* [ard] Improve clip id extraction (#22724, #28528)
+
+
 version 2021.03.25
 
 Extractors
Binary files old/youtube-dl/youtube-dl and new/youtube-dl/youtube-dl differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/ard.py 
new/youtube-dl/youtube_dl/extractor/ard.py
--- old/youtube-dl/youtube_dl/extractor/ard.py  2021-03-24 18:03:49.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/ard.py  2021-03-30 22:01:34.000000000 
+0200
@@ -335,7 +335,7 @@
 
 
 class ARDBetaMediathekIE(ARDMediathekBaseIE):
-    _VALID_URL = 
r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?P<client>[^/]+)/(?:player|live|video)/(?P<display_id>(?:[^/]+/)*)(?P<video_id>[a-zA-Z0-9]+)'
+    _VALID_URL = 
r'https://(?:(?:beta|www)\.)?ardmediathek\.de/(?:[^/]+/)?(?:player|live|video)/(?:[^/]+/)*(?P<id>Y3JpZDovL[a-zA-Z0-9]+)'
     _TESTS = [{
         'url': 
'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/',
         'md5': 'a1dc75a39c61601b980648f7c9f9f71d',
@@ -365,22 +365,22 @@
     }, {
         'url': 
'https://www.ardmediathek.de/swr/live/Y3JpZDovL3N3ci5kZS8xMzQ4MTA0Mg',
         'only_matching': True,
+    }, {
+        'url': 
'https://www.ardmediathek.de/video/coronavirus-update-ndr-info/astrazeneca-kurz-lockdown-und-pims-syndrom-81/ndr/Y3JpZDovL25kci5kZS84NzE0M2FjNi0wMWEwLTQ5ODEtOTE5NS1mOGZhNzdhOTFmOTI/',
+        'only_matching': True,
+    }, {
+        'url': 
'https://www.ardmediathek.de/ard/player/Y3JpZDovL3dkci5kZS9CZWl0cmFnLWQ2NDJjYWEzLTMwZWYtNGI4NS1iMTI2LTU1N2UxYTcxOGIzOQ/tatort-duo-koeln-leipzig-ihr-kinderlein-kommet',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
-        display_id = mobj.group('display_id')
-        if display_id:
-            display_id = display_id.rstrip('/')
-        if not display_id:
-            display_id = video_id
+        video_id = self._match_id(url)
 
         player_page = self._download_json(
             'https://api.ardmediathek.de/public-gateway',
-            display_id, data=json.dumps({
+            video_id, data=json.dumps({
                 'query': '''{
-  playerPage(client:"%s", clipId: "%s") {
+  playerPage(client: "ard", clipId: "%s") {
     blockedByFsk
     broadcastedOn
     maturityContentRating
@@ -410,7 +410,7 @@
       }
     }
   }
-}''' % (mobj.group('client'), video_id),
+}''' % video_id,
             }).encode(), headers={
                 'Content-Type': 'application/json'
             })['data']['playerPage']
@@ -435,7 +435,6 @@
                 r'\(FSK\s*(\d+)\)\s*$', description, 'age limit', 
default=None))
         info.update({
             'age_limit': age_limit,
-            'display_id': display_id,
             'title': title,
             'description': description,
             'timestamp': unified_timestamp(player_page.get('broadcastedOn')),
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/francetv.py 
new/youtube-dl/youtube_dl/extractor/francetv.py
--- old/youtube-dl/youtube_dl/extractor/francetv.py     2021-03-24 
18:03:49.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/francetv.py     2021-03-30 
22:01:34.000000000 +0200
@@ -399,7 +399,8 @@
         video_id = self._search_regex(
             (r'player\.load[^;]+src:\s*["\']([^"\']+)',
              r'id-video=([^@]+@[^"]+)',
-             
r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"'),
+             
r'<a[^>]+href="(?:https?:)?//videos\.francetv\.fr/video/([^@]+@[^"]+)"',
+             
r'data-id=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'),
             webpage, 'video id')
 
         return self._make_url_result(video_id)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/instagram.py 
new/youtube-dl/youtube_dl/extractor/instagram.py
--- old/youtube-dl/youtube_dl/extractor/instagram.py    2021-03-24 
18:03:49.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/instagram.py    2021-03-30 
22:01:34.000000000 +0200
@@ -12,6 +12,7 @@
 )
 from ..utils import (
     ExtractorError,
+    float_or_none,
     get_element_by_attribute,
     int_or_none,
     lowercase_escape,
@@ -32,6 +33,7 @@
             'title': 'Video by naomipq',
             'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
             'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 0,
             'timestamp': 1371748545,
             'upload_date': '20130620',
             'uploader_id': 'naomipq',
@@ -48,6 +50,7 @@
             'ext': 'mp4',
             'title': 'Video by britneyspears',
             'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 0,
             'timestamp': 1453760977,
             'upload_date': '20160125',
             'uploader_id': 'britneyspears',
@@ -87,6 +90,24 @@
             'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
         },
     }, {
+        # IGTV
+        'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
+        'info_dict': {
+            'id': 'BkfuX9UB-eK',
+            'ext': 'mp4',
+            'title': 'Fingerboarding Tricks with @cass.fb',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'duration': 53.83,
+            'timestamp': 1530032919,
+            'upload_date': '20180626',
+            'uploader_id': 'instagram',
+            'uploader': 'Instagram',
+            'like_count': int,
+            'comment_count': int,
+            'comments': list,
+            'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro 
who can perform tiny ollies and kickflips while blindfolded.',
+        }
+    }, {
         'url': 'https://instagram.com/p/-Cmh1cukG2/',
         'only_matching': True,
     }, {
@@ -159,7 +180,9 @@
             description = try_get(
                 media, lambda x: 
x['edge_media_to_caption']['edges'][0]['node']['text'],
                 compat_str) or media.get('caption')
+            title = media.get('title')
             thumbnail = media.get('display_src') or media.get('display_url')
+            duration = float_or_none(media.get('video_duration'))
             timestamp = int_or_none(media.get('taken_at_timestamp') or 
media.get('date'))
             uploader = media.get('owner', {}).get('full_name')
             uploader_id = media.get('owner', {}).get('username')
@@ -200,9 +223,10 @@
                             continue
                         entries.append({
                             'id': node.get('shortcode') or node['id'],
-                            'title': 'Video %d' % edge_num,
+                            'title': node.get('title') or 'Video %d' % 
edge_num,
                             'url': node_video_url,
                             'thumbnail': node.get('display_url'),
+                            'duration': 
float_or_none(node.get('video_duration')),
                             'width': int_or_none(try_get(node, lambda x: 
x['dimensions']['width'])),
                             'height': int_or_none(try_get(node, lambda x: 
x['dimensions']['height'])),
                             'view_count': 
int_or_none(node.get('video_view_count')),
@@ -239,8 +263,9 @@
             'id': video_id,
             'formats': formats,
             'ext': 'mp4',
-            'title': 'Video by %s' % uploader_id,
+            'title': title or 'Video by %s' % uploader_id,
             'description': description,
+            'duration': duration,
             'thumbnail': thumbnail,
             'timestamp': timestamp,
             'uploader_id': uploader_id,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/picarto.py 
new/youtube-dl/youtube_dl/extractor/picarto.py
--- old/youtube-dl/youtube_dl/extractor/picarto.py      2021-03-24 
18:03:49.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/picarto.py      2021-03-30 
22:01:34.000000000 +0200
@@ -1,22 +1,15 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-import time
-
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     ExtractorError,
     js_to_json,
-    try_get,
-    update_url_query,
-    urlencode_postdata,
 )
 
 
 class PicartoIE(InfoExtractor):
-    _VALID_URL = 
r'https?://(?:www.)?picarto\.tv/(?P<id>[a-zA-Z0-9]+)(?:/(?P<token>[a-zA-Z0-9]+))?'
+    _VALID_URL = r'https?://(?:www.)?picarto\.tv/(?P<id>[a-zA-Z0-9]+)'
     _TEST = {
         'url': 'https://picarto.tv/Setz',
         'info_dict': {
@@ -34,65 +27,46 @@
         return False if PicartoVodIE.suitable(url) else super(PicartoIE, 
cls).suitable(url)
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        channel_id = mobj.group('id')
+        channel_id = self._match_id(url)
 
-        metadata = self._download_json(
-            'https://api.picarto.tv/v1/channel/name/' + channel_id,
-            channel_id)
+        data = self._download_json(
+            'https://ptvintern.picarto.tv/ptvapi', channel_id, query={
+                'query': '''{
+  channel(name: "%s") {
+    adult
+    id
+    online
+    stream_name
+    title
+  }
+  getLoadBalancerUrl(channel_name: "%s") {
+    url
+  }
+}''' % (channel_id, channel_id),
+            })['data']
+        metadata = data['channel']
 
-        if metadata.get('online') is False:
+        if metadata.get('online') == 0:
             raise ExtractorError('Stream is offline', expected=True)
+        title = metadata['title']
 
         cdn_data = self._download_json(
-            'https://picarto.tv/process/channel', channel_id,
-            data=urlencode_postdata({'loadbalancinginfo': channel_id}),
-            note='Downloading load balancing info')
-
-        token = mobj.group('token') or 'public'
-        params = {
-            'con': int(time.time() * 1000),
-            'token': token,
-        }
+            data['getLoadBalancerUrl']['url'] + '/stream/json_' + 
metadata['stream_name'] + '.js',
+            channel_id, 'Downloading load balancing info')
 
-        prefered_edge = cdn_data.get('preferedEdge')
         formats = []
-
-        for edge in cdn_data['edges']:
-            edge_ep = edge.get('ep')
-            if not edge_ep or not isinstance(edge_ep, compat_str):
+        for source in (cdn_data.get('source') or []):
+            source_url = source.get('url')
+            if not source_url:
                 continue
-            edge_id = edge.get('id')
-            for tech in cdn_data['techs']:
-                tech_label = tech.get('label')
-                tech_type = tech.get('type')
-                preference = 0
-                if edge_id == prefered_edge:
-                    preference += 1
-                format_id = []
-                if edge_id:
-                    format_id.append(edge_id)
-                if tech_type == 'application/x-mpegurl' or tech_label == 'HLS':
-                    format_id.append('hls')
-                    formats.extend(self._extract_m3u8_formats(
-                        update_url_query(
-                            'https://%s/hls/%s/index.m3u8'
-                            % (edge_ep, channel_id), params),
-                        channel_id, 'mp4', preference=preference,
-                        m3u8_id='-'.join(format_id), fatal=False))
-                    continue
-                elif tech_type == 'video/mp4' or tech_label == 'MP4':
-                    format_id.append('mp4')
-                    formats.append({
-                        'url': update_url_query(
-                            'https://%s/mp4/%s.mp4' % (edge_ep, channel_id),
-                            params),
-                        'format_id': '-'.join(format_id),
-                        'preference': preference,
-                    })
-                else:
-                    # rtmp format does not seem to work
-                    continue
+            source_type = source.get('type')
+            if source_type == 'html5/application/vnd.apple.mpegurl':
+                formats.extend(self._extract_m3u8_formats(
+                    source_url, channel_id, 'mp4', m3u8_id='hls', fatal=False))
+            elif source_type == 'html5/video/mp4':
+                formats.append({
+                    'url': source_url,
+                })
         self._sort_formats(formats)
 
         mature = metadata.get('adult')
@@ -103,10 +77,10 @@
 
         return {
             'id': channel_id,
-            'title': self._live_title(metadata.get('title') or channel_id),
+            'title': self._live_title(title.strip()),
             'is_live': True,
-            'thumbnail': try_get(metadata, lambda x: x['thumbnails']['web']),
             'channel': channel_id,
+            'channel_id': metadata.get('id'),
             'channel_url': 'https://picarto.tv/%s' % channel_id,
             'age_limit': age_limit,
             'formats': formats,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/sbs.py 
new/youtube-dl/youtube_dl/extractor/sbs.py
--- old/youtube-dl/youtube_dl/extractor/sbs.py  2021-03-24 18:03:49.000000000 
+0100
+++ new/youtube-dl/youtube_dl/extractor/sbs.py  2021-03-30 22:01:34.000000000 
+0200
@@ -10,7 +10,7 @@
 
 class SBSIE(InfoExtractor):
     IE_DESC = 'sbs.com.au'
-    _VALID_URL = 
r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=)|news/(?:embeds/)?video/)(?P<id>[0-9]+)'
+    _VALID_URL = 
r'https?://(?:www\.)?sbs\.com\.au/(?:ondemand(?:/video/(?:single/)?|.*?\bplay=|/watch/)|news/(?:embeds/)?video/)(?P<id>[0-9]+)'
 
     _TESTS = [{
         # Original URL is handled by the generic IE which finds the iframe:
@@ -43,6 +43,9 @@
     }, {
         'url': 'https://www.sbs.com.au/news/embeds/video/1840778819866',
         'only_matching': True,
+    }, {
+        'url': 'https://www.sbs.com.au/ondemand/watch/1698704451971',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/vimeo.py 
new/youtube-dl/youtube_dl/extractor/vimeo.py
--- old/youtube-dl/youtube_dl/extractor/vimeo.py        2021-03-24 
18:03:54.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/vimeo.py        2021-03-30 
22:01:39.000000000 +0200
@@ -24,6 +24,7 @@
     merge_dicts,
     OnDemandPagedList,
     parse_filesize,
+    parse_iso8601,
     RegexNotFoundError,
     sanitized_Request,
     smuggle_url,
@@ -74,25 +75,28 @@
                     expected=True)
             raise ExtractorError('Unable to log in')
 
-    def _verify_video_password(self, url, video_id, webpage):
+    def _get_video_password(self):
         password = self._downloader.params.get('videopassword')
         if password is None:
-            raise ExtractorError('This video is protected by a password, use 
the --video-password option', expected=True)
-        token, vuid = self._extract_xsrft_and_vuid(webpage)
-        data = urlencode_postdata({
-            'password': password,
-            'token': token,
-        })
+            raise ExtractorError(
+                'This video is protected by a password, use the 
--video-password option',
+                expected=True)
+        return password
+
+    def _verify_video_password(self, url, video_id, password, token, vuid):
         if url.startswith('http://'):
             # vimeo only supports https now, but the user can give an http url
             url = url.replace('http://', 'https://')
-        password_request = sanitized_Request(url + '/password', data)
-        password_request.add_header('Content-Type', 
'application/x-www-form-urlencoded')
-        password_request.add_header('Referer', url)
         self._set_vimeo_cookie('vuid', vuid)
         return self._download_webpage(
-            password_request, video_id,
-            'Verifying the password', 'Wrong password')
+            url + '/password', video_id, 'Verifying the password',
+            'Wrong password', data=urlencode_postdata({
+                'password': password,
+                'token': token,
+            }), headers={
+                'Content-Type': 'application/x-www-form-urlencoded',
+                'Referer': url,
+            })
 
     def _extract_xsrft_and_vuid(self, webpage):
         xsrft = self._search_regex(
@@ -278,7 +282,7 @@
                             )?
                         (?:videos?/)?
                         (?P<id>[0-9]+)
-                        (?:/[\da-f]+)?
+                        (?:/(?P<unlisted_hash>[\da-f]{10}))?
                         /?(?:[?&].*)?(?:[#].*)?$
                     '''
     IE_NAME = 'vimeo'
@@ -331,9 +335,9 @@
                 'id': '54469442',
                 'ext': 'mp4',
                 'title': 'Kathy Sierra: Building the minimum Badass User, 
Business of Software 2012',
-                'uploader': 'The BLN & Business of Software',
-                'uploader_url': 
r're:https?://(?:www\.)?vimeo\.com/theblnbusinessofsoftware',
-                'uploader_id': 'theblnbusinessofsoftware',
+                'uploader': 'Business of Software',
+                'uploader_url': 
r're:https?://(?:www\.)?vimeo\.com/businessofsoftware',
+                'uploader_id': 'businessofsoftware',
                 'duration': 3610,
                 'description': None,
             },
@@ -468,6 +472,7 @@
                 'skip_download': True,
             },
             'expected_warnings': ['Unable to download JSON metadata'],
+            'skip': 'this page is no longer available.',
         },
         {
             'url': 'http://player.vimeo.com/video/68375962',
@@ -550,9 +555,7 @@
         return urls[0] if urls else None
 
     def _verify_player_video_password(self, url, video_id, headers):
-        password = self._downloader.params.get('videopassword')
-        if password is None:
-            raise ExtractorError('This video is protected by a password, use 
the --video-password option', expected=True)
+        password = self._get_video_password()
         data = urlencode_postdata({
             'password': base64.b64encode(password.encode()),
         })
@@ -577,11 +580,37 @@
         if 'Referer' not in headers:
             headers['Referer'] = url
 
-        channel_id = self._search_regex(
-            r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)
-
         # Extract ID from URL
-        video_id = self._match_id(url)
+        video_id, unlisted_hash = re.match(self._VALID_URL, url).groups()
+        if unlisted_hash:
+            token = self._download_json(
+                'https://vimeo.com/_rv/jwt', video_id, headers={
+                    'X-Requested-With': 'XMLHttpRequest'
+                })['token']
+            video = self._download_json(
+                'https://api.vimeo.com/videos/%s:%s' % (video_id, 
unlisted_hash),
+                video_id, headers={
+                    'Authorization': 'jwt ' + token,
+                }, query={
+                    'fields': 
'config_url,created_time,description,license,metadata.connections.comments.total,metadata.connections.likes.total,release_time,stats.plays',
+                })
+            info = self._parse_config(self._download_json(
+                video['config_url'], video_id), video_id)
+            self._vimeo_sort_formats(info['formats'])
+            get_timestamp = lambda x: parse_iso8601(video.get(x + '_time'))
+            info.update({
+                'description': video.get('description'),
+                'license': video.get('license'),
+                'release_timestamp': get_timestamp('release'),
+                'timestamp': get_timestamp('created'),
+                'view_count': int_or_none(try_get(video, lambda x: 
x['stats']['plays'])),
+            })
+            connections = try_get(
+                video, lambda x: x['metadata']['connections'], dict) or {}
+            for k in ('comment', 'like'):
+                info[k + '_count'] = int_or_none(try_get(connections, lambda 
x: x[k + 's']['total']))
+            return info
+
         orig_url = url
         is_pro = 'vimeopro.com/' in url
         is_player = '://player.vimeo.com/video/' in url
@@ -670,7 +699,10 @@
             if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
                 if '_video_password_verified' in data:
                     raise ExtractorError('video password verification failed!')
-                self._verify_video_password(redirect_url, video_id, webpage)
+                video_password = self._get_video_password()
+                token, vuid = self._extract_xsrft_and_vuid(webpage)
+                self._verify_video_password(
+                    redirect_url, video_id, video_password, token, vuid)
                 return self._real_extract(
                     smuggle_url(redirect_url, {'_video_password_verified': 
'verified'}))
             else:
@@ -756,6 +788,8 @@
                 
r'<link[^>]+rel=["\']license["\'][^>]+href=(["\'])(?P<license>(?:(?!\1).)+)\1',
                 webpage, 'license', default=None, group='license')
 
+        channel_id = self._search_regex(
+            r'vimeo\.com/channels/([^/]+)', url, 'channel id', default=None)
         channel_url = 'https://vimeo.com/channels/%s' % channel_id if 
channel_id else None
 
         info_dict = {
@@ -1062,10 +1096,23 @@
 
     def _real_extract(self, url):
         page_url, video_id = re.match(self._VALID_URL, url).groups()
-        clip_data = self._download_json(
-            page_url.replace('/review/', '/review/data/'),
-            video_id)['clipData']
-        config_url = clip_data['configUrl']
+        data = self._download_json(
+            page_url.replace('/review/', '/review/data/'), video_id)
+        if data.get('isLocked') is True:
+            video_password = self._get_video_password()
+            viewer = self._download_json(
+                'https://vimeo.com/_rv/viewer', video_id)
+            webpage = self._verify_video_password(
+                'https://vimeo.com/' + video_id, video_id,
+                video_password, viewer['xsrft'], viewer['vuid'])
+            clip_page_config = self._parse_json(self._search_regex(
+                r'window\.vimeo\.clip_page_config\s*=\s*({.+?});',
+                webpage, 'clip page config'), video_id)
+            config_url = clip_page_config['player']['config_url']
+            clip_data = clip_page_config.get('clip') or {}
+        else:
+            clip_data = data['clipData']
+            config_url = clip_data['configUrl']
         config = self._download_json(config_url, video_id)
         info_dict = self._parse_config(config, video_id)
         source_format = self._extract_original_format(
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/vlive.py 
new/youtube-dl/youtube_dl/extractor/vlive.py
--- old/youtube-dl/youtube_dl/extractor/vlive.py        2021-03-24 
18:03:49.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/vlive.py        2021-03-30 
22:01:34.000000000 +0200
@@ -106,7 +106,7 @@
             raise ExtractorError('Unable to log in', expected=True)
 
     def _call_api(self, path_template, video_id, fields=None):
-        query = {'appId': self._APP_ID, 'gcc': 'KR'}
+        query = {'appId': self._APP_ID, 'gcc': 'KR', 'platformType': 'PC'}
         if fields:
             query['fields'] = fields
         try:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/youtube.py 
new/youtube-dl/youtube_dl/extractor/youtube.py
--- old/youtube-dl/youtube_dl/extractor/youtube.py      2021-03-24 
18:03:54.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/youtube.py      2021-03-30 
22:01:39.000000000 +0200
@@ -24,6 +24,7 @@
 from ..utils import (
     ExtractorError,
     clean_html,
+    dict_get,
     float_or_none,
     int_or_none,
     mimetype2ext,
@@ -248,7 +249,23 @@
 
         return True
 
+    def _initialize_consent(self):
+        cookies = self._get_cookies('https://www.youtube.com/')
+        if cookies.get('__Secure-3PSID'):
+            return
+        consent_id = None
+        consent = cookies.get('CONSENT')
+        if consent:
+            if 'YES' in consent.value:
+                return
+            consent_id = self._search_regex(
+                r'PENDING\+(\d+)', consent.value, 'consent', default=None)
+        if not consent_id:
+            consent_id = random.randint(100, 999)
+        self._set_cookie('.youtube.com', 'CONSENT', 
'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
+
     def _real_initialize(self):
+        self._initialize_consent()
         if self._downloader is None:
             return
         if not self._login():
@@ -1431,7 +1448,7 @@
         base_url = self.http_scheme() + '//www.youtube.com/'
         webpage_url = base_url + 'watch?v=' + video_id
         webpage = self._download_webpage(
-            webpage_url + '&bpctr=9999999999', video_id, fatal=False)
+            webpage_url + '&bpctr=9999999999&has_verified=1', video_id, 
fatal=False)
 
         player_response = None
         if webpage:
@@ -1895,7 +1912,7 @@
                     info['channel'] = get_text(try_get(
                         vsir,
                         lambda x: x['owner']['videoOwnerRenderer']['title'],
-                        compat_str))
+                        dict))
                     rows = try_get(
                         vsir,
                         lambda x: 
x['metadataRowContainer']['metadataRowContainerRenderer']['rows'],
@@ -2541,13 +2558,14 @@
                     continuation = 
self._extract_continuation(continuation_renderer)
                     continue
 
+            on_response_received = dict_get(response, 
('onResponseReceivedActions', 'onResponseReceivedEndpoints'))
             continuation_items = try_get(
-                response, lambda x: 
x['onResponseReceivedActions'][0]['appendContinuationItemsAction']['continuationItems'],
 list)
+                on_response_received, lambda x: 
x[0]['appendContinuationItemsAction']['continuationItems'], list)
             if continuation_items:
                 continuation_item = continuation_items[0]
                 if not isinstance(continuation_item, dict):
                     continue
-                renderer = continuation_item.get('gridVideoRenderer')
+                renderer = self._extract_grid_item_renderer(continuation_item)
                 if renderer:
                     grid_renderer = {'items': continuation_items}
                     for entry in self._grid_entries(grid_renderer):
@@ -2561,6 +2579,13 @@
                         yield entry
                     continuation = 
self._extract_continuation(video_list_renderer)
                     continue
+                renderer = continuation_item.get('backstagePostThreadRenderer')
+                if renderer:
+                    continuation_renderer = {'contents': continuation_items}
+                    for entry in 
self._post_thread_continuation_entries(continuation_renderer):
+                        yield entry
+                    continuation = 
self._extract_continuation(continuation_renderer)
+                    continue
 
             break
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/youtube-dl/youtube_dl/version.py 
new/youtube-dl/youtube_dl/version.py
--- old/youtube-dl/youtube_dl/version.py        2021-03-24 18:04:07.000000000 
+0100
+++ new/youtube-dl/youtube_dl/version.py        2021-03-31 23:47:08.000000000 
+0200
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2021.03.25'
+__version__ = '2021.04.01'

Reply via email to