Hello community,

Here is the log from the commit of package youtube-dl for openSUSE:Factory, checked in at 2016-11-14 20:17:07.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/youtube-dl (Old)
 and      /work/SRC/openSUSE:Factory/.youtube-dl.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "youtube-dl"

Changes:
--------
--- /work/SRC/openSUSE:Factory/youtube-dl/youtube-dl.changes    2016-11-12 13:03:30.000000000 +0100
+++ /work/SRC/openSUSE:Factory/.youtube-dl.new/youtube-dl.changes       2016-11-14 20:17:09.000000000 +0100
@@ -1,0 +2,7 @@
+Mon Nov 14 12:11:21 UTC 2016 - jeng...@inai.de
+
+- Update to new upstream release 2016.11.14.1
+* audioboom: Recognize /posts/ URLs
+* afreecatv: Add support for vod.afreecatv.com
+
+-------------------------------------------------------------------

Old:
----
  youtube-dl-2016.11.08.1.tar.gz
  youtube-dl-2016.11.08.1.tar.gz.sig

New:
----
  youtube-dl-2016.11.14.1.tar.gz
  youtube-dl-2016.11.14.1.tar.gz.sig

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ youtube-dl.spec ++++++
--- /var/tmp/diff_new_pack.CGHo9d/_old  2016-11-14 20:17:10.000000000 +0100
+++ /var/tmp/diff_new_pack.CGHo9d/_new  2016-11-14 20:17:10.000000000 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           youtube-dl
-Version:        2016.11.08.1
+Version:        2016.11.14.1
 Release:        0
 Summary:        A tool for downloading from video sites for offline watching
 License:        SUSE-Public-Domain and CC-BY-SA-3.0

++++++ youtube-dl-2016.11.08.1.tar.gz -> youtube-dl-2016.11.14.1.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/ChangeLog new/youtube-dl/ChangeLog
--- old/youtube-dl/ChangeLog    2016-11-08 16:30:52.000000000 +0100
+++ new/youtube-dl/ChangeLog    2016-11-13 20:48:14.000000000 +0100
@@ -1,3 +1,21 @@
+version 2016.11.14.1
+
+Core
+ [downloader/fragment,f4m,hls] Respect HTTP headers from info dict
+* [extractor/common] Fix media templates with Bandwidth substitution pattern in
+  MPD manifests (#11175)
+* [extractor/common] Improve thumbnail extraction from JSON-LD
+
+Extractors
++ [nrk] Workaround geo restriction
++ [nrk] Improve error detection and messages
++ [afreecatv] Add support for vod.afreecatv.com (#11174)
+* [cda] Fix and improve extraction (#10929, #10936)
+* [plays] Fix extraction (#11165)
+* [eagleplatform] Fix extraction (#11160)
++ [audioboom] Recognize /posts/ URLs (#11149)
+
+
 version 2016.11.08.1
 
 Extractors
Files old/youtube-dl/youtube-dl and new/youtube-dl/youtube-dl differ
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/downloader/f4m.py new/youtube-dl/youtube_dl/downloader/f4m.py
--- old/youtube-dl/youtube_dl/downloader/f4m.py 2016-11-08 16:30:30.000000000 +0100
+++ new/youtube-dl/youtube_dl/downloader/f4m.py 2016-11-13 20:27:51.000000000 +0100
@@ -314,7 +314,8 @@
         man_url = info_dict['url']
         requested_bitrate = info_dict.get('tbr')
         self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
-        urlh = self.ydl.urlopen(man_url)
+
+        urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
         man_url = urlh.geturl()
         # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
         # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
@@ -387,7 +388,10 @@
             url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
             frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
             try:
-                success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
+                success = ctx['dl'].download(frag_filename, {
+                    'url': url_parsed.geturl(),
+                    'http_headers': info_dict.get('http_headers'),
+                })
                 if not success:
                     return False
                 (down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/downloader/fragment.py new/youtube-dl/youtube_dl/downloader/fragment.py
--- old/youtube-dl/youtube_dl/downloader/fragment.py    2016-11-08 16:30:30.000000000 +0100
+++ new/youtube-dl/youtube_dl/downloader/fragment.py    2016-11-13 20:27:51.000000000 +0100
@@ -9,6 +9,7 @@
     error_to_compat_str,
     encodeFilename,
     sanitize_open,
+    sanitized_Request,
 )
 
 
@@ -37,6 +38,10 @@
     def report_skip_fragment(self, fragment_name):
         self.to_screen('[download] Skipping fragment %s...' % fragment_name)
 
+    def _prepare_url(self, info_dict, url):
+        headers = info_dict.get('http_headers')
+        return sanitized_Request(url, None, headers) if headers else url
+
     def _prepare_and_start_frag_download(self, ctx):
         self._prepare_frag_download(ctx)
         self._start_frag_download(ctx)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/downloader/hls.py new/youtube-dl/youtube_dl/downloader/hls.py
--- old/youtube-dl/youtube_dl/downloader/hls.py 2016-11-08 16:30:30.000000000 +0100
+++ new/youtube-dl/youtube_dl/downloader/hls.py 2016-11-13 20:27:51.000000000 +0100
@@ -59,7 +59,8 @@
     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
         self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
-        manifest = self.ydl.urlopen(man_url).read()
+
+        manifest = self.ydl.urlopen(self._prepare_url(info_dict, man_url)).read()
 
         s = manifest.decode('utf-8', 'ignore')
 
@@ -112,7 +113,10 @@
                     count = 0
                     while count <= fragment_retries:
                         try:
-                            success = ctx['dl'].download(frag_filename, {'url': frag_url})
+                            success = ctx['dl'].download(frag_filename, {
+                                'url': frag_url,
+                                'http_headers': info_dict.get('http_headers'),
+                            })
                             if not success:
                                 return False
                             down, frag_sanitized = sanitize_open(frag_filename, 'rb')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/afreecatv.py new/youtube-dl/youtube_dl/extractor/afreecatv.py
--- old/youtube-dl/youtube_dl/extractor/afreecatv.py    2016-11-08 16:30:30.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/afreecatv.py    2016-11-13 20:27:51.000000000 +0100
@@ -11,6 +11,7 @@
 from ..utils import (
     ExtractorError,
     int_or_none,
+    update_url_query,
     xpath_element,
     xpath_text,
 )
@@ -18,12 +19,18 @@
 
 class AfreecaTVIE(InfoExtractor):
     IE_DESC = 'afreecatv.com'
-    _VALID_URL = r'''(?x)^
-        https?://(?:(live|afbbs|www)\.)?afreeca(?:tv)?\.com(?::\d+)?
-        (?:
-            /app/(?:index|read_ucc_bbs)\.cgi|
-            /player/[Pp]layer\.(?:swf|html))
-        \?.*?\bnTitleNo=(?P<id>\d+)'''
+    _VALID_URL = r'''(?x)
+                    https?://
+                        (?:
+                            (?:(?:live|afbbs|www)\.)?afreeca(?:tv)?\.com(?::\d+)?
+                            (?:
+                                /app/(?:index|read_ucc_bbs)\.cgi|
+                                /player/[Pp]layer\.(?:swf|html)
+                            )\?.*?\bnTitleNo=|
+                            vod\.afreecatv\.com/PLAYER/STATION/
+                        )
+                        (?P<id>\d+)
+                    '''
     _TESTS = [{
         'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
         'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
@@ -66,6 +73,9 @@
     }, {
         'url': 'http://www.afreecatv.com/player/Player.swf?szType=szBjId=djleegoon&nStationNo=11273158&nBbsNo=13161095&nTitleNo=36327652',
         'only_matching': True,
+    }, {
+        'url': 'http://vod.afreecatv.com/PLAYER/STATION/15055030',
+        'only_matching': True,
     }]
 
     @staticmethod
@@ -83,7 +93,9 @@
         info_url = compat_urlparse.urlunparse(parsed_url._replace(
             netloc='afbbs.afreecatv.com:8080',
             path='/api/video/get_video_info.php'))
-        video_xml = self._download_xml(info_url, video_id)
+
+        video_xml = self._download_xml(
+            update_url_query(info_url, {'nTitleNo': video_id}), video_id)
 
         if xpath_element(video_xml, './track/video/file') is None:
             raise ExtractorError('Specified AfreecaTV video does not exist',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/audioboom.py new/youtube-dl/youtube_dl/extractor/audioboom.py
--- old/youtube-dl/youtube_dl/extractor/audioboom.py    2016-11-08 16:30:30.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/audioboom.py    2016-11-13 20:27:51.000000000 +0100
@@ -6,8 +6,8 @@
 
 
 class AudioBoomIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?audioboom\.com/boos/(?P<id>[0-9]+)'
-    _TEST = {
+    _VALID_URL = r'https?://(?:www\.)?audioboom\.com/(?:boos|posts)/(?P<id>[0-9]+)'
+    _TESTS = [{
         'url': 'https://audioboom.com/boos/4279833-3-09-2016-czaban-hour-3?t=0',
         'md5': '63a8d73a055c6ed0f1e51921a10a5a76',
         'info_dict': {
@@ -19,7 +19,10 @@
             'uploader': 'Steve Czaban',
             'uploader_url': 're:https?://(?:www\.)?audioboom\.com/channel/steveczabanyahoosportsradio',
         }
-    }
+    }, {
+        'url': 'https://audioboom.com/posts/4279833-3-09-2016-czaban-hour-3?t=0',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/cda.py new/youtube-dl/youtube_dl/extractor/cda.py
--- old/youtube-dl/youtube_dl/extractor/cda.py  2016-11-08 16:30:30.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/cda.py  2016-11-13 20:27:51.000000000 +0100
@@ -5,14 +5,16 @@
 
 from .common import InfoExtractor
 from ..utils import (
-    decode_packed_codes,
     ExtractorError,
-    parse_duration
+    float_or_none,
+    int_or_none,
+    parse_duration,
 )
 
 
 class CDAIE(InfoExtractor):
     _VALID_URL = r'https?://(?:(?:www\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)'
+    _BASE_URL = 'http://www.cda.pl/'
     _TESTS = [{
         'url': 'http://www.cda.pl/video/5749950c',
         'md5': '6f844bf51b15f31fae165365707ae970',
@@ -21,6 +23,9 @@
             'ext': 'mp4',
             'height': 720,
             'title': 'Oto dlaczego przed zakrętem należy zwolnić.',
+            'description': 'md5:269ccd135d550da90d1662651fcb9772',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'average_rating': float,
             'duration': 39
         }
     }, {
@@ -30,6 +35,11 @@
             'id': '57413289',
             'ext': 'mp4',
             'title': 'Lądowanie na lotnisku na Maderze',
+            'description': 'md5:60d76b71186dcce4e0ba6d4bbdb13e1a',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': 'crash404',
+            'view_count': int,
+            'average_rating': float,
             'duration': 137
         }
     }, {
@@ -39,31 +49,55 @@
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage('http://ebd.cda.pl/0x0/' + video_id, video_id)
+        self._set_cookie('cda.pl', 'cda.player', 'html5')
+        webpage = self._download_webpage(
+            self._BASE_URL + '/video/' + video_id, video_id)
 
         if 'Ten film jest dostępny dla użytkowników premium' in webpage:
             raise ExtractorError('This video is only available for premium users.', expected=True)
 
-        title = self._html_search_regex(r'<title>(.+?)</title>', webpage, 'title')
-
         formats = []
 
+        uploader = self._search_regex(r'''(?x)
+            <(span|meta)[^>]+itemprop=(["\'])author\2[^>]*>
+            (?:<\1[^>]*>[^<]*</\1>|(?!</\1>)(?:.|\n))*?
+            <(span|meta)[^>]+itemprop=(["\'])name\4[^>]*>(?P<uploader>[^<]+)</\3>
+        ''', webpage, 'uploader', default=None, group='uploader')
+        view_count = self._search_regex(
+            r'Odsłony:(?:\s|&nbsp;)*([0-9]+)', webpage,
+            'view_count', default=None)
+        average_rating = self._search_regex(
+            r'<(?:span|meta)[^>]+itemprop=(["\'])ratingValue\1[^>]*>(?P<rating_value>[0-9.]+)',
+            webpage, 'rating', fatal=False, group='rating_value')
+
         info_dict = {
             'id': video_id,
-            'title': title,
+            'title': self._og_search_title(webpage),
+            'description': self._og_search_description(webpage),
+            'uploader': uploader,
+            'view_count': int_or_none(view_count),
+            'average_rating': float_or_none(average_rating),
+            'thumbnail': self._og_search_thumbnail(webpage),
             'formats': formats,
             'duration': None,
         }
 
         def extract_format(page, version):
-            unpacked = decode_packed_codes(page)
-            format_url = self._search_regex(
-                r"(?:file|url)\s*:\s*(\\?[\"'])(?P<url>http.+?)\1", unpacked,
-                '%s url' % version, fatal=False, group='url')
-            if not format_url:
+            json_str = self._search_regex(
+                r'player_data=(\\?["\'])(?P<player_data>.+?)\1', page,
+                '%s player_json' % version, fatal=False, group='player_data')
+            if not json_str:
+                return
+            player_data = self._parse_json(
+                json_str, '%s player_data' % version, fatal=False)
+            if not player_data:
+                return
+            video = player_data.get('video')
+            if not video or 'file' not in video:
+                self.report_warning('Unable to extract %s version information' % version)
                 return
             f = {
-                'url': format_url,
+                'url': video['file'],
             }
             m = re.search(
                 r'<a[^>]+data-quality="(?P<format_id>[^"]+)"[^>]+href="[^"]+"[^>]+class="[^"]*quality-btn-active[^"]*">(?P<height>[0-9]+)p',
@@ -75,9 +109,7 @@
                 })
             info_dict['formats'].append(f)
             if not info_dict['duration']:
-                info_dict['duration'] = parse_duration(self._search_regex(
-                    r"duration\s*:\s*(\\?[\"'])(?P<duration>.+?)\1",
-                    unpacked, 'duration', fatal=False, group='duration'))
+                info_dict['duration'] = parse_duration(video.get('duration'))
 
         extract_format(webpage, 'default')
 
@@ -85,7 +117,8 @@
                 r'<a[^>]+data-quality="[^"]+"[^>]+href="([^"]+)"[^>]+class="quality-btn"[^>]*>([0-9]+p)',
                 webpage):
             webpage = self._download_webpage(
-                href, video_id, 'Downloading %s version information' % resolution, fatal=False)
+                self._BASE_URL + href, video_id,
+                'Downloading %s version information' % resolution, fatal=False)
             if not webpage:
                 # Manually report warning because empty page is returned when
                 # invalid version is requested.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/common.py new/youtube-dl/youtube_dl/extractor/common.py
--- old/youtube-dl/youtube_dl/extractor/common.py       2016-11-08 16:30:30.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/common.py       2016-11-13 20:27:51.000000000 +0100
@@ -886,7 +886,7 @@
                         'url': e.get('contentUrl'),
                         'title': unescapeHTML(e.get('name')),
                         'description': unescapeHTML(e.get('description')),
-                        'thumbnail': e.get('thumbnailUrl'),
+                        'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
                         'duration': parse_duration(e.get('duration')),
                         'timestamp': unified_timestamp(e.get('uploadDate')),
                         'filesize': float_or_none(e.get('contentSize')),
@@ -1703,7 +1703,7 @@
                                 representation_ms_info['fragments'] = [{
                                     'url': media_template % {
                                         'Number': segment_number,
-                                        'Bandwidth': representation_attrib.get('bandwidth'),
+                                        'Bandwidth': int_or_none(representation_attrib.get('bandwidth')),
                                     },
                                     'duration': segment_duration,
                                 } for segment_number in range(
@@ -1721,7 +1721,7 @@
                                 def add_segment_url():
                                     segment_url = media_template % {
                                         'Time': segment_time,
-                                        'Bandwidth': representation_attrib.get('bandwidth'),
+                                        'Bandwidth': int_or_none(representation_attrib.get('bandwidth')),
                                         'Number': segment_number,
                                     }
                                     representation_ms_info['fragments'].append({
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/eagleplatform.py new/youtube-dl/youtube_dl/extractor/eagleplatform.py
--- old/youtube-dl/youtube_dl/extractor/eagleplatform.py        2016-11-08 16:30:30.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/eagleplatform.py        2016-11-13 20:27:51.000000000 +0100
@@ -4,11 +4,13 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..compat import (
+    compat_HTTPError,
+    compat_str,
+)
 from ..utils import (
     ExtractorError,
     int_or_none,
-    url_basename,
 )
 
 
@@ -77,7 +79,7 @@
         if status != 200:
             raise ExtractorError(' '.join(response['errors']), expected=True)
 
-    def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'):
+    def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', *args, **kwargs):
         try:
             response = super(EaglePlatformIE, self)._download_json(url_or_request, video_id, note)
         except ExtractorError as ee:
@@ -116,29 +118,38 @@
 
         m3u8_url = self._get_video_url(secure_m3u8, video_id, 'Downloading m3u8 JSON')
         m3u8_formats = self._extract_m3u8_formats(
-            m3u8_url, video_id,
-            'mp4', entry_protocol='m3u8_native', m3u8_id='hls')
+            m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
+            m3u8_id='hls', fatal=False)
         formats.extend(m3u8_formats)
 
-        mp4_url = self._get_video_url(
+        m3u8_formats_dict = {}
+        for f in m3u8_formats:
+            if f.get('height') is not None:
+                m3u8_formats_dict[f['height']] = f
+
+        mp4_data = self._download_json(
             # Secure mp4 URL is constructed according to Player.prototype.mp4 from
             # http://lentaru.media.eagleplatform.com/player/player.js
-            re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4', secure_m3u8),
-            video_id, 'Downloading mp4 JSON')
-        mp4_url_basename = url_basename(mp4_url)
-        for m3u8_format in m3u8_formats:
-            mobj = re.search('/([^/]+)/index\.m3u8', m3u8_format['url'])
-            if mobj:
-                http_format = m3u8_format.copy()
-                video_url = mp4_url.replace(mp4_url_basename, mobj.group(1))
-                if not self._is_valid_url(video_url, video_id):
+            re.sub(r'm3u8|hlsvod|hls|f4m', 'mp4s', secure_m3u8),
+            video_id, 'Downloading mp4 JSON', fatal=False)
+        if mp4_data:
+            for format_id, format_url in mp4_data.get('data', {}).items():
+                if not isinstance(format_url, compat_str):
                     continue
-                http_format.update({
-                    'url': video_url,
-                    'format_id': m3u8_format['format_id'].replace('hls', 'http'),
-                    'protocol': 'http',
-                })
-                formats.append(http_format)
+                height = int_or_none(format_id)
+                if height is not None and m3u8_formats_dict.get(height):
+                    f = m3u8_formats_dict[height].copy()
+                    f.update({
+                        'format_id': f['format_id'].replace('hls', 'http'),
+                        'protocol': 'http',
+                    })
+                else:
+                    f = {
+                        'format_id': 'http-%s' % format_id,
+                        'height': int_or_none(format_id),
+                    }
+                f['url'] = format_url
+                formats.append(f)
 
         self._sort_formats(formats)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/nrk.py new/youtube-dl/youtube_dl/extractor/nrk.py
--- old/youtube-dl/youtube_dl/extractor/nrk.py  2016-11-08 16:30:31.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/nrk.py  2016-11-13 20:27:51.000000000 +0100
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import random
 import re
 
 from .common import InfoExtractor
@@ -14,6 +15,25 @@
 
 
 class NRKBaseIE(InfoExtractor):
+    _faked_ip = None
+
+    def _download_webpage_handle(self, *args, **kwargs):
+        # NRK checks X-Forwarded-For HTTP header in order to figure out the
+        # origin of the client behind proxy. This allows to bypass geo
+        # restriction by faking this header's value to some Norway IP.
+        # We will do so once we encounter any geo restriction error.
+        if self._faked_ip:
+            # NB: str is intentional
+            kwargs.setdefault(str('headers'), {})['X-Forwarded-For'] = self._faked_ip
+        return super(NRKBaseIE, self)._download_webpage_handle(*args, **kwargs)
+
+    def _fake_ip(self):
+        # Use fake IP from 37.191.128.0/17 in order to workaround geo
+        # restriction
+        def octet(lb=0, ub=255):
+            return random.randint(lb, ub)
+        self._faked_ip = '37.191.%d.%d' % (octet(128), octet())
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
@@ -24,6 +44,8 @@
         title = data.get('fullTitle') or data.get('mainTitle') or data['title']
         video_id = data.get('id') or video_id
 
+        http_headers = {'X-Forwarded-For': self._faked_ip} if self._faked_ip else {}
+
         entries = []
 
         media_assets = data.get('mediaAssets')
@@ -54,6 +76,7 @@
                     'duration': duration,
                     'subtitles': subtitles,
                     'formats': formats,
+                    'http_headers': http_headers,
                 })
 
         if not entries:
@@ -70,10 +93,23 @@
                 }]
 
         if not entries:
-            if data.get('usageRights', {}).get('isGeoBlocked'):
-                raise ExtractorError(
-                    'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
-                    expected=True)
+            message_type = data.get('messageType', '')
+            # Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
+            if 'IsGeoBlocked' in message_type and not self._faked_ip:
+                self.report_warning(
+                    'Video is geo restricted, trying to fake IP')
+                self._fake_ip()
+                return self._real_extract(url)
+
+            MESSAGES = {
+                'ProgramRightsAreNotReady': 'Du kan dessverre ikke se eller høre programmet',
+                'ProgramRightsHasExpired': 'Programmet har gått ut',
+                'ProgramIsGeoBlocked': 'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
+            }
+            raise ExtractorError(
+                '%s said: %s' % (self.IE_NAME, MESSAGES.get(
+                    message_type, message_type)),
+                expected=True)
 
         conviva = data.get('convivaStatistics') or {}
         series = conviva.get('seriesName') or data.get('seriesTitle')
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/extractor/plays.py new/youtube-dl/youtube_dl/extractor/plays.py
--- old/youtube-dl/youtube_dl/extractor/plays.py        2016-11-08 16:30:31.000000000 +0100
+++ new/youtube-dl/youtube_dl/extractor/plays.py        2016-11-13 20:27:51.000000000 +0100
@@ -8,30 +8,31 @@
 
 
 class PlaysTVIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?plays\.tv/video/(?P<id>[0-9a-f]{18})'
-    _TEST = {
-        'url': 'http://plays.tv/video/56af17f56c95335490/when-you-outplay-the-azir-wall',
+    _VALID_URL = r'https?://(?:www\.)?plays\.tv/(?:video|embeds)/(?P<id>[0-9a-f]{18})'
+    _TESTS = [{
+        'url': 'https://plays.tv/video/56af17f56c95335490/when-you-outplay-the-azir-wall',
         'md5': 'dfeac1198506652b5257a62762cec7bc',
         'info_dict': {
             'id': '56af17f56c95335490',
             'ext': 'mp4',
-            'title': 'When you outplay the Azir wall',
+            'title': 'Bjergsen - When you outplay the Azir wall',
             'description': 'Posted by Bjergsen',
         }
-    }
+    }, {
+        'url': 'https://plays.tv/embeds/56af17f56c95335490',
+        'only_matching': True,
+    }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage(
+            'https://plays.tv/video/%s' % video_id, video_id)
+
+        info = self._search_json_ld(webpage, video_id,)
 
-        title = self._og_search_title(webpage)
-        content = self._parse_json(
-            self._search_regex(
-                r'R\.bindContent\(({.+?})\);', webpage,
-                'content'), video_id)['content']
         mpd_url, sources = re.search(
             r'(?s)<video[^>]+data-mpd="([^"]+)"[^>]*>(.+?)</video>',
-            content).groups()
+            webpage).groups()
         formats = self._extract_mpd_formats(
             self._proto_relative_url(mpd_url), video_id, mpd_id='DASH')
         for format_id, height, format_url in re.findall(r'<source\s+res="((\d+)h?)"\s+src="([^"]+)"', sources):
@@ -42,10 +43,11 @@
             })
         self._sort_formats(formats)
 
-        return {
+        info.update({
             'id': video_id,
-            'title': title,
             'description': self._og_search_description(webpage),
-            'thumbnail': self._og_search_thumbnail(webpage),
+            'thumbnail': info.get('thumbnail') or self._og_search_thumbnail(webpage),
             'formats': formats,
-        }
+        })
+
+        return info
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/youtube-dl/youtube_dl/version.py new/youtube-dl/youtube_dl/version.py
--- old/youtube-dl/youtube_dl/version.py        2016-11-08 16:30:52.000000000 +0100
+++ new/youtube-dl/youtube_dl/version.py        2016-11-13 20:48:14.000000000 +0100
@@ -1,3 +1,3 @@
 from __future__ import unicode_literals
 
-__version__ = '2016.11.08.1'
+__version__ = '2016.11.14.1'

