Hello community,

Here is the log from the commit of package you-get for openSUSE:Factory checked in at 2019-06-12 13:18:16
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/you-get (Old)
 and      /work/SRC/openSUSE:Factory/.you-get.new.4811 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "you-get"

Wed Jun 12 13:18:16 2019 rev:23 rq:709193 version:0.4.1314

Changes:
--------
--- /work/SRC/openSUSE:Factory/you-get/you-get.changes  2019-05-09 14:28:31.873955934 +0200
+++ /work/SRC/openSUSE:Factory/.you-get.new.4811/you-get.changes        2019-06-12 13:18:20.172578044 +0200
@@ -1,0 +2,5 @@
+Tue Jun 11 14:26:37 UTC 2019 - Luigi Baldoni <[email protected]>
+
+- Update to version 0.4.1314 (no changelog supplied) 
+
+-------------------------------------------------------------------

Old:
----
  you-get-0.4.1302.tar.gz

New:
----
  you-get-0.4.1314.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ you-get.spec ++++++
--- /var/tmp/diff_new_pack.4gKxzg/_old  2019-06-12 13:18:20.704577800 +0200
+++ /var/tmp/diff_new_pack.4gKxzg/_new  2019-06-12 13:18:20.708577799 +0200
@@ -17,7 +17,7 @@
 
 
 Name:           you-get
-Version:        0.4.1302
+Version:        0.4.1314
 Release:        0
 Summary:        Dumb downloader that scrapes the web
 License:        MIT

++++++ you-get-0.4.1302.tar.gz -> you-get-0.4.1314.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/you-get-0.4.1302/src/you_get/common.py new/you-get-0.4.1314/src/you_get/common.py
--- old/you-get-0.4.1302/src/you_get/common.py  2019-05-09 01:43:06.000000000 +0200
+++ new/you-get-0.4.1314/src/you_get/common.py  2019-06-11 16:02:36.000000000 +0200
@@ -131,6 +131,7 @@
 dry_run = False
 json_output = False
 force = False
+skip_existing_file_size_check = False
 player = None
 extractor_proxy = None
 cookies = None
@@ -633,15 +634,22 @@
     while continue_renameing:
         continue_renameing = False
         if os.path.exists(filepath):
-            if not force and file_size == os.path.getsize(filepath):
+            if not force and (file_size == os.path.getsize(filepath) or skip_existing_file_size_check):
                 if not is_part:
                     if bar:
                         bar.done()
-                    log.w(
-                        'Skipping {}: file already exists'.format(
-                            tr(os.path.basename(filepath))
+                    if skip_existing_file_size_check:
+                        log.w(
+                            'Skipping {} without checking size: file already exists'.format(
+                                tr(os.path.basename(filepath))
+                            )
+                        )
+                    else:
+                        log.w(
+                            'Skipping {}: file already exists'.format(
+                                tr(os.path.basename(filepath))
+                            )
                         )
-                    )
                 else:
                     if bar:
                         bar.update_received(file_size)
@@ -878,13 +886,16 @@
         pass
 
 
-def get_output_filename(urls, title, ext, output_dir, merge):
+def get_output_filename(urls, title, ext, output_dir, merge, **kwargs):
     # lame hack for the --output-filename option
     global output_filename
     if output_filename:
+        result = output_filename
+        if kwargs.get('part', -1) >= 0:
+            result = '%s[%02d]' % (result, kwargs.get('part'))
         if ext:
-            return output_filename + '.' + ext
-        return output_filename
+            result = '%s.%s' % (result, ext)
+        return result
 
     merged_ext = ext
     if (len(urls) > 1) and merge:
@@ -901,7 +912,11 @@
                 merged_ext = 'mkv'
             else:
                 merged_ext = 'ts'
-    return '%s.%s' % (title, merged_ext)
+    result = title
+    if kwargs.get('part', -1) >= 0:
+        result = '%s[%02d]' % (result, kwargs.get('part'))
+    result = '%s.%s' % (result, merged_ext)
+    return result
 
 def print_user_agent(faker=False):
     urllib_default_user_agent = 'Python-urllib/%d.%d' % sys.version_info[:2]
@@ -945,8 +960,12 @@
 
     if total_size:
         if not force and os.path.exists(output_filepath) and not auto_rename\
-                and os.path.getsize(output_filepath) >= total_size * 0.9:
-            log.w('Skipping %s: file already exists' % output_filepath)
+                and (os.path.getsize(output_filepath) >= total_size * 0.9\
+                or skip_existing_file_size_check):
+            if skip_existing_file_size_check:
+            log.w('Skipping %s without checking size: file already exists' % output_filepath)
+            else:
+                log.w('Skipping %s: file already exists' % output_filepath)
             print()
             return
         bar = SimpleProgressBar(total_size, len(urls))
@@ -964,16 +983,16 @@
         bar.done()
     else:
         parts = []
-        print('Downloading %s.%s ...' % (tr(title), ext))
+        print('Downloading %s ...' % tr(output_filename))
         bar.update()
         for i, url in enumerate(urls):
-            filename = '%s[%02d].%s' % (title, i, ext)
-            filepath = os.path.join(output_dir, filename)
-            parts.append(filepath)
+            output_filename_i = get_output_filename(urls, title, ext, output_dir, merge, part=i)
+            output_filepath_i = os.path.join(output_dir, output_filename_i)
+            parts.append(output_filepath_i)
             # print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
             bar.update_piece(i + 1)
             url_save(
-                url, filepath, bar, refer=refer, is_part=True, faker=faker,
+                url, output_filepath_i, bar, refer=refer, is_part=True, faker=faker,
                 headers=headers, **kwargs
             )
         bar.done()
@@ -1456,6 +1475,10 @@
         help='Force overwriting existing files'
     )
     download_grp.add_argument(
+        '--skip-existing-file-size-check', action='store_true', default=False,
+        help='Skip existing file without checking file size'
+    )
+    download_grp.add_argument(
         '-F', '--format', metavar='STREAM_ID',
         help='Set video format to STREAM_ID'
     )
@@ -1541,6 +1564,7 @@
         logging.getLogger().setLevel(logging.DEBUG)
 
     global force
+    global skip_existing_file_size_check
     global dry_run
     global json_output
     global player
@@ -1554,6 +1578,8 @@
     info_only = args.info
     if args.force:
         force = True
+    if args.skip_existing_file_size_check:
+        skip_existing_file_size_check = True
     if args.auto_rename:
         auto_rename = True
     if args.url:
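
The common.py changes above do two things: they introduce a --skip-existing-file-size-check option that skips an already-existing file without comparing its size against the expected download size, and they extend get_output_filename() with a 'part' keyword so each piece of a multi-part download gets a zero-padded index in its name (also when --output-filename is set). A minimal standalone sketch of that naming rule, restated outside the packaged code (the function name and the 'clip' title are illustrative, not part of the patch):

    def part_filename(title, ext, part=-1):
        # mirrors the kwargs.get('part', -1) branch added above
        result = title
        if part >= 0:
            result = '%s[%02d]' % (result, part)
        return '%s.%s' % (result, ext)

    assert part_filename('clip', 'mp4') == 'clip.mp4'              # single file
    assert part_filename('clip', 'mp4', part=3) == 'clip[03].mp4'  # piece 3 of a multi-part download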
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/you-get-0.4.1302/src/you_get/extractors/acfun.py new/you-get-0.4.1314/src/you_get/extractors/acfun.py
--- old/you-get-0.4.1302/src/you_get/extractors/acfun.py        2019-05-09 01:43:06.000000000 +0200
+++ new/you-get-0.4.1314/src/you_get/extractors/acfun.py        2019-06-11 16:02:36.000000000 +0200
@@ -109,9 +109,9 @@
             pass
 
 def acfun_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
-    assert re.match(r'http://[^\.]*\.*acfun\.[^\.]+/(\D|bangumi)/\D\D(\d+)', url)
+    assert re.match(r'https?://[^\.]*\.*acfun\.[^\.]+/(\D|bangumi)/\D\D(\d+)', url)
 
-    if re.match(r'http://[^\.]*\.*acfun\.[^\.]+/\D/\D\D(\d+)', url):
+    if re.match(r'https?://[^\.]*\.*acfun\.[^\.]+/\D/\D\D(\d+)', url):
         html = get_content(url)
         title = r1(r'data-title="([^"]+)"', html)
         if match1(url, r'_(\d+)$'):  # current P
@@ -119,7 +119,7 @@
         vid = r1('data-vid="(\d+)"', html)
         up = r1('data-name="([^"]+)"', html)
     # bangumi
-    elif re.match("http://[^\.]*\.*acfun\.[^\.]+/bangumi/ab(\d+)", url):
+    elif re.match("https?://[^\.]*\.*acfun\.[^\.]+/bangumi/ab(\d+)", url):
         html = get_content(url)
         title = match1(html, r'"title"\s*:\s*"([^"]+)"')
         if match1(url, r'_(\d+)$'):  # current P
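
The acfun.py hunks only relax the accepted URL scheme from http to https?. A quick sanity check of the relaxed pattern (the sample URLs are illustrative, not taken from the patch):

    import re

    pattern = r'https?://[^\.]*\.*acfun\.[^\.]+/(\D|bangumi)/\D\D(\d+)'
    assert re.match(pattern, 'http://www.acfun.cn/v/ac1234567')   # accepted before and after
    assert re.match(pattern, 'https://www.acfun.cn/v/ac1234567')  # accepted only after this change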
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/you-get-0.4.1302/src/you_get/extractors/le.py new/you-get-0.4.1314/src/you_get/extractors/le.py
--- old/you-get-0.4.1302/src/you_get/extractors/le.py   2019-05-09 01:43:06.000000000 +0200
+++ new/you-get-0.4.1314/src/you_get/extractors/le.py   2019-06-11 16:02:36.000000000 +0200
@@ -9,13 +9,16 @@
 
 from ..common import *
 
-#@DEPRECATED
+
+# @DEPRECATED
 def get_timestamp():
     tn = random.random()
     url = 'http://api.letv.com/time?tn={}'.format(tn)
     result = get_content(url)
     return json.loads(result)['stime']
-#@DEPRECATED
+
+
+# @DEPRECATED
 def get_key(t):
     for s in range(0, 8):
         e = 1 & t
@@ -24,42 +27,40 @@
         t += e
     return t ^ 185025305
 
+
 def calcTimeKey(t):
-    ror = lambda val, r_bits, : ((val & (2**32-1)) >> r_bits%32) |  (val << (32-(r_bits%32)) & (2**32-1))
+    ror = lambda val, r_bits,: ((val & (2 ** 32 - 1)) >> r_bits % 32) | (val << (32 - (r_bits % 32)) & (2 ** 32 - 1))
     magic = 185025305
     return ror(t, magic % 17) ^ magic
-    #return ror(ror(t,773625421%13)^773625421,773625421%17)
+    # return ror(ror(t,773625421%13)^773625421,773625421%17)
 
 
 def decode(data):
     version = data[0:5]
     if version.lower() == b'vc_01':
-        #get real m3u8
+        # get real m3u8
         loc2 = data[5:]
         length = len(loc2)
-        loc4 = [0]*(2*length)
+        loc4 = [0] * (2 * length)
         for i in range(length):
-            loc4[2*i] = loc2[i] >> 4
-            loc4[2*i+1]= loc2[i] & 15;
-        loc6 = loc4[len(loc4)-11:]+loc4[:len(loc4)-11]
-        loc7 = [0]*length
+            loc4[2 * i] = loc2[i] >> 4
+            loc4[2 * i + 1] = loc2[i] & 15;
+        loc6 = loc4[len(loc4) - 11:] + loc4[:len(loc4) - 11]
+        loc7 = [0] * length
         for i in range(length):
-            loc7[i] = (loc6[2 * i] << 4) +loc6[2*i+1]
+            loc7[i] = (loc6[2 * i] << 4) + loc6[2 * i + 1]
         return ''.join([chr(i) for i in loc7])
     else:
         # directly return
-        return data
-
-
+        return str(data)
 
 
-def video_info(vid,**kwargs):
-    url = 'http://player-pc.le.com/mms/out/video/playJson?id={}&platid=1&splatid=101&format=1&tkey={}&domain=www.le.com&region=cn&source=1000&accesyx=1'.format(vid,calcTimeKey(int(time.time())))
+def video_info(vid, **kwargs):
+    url = 'http://player-pc.le.com/mms/out/video/playJson?id={}&platid=1&splatid=105&format=1&tkey={}&domain=www.le.com&region=cn&source=1000&accesyx=1'.format(vid, calcTimeKey(int(time.time())))
     r = get_content(url, decoded=False)
-    info=json.loads(str(r,"utf-8"))
+    info = json.loads(str(r, "utf-8"))
     info = info['msgs']
 
-
     stream_id = None
     support_stream_id = info["playurl"]["dispatch"].keys()
     if "stream_id" in kwargs and kwargs["stream_id"].lower() in 
support_stream_id:
@@ -70,27 +71,28 @@
         elif "720p" in support_stream_id:
             stream_id = '720p'
         else:
-            stream_id =sorted(support_stream_id,key= lambda i: int(i[1:]))[-1]
+            stream_id = sorted(support_stream_id, key=lambda i: int(i[1:]))[-1]
 
-    url =info["playurl"]["domain"][0]+info["playurl"]["dispatch"][stream_id][0]
+    url = info["playurl"]["domain"][0] + 
info["playurl"]["dispatch"][stream_id][0]
     uuid = hashlib.sha1(url.encode('utf8')).hexdigest() + '_0'
     ext = info["playurl"]["dispatch"][stream_id][1].split('.')[-1]
     url = url.replace('tss=0', 'tss=ios')
-    url+="&m3v=1&termid=1&format=1&hwtype=un&ostype=MacOS10.12.4&p1=1&p2=10&p3=-&expect=3&tn={}&vid={}&uuid={}&sign=letv".format(random.random(), vid, uuid)
+    url += "&m3v=1&termid=1&format=1&hwtype=un&ostype=MacOS10.12.4&p1=1&p2=10&p3=-&expect=3&tn={}&vid={}&uuid={}&sign=letv".format(random.random(), vid, uuid)
 
-    r2=get_content(url,decoded=False)
-    info2=json.loads(str(r2,"utf-8"))
+    r2 = get_content(url, decoded=False)
+    info2 = json.loads(str(r2, "utf-8"))
 
     # hold on ! more things to do
     # to decode m3u8 (encoded)
     suffix = '&r=' + str(int(time.time() * 1000)) + '&appid=500'
-    m3u8 = get_content(info2["location"]+suffix,decoded=False)
+    m3u8 = get_content(info2["location"] + suffix, decoded=False)
     m3u8_list = decode(m3u8)
-    urls = re.findall(r'^[^#][^\r]*',m3u8_list,re.MULTILINE)
-    return ext,urls
+    urls = re.findall(r'(http.*?)#', m3u8_list, re.MULTILINE)
+    return ext, urls
+
 
-def letv_download_by_vid(vid,title, output_dir='.', merge=True, info_only=False,**kwargs):
-    ext , urls = video_info(vid,**kwargs)
+def letv_download_by_vid(vid, title, output_dir='.', merge=True, info_only=False, **kwargs):
+    ext, urls = video_info(vid, **kwargs)
     size = 0
     for i in urls:
         _, _, tmp = url_info(i)
@@ -100,27 +102,29 @@
     if not info_only:
         download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
 
+
 def letvcloud_download_by_vu(vu, uu, title=None, output_dir='.', merge=True, info_only=False):
-    #ran = float('0.' + str(random.randint(0, 9999999999999999))) # For ver 2.1
-    #str2Hash = 'cfflashformatjsonran{ran}uu{uu}ver2.2vu{vu}bie^#@(%27eib58'.format(vu = vu, uu = uu, ran = ran)  #Magic!/ In ver 2.1
-    argumet_dict ={'cf' : 'flash', 'format': 'json', 'ran': str(int(time.time())), 'uu': str(uu),'ver': '2.2', 'vu': str(vu), }
-    sign_key = '2f9d6924b33a165a6d8b5d3d42f4f987'  #ALL YOUR BASE ARE BELONG TO US
+    # ran = float('0.' + str(random.randint(0, 9999999999999999))) # For ver 2.1
+    # str2Hash = 'cfflashformatjsonran{ran}uu{uu}ver2.2vu{vu}bie^#@(%27eib58'.format(vu = vu, uu = uu, ran = ran)  #Magic!/ In ver 2.1
+    argumet_dict = {'cf': 'flash', 'format': 'json', 'ran': str(int(time.time())), 'uu': str(uu), 'ver': '2.2', 'vu': str(vu), }
+    sign_key = '2f9d6924b33a165a6d8b5d3d42f4f987'  # ALL YOUR BASE ARE BELONG TO US
     str2Hash = ''.join([i + argumet_dict[i] for i in sorted(argumet_dict)]) + sign_key
     sign = hashlib.md5(str2Hash.encode('utf-8')).hexdigest()
-    request_info = urllib.request.Request('http://api.letvcloud.com/gpc.php?' + '&'.join([i + '=' + argumet_dict[i] for i in argumet_dict]) + '&sign={sign}'.format(sign = sign))
+    request_info = urllib.request.Request('http://api.letvcloud.com/gpc.php?' + '&'.join([i + '=' + argumet_dict[i] for i in argumet_dict]) + '&sign={sign}'.format(sign=sign))
     response = urllib.request.urlopen(request_info)
     data = response.read()
     info = json.loads(data.decode('utf-8'))
     type_available = []
     for video_type in info['data']['video_info']['media']:
         type_available.append({'video_url': info['data']['video_info']['media'][video_type]['play_url']['main_url'], 'video_quality': int(info['data']['video_info']['media'][video_type]['play_url']['vtype'])})
-    urls = [base64.b64decode(sorted(type_available, key = lambda x:x['video_quality'])[-1]['video_url']).decode("utf-8")]
+    urls = [base64.b64decode(sorted(type_available, key=lambda x: x['video_quality'])[-1]['video_url']).decode("utf-8")]
     size = urls_size(urls)
     ext = 'mp4'
     print_info(site_info, title, ext, size)
     if not info_only:
         download_urls(urls, title, ext, size, output_dir=output_dir, merge=merge)
 
+
 def letvcloud_download(url, output_dir='.', merge=True, info_only=False):
     qs = parse.urlparse(url).query
     vu = match1(qs, r'vu=([\w]+)')
@@ -128,7 +132,8 @@
     title = "LETV-%s" % vu
     letvcloud_download_by_vu(vu, uu, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
 
-def letv_download(url, output_dir='.', merge=True, info_only=False ,**kwargs):
+
+def letv_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
     url = url_locations([url])[0]
     if re.match(r'http://yuntv.letv.com/', url):
         letvcloud_download(url, output_dir=output_dir, merge=merge, info_only=info_only)
@@ -136,14 +141,15 @@
         html = get_content(url)
         vid = match1(url, r'video/(\d+)\.html')
         title = match1(html, r'<h2 class="title">([^<]+)</h2>')
-        letv_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only,**kwargs)
+        letv_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
     else:
         html = get_content(url)
         vid = match1(url, r'http://www.letv.com/ptv/vplay/(\d+).html') or \
-            match1(url, r'http://www.le.com/ptv/vplay/(\d+).html') or \
-            match1(html, r'vid="(\d+)"')
-        title = match1(html,r'name="irTitle" content="(.*?)"')
-        letv_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only,**kwargs)
+              match1(url, r'http://www.le.com/ptv/vplay/(\d+).html') or \
+              match1(html, r'vid="(\d+)"')
+        title = match1(html, r'name="irTitle" content="(.*?)"')
+        letv_download_by_vid(vid, title=title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
+
 
 site_info = "Le.com"
 download = letv_download
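
Most of the le.py diff is PEP 8 whitespace cleanup; the functional changes are the splatid=101 -> splatid=105 switch in the playJson URL, decode() returning str(data) instead of raw bytes for unrecognized payloads, and the stricter r'(http.*?)#' URL extraction from the decoded m3u8. The tkey computation itself is untouched; the ror lambda can be read as a plain 32-bit rotate-right (a restatement for readability, not part of the patch):

    import time

    def ror32(val, r_bits):
        # rotate a 32-bit unsigned value right by r_bits (mod 32)
        r_bits %= 32
        val &= 0xFFFFFFFF
        return ((val >> r_bits) | (val << (32 - r_bits))) & 0xFFFFFFFF

    def calc_time_key(t):
        magic = 185025305
        return ror32(t, magic % 17) ^ magic  # 185025305 % 17 == 8

    print(calc_time_key(int(time.time())))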
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/you-get-0.4.1302/src/you_get/extractors/sohu.py new/you-get-0.4.1314/src/you_get/extractors/sohu.py
--- old/you-get-0.4.1302/src/you_get/extractors/sohu.py 2019-05-09 01:43:06.000000000 +0200
+++ new/you-get-0.4.1314/src/you_get/extractors/sohu.py 2019-06-11 16:02:36.000000000 +0200
@@ -15,11 +15,13 @@
         new api
 '''
 
+
 def real_url(fileName, key, ch):
     url = "https://data.vod.itc.cn/ip?new="; + fileName + "&num=1&key=" + key + 
"&ch=" + ch + "&pt=1&pg=2&prod=h5n"
     return json.loads(get_html(url))['servers'][0]['url']
 
-def sohu_download(url, output_dir = '.', merge = True, info_only = False, extractor_proxy=None, **kwargs):
+
+def sohu_download(url, output_dir='.', merge=True, info_only=False, extractor_proxy=None, **kwargs):
     if re.match(r'http://share.vrs.sohu.com', url):
         vid = r1('id=(\d+)', url)
     else:
@@ -27,16 +29,16 @@
         vid = r1(r'\Wvid\s*[\:=]\s*[\'"]?(\d+)[\'"]?', html)
     assert vid
 
-    if re.match(r'http[s]://tv.sohu.com/', url):
-        if extractor_proxy:
-            set_proxy(tuple(extractor_proxy.split(":")))
-        info = json.loads(get_decoded_html('http://hot.vrs.sohu.com/vrs_flash.action?vid=%s' % vid))
-        for qtyp in ["oriVid","superVid","highVid" ,"norVid","relativeId"]:
+    if extractor_proxy:
+        set_proxy(tuple(extractor_proxy.split(":")))
+    info = json.loads(get_decoded_html('http://hot.vrs.sohu.com/vrs_flash.action?vid=%s' % vid))
+    if info and info.get("data", ""):
+        for qtyp in ["oriVid", "superVid", "highVid", "norVid", "relativeId"]:
             if 'data' in info:
                 hqvid = info['data'][qtyp]
             else:
                 hqvid = info[qtyp]
-            if hqvid != 0 and hqvid != vid :
+            if hqvid != 0 and hqvid != vid:
                 info = json.loads(get_decoded_html('http://hot.vrs.sohu.com/vrs_flash.action?vid=%s' % hqvid))
                 if not 'allot' in info:
                     continue
@@ -63,14 +65,15 @@
         urls = []
         data = info['data']
         title = data['tvName']
-        size = sum(map(int,data['clipsBytes']))
+        size = sum(map(int, data['clipsBytes']))
         assert len(data['clipsURL']) == len(data['clipsBytes']) == len(data['su'])
         for fileName, key in zip(data['su'], data['ck']):
             urls.append(real_url(fileName, key, data['ch']))
 
     print_info(site_info, title, 'mp4', size)
     if not info_only:
-        download_urls(urls, title, 'mp4', size, output_dir, refer = url, merge = merge)
+        download_urls(urls, title, 'mp4', size, output_dir, refer=url, merge=merge)
+
 
 site_info = "Sohu.com"
 download = sohu_download
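
The sohu.py restructure drops the tv.sohu.com-only gate: the vrs_flash.action lookup now runs for every matched URL, and its result is guarded by a truthiness check before the quality loop. The guard pattern in isolation (the sample dict is made up for illustration):

    info = {'data': {'oriVid': 123}}   # hypothetical parsed JSON response
    if info and info.get("data", ""):  # None, {} or '' all skip the block
        print('have data:', info['data'])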
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/you-get-0.4.1302/src/you_get/extractors/youtube.py new/you-get-0.4.1314/src/you_get/extractors/youtube.py
--- old/you-get-0.4.1302/src/you_get/extractors/youtube.py      2019-05-09 01:43:06.000000000 +0200
+++ new/you-get-0.4.1314/src/you_get/extractors/youtube.py      2019-06-11 16:02:36.000000000 +0200
@@ -216,6 +216,7 @@
                     self.html5player = 'https://www.youtube.com' + ytplayer_config['assets']['js']
                     # Workaround: get_video_info returns bad s. Why?
                     stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')
+                    #stream_list = ytplayer_config['args']['adaptive_fmts'].split(',')
                 except:
                     stream_list = video_info['url_encoded_fmt_stream_map'][0].split(',')
                     if re.search('([^"]*/base\.js)"', video_page):
@@ -306,7 +307,8 @@
                 'url': metadata['url'][0],
                 'sig': metadata['sig'][0] if 'sig' in metadata else None,
                 's': metadata['s'][0] if 's' in metadata else None,
-                'quality': metadata['quality'][0],
+                'quality': metadata['quality'][0] if 'quality' in metadata else None,
+                #'quality': metadata['quality_label'][0] if 'quality_label' in metadata else None,
                 'type': metadata['type'][0],
                 'mime': metadata['type'][0].split(';')[0],
                 'container': mime_to_container(metadata['type'][0].split(';')[0]),
@@ -433,13 +435,13 @@
                     dash_mp4_a_url = stream['url']
                     if 's' in stream:
                         sig = self.__class__.decipher(self.js, stream['s'])
-                        dash_mp4_a_url += '&signature={}'.format(sig)
+                        dash_mp4_a_url += '&sig={}'.format(sig)
                     dash_mp4_a_size = stream['clen']
                 elif stream['type'].startswith('audio/webm'):
                     dash_webm_a_url = stream['url']
                     if 's' in stream:
                         sig = self.__class__.decipher(self.js, stream['s'])
-                        dash_webm_a_url += '&signature={}'.format(sig)
+                        dash_webm_a_url += '&sig={}'.format(sig)
                     dash_webm_a_size = stream['clen']
             for stream in streams: # video
                 if 'size' in stream:
@@ -448,7 +450,7 @@
                         dash_url = stream['url']
                         if 's' in stream:
                             sig = self.__class__.decipher(self.js, stream['s'])
-                            dash_url += '&signature={}'.format(sig)
+                            dash_url += '&sig={}'.format(sig)
                         dash_size = stream['clen']
                         itag = stream['itag']
                             dash_urls = self.__class__.chunk_by_range(dash_url, int(dash_size))
@@ -467,7 +469,7 @@
                         dash_url = stream['url']
                         if 's' in stream:
                             sig = self.__class__.decipher(self.js, stream['s'])
-                            dash_url += '&signature={}'.format(sig)
+                            dash_url += '&sig={}'.format(sig)
                         dash_size = stream['clen']
                         itag = stream['itag']
                         audio_url = None
@@ -510,13 +512,13 @@
             src = self.streams[stream_id]['url']
             if self.streams[stream_id]['sig'] is not None:
                 sig = self.streams[stream_id]['sig']
-                src += '&signature={}'.format(sig)
+                src += '&sig={}'.format(sig)
             elif self.streams[stream_id]['s'] is not None:
                 if not hasattr(self, 'js'):
                     self.js = get_content(self.html5player)
                 s = self.streams[stream_id]['s']
                 sig = self.__class__.decipher(self.js, s)
-                src += '&signature={}'.format(sig)
+                src += '&sig={}'.format(sig)
 
             self.streams[stream_id]['src'] = [src]
             self.streams[stream_id]['size'] = urls_size(self.streams[stream_id]['src'])
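
Across youtube.py the deciphered signature is now appended to stream URLs as the sig query parameter instead of signature, and 'quality' falls back to None when absent from the stream metadata. The URL assembly reduces to a one-line append (the values below are placeholders; the real sig comes from decipher() run against the player JS):

    sig = '9A8B7C6D5E4F'  # placeholder for a deciphered signature
    src = 'https://example.invalid/videoplayback?itag=22'  # illustrative base URL
    src += '&sig={}'.format(sig)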
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/you-get-0.4.1302/src/you_get/version.py new/you-get-0.4.1314/src/you_get/version.py
--- old/you-get-0.4.1302/src/you_get/version.py 2019-05-09 01:43:06.000000000 +0200
+++ new/you-get-0.4.1314/src/you_get/version.py 2019-06-11 16:02:36.000000000 +0200
@@ -1,4 +1,4 @@
 #!/usr/bin/env python
 
 script_name = 'you-get'
-__version__ = '0.4.1302'
+__version__ = '0.4.1314'

