Hello community,

Here is the log from the commit of package you-get for openSUSE:Factory, checked 
in at 2018-03-02 21:12:32.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/you-get (Old)
 and      /work/SRC/openSUSE:Factory/.you-get.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "you-get"

Fri Mar  2 21:12:32 2018 rev:4 rq:581921 version:0.4.1040

Changes:
--------
--- /work/SRC/openSUSE:Factory/you-get/you-get.changes  2018-02-10 
17:58:56.466451277 +0100
+++ /work/SRC/openSUSE:Factory/.you-get.new/you-get.changes     2018-03-02 
21:12:36.295005924 +0100
@@ -1,0 +2,5 @@
+Fri Mar  2 08:08:02 UTC 2018 - aloi...@gmx.com
+
+- Update to version 0.4.1040 (no changelog supplied)
+
+-------------------------------------------------------------------

Old:
----
  you-get-0.4.1025.tar.gz

New:
----
  you-get-0.4.1040.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ you-get.spec ++++++
--- /var/tmp/diff_new_pack.kXfIIv/_old  2018-03-02 21:12:37.026979602 +0100
+++ /var/tmp/diff_new_pack.kXfIIv/_new  2018-03-02 21:12:37.030979458 +0100
@@ -17,12 +17,12 @@
 
 
 Name:           you-get
-Version:        0.4.1025
+Version:        0.4.1040
 Release:        0
 Summary:        Dumb downloader that scrapes the web
 License:        MIT
 Group:          Productivity/Networking/Web/Utilities
-URL:            https://you-get.org
+Url:            https://you-get.org
 Source0:        
https://github.com/soimort/you-get/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz
 BuildRequires:  bash-completion
 BuildRequires:  fdupes

++++++ you-get-0.4.1025.tar.gz -> you-get-0.4.1040.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/common.py 
new/you-get-0.4.1040/src/you_get/common.py
--- old/you-get-0.4.1025/src/you_get/common.py  2018-02-09 15:50:09.000000000 
+0100
+++ new/you-get-0.4.1040/src/you_get/common.py  2018-03-01 22:55:09.000000000 
+0100
@@ -74,6 +74,7 @@
     'le'               : 'le',
     'letv'             : 'le',
     'lizhi'            : 'lizhi',
+    'longzhu'          : 'longzhu',
     'magisto'          : 'magisto',
     'metacafe'         : 'metacafe',
     'mgtv'             : 'mgtv',
@@ -134,6 +135,7 @@
 extractor_proxy = None
 cookies = None
 output_filename = None
+auto_rename = False
 
 fake_headers = {
     'Accept': 
'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',  # noqa
@@ -598,27 +600,43 @@
         tmp_headers['Referer'] = refer
     file_size = url_size(url, faker=faker, headers=tmp_headers)
 
-    if os.path.exists(filepath):
-        if not force and file_size == os.path.getsize(filepath):
-            if not is_part:
-                if bar:
-                    bar.done()
-                print(
-                    'Skipping {}: file already exists'.format(
-                        tr(os.path.basename(filepath))
+    continue_renameing = True
+    while continue_renameing:
+        continue_renameing = False
+        if os.path.exists(filepath):
+            if not force and file_size == os.path.getsize(filepath):
+                if not is_part:
+                    if bar:
+                        bar.done()
+                    print(
+                        'Skipping {}: file already exists'.format(
+                            tr(os.path.basename(filepath))
+                        )
                     )
-                )
+                else:
+                    if bar:
+                        bar.update_received(file_size)
+                return
             else:
-                if bar:
-                    bar.update_received(file_size)
-            return
-        else:
-            if not is_part:
-                if bar:
-                    bar.done()
-                print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
-    elif not os.path.exists(os.path.dirname(filepath)):
-        os.mkdir(os.path.dirname(filepath))
+                if not is_part:
+                    if bar:
+                        bar.done()
+                    if not force and auto_rename:
+                        path, ext = os.path.basename(filepath).rsplit('.', 1)
+                        finder = re.compile(' \([1-9]\d*?\)$')
+                        if (finder.search(path) is None):
+                            thisfile = path + ' (1).' + ext
+                        else:
+                            def numreturn(a):
+                                return ' (' + str(int(a.group()[2:-1]) + 1) + 
').'
+                            thisfile = finder.sub(numreturn, path) + ext
+                        filepath = os.path.join(os.path.dirname(filepath), 
thisfile)
+                        print('Changing name to %s' % 
tr(os.path.basename(filepath)), '...')
+                        continue_renameing = True
+                        continue
+                    print('Overwriting %s' % tr(os.path.basename(filepath)), 
'...')
+        elif not os.path.exists(os.path.dirname(filepath)):
+            os.mkdir(os.path.dirname(filepath))
 
     temp_filepath = filepath + '.download' if file_size != float('inf') \
         else filepath
@@ -883,7 +901,7 @@
     output_filepath = os.path.join(output_dir, output_filename)
 
     if total_size:
-        if not force and os.path.exists(output_filepath) \
+        if not force and os.path.exists(output_filepath) and not auto_rename\
                 and os.path.getsize(output_filepath) >= total_size * 0.9:
             print('Skipping %s: file already exists' % output_filepath)
             print()
@@ -1370,6 +1388,10 @@
         '-l', '--playlist', action='store_true',
         help='Prefer to download a playlist'
     )
+    download_grp.add_argument(
+        '-a', '--auto-rename', action='store_true', default=False,
+        help='Auto rename same name different files'
+    )
 
     proxy_grp = parser.add_argument_group('Proxy options')
     proxy_grp = proxy_grp.add_mutually_exclusive_group()
@@ -1414,11 +1436,16 @@
     global player
     global extractor_proxy
     global output_filename
+    global auto_rename
 
     output_filename = args.output_filename
     extractor_proxy = args.extractor_proxy
 
     info_only = args.info
+    if args.force:
+        force = True
+    if args.auto_rename:
+        auto_rename = True
     if args.url:
         dry_run = True
     if args.json:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/__init__.py 
new/you-get-0.4.1040/src/you_get/extractors/__init__.py
--- old/you-get-0.4.1025/src/you_get/extractors/__init__.py     2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/__init__.py     2018-03-01 
22:55:09.000000000 +0100
@@ -41,6 +41,7 @@
 from .kuwo import *
 from .le import *
 from .lizhi import *
+from .longzhu import *
 from .magisto import *
 from .metacafe import *
 from .mgtv import *
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/acfun.py 
new/you-get-0.4.1040/src/you_get/extractors/acfun.py
--- old/you-get-0.4.1025/src/you_get/extractors/acfun.py        2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/acfun.py        2018-03-01 
22:55:09.000000000 +0100
@@ -49,7 +49,7 @@
     """
 
     #first call the main parasing API
-    info = 
json.loads(get_content('http://www.acfun.tv/video/getVideo.aspx?id=' + vid))
+    info = 
json.loads(get_content('http://www.acfun.cn/video/getVideo.aspx?id=' + vid))
 
     sourceType = info['sourceType']
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/bilibili.py 
new/you-get-0.4.1040/src/you_get/extractors/bilibili.py
--- old/you-get-0.4.1025/src/you_get/extractors/bilibili.py     2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/bilibili.py     2018-03-01 
22:55:09.000000000 +0100
@@ -140,6 +140,8 @@
             self.movie_entry(**kwargs)
         elif 'bangumi.bilibili.com' in self.url:
             self.bangumi_entry(**kwargs)
+        elif 'bangumi/' in self.url:
+            self.bangumi_entry(**kwargs)
         elif 'live.bilibili.com' in self.url:
             self.live_entry(**kwargs)
         elif 'vc.bilibili.com' in self.url:
@@ -235,22 +237,22 @@
 
     def bangumi_entry(self, **kwargs):
         bangumi_id = re.search(r'(\d+)', self.url).group(1)
-        bangumi_data = get_bangumi_info(bangumi_id)
-        bangumi_payment = bangumi_data.get('payment')
-        if bangumi_payment and bangumi_payment['price'] != '0':
-            log.w("It's a paid item")
-        # ep_ids = collect_bangumi_epids(bangumi_data)
-
         frag = urllib.parse.urlparse(self.url).fragment
         if frag:
             episode_id = frag
         else:
-            episode_id = re.search(r'first_ep_id\s*=\s*"(\d+)"', self.page)
+            episode_id = re.search(r'first_ep_id\s*=\s*"(\d+)"', self.page) or 
re.search(r'\/ep(\d+)', self.url).group(1)
         # cont = 
post_content('http://bangumi.bilibili.com/web_api/get_source', 
post_data=dict(episode_id=episode_id))
         # cid = json.loads(cont)['result']['cid']
         cont = 
get_content('http://bangumi.bilibili.com/web_api/episode/{}.json'.format(episode_id))
         ep_info = json.loads(cont)['result']['currentEpisode']
 
+        bangumi_data = get_bangumi_info(str(ep_info['seasonId']))
+        bangumi_payment = bangumi_data.get('payment')
+        if bangumi_payment and bangumi_payment['price'] != '0':
+            log.w("It's a paid item")
+        # ep_ids = collect_bangumi_epids(bangumi_data)
+
         index_title = ep_info['indexTitle']
         long_title = ep_info['longTitle'].strip()
         cid = ep_info['danmaku']
@@ -295,10 +297,10 @@
     eps = json_data['episodes'][::-1]
     return [ep['episode_id'] for ep in eps]
 
-def get_bangumi_info(bangumi_id):
+def get_bangumi_info(season_id):
     BASE_URL = 'http://bangumi.bilibili.com/jsonp/seasoninfo/'
     long_epoch = int(time.time() * 1000)
-    req_url = BASE_URL + bangumi_id + 
'.ver?callback=seasonListCallback&jsonp=jsonp&_=' + str(long_epoch)
+    req_url = BASE_URL + season_id + 
'.ver?callback=seasonListCallback&jsonp=jsonp&_=' + str(long_epoch)
     season_data = get_content(req_url)
     season_data = season_data[len('seasonListCallback('):]
     season_data = season_data[: -1 * len(');')]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/douyutv.py 
new/you-get-0.4.1040/src/you_get/extractors/douyutv.py
--- old/you-get-0.4.1025/src/you_get/extractors/douyutv.py      2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/douyutv.py      2018-03-01 
22:55:09.000000000 +0100
@@ -73,7 +73,7 @@
 
     print_info(site_info, title, 'flv', float('inf'))
     if not info_only:
-        download_url_ffmpeg(real_url, title, 'flv', None, output_dir = 
output_dir, merge = merge)
+        download_url_ffmpeg(real_url, title, 'flv', params={}, output_dir = 
output_dir, merge = merge)
 
 site_info = "douyu.com"
 download = douyutv_download
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/ixigua.py 
new/you-get-0.4.1040/src/you_get/extractors/ixigua.py
--- old/you-get-0.4.1025/src/you_get/extractors/ixigua.py       2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/ixigua.py       2018-03-01 
22:55:09.000000000 +0100
@@ -5,30 +5,35 @@
 import binascii
 from ..common import *
 
-def get_video_id(text):
-    re_id = r"videoId: '(.*?)'"
-    return re.findall(re_id, text)[0]
+headers = {
+    'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) 
AppleWebKit/537.36'
+                  ' (KHTML, like Gecko) Chrome/61.0.3163.100 Mobile 
Safari/537.36'
+}
+
 
 def get_r():
     return str(random.random())[2:]
 
+
 def right_shift(val, n):
     return val >> n if val >= 0 else (val + 0x100000000) >> n
 
+
 def get_s(text):
     """get video info"""
-    id = get_video_id(text)
+    js_data = json.loads(text)
+    id = js_data['data']['video_id']
     p = get_r()
     url = 'http://i.snssdk.com/video/urls/v/1/toutiao/mp4/%s' % id
     n = parse.urlparse(url).path + '?r=%s' % p
     c = binascii.crc32(n.encode('utf-8'))
     s = right_shift(c, 0)
-    title = ''.join(re.findall(r"title: '(.*?)',", text))
-    return url + '?r=%s&s=%s' % (p, s), title
+    return url + '?r=%s&s=%s' % (p, s), js_data['data']['title']
+
 
 def get_moment(url, user_id, base_url, video_list):
     """Recursively obtaining a video list"""
-    video_list_data = json.loads(get_content(url))
+    video_list_data = json.loads(get_content(url, headers=headers))
     if not video_list_data['next']['max_behot_time']:
         return video_list
     [video_list.append(i["display_url"]) for i in video_list_data["data"]]
@@ -41,23 +46,33 @@
     }
     return get_moment(**_param)
 
+
 def ixigua_download(url, output_dir='.', info_only=False, **kwargs):
     """ Download a single video
         Sample URL: 
https://www.ixigua.com/a6487187567887254029/#mid=59051127876
     """
     try:
-        video_info_url, title = get_s(get_content(url))
-        video_info = json.loads(get_content(video_info_url))
+        video_page_id = re.findall('(\d+)', [i for i in url.split('/') if 
i][3])[0] if 'toutiao.com' in url \
+            else re.findall('(\d+)', [i for i in url.split('/') if i][2])[0]
+
+        video_start_info_url = 
r'https://m.ixigua.com/i{}/info/'.format(video_page_id)
+        video_info_url, title = get_s(get_content(video_start_info_url, 
headers=headers or kwargs.get('headers', {})))
+        video_info = json.loads(get_content(video_info_url, headers=headers or 
kwargs.get('headers', {})))
     except Exception:
         raise NotImplementedError(url)
     try:
         video_url = 
base64.b64decode(video_info["data"]["video_list"]["video_1"]["main_url"]).decode()
     except Exception:
         raise NotImplementedError(url)
-    filetype, ext, size = url_info(video_url)
+    filetype, ext, size = url_info(video_url, headers=headers or 
kwargs.get('headers', {}))
     print_info(site_info, title, filetype, size)
     if not info_only:
-        download_urls([video_url], title, ext, size, output_dir=output_dir)
+        _param = {
+            'output_dir': output_dir,
+            'headers': headers or kwargs.get('headers', {})
+        }
+        download_urls([video_url], title, ext, size, **_param)
+
 
 def ixigua_download_playlist(url, output_dir='.', info_only=False, **kwargs):
     """Download all video from the user's video list
@@ -80,6 +95,7 @@
     for i in get_moment(**_param):
         ixigua_download(i, output_dir, info_only, **kwargs)
 
+
 site_info = "ixigua.com"
 download = ixigua_download
-download_playlist = ixigua_download_playlist
\ No newline at end of file
+download_playlist = ixigua_download_playlist
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/longzhu.py 
new/you-get-0.4.1040/src/you_get/extractors/longzhu.py
--- old/you-get-0.4.1025/src/you_get/extractors/longzhu.py      1970-01-01 
01:00:00.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/longzhu.py      2018-03-01 
22:55:09.000000000 +0100
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+__all__ = ['longzhu_download']
+
+import json
+from ..common import (
+    get_content,
+    match1,
+    print_info,
+    download_urls,
+    playlist_not_supported,
+)
+from ..common import player
+
+def longzhu_download(url, output_dir = '.', merge=True, info_only=False, 
**kwargs):
+    web_domain = url.split('/')[2]
+    if (web_domain == 'star.longzhu.com') or (web_domain == 'y.longzhu.com'):
+        domain = url.split('/')[3].split('?')[0]
+        m_url = 'http://m.longzhu.com/{0}'.format(domain)
+        m_html = get_content(m_url)
+        room_id_patt = r'var\s*roomId\s*=\s*(\d+);'
+        room_id = match1(m_html,room_id_patt)
+
+        json_url = 
'http://liveapi.plu.cn/liveapp/roomstatus?roomId={0}'.format(room_id)
+        content = get_content(json_url)
+        data = json.loads(content)
+        streamUri = data['streamUri']
+        if len(streamUri) <= 4:
+            raise ValueError('The live stream is not online!')
+        title = data['title']
+        streamer = data['userName']
+        title = str.format(streamer,': ',title)
+
+        steam_api_url = 
'http://livestream.plu.cn/live/getlivePlayurl?roomId={0}'.format(room_id)
+        content = get_content(steam_api_url)
+        data = json.loads(content)
+        isonline = data.get('isTransfer')
+        if isonline == '0':
+            raise ValueError('The live stream is not online!')
+
+        real_url = data['playLines'][0]['urls'][0]['securityUrl']
+
+        print_info(site_info, title, 'flv', float('inf'))
+
+        if not info_only:
+            download_urls([real_url], title, 'flv', None, output_dir, 
merge=merge)
+
+    elif web_domain == 'replay.longzhu.com':
+        videoid = match1(url, r'(\d+)$')
+        json_url = 
'http://liveapi.longzhu.com/livereplay/getreplayfordisplay?videoId={0}'.format(videoid)
+        content = get_content(json_url)
+        data = json.loads(content)
+
+        username = data['userName']
+        title = data['title']
+        title = str.format(username,':',title)
+        real_url = data['videoUrl']
+
+        if player:
+            print_info('Longzhu Video', title, 'm3u8', 0)
+            download_urls([real_url], title, 'm3u8', 0, output_dir, 
merge=merge)
+        else:
+            urls = general_m3u8_extractor(real_url)
+            print_info('Longzhu Video', title, 'm3u8', 0)
+            if not info_only:
+                download_urls(urls, title, 'ts', 0, output_dir=output_dir, 
merge=merge, **kwargs)
+
+    else:
+        raise ValueError('Wrong url or unsupported link ... {0}'.format(url))
+
+site_info = 'longzhu.com'
+download = longzhu_download
+download_playlist = playlist_not_supported('longzhu')
\ No newline at end of file
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/quanmin.py 
new/you-get-0.4.1040/src/you_get/extractors/quanmin.py
--- old/you-get-0.4.1025/src/you_get/extractors/quanmin.py      2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/quanmin.py      2018-03-01 
22:55:09.000000000 +0100
@@ -4,7 +4,6 @@
 
 from ..common import *
 import json
-import time
 
 def quanmin_download(url, output_dir = '.', merge = True, info_only = False, 
**kwargs):
     roomid = url.split('/')[3].split('?')[0]
@@ -17,7 +16,8 @@
 
     if not data["play_status"]:
         raise ValueError("The live stream is not online!")
-    real_url = "http://flv.quanmin.tv/live/{}.flv".format(roomid)
+        
+    real_url = data["live"]["ws"]["flv"]["5"]["src"]
 
     print_info(site_info, title, 'flv', float('inf'))
     if not info_only:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/twitter.py 
new/you-get-0.4.1040/src/you_get/extractors/twitter.py
--- old/you-get-0.4.1025/src/you_get/extractors/twitter.py      2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/twitter.py      2018-03-01 
22:55:09.000000000 +0100
@@ -15,6 +15,9 @@
     return ['https://video.twimg.com%s' % i for i in s2]
 
 def twitter_download(url, output_dir='.', merge=True, info_only=False, 
**kwargs):
+    if re.match(r'https?://mobile', url): # normalize mobile URL
+        url = 'https://' + match1(url, r'//mobile\.(.+)')
+
     html = get_html(url)
     screen_name = r1(r'data-screen-name="([^"]*)"', html) or \
         r1(r'<meta name="twitter:title" content="([^"]*)"', html)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/yixia.py 
new/you-get-0.4.1040/src/you_get/extractors/yixia.py
--- old/you-get-0.4.1025/src/you_get/extractors/yixia.py        2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/yixia.py        2018-03-01 
22:55:09.000000000 +0100
@@ -11,14 +11,14 @@
 def yixia_miaopai_download_by_scid(scid, output_dir = '.', merge = True, 
info_only = False):
     """"""
     api_endpoint = 
'http://api.miaopai.com/m/v2_channel.json?fillType=259&scid={scid}&vend=miaopai'.format(scid
 = scid)
-    
+
     html = get_content(api_endpoint)
-    
+
     api_content = loads(html)
-    
+
     video_url = match1(api_content['result']['stream']['base'], r'(.+)\?vend')
     title = api_content['result']['ext']['t']
-    
+
     type, ext, size = url_info(video_url)
 
     print_info(site_info, title, type, size)
@@ -29,14 +29,14 @@
 def yixia_xiaokaxiu_download_by_scid(scid, output_dir = '.', merge = True, 
info_only = False):
     """"""
     api_endpoint = 
'http://api.xiaokaxiu.com/video/web/get_play_video?scid={scid}'.format(scid = 
scid)
-    
+
     html = get_content(api_endpoint)
-    
+
     api_content = loads(html)
-    
+
     video_url = api_content['data']['linkurl']
     title = api_content['data']['title']
-    
+
     type, ext, size = url_info(video_url)
 
     print_info(site_info, title, type, size)
@@ -50,20 +50,16 @@
     if 'miaopai.com' in hostname:  #Miaopai
         yixia_download_by_scid = yixia_miaopai_download_by_scid
         site_info = "Yixia Miaopai"
-        
-        if re.match(r'https?://www.miaopai.com/show/channel/.+', url):  #PC
-            scid = match1(url, 
r'https?://www.miaopai.com/show/channel/(.+)\.htm')
-        elif re.match(r'https?://www.miaopai.com/show/.+', url):  #PC
-            scid = match1(url, r'https?://www.miaopai.com/show/(.+)\.htm')
-        elif re.match(r'https?://m.miaopai.com/show/channel/.+', url):  #Mobile
-            scid = match1(url, 
r'https?://m.miaopai.com/show/channel/(.+)\.htm')
-            if scid == None :
-                scid = match1(url, r'https?://m.miaopai.com/show/channel/(.+)')
+
+        scid = match1(url, r'miaopai\.com/show/channel/(.+)\.htm') or \
+               match1(url, r'miaopai\.com/show/(.+)\.htm') or \
+               match1(url, r'm\.miaopai\.com/show/channel/(.+)\.htm') or \
+               match1(url, r'm\.miaopai\.com/show/channel/(.+)')
 
     elif 'xiaokaxiu.com' in hostname:  #Xiaokaxiu
         yixia_download_by_scid = yixia_xiaokaxiu_download_by_scid
         site_info = "Yixia Xiaokaxiu"
-        
+
         if re.match(r'http://v.xiaokaxiu.com/v/.+\.html', url):  #PC
             scid = match1(url, r'http://v.xiaokaxiu.com/v/(.+)\.html')
         elif re.match(r'http://m.xiaokaxiu.com/m/.+\.html', url):  #Mobile
@@ -71,7 +67,7 @@
 
     else:
         pass
-    
+
     yixia_download_by_scid(scid, output_dir, merge, info_only)
 
 site_info = "Yixia"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/extractors/youku.py 
new/you-get-0.4.1040/src/you_get/extractors/youku.py
--- old/you-get-0.4.1025/src/you_get/extractors/youku.py        2018-02-09 
15:50:09.000000000 +0100
+++ new/you-get-0.4.1040/src/you_get/extractors/youku.py        2018-03-01 
22:55:09.000000000 +0100
@@ -78,7 +78,7 @@
         self.api_error_code = None
         self.api_error_msg = None
 
-        self.ccode = '0513'
+        self.ccode = '0507'
         self.utid = None
 
     def youku_ups(self):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/you-get-0.4.1025/src/you_get/version.py 
new/you-get-0.4.1040/src/you_get/version.py
--- old/you-get-0.4.1025/src/you_get/version.py 2018-02-09 15:50:09.000000000 
+0100
+++ new/you-get-0.4.1040/src/you_get/version.py 2018-03-01 22:55:09.000000000 
+0100
@@ -1,4 +1,4 @@
 #!/usr/bin/env python
 
 script_name = 'you-get'
-__version__ = '0.4.1025'
+__version__ = '0.4.1040'


Reply via email to