The branch, eden-pre has been updated
via 947e2ee3762c4a1028d2ebb5ea5e6fab5f5aa84d (commit)
from 09954a8d7721aea4497e0458ceac36b262ed02e9 (commit)
- Log -----------------------------------------------------------------
http://xbmc.git.sourceforge.net/git/gitweb.cgi?p=xbmc/plugins;a=commit;h=947e2ee3762c4a1028d2ebb5ea5e6fab5f5aa84d
commit 947e2ee3762c4a1028d2ebb5ea5e6fab5f5aa84d
Author: spiff <[email protected]>
Date: Mon Sep 12 09:30:59 2011 +0200
[plugin.video.youtube] updated to version 2.6.3
diff --git a/plugin.video.youtube/YouTubeCore.py
b/plugin.video.youtube/YouTubeCore.py
index e4bdacd..7c7c6a5 100755
--- a/plugin.video.youtube/YouTubeCore.py
+++ b/plugin.video.youtube/YouTubeCore.py
@@ -264,7 +264,7 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
rstat = 403
while rstat == 403:
result =
self._fetchPage({"link": "http://gdata.youtube.com/feeds/api/videos/batch",
"api": "true", "request": final_request})
- rstat =
self.parseDOM(result["content"], { "name": "batch:status", "return": "code"})
+ rstat =
self.parseDOM(result["content"], "batch:status", ret = "code")
if len(rstat) > 0:
if int(rstat[len(rstat)
- 1]) == 403:
print
self.__plugin__ + " getBatchDetails quota exceeded. Waiting 5 seconds. " +
repr(rstat)
@@ -300,6 +300,7 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
get = params.get
link = get("link")
ret_obj = { "status": 500, "content": ""}
+
if self.__dbg__:
if (get("url_data") or get("request")):
print self.__plugin__ + " _fetchPage called for
: " + repr(params['link'])
@@ -342,6 +343,9 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
print self.__plugin__ + " _fetchPage got api"
request.add_header('GData-Version', '2') #confirmed
request.add_header('X-GData-Key', 'key=' + self.APIKEY)
+ if self.__settings__.getSetting("oauth2_expires_at") <
time.time():
+ self._oRefreshToken()
+
else:
request.add_header('User-Agent', self.USERAGENT)
@@ -436,8 +440,6 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
time.sleep(3)
params["error"] = str(int(get("error", "0")) + 1)
ret_obj = self._fetchPage(params)
- if not ret_obj.has_key("content") and e.fp:
- ret_obj["content"] = e.fp.read()
return ret_obj
def _findErrors(self, ret):
@@ -445,19 +447,19 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
print self.__plugin__ + " _findErrors"
## Couldn't find 2 factor or normal login
- error = self.parseDOM(ret['content'], { "name": "div", "class":
"errormsg", "content": "true"})
+ error = self.parseDOM(ret['content'], "div", attrs = { "class":
"errormsg" })
if len(error) == 0:
# An error in 2-factor
- error = self.parseDOM(ret['content'], { "name": "div",
"class": "error smaller", "content": "true"})
+ error = self.parseDOM(ret['content'], "div", attrs = {
"class": "error smaller"})
if len(error) == 0:
- error = self.parseDOM(ret['content'], { "name": "div",
"id": "id", "id-match": "unavailable-message", "content": "true"})
+ error = self.parseDOM(ret['content'], "div", attrs = {
"id": "unavailable-message"})
if len(error) == 0 and ret['content'].find("yt:quota") > -1:
# Api quota
- html = self.parseDOM(ret['content'], { "name": "error"})
- error = self.parseDOM(html, { "name": "code",
"content": "true"})
+ html = self.parseDOM(ret['content'],"error")
+ error = self.parseDOM(html, "code")
if len(error) == 0 and False: # This hits flash quite often.
# Playback
- error = self.parseDOM(ret['content'], { "name": "div",
"class": "yt-alert-content", "content": "true"})
+ error = self.parseDOM(ret['content'], "div", attrs = {
"class": "yt-alert-content"})
if len(error) > 0:
error = error[0]
error =
urllib.unquote(error[0:error.find("[")]).replace("'", "'")
@@ -542,7 +544,7 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
"client_secret": "sZn1pllhAfyonULAWfoGKCfp",
"refresh_token": self.__settings__.getSetting(
"oauth2_refresh_token" ),
"grant_type": "refresh_token"}
- ret = self._fetchPage({ "link": url,
"no-language-cookie": "true", "url_data": data}) # "no-language-cookie": "true"
<- might be needed here..
+ ret = self._fetchPage({ "link": url,
"no-language-cookie": "true", "url_data": data})
if ret["status"] == 200:
oauth = ""
try:
@@ -696,6 +698,7 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
overlay = self.__storage__.retrieveValue("vidstatus-" +
video['videoid'] )
if overlay:
+ print self.__plugin__ + " _getvideoinfo videoid
set to false XXXX XXXX : " + repr(overlay)
video['Overlay'] = int(overlay)
if video['videoid'] == "false":
@@ -707,8 +710,9 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
if next:
self.addNextFolder(ytobjects,params)
+ print self.__plugin__ + " _getvideoinfo Done: " +
str(len(ytobjects)) #+ repr(ytobjects)
return ytobjects;
-
+
def stripTags(self, html):
sub_start = html.find("<")
sub_end = html.find(">")
@@ -719,115 +723,98 @@ class YouTubeCore(YouTubeUtils.YouTubeUtils):
return html
- def getDOMContent(self, html, params, match):
- get = params.get
+ def getDOMContent(self, html, name, match):
#print self.__plugin__ + " getDOMContent match: " + match
start = html.find(match)
- if get("name") == "img":
+ if name == "img":
endstr = ">"
else:
- endstr = "</" + get("name") + ">"
+ endstr = "</" + name + ">"
end = html.find(endstr, start)
- #print self.__plugin__ + " getDOMContent " + str(start) + " < "
+ str(end)
+ pos = html.find("<" + name, start + 1 )
+
+ #print self.__plugin__ + " getDOMContent " + str(start) + " < "
+ str(end) + " pos = " + str(pos)
- pos = html.find("<" + get("name"), start + 1)
- while pos < end:
- pos = html.find("<" + get("name"), pos + 1)
- if pos == -1:
- break;
- tend = html.find(endstr, end + len(endstr))
- if tend != -1:
- end = tend
+ while pos < end and pos != -1:
+ pos = html.find("<" + name, pos + 1)
+ if pos > -1:
+ tend = html.find(endstr, end + len(endstr))
+ if tend != -1:
+ end = tend
#print self.__plugin__ + " getDOMContent2 loop: " +
str(start) + " < " + str(end) + " pos = " + str(pos)
+ #print self.__plugin__ + " getDOMContent XXX: " + str(start) +
" < " + str(end) + " pos = " + str(pos)
html = html[start:end + len(endstr)]
#print self.__plugin__ + " getDOMContent done html length: " +
str(len(html)) + repr(html)
return html
- def parseDOM(self, html, params):
- get = params.get
- #if self.__dbg__:
- # print self.__plugin__ + " parseDOM : " + repr(params)
- if get("id"):
- if get("id-match"):
- lst = re.compile('(<' + get("name") + ' ' +
get("id") + '=[\'"]+' + get("id-match") + '[\'"]>)').findall(html)
- if len(lst) == 0:
- lst = re.compile('(<' + get("name") + '
' + get("id") + '=[\'"]+' + get("id-match") + '[\'"]+.*?>)').findall(html)
- if len(lst) == 0:
- lst = re.compile('(<' +
get("name") + '.*?' + get("id") + '=[\'"]+' + get("id-match") +
'[\'"]+.*?>)').findall(html)
- else:
- lst = re.compile('(<' + get("name") + ' ' +
get("id") + '=[\'"].*?[\'"]+.*?>)').findall(html)
- if len(lst) == 0:
- lst = re.compile('(<' + get("name") +
'.*?' + get("id") + '=[\'"]+.*?[\'"]+.*?>)').findall(html)
- elif get("class"):
- lst = re.compile('(<' + get("name") + ' class=[\'"]+' +
get("class") + '[\'"]+.*?>)').findall(html)
- if len(lst) == 0:
- lst = re.compile('(<' + get("name") +
'.*?class=[\'"]+' + get("class") + '[\'"]+.*?>)').findall(html)
- elif get("return"):
- lst = re.compile('(<' + get("name") + ' ' +
get("return") + '=[\'"]+.*?[\'"]+.*?>)').findall(html)
- if len(lst) == 0:
- lst = re.compile('(<' + get("name") + '.*?' +
get("return") + '=[\'"]+.*?[\'"]+.*?>)').findall(html)
- else:
- lst = re.compile('(<' + get("name") +
'.*?>)').findall(html)
+ def parseDOM(self, html, name = "", attrs = {}, ret = False):
+ # html <- text to scan.
+ # name <- Element name
+ # attrs <- { "id": "my-div", "class": "oneclass.*anotherclass",
"attribute": "a random tag" }
+ # ret <- Return content of element
+ # Default return <- Returns a list with the content
- if len(lst) == 0:
- #print self.__plugin__ + " parseDOM Couldn't find any
matches. Returning empty handed : " + html
+ if self.__dbg__:
+ print self.__plugin__ + " parseDOM : " + repr(name) + "
- " + repr(attrs) + " - " + repr(ret) + " - " + str(type(html))
+ if type(html) == type([]):
+ html = "".join(html)
+ html = html.replace("\n", "")
+ if not name.strip():
+ if self.__dbg__:
+ print self.__plugin__ + " parseDOM - Missing
tag name "
return ""
- if get("return"):
- #these must be \n, at least for login and live.
- html2 = "\n".join(lst)
- else:
- html2 = ""
- for match in lst:
- html2 += "\n" + self.getDOMContent(html,
params, match)
+ lst = []
- if len(lst) > 0 and get("class"):
- lst = re.compile('(<.*?class=[\'"]' + get("class") +
'[\'"].*?>)').findall(html2)
- if get("return"):
- html2 = "\n".join(lst)
- else:
- html2 = ""
- for match in lst:
- html2 += "\n" +
self.getDOMContent(html, params, match)
- #print self.__plugin__ + " parseDOM class: " +
str(len(lst))
-
- if len(lst) > 0 and get("id"):
- lst = re.compile('(<.*' + get("id") +
'=.*>)').findall(html2)
- if get("return"):
- html2 = "\n".join(lst)
- else:
- html2 = ""
- for match in lst:
- html2 += "\n" +
self.getDOMContent(html, params, match)
- #print self.__plugin__ + " parseDOM id: " +
str(len(lst))
- if len(lst) > 0 and get("id-match"):
- lst = re.compile('(<.*' + get("id") + '=[\'"]'
+ get("id-match") + '[\'"].*>)').findall(html2)
- if get("return"):
- html2 = "\n".join(lst)
+ # Find all elements with the tag
+
+ i = 0
+ for key in attrs:
+ scripts = [ '(<' + name + '[^>]*?(?:' + key + '=[\'"]'
+ attrs[key] + '[\'"][^>]*?>))', # Hit often.
+ '(<' + name + ' (?:' + key + '=[\'"]' +
attrs[key] + '[\'"])[^>]*?>)', # Hit twice
+ '(<' + name + '[^>]*?(?:' + key + '=[\'"]'
+ attrs[key] + '[\'"])[^>]*?>)'] #
+
+ lst2 = []
+ for script in scripts:
+ if len(lst2) == 0:
+ #print self.__plugin__ + " parseDOM
scanning " + str(i) + " " + str(len(lst)) + " Running :" + script
+ lst2 = re.compile(script).findall(html)
+ #print self.__plugin__ + " parseDOM
scanning " + str(i) + " " + str(len(lst2)) + " Result : " #+ repr(lst2[:2])
+ i += 1
+ if len(lst2) > 0:
+ if len(lst) == 0:
+ lst = lst2;
+ lst2 = []
else:
- html2 = ""
- for match in lst:
- html2 += "\n" +
self.getDOMContent(html, params, match)
- #print self.__plugin__ + " parseDOM id-match: "
+ str(len(lst))
-
- #print self.__plugin__ + " parseDOM id - html2 length: " +
str(len(html2)) + " - " + str(len(lst))
-
- if len(lst) > 0 and get("return"):
- lst = re.compile('<' + get("name") + '.*' +
get("return") + '=[\'"](.*?)[\'"].*>').findall(html2)
- #print self.__plugin__ + " parseDOM return lst for " +
repr(params) + " : " + str(len(lst)) + repr(lst)
- return lst
-
- if len(lst) > 0 and get("content"):
- contlst = []
+ test = range(len(lst))
+ test.reverse()
+ for i in test: # Delete anything
missing from the next list.
+ if not lst[i] in lst2:
+ if self.__dbg__:
+ print
self.__plugin__ + " parseDOM Purging mismatch " + str(len(lst)) + " - " +
repr(lst[i])
+ del(lst[i])
+
+ if len(lst) == 0 and attrs == {}:
+ #print self.__plugin__ + " parseDOM no list found,
making one on just the element name"
+ lst = re.compile('(<' + name + '[^>]*?>)').findall(html)
+
+ if ret != False:
+ #print self.__plugin__ + " parseDOM Getting attribute
%s content for %s matches " % ( ret, len(lst) )
+ lst2 = []
+ for match in lst:
+ lst2 += re.compile('<' + name + '.*' + ret +
'=[\'"]([^>]*?)[\'"].*>').findall(match)
+ lst = lst2
+ else:
+ #print self.__plugin__ + " parseDOM Getting element
content for %s matches " % len(lst)
+ lst2 = []
for match in lst:
- temp = self.getDOMContent(html, params, match)
+ temp = self.getDOMContent(html, name, match)
html = html.replace(temp, "")
-
contlst.append(temp[temp.find(">")+1:temp.rfind("</" + get("name") + ">")])
- #print self.__plugin__ + " parseDOM return lst for " +
repr(params) + " : " + str(len(lst)) + " - " + repr(contlst)
- return contlst
-
- #print self.__plugin__ + " parseDOM done return html for " +
repr(params) + " : " + str(len(html2))
- return html2
+
lst2.append(temp[temp.find(">")+1:temp.rfind("</" + name + ">")])
+ lst = lst2
+ if self.__dbg__:
+ print self.__plugin__ + " parseDOM Done " +
str(len(lst))
+ return lst
\ No newline at end of file
diff --git a/plugin.video.youtube/YouTubeLogin.py
b/plugin.video.youtube/YouTubeLogin.py
index 65cded2..4f0f958 100755
--- a/plugin.video.youtube/YouTubeLogin.py
+++ b/plugin.video.youtube/YouTubeLogin.py
@@ -16,7 +16,10 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
-import sys, urllib, urllib2, re, socket, json, cookielib
+import sys, urllib, urllib2, socket, cookielib, time
+try: import simplejson as json
+except ImportError: import json
+
import xbmc
import YouTubeUtils
import YouTubeCore
@@ -34,7 +37,6 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
__dbg__ = sys.modules[ "__main__" ].__dbg__
APIKEY =
"AI39si6hWF7uOkKh4B9OEAX-gK337xbwR9Vax-cdeF9CF9iNAcQftT8NVhEXaORRLHAmHxj6GjM-Prw04odK4FxACFfKkiH9lg";
- USERAGENT = "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB;
rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8"
urls = {};
urls['http_login'] =
"https://www.google.com/accounts/ServiceLogin?service=youtube"
@@ -44,16 +46,20 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
__cj__ = cookielib.LWPCookieJar()
__opener__ = urllib2.build_opener(urllib2.HTTPCookieProcessor(__cj__))
urllib2.install_opener(__opener__)
+ __table_name__ = "YouTube"
def login(self, params = {}):
- oname = self.__settings__.getSetting("username")
+ if self.__dbg__:
+ print self.__plugin__ + " login "
+ ouname = self.__settings__.getSetting("username")
opass = self.__settings__.getSetting( "user_password" )
self.__settings__.openSettings()
+ uname = self.__settings__.getSetting("username")
self.__dbg__ = self.__settings__.getSetting("debug") == "true"
- if self.__settings__.getSetting("username") and
self.__settings__.getSetting( "user_password" ):
+ if uname != "":
refreshed = False
- if self.__settings__.getSetting( "oauth2_refresh_token"
) and oname == self.__settings__.getSetting("username") and opass ==
self.__settings__.getSetting( "user_password" ):
+ if self.__settings__.getSetting( "oauth2_refresh_token"
) and ouname == uname and opass == self.__settings__.getSetting(
"user_password" ):
if self.__dbg__:
print self.__plugin__ + " login
refreshing token: " + str(refreshed)
refreshed = self._oRefreshToken()
@@ -84,15 +90,7 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
if self.__dbg__:
print self.__plugin__ + " _apiLogin - errors: " +
str(error)
- uname = self.__settings__.getSetting( "username" )
- passwd = self.__settings__.getSetting( "user_password" )
-
self.__settings__.setSetting('auth', "")
-
- if ( uname == "" or passwd == "" ):
- if self.__dbg__:
- print self.__plugin__ + " _apiLogin no username
or password set "
- return ( "", 0 )
url =
"https://accounts.google.com/o/oauth2/auth?client_id=208795275779.apps.googleusercontent.com&redirect_uri=urn:ietf:wg:oauth:2.0:oob&scope=http%3A%2F%2Fgdata.youtube.com&response_type=code"
@@ -100,6 +98,8 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
fetch_options = { "link": url , "no-language-cookie": "true" }
step = 0
+ if self.__dbg__:
+ print self.__plugin__ + " _apiLogin part A"
while not logged_in and fetch_options and step < 6:
if self.__dbg__:
print self.__plugin__ + " _apiLogin step " +
str(step)
@@ -108,17 +108,20 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
ret = self._fetchPage(fetch_options)
fetch_options = False
- newurl = re.compile('<form action="(.*?)"
method="POST">').findall(ret["content"])
- state_wrapper = re.compile('<input type="hidden"
id="state_wrapper" name="state_wrapper" value="(.*?)">').findall(ret["content"])
- submit_access = "true"#re.compile('<button
id="submit_approve_access" name="submit_approve_access" type="submit"
tabindex="1" value="(.*?)" class="').findall(ret["content"])
- if len(newurl) > 0 and len(state_wrapper) > 0 and
len(submit_access) > 0:
+ newurl = self.parseDOM(ret["content"], "form", attrs= {
"method": "POST"}, ret = "action")
+ state_wrapper = self. parseDOM(ret["content"], "input",
attrs= { "id": "state_wrapper" }, ret = "value")
+ #submit_access = self.parseDOM(ret["content"],
"button", attrs = { "name": "submit_access", "type": "submit"}, ret = "value")
+
+ if len(newurl) > 0 and len(state_wrapper) > 0:
url_data = { "state_wrapper": state_wrapper[0],
- "submit_access": submit_access}
+ "submit_access": "true"}
fetch_options = { "link": newurl[0],
"url_data": url_data, "no-language-cookie": "true" }
+ if self.__dbg__:
+ print self.__plugin__ + " _apiLogin
part B"
continue;
- code =
re.compile('code=(.*)</title>').findall(ret['content'])
+ code = self.parseDOM(ret["content"], "textarea", attrs
= { "id": "code"})
if len(code) > 0:
url =
"https://accounts.google.com/o/oauth2/token"
url_data = { "client_id":
"208795275779.apps.googleusercontent.com",
@@ -127,19 +130,26 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
"redirect_uri":
"urn:ietf:wg:oauth:2.0:oob",
"grant_type": "authorization_code"
}
fetch_options = { "link": url, "url_data":
url_data}
+ if self.__dbg__:
+ print self.__plugin__ + " _apiLogin
part C"
continue
# use token
- oauth = json.loads(ret["content"])
- if len(oauth) > 0:
- #self.__settings__.setSetting("oauth2_expires
at", oauth["expires_in"] + current time. )
-
self.__settings__.setSetting("oauth2_access_token", oauth["access_token"])
- self.__settings__.setSetting('auth',
oauth["access_token"])
-
self.__settings__.setSetting("oauth2_refresh_token", oauth["refresh_token"])
-
+ if ret["content"].find("access_token") > -1:
if self.__dbg__:
- print self.__plugin__ + " _apiLogin
done: " + uname
- logged_in = True
+ print self.__plugin__ + " _apiLogin
part D"
+ oauth = json.loads(ret["content"])
+
+ if len(oauth) > 0:
+ print self.__plugin__ + " _apiLogin
part D " + repr(oauth["expires_in"])
+
self.__settings__.setSetting("oauth2_expires_at", str(int(oauth["expires_in"])
+ time.time()) )
+
self.__settings__.setSetting("oauth2_access_token", oauth["access_token"])
+ self.__settings__.setSetting('auth',
oauth["access_token"])
+
self.__settings__.setSetting("oauth2_refresh_token", oauth["refresh_token"])
+
+ logged_in = True
+ if self.__dbg__:
+ print self.__plugin__ + "
_apiLogin done: " + self.__settings__.getSetting( "username" )
if logged_in:
return ( self.__language__(30030), 200 )
@@ -154,12 +164,6 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
print self.__plugin__ + " _httpLogin "
result = ""
status = 500
-
- uname = self.__settings__.getSetting( "username" )
- pword = self.__settings__.getSetting( "user_password" )
-
- if uname == "" and pword == "":
- return ( "", 303)
if get("new", "false") == "true":
self.__settings__.setSetting( "login_info", "" )
@@ -181,7 +185,7 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
fetch_options = False
# Click login link on youtube.com
- newurl = self.parseDOM(ret["content"], { "name": "a",
"class": "end", "return": "href"})
+ newurl = self.parseDOM(ret["content"], "a", attrs =
{"class": "end" }, ret = "href")
if len(newurl) > 0:
# Start login procedure
if newurl[0] != "#":
@@ -190,18 +194,20 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
print self.__plugin__ + "
_httpLogin part A: " + repr(fetch_options)
# Fill out login information and send.
- newurl = self.parseDOM(ret["content"].replace("\n", "
"), { "name": "form", "id": "id", "id-match": "gaia_loginform", "return":
"action"})
+ newurl = self.parseDOM(ret["content"].replace("\n", "
"), "form", attrs = { "id": "gaia_loginform"}, ret = "action")
if len(newurl) > 0:
- ( galx, url_data ) = self._fillLoginInfo(ret)
+ ( galx, url_data ) =
self._fillLoginInfo(ret["content"])
if len(galx) > 0 and len(url_data) > 0:
fetch_options = { "link": newurl[0],
"no-language-cookie": "true", "url_data": url_data }
if self.__dbg__:
- print self.__plugin__ + "
_httpLogin part B:" + repr(fetch_options)
+ print self.__plugin__ + "
_httpLogin part B:" + repr(fetch_options) ## WARNING, SHOWS LOGIN INFO
continue
-
- newurl = re.compile('<meta http-equiv="refresh"
content="0; url='(.*)'"></head>').findall(ret["content"])
+
+ newurl = self.parseDOM(ret["content"], "meta", attrs =
{ "http-equiv": "refresh"}, ret = "content")
if len(newurl) > 0 :
- fetch_options = { "link":
newurl[0].replace("&", "&"), "no-language-cookie": "true" }
+ newurl = newurl[0].replace("&", "&")
+ newurl = newurl[newurl.find("'") + 5 :
newurl.rfind("'")]
+ fetch_options = { "link": newurl,
"no-language-cookie": "true" }
if self.__dbg__:
print self.__plugin__ + " _httpLogin
part C: " + repr(fetch_options)
continue
@@ -214,8 +220,8 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
print self.__plugin__ + " _httpLogin
part D: " + repr(fetch_options)
continue
- smsToken = re.compile('<input type="hidden"
name="smsToken" value="(.*?)">').findall(ret["content"])
- cont = re.compile('<input type="hidden" name="continue"
value="(.*?)">').findall(ret["content"])
+ smsToken = self.parseDOM(ret["content"], "input",
attrs= { "name": "smsToken" }, ret= "value")
+ cont = self.parseDOM(ret["content"], "input", attrs= {
"name": "continue"}, ret="value" )
if len(cont) > 0 and smsToken > 0 and galx != "" :
url_data = { "smsToken": smsToken[0],
"continue": cont[0],
@@ -249,17 +255,25 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
return (result, status)
- def _fillLoginInfo(self, ret):
- rmShown = re.compile('<input type="hidden" name=\'rmShown\'
value="(.*?)" />').findall(ret["content"])
- #cont = re.compile('<input type="hidden" name="continue"
id="continue"\n value="(.*?)" /> ').findall(ret["content"])
- #cont2 = self.parseDOM(ret["content"].replace("\n", " "), {
"name": "input", "id": "id", "id-match": "continue", "return": "value"})
+ def _fillLoginInfo(self, content):
+ rmShown = self.parseDOM(content, "input", attrs = { "name":
"rmShown"}, ret = "value" )
+ #cont2= self.parseDOM(content, "input", attrs = { "id":
"continue" }, ret = "value")
+ #print self.__plugin__ + " _httpLogin missing values for login
form XXXXXXXXXXXX " + repr(cont2) + "\n" + repr(content)
cont =
["http://www.youtube.com/signin?action_handle_signin=true&nomobiletemp=1&hl=en_US&next=%2F"]
- uilel = re.compile('<input type="hidden" name="uilel"
id="uilel"\n value="(.*?)" />').findall(ret["content"])
- dsh = re.compile('<input type="hidden" name="dsh" id="dsh"\n
value="(.*?)" />').findall(ret["content"])
- galx = re.compile('Set-Cookie:
GALX=(.*);Path=/accounts;Secure').findall(str(ret["header"]))
+ uilel = self.parseDOM(content, "input", attrs = { "name":
"uilel" }, ret= "value")
+ if len(uilel) == 0:
+ uilel = self.parseDOM(content, "input", attrs= { "id":
"uilel" }, ret= "value")
+ dsh = self.parseDOM(content, "input", attrs = { "name": "dsh"
}, ret = "value")
+ if len(dsh) == 0:
+ dsh = self.parseDOM(content, "input", attrs = { "id":
"dsh" }, ret = "value")
+
+ # Can we get this elsewhere?
+ galx = self.parseDOM(content, "input", attrs = { "name":
"GALX"}, ret = "value")
uname = self.__settings__.getSetting( "username" )
pword = self.__settings__.getSetting( "user_password" )
-
+
+ if pword == "":
+ pword = self.getUserInput(self.__language__(30628),
hidden=True)
if len(galx) == 0 or len(cont) == 0 or len(uilel) == 0 or
len(dsh) == 0 or len(rmShown) == 0 or uname == "" or pword == "":
if self.__dbg__:
@@ -288,13 +302,14 @@ class YouTubeLogin(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
return ( galx, url_data)
def _fillUserPin(self, content):
- smsToken = re.compile('<input type="hidden" name="smsToken"\n
value="(.*?)">').findall(content)
- email = re.compile('<input type="hidden" name="email"\n
value="(.*?)">').findall(content)
- if len(smsToken) > 0 and len(email) > 0:
+ smsToken = self.parseDOM(content, "input", attrs = { "name":
"smsToken" }, ret = "value")
+ email = self.parseDOM(content, "input", attrs = { "name":
"email" }, ret = "value")
+ userpin = self.getUserInput(self.__language__(30627))
+ if len(smsToken) > 0 and len(email) > 0 and len(userpin) > 0:
url_data = { "smsToken": smsToken[0],
"PersistentCookie": "yes",
"service": "youtube",
- "smsUserPin" :
self.getUserInput(self.__language__(30627)),
+ "smsUserPin" : userpin,
"smsVerifyPin" : "Verify",
"timeStmp" : "",
"secTok" : "",
diff --git a/plugin.video.youtube/YouTubePlayer.py
b/plugin.video.youtube/YouTubePlayer.py
index 09b797d..919e49e 100755
--- a/plugin.video.youtube/YouTubePlayer.py
+++ b/plugin.video.youtube/YouTubePlayer.py
@@ -460,7 +460,7 @@ class YouTubePlayer(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
if hd_quality > 1: #<-- 720p
if (link(22)):
video_url = link(22)
- if (link(45)):
+ elif (link(45)):
video_url = link(45)
if hd_quality > 2: #<-- 1080p
if (link(37)):
diff --git a/plugin.video.youtube/YouTubeScraper.py
b/plugin.video.youtube/YouTubeScraper.py
index 4308603..41dfc32 100644
--- a/plugin.video.youtube/YouTubeScraper.py
+++ b/plugin.video.youtube/YouTubeScraper.py
@@ -27,7 +27,7 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
__dbg__ = sys.modules[ "__main__" ].__dbg__
__feeds__ = sys.modules[ "__main__" ].__feeds__
- __storage__ = sys.modules [ "__main__" ].__storage__
+ __storage__ = sys.modules[ "__main__" ].__storage__
def __init__(self):
self.urls['categories'] = "http://www.youtube.com/videos"
@@ -67,12 +67,12 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
#list = SoupStrainer(id="recent-trailers-container", name="div")
#trailers = BeautifulSoup(result["content"],
parseOnlyThese=list)
- trailers = self.parseDOM(result["content"], { "name": "div",
"id": "id", "id-match": "recent-trailers-container"})
+ trailers = self.parseDOM(result["content"], "div", attrs = {
"id": "recent-trailers-container"})
if (len(trailers) > 0):
items = []
- ahref = self.parseDOM(trailers, { "name": "a", "class":
" yt-uix-hovercard-target", "return": "href"})
- athumb = self.parseDOM(trailers, {"name": "img", "id":
"alt", "id-match": "Thumbnail", "return": "src"})
+ ahref = self.parseDOM(trailers, "a", attrs = {"class":
" yt-uix-hovercard-target", "id": ".*?" }, ret = "href")
+ athumb = self.parseDOM(trailers, "img", attrs = {
"alt": "Thumbnail" }, ret = "src")
if len(ahref) == len(athumb):
for i in range(0, len(ahref)):
videoid = ahref[i]
@@ -102,32 +102,28 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
result = self._fetchPage({"link":url})
next = "false"
- pagination = self.parseDOM(result["content"], { "name": "div",
"class": " yt-uix-pager"})
+ pagination = self.parseDOM(result["content"], "div", attrs = {
"class": "yt-uix-pager"})
if (len(pagination) > 0):
tmp = str(pagination)
if (tmp.find("Next") > 0):
next = "true"
- videos = self.parseDOM(result["content"], { "name": "div",
"id": "id", "id-match": "browse-video-data"})
+ videos = self.parseDOM(result["content"], "div", { "id":
"browse-video-data"})
+ if len(videos) == 0:
+ videos = self.parseDOM(result["content"], "div", attrs=
{ "class": "most-viewed-list paginated"})
items = []
if (len(videos) > 0):
- links = self.parseDOM(videos, { "name": "a", "return":
"href"})
- for link in links:
- if (link.find("/watch?v=") != -1):
- link = link[link.find("=") +
1:link.find("&")]
- items.append(link)
- else:
- videos = self.parseDOM(result["content"], { "name":
"div", "class": "most-viewed-list paginated"})
- links = self.parseDOM(videos, { "name": "a", "return":
"href"})
+ links = self.parseDOM(videos, "a", attrs = { "class":
"ux-thumb-wrap " } , ret = "href")
+ if len(links) == 0:
+ links = self.parseDOM(videos, "a", ret = "href")
for link in links:
if (link.find("/watch?v=") != -1):
link = link[link.find("=") + 1:]
if (link.find("&") > 0):
link = link[:link.find("&")]
items.append(link)
-
if self.__dbg__:
print self.__plugin__ + " scrapeCategoriesGrid done "
return (items, result["status"])
@@ -144,9 +140,9 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
result = self._fetchPage({"link": url})
if result["status"] == 200:
- categories = self.parseDOM(result["content"], {"name":
"div", "id": "id", "id-match": "browse-filter-menu"})
- ahref = self.parseDOM(categories, {"name": "a",
"return": "href"})
- acontent = self.parseDOM(categories, {"name": "a",
"content": "true"})
+ categories = self.parseDOM(result["content"], "div",
attrs = { "id": "browse-filter-menu"})
+ ahref = self.parseDOM(categories, "a", ret= "href")
+ acontent = self.parseDOM(categories, "a")
if len(acontent) == len(ahref) and len(ahref) > 0:
for i in range(0 , len(ahref)):
@@ -165,7 +161,7 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
items.append(item)
if self.__dbg__:
- print self.__plugin__ + " scrapeMusicCategories done"
+ print self.__plugin__ + " scrapeMusicCategories done"
return (items, result["status"])
@@ -206,9 +202,9 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
result = self._fetchPage({"link": url})
if result["status"] == 200:
- artists = self.parseDOM(result["content"], {
"name": "div", "id": "id", "id-match": "similar-artists"});
- ahref = self.parseDOM(artists, {"name": "a",
"return": "href" })
- atitle = self.parseDOM(artists, {"name": "a",
"content": "true"})
+ artists = self.parseDOM(result["content"],
"div", { "id": "similar-artists"});
+ ahref = self.parseDOM(artists, "a", ret =
"href")
+ atitle = self.parseDOM(artists, "a")
if len(ahref) == len(atitle):
for i in range(0, len(ahref)):
item = {}
@@ -241,11 +237,11 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
url = self.urls["music"] + category
result = self._fetchPage({"link":url})
- artists = self.parseDOM(result["content"], {"name":
"div", "class": "browse-item artist-item", "content": "true"})
+ artists = self.parseDOM(result["content"], "div", {
"id": "artist-recs-container"})
for artist in artists:
- ahref = self.parseDOM(artist, {"name": "a",
"return": "href", "id": "title"})
- atitle = self.parseDOM(artist, {"name": "a",
"return": "title"})
- athumb = self.parseDOM(artist, {"name": "img",
"return": "data-thumb"})
+ ahref = self.parseDOM(artist, "a", { "title":
".*?" }, ret = "href")
+ atitle = self.parseDOM(artist, "a", ret =
"title")
+ athumb = self.parseDOM(artist, "img", ret =
"data-thumb")
if len(atitle) == len(ahref) == len(athumb) and
len(ahref) > 0:
for i in range(0 , len(ahref)):
item = {}
@@ -269,7 +265,7 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
get = params.get
if self.__dbg__:
print self.__plugin__ + " scrapeMusicCategoryHits"
-
+
status = 200
items = []
params["batch"] = "true"
@@ -279,17 +275,18 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
url = self.urls["music"] + category
result = self._fetchPage({"link":url})
- content = self.parseDOM(result["content"], { "name":
"div", "class": "browse-item music-item ", "content": "true"})
+ content = self.parseDOM(result["content"], "li", {
"class": "yt-uix-slider-slide-item "})
- for video in content:
- videoid = self.parseDOM(video, { "name": "a",
"class": "ux-thumb-wrap " })
+ for video in content:
+ videoid = self.parseDOM(video, "a", attrs =
{"class": "ux-thumb-wrap " }, ret = "href")
+ videoid = videoid[0]
videoid = videoid[videoid.find("?v=") +
3:videoid.find("&")]
items.append(videoid)
+
if self.__dbg__:
print self.__plugin__ + " scrapeMusicCategoryHits done"
return (items, status)
-
def searchDisco(self, params = {}):
get = params.get
if self.__dbg__:
@@ -317,7 +314,7 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
#list = SoupStrainer(name="div", id ="playlist-bar")
#mix_list = BeautifulSoup(result["content"],
parseOnlyThese=list)
- mix_list = self.parseDOM(result["content"], { "name":
"div", "id": "id", "id-match": "playlist-bar", "return": "data-video-ids"})
+ mix_list = self.parseDOM(result["content"], "div", {
"id": "playlist-bar" }, ret = "data-video-ids")
if (len(mix_list) > 0):
items = mix_list[0].split(",")
@@ -334,7 +331,7 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
url = self.urls["disco_main"]
result = self._fetchPage({"link": url})
- popular = self.parseDOM(result["content"], { "name": "a", "id":
"id", "id-match": "popular-tracks"})
+ popular = self.parseDOM(result["content"], "a", { "id":
"popular-tracks"})
items = []
if (len(popular) > 0):
@@ -360,10 +357,10 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
url = self.urls["disco_main"]
result = self._fetchPage({"link":url})
- popular = self.parseDOM(result["content"], { "name": "div",
"class": "ytg-fl popular-artists"})
+ popular = self.parseDOM(result["content"], "div", { "class":
"ytg-fl popular-artists"})
yobjects = []
if len(popular) > 0:
- artists = self.parseDOM(popular, { "name": "li",
"class": "popular-artist-row disco-search", "return": "data-artist-name"})
+ artists = self.parseDOM(popular, "li", attrs = {
"class": "popular-artist-row disco-search" }, ret = "data-artist-name")
for artist in artists:
item = {}
title = self.makeAscii(artist)
@@ -395,16 +392,16 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
result = self._fetchPage({"link": url})
- live = self.parseDOM(result["content"], { "name": "div", "id":
"id", "id-match": "live-main"})
- live = self.parseDOM(live, { "name": "div", "class":
"browse-item ytg-box", "content": "true"})
+ live = self.parseDOM(result["content"], "div", { "id":
"live-main"})
+ live = self.parseDOM(live, "div", { "class": "browse-item
ytg-box"})
videos = []
if len(live) > 0:
live = "".join(live)
- ahref = self.parseDOM(live, {"name": "a", "return":
"href", "class": "live-video-title"})
- atitle = self.parseDOM(live, {"name": "a", "class":
"live-video-title", "content": "true"})
- athumb = self.parseDOM(live, {"name": "img", "id":
"alt", "id-match": "Thumbnail", "return": "src"})
- astudio = self.parseDOM(live, {"name": "a", "id":
"title", "return": "title"})
+ ahref = self.parseDOM(live, "a", attrs = {"class":
"live-video-title"}, ret = "href" )
+ atitle = self.parseDOM(live, "a", attrs = {"class":
"live-video-title"})
+ athumb = self.parseDOM(live, "img", attrs = { "alt":
"Thumbnail" }, ret = "src")
+ astudio = self.parseDOM(live, "a", ret = "title")
#print self.__plugin__ + " BLA BLA BTEST2 " +
str(len(ahref)) + " - " + str(len(atitle)) + " - " + str(len(athumb)) + " - "
+ str(len(astudio)) #+ " - " + str(len(result["content"])) + " - " +
str(len(live))
if len(ahref) == len(atitle) and len(ahref) ==
len(astudio) and len(ahref) == len(athumb):
@@ -480,12 +477,12 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
url = self.urls[get("scraper")]
result = self._fetchPage({"link": url, "login": "true"})
- liked = self.parseDOM(result["content"], { "name": "div", "id":
"id", "id-match": "vm-video-list-container"})
+ liked = self.parseDOM(result["content"], "div", { "id":
"vm-video-list-container"})
items = []
if (len(liked) > 0):
- vidlist = self.parseDOM(liked, { "name": "li",
"class":" vm-video-item ", "return": "id"})
+ vidlist = self.parseDOM(liked, "li", { "class":"
vm-video-item " }, ret = "id")
for videoid in vidlist:
videoid = videoid[videoid.rfind("video-") + 6:]
items.append(videoid)
@@ -506,8 +503,8 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
videos = re.compile('<a
href="/watch\?v=(.*)&feature=sh_e_sl&list=SL"').findall(result["content"])
- nexturl = self.parseDOM(result["content"], { "name": "button",
"id": "data-next-url", "return": "data-next-url" })
-
+ nexturl = self.parseDOM(result["content"], "button", { "class":
" yt-uix-button" }, ret = "data-next-url")
+ print "smoker "+ repr(nexturl)
if (len(nexturl) > 0):
nexturl = nexturl[0]
else:
@@ -569,12 +566,12 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
yobjects = []
- seasons = self.parseDOM(html, { "name": "div", "class":
"seasons"})
+ seasons = self.parseDOM(html, "div", attrs = {"class":
"seasons"})
if (len(seasons) > 0):
params["folder"] = "true"
- season_list = self.parseDOM(seasons, { "name": "span",
"class": "yt-uix-button-content", "content": "true"})
- atitle = self.parseDOM(seasons, { "name": "button",
"id": "type", "id-match": "button", "return": "title"})
+ season_list = self.parseDOM(seasons, "span", attrs =
{"class": "yt-uix-button-content"})
+ atitle = self.parseDOM(seasons, "button", attrs = {
"type": "button" }, ret = "title")
if len(season_list) == len(atitle) and len(atitle) > 0:
for i in range(0, len(atitle)):
@@ -616,20 +613,20 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
url = self.createUrl(params)
result = self._fetchPage({"link":url})
- showcont = self.parseDOM(result["content"], { "name":
"ul", "class": "browse-item-list"})
+ showcont = self.parseDOM(result["content"], "ul", {
"class": "browse-item-list"})
if (len(showcont) > 0):
page += 1
next = "true"
showcont = "".join(showcont)
- shows = self.parseDOM(showcont, { "name": "div",
"class": "browse-item show-item yt-uix-hovercard ", "content": "true" })
+ shows = self.parseDOM(showcont, "div", { "class":
"browse-item show-item yt-uix-hovercard " })
for show in shows:
- ahref = self.parseDOM(show, { "name": "a",
"id": "title", "return": "href" })
- acont = self.parseDOM(show, { "name": "a",
"id": "title", "return": "title" })
- athumb = self.parseDOM(show, { "name": "img",
"id": "alt", "id-match": "Thumbnail", "return": "src"})
- acount = self.parseDOM(show, { "name": "span",
"class": "show-video-counts", "content": "true" })
+ ahref = self.parseDOM(show, "a", attrs = {
"title": ".*?" }, ret = "href" )
+ acont = self.parseDOM(show, "a", ret = "title" )
+ athumb = self.parseDOM(show, "img", attrs = {
"alt": "Thumbnail" }, ret = "src")
+ acount = self.parseDOM(show, "span", { "class":
"show-video-counts" })
#print self.__plugin__ + " XXX " +
str(len(ahref)) + " - " + str(len(acont)) + " - " + str(len(athumb)) + " - " +
str(len(acount)) + repr(show)
if len(ahref) == len(acont) and len(ahref) ==
len(acount) and len(ahref) == len(athumb) and len(ahref) > 0:
@@ -697,10 +694,10 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
ytobjects = []
- dom_pages = self.parseDOM(result["content"], {"name": "div",
"class": "yt-uix-slider-title", "content": "true"})
+ dom_pages = self.parseDOM(result["content"], "div", { "class":
"yt-uix-slider-title"})
for item in dom_pages:
- ahref = self.parseDOM(item, { "name": "a", "return":
"href" })
- acont = self.parseDOM(item, { "name": "a", "content":
"true" })
+ ahref = self.parseDOM(item, "a", ret = "href" )
+ acont = self.parseDOM(item, "a")
if len(ahref) == len(acont) and len(ahref) > 0:
item = {}
cat = ahref[0]
@@ -736,8 +733,8 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
url = self.createUrl(params)
result = self._fetchPage({"link":url})
- dom_pages = self.parseDOM(result["content"], {"name":
"div", "class": "yt-uix-pager"})
- links = self.parseDOM(dom_pages, {"name": "a", "class":
"yt-uix-pager-link", "return": "data-page"})
+ dom_pages = self.parseDOM(result["content"], "div",
attrs = {"class": "yt-uix-pager"})
+ links = self.parseDOM("".join(dom_pages), "a", attrs =
{"class": "yt-uix-pager-link" }, ret = "data-page")
print self.__plugin__ + " scrapeMoviesGrid " +
str(len(dom_pages)) + " - " + str(len(links))
if len(links) > 0:
for link in links:
@@ -746,9 +743,9 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
print self.__plugin__ +
" scrapeMoviesGrid - next page ? link: " + str(link) + " > page: " + str(page +
1)
next = "true"
- dom_list = self.parseDOM(result["content"], {"name":
"ul", "class": "browse-item-list"})
- vidids = self.parseDOM(dom_list, {"name": "span",
"return": "data-video-ids"})
- thumbs = self.parseDOM(dom_list, {"name": "img",
"return": "data-thumb"})
+ dom_list = self.parseDOM(result["content"], "ul", {
"class": "browse-item-list"})
+ vidids = self.parseDOM(dom_list, "span", ret =
"data-video-ids")
+ thumbs = self.parseDOM(dom_list, "img", ret =
"data-thumb")
page += 1
if len(vidids) == len(thumbs) and len(vidids) > 0:
@@ -923,7 +920,7 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
result = self._fetchPage({"link":url})
if result["status"] == 200:
- pagination = self.parseDOM(result["content"], { "name":
"div", "class": "yt-uix-pager"})
+ pagination = self.parseDOM(result["content"], "div", {
"class": "yt-uix-pager"})
if (len(pagination) > 0):
tmp = str(pagination)
@@ -931,32 +928,30 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
next = "true"
- trailers = self.parseDOM(result["content"], { "name":
"div", "id": "id", "id-match": "popular-column" })
+ trailers = self.parseDOM(result["content"], "div",
attrs = { "id": "popular-column" })
- if (len(trailers) > 0):
- ahref = self.parseDOM(trailers, { "name": "a",
"class": "ux-thumb-wrap ", "return": "href"})
+ if len(trailers) > 0:
+ ahref = self.parseDOM(result["content"], "a",
attrs = { "class": "ux-thumb-wrap " }, ret = "href")
if len(ahref) == 0:
- ahref = self.parseDOM(trailers, {
"name": "a", "class": "ux-thumb-wrap contains-addto", "return": "href"})
+ ahref = self.parseDOM(trailers, "a",
attrs = { "class": "ux-thumb-wrap contains-addto" }, ret = "href")
- athumbs = self.parseDOM(trailers, { "name":
"a", "class": "ux-thumb-wrap ", "content": "true"})
+ athumbs = self.parseDOM(trailers, "a", attrs =
{ "class": "ux-thumb-wrap "})
if len(athumbs) == 0:
- athumbs = self.parseDOM(trailers, {
"name": "a", "class": "ux-thumb-wrap contains-addto", "content": "true"})
-
- if len(athumbs) == len(ahref) and len(ahref) >
0:
- for i in range(0 , len(ahref)):
- videoid = ahref[i]
+ athumbs = self.parseDOM(trailers, "a",
attrs = { "class": "ux-thumb-wrap contains-addto"})
+ for i in range(0 , len(ahref)):
+ videoid = ahref[i]
- if (videoid):
- if (videoid.find("=") >
-1):
- videoid =
videoid[videoid.find("=")+1:]
- thumb =
self.parseDOM(athumbs[i], { "name": "img", "return": "src"});
- if len(thumb) > 0:
- thumb = thumb[0]
- items.append((videoid, thumb))
+ if (videoid):
+ if (videoid.find("=") > -1):
+ videoid =
videoid[videoid.find("=")+1:]
+ thumb = self.parseDOM(athumbs[i],
"img", attrs = { "alt": "Thumbnail"}, ret = "src")
+ if len(thumb) > 0:
+ thumb = thumb[0]
+ items.append((videoid, thumb))
if self.__dbg__:
- print self.__plugin__ + " scrapeGridFormat done"
- return (items, result["status"])
+ print self.__plugin__ + " scrapeGridFormat done "
+ return (items, result["status"])
def scrapeCategoryList(self, params = {}):
get = params.get
@@ -975,45 +970,48 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
result = self._fetchPage({"link":url})
if result["status"] == 200:
- categories = self.parseDOM(result["content"], {"name":
"div", "class": "yt-uix-expander-body.*"})
+ categories = self.parseDOM(result["content"], "div",
attrs = {"class": "yt-uix-expander-body.*?"})
if len(categories) == 0:
- categories = self.parseDOM(result["content"],
{"name": "div", "class": "browse-filter-menu.*"})
-
- if (len(categories) > 0):
- ahref = self.parseDOM(categories, {"name": "a",
"return": "href"})
- acontent = self.parseDOM(categories, {"name":
"a", "content": "true"})
+ categories = self.parseDOM(result["content"],
"div", attrs = {"id": "browse-filter-menu"})
- if len(acontent) == len(ahref) and len(ahref) >
0:
- for i in range(0 , len(ahref)):
- item = {}
- title = acontent[i]
- title = title.replace("&",
"&")
- if title == "All Categories" or
title == "Education" or title == "":
- continue
- item['Title'] = title
- cat = ahref[i].replace("/" +
scraper + "/", "")
+ if len(categories) == 0: # <- is this needed. Anyways.
it breaks. fix that..
+ categories = self.parseDOM(result["content"],
"div", attrs = {"class": "browse-filter-menu.*?"})
+
+ for cat in categories:
+ print self.__plugin__ + " scrapeCategoryList :
" + cat[0:50]
+ ahref = self.parseDOM(cat, "a", ret = "href")
+ acontent = self.parseDOM(cat, "a")
+ for i in range(0 , len(ahref)):
+ item = {}
+ title = acontent[i]
+ title = title.replace("&", "&")
+ if title == "All Categories" or title
== "Education" or title == "":
+ continue
+ item['Title'] = title
- if get("scraper") ==
"categories":
- if title == "Music":
- continue
- if cat.find("comedy") >
0:
- cat = "?c=23"
- if cat.find("gaming") >
0:
- cat = "?c=20"
- if
cat.find("education") > 0:
-
item["subcategory"] = "true"
+ cat = ahref[i].replace("/" + scraper +
"/", "")
- if get("scraper") == "movies":
- if cat.find("pt=nr") >
0:
- continue
- elif
cat.find("indian-cinema") > -1 or cat.find("foreign-film") > -1:
-
item["subcategory"] = "true"
+ if get("scraper") == "categories":
+ if title == "Music":
+ continue
+ if cat.find("comedy") > 0:
+ cat = "?c=23"
+ if cat.find("gaming") > 0:
+ cat = "?c=20"
+ if cat.find("education") > 0:
+ item["subcategory"] =
"true"
- cat = urllib.quote_plus(cat)
- item['category'] = cat
- item['scraper'] = scraper
- item["thumbnail"] = thumbnail
- yobjects.append(item)
+ if get("scraper") == "movies":
+ if cat.find("pt=nr") > 0:
+ continue
+ elif cat.find("indian-cinema")
> -1 or cat.find("foreign-film") > -1:
+ item["subcategory"] =
"true"
+
+ cat = urllib.quote_plus(cat)
+ item['category'] = cat
+ item['scraper'] = scraper
+ item["thumbnail"] = thumbnail
+ yobjects.append(item)
if (not yobjects):
if self.__dbg__:
@@ -1036,9 +1034,9 @@ class YouTubeScraper(YouTubeCore.YouTubeCore,
YouTubeUtils.YouTubeUtils):
if not get("page"):
(result, status) =
params["new_results_function"](params)
-
- if self.__dbg__ and False:
- print self.__plugin__ + " paginator new result
" + repr(result)
+
+ if self.__dbg__:
+ print self.__plugin__ + " paginator new result
" + str(repr(result))[0:50]
if len(result) == 0:
if get("scraper") not in ["music_top100"]:
diff --git a/plugin.video.youtube/addon.xml b/plugin.video.youtube/addon.xml
index 6f9b3c4..3547fc9 100644
--- a/plugin.video.youtube/addon.xml
+++ b/plugin.video.youtube/addon.xml
@@ -1,10 +1,11 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.youtube"
name="YouTube"
- version="2.6.1"
+ version="2.6.3"
provider-name="TheCollective">
<requires>
<import addon="xbmc.python" version="2.0"/>
+ <import addon="script.module.simplejson" version="2.0.10"/>
</requires>
<extension point="xbmc.python.pluginsource" library="default.py">
<provides>video</provides>
diff --git a/plugin.video.youtube/changelog.txt
b/plugin.video.youtube/changelog.txt
index a2dcfc6..2810055 100644
--- a/plugin.video.youtube/changelog.txt
+++ b/plugin.video.youtube/changelog.txt
@@ -12,10 +12,18 @@
- [XBMC] Subtitles and Annotations dont work the first time 90% of the time
- Download of rtmpe streams not supported.
-[B]Version 2.6.1 (Beta)[B]
+[B]Version 2.6.3 (Beta)[/B]
+- Fixed plugin to prefer h264 for 720p again as this was causing problems for
apple tv2 users
+- Fixed: added dependency on simplejson since we're using that now and a lot of
nightly users were having problems with the plugin.
+
+[B]Version 2.6.2 (Beta)[/B]
+- Added missing settings strings
+- Changed login code: no longer prints username and password to the log file when
the plugin's debug mode is enabled (broken since 2.6)
+
+[B]Version 2.6.1 (Beta)[/B]
- Fixed login after youtube changed the procedure for oauth2 verification
-[B]Version 2.6.0 (Beta)[B]
+[B]Version 2.6.0 (Beta)[/B]
- Added new playback function. Now uses unified code across all pages (html
scraping, embedded, flash fallback). Supports fallback server.
- Added fallback for age restricted videos (Will fail if embedding is disabled)
- Added support for locally stored artists in Youtube Music
@@ -39,7 +47,7 @@
- Fixed playback after youtube changes
- Fixed RTMPe support (we now provide playpath correctly) - NOTE: this
requires an updated version of librtmp
-[B]Version 2.5.0 (Beta)[B]
+[B]Version 2.5.0 (Beta)[/B]
- Added new option to select playback and download quality with dialogs
- Added Caching and alphabetically sorting of My playlists, My contacts and My
subscriptions
- Added "Play all" context menu on certain folders
@@ -157,7 +165,7 @@
- Hardened handling of age restricted videos
- No longer try login when no credentials have been provided.
-[B]Version 1.9.2[/B
+[B]Version 1.9.2[/B]
- Disable socket timeout since that makes login fail on windows.
[B]Version 1.9.1[/B]
diff --git a/plugin.video.youtube/default.py b/plugin.video.youtube/default.py
index e78be88..c29356c 100644
--- a/plugin.video.youtube/default.py
+++ b/plugin.video.youtube/default.py
@@ -19,7 +19,7 @@
import sys, xbmcaddon
# plugin constants
-__version__ = "2.6.1"
+__version__ = "2.6.3"
__plugin__ = "YouTube" + __version__
__author__ = "TheCollective"
__url__ = "www.xbmc.com"
diff --git a/plugin.video.youtube/resources/language/English/strings.xml
b/plugin.video.youtube/resources/language/English/strings.xml
index 656d385..fb7a757 100644
--- a/plugin.video.youtube/resources/language/English/strings.xml
+++ b/plugin.video.youtube/resources/language/English/strings.xml
@@ -146,6 +146,8 @@
<string id="30283">Japanese</string>
<string id="30284">Enable annotations</string>
<string id="30285">Use YouTube cache servers</string>
+ <string id="30286">Show YouTube Music</string>
+ <string id="30287">Show YouTube Live</string>
<!-- Menu strings -->
<string id="30500">Clear refinements</string>
@@ -212,5 +214,6 @@
<string id="30625">Download Failed</string>
<string id="30626">RTMPe playback currently not supported</string>
<string id="30627">Please provide 2-factor authentication PIN</string>
+ <string id="30628">Please provide your password</string>
</strings>
\ No newline at end of file
-----------------------------------------------------------------------
Summary of changes:
plugin.video.youtube/YouTubeCore.py | 191 ++++++++---------
plugin.video.youtube/YouTubeLogin.py | 125 ++++++-----
plugin.video.youtube/YouTubePlayer.py | 2 +-
plugin.video.youtube/YouTubeScraper.py | 236 ++++++++++----------
plugin.video.youtube/addon.xml | 3 +-
plugin.video.youtube/changelog.txt | 16 +-
plugin.video.youtube/default.py | 2 +-
.../resources/language/English/strings.xml | 3 +
8 files changed, 295 insertions(+), 283 deletions(-)
hooks/post-receive
--
Plugins
------------------------------------------------------------------------------
Doing More with Less: The Next Generation Virtual Desktop
What are the key obstacles that have prevented many mid-market businesses
from deploying virtual desktops? How do next-generation virtual desktops
provide companies an easier-to-deploy, easier-to-manage and more affordable
virtual desktop model? http://www.accelacomm.com/jaw/sfnl/114/51426474/
_______________________________________________
Xbmc-addons mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/xbmc-addons