The branch, dharma has been updated
       via  8fefcf77bce330653eb4bb02d54594acbeaf7325 (commit)
      from  d0220d5546d12068614f869a7308f82b7dfcfdb0 (commit)

- Log -----------------------------------------------------------------
http://xbmc.git.sourceforge.net/git/gitweb.cgi?p=xbmc/plugins;a=commit;h=8fefcf77bce330653eb4bb02d54594acbeaf7325

commit 8fefcf77bce330653eb4bb02d54594acbeaf7325
Author: spiff <[email protected]>
Date:   Sun May 22 09:56:00 2011 +0200

    [plugin.video.youtube] updated to version 2.0.4

diff --git a/plugin.video.youtube/YouTubeCore.py 
b/plugin.video.youtube/YouTubeCore.py
index b416179..199fa6b 100755
--- a/plugin.video.youtube/YouTubeCore.py
+++ b/plugin.video.youtube/YouTubeCore.py
@@ -75,7 +75,7 @@ class YouTubeCore(object):
        def login(self, error = 0):
                if self.__dbg__:
                        print self.__plugin__ + " login - errors: " + str(error)
-                       
+               
                uname = self.__settings__.getSetting( "username" )
                passwd = self.__settings__.getSetting( "user_password" )
                
@@ -382,6 +382,7 @@ class YouTubeCore(object):
                        con = urllib2.urlopen(url);
                        file.write(con.read())
                        con.close()
+                       file.close()
                        
                        os.rename(filename_incomplete, filename_complete)
                        
@@ -669,7 +670,7 @@ class YouTubeCore(object):
                        if self.__dbg__:
                                print self.__plugin__ +  " _fetchPage adding 
cookie"
                        request.add_header('Cookie', 'LOGIN_INFO=' + 
self._httpLogin() )
-
+               
                if auth:
                        authkey = self._getAuth()
                        if ( not authkey ):
@@ -684,55 +685,22 @@ class YouTubeCore(object):
                        result = con.read()
                        new_url = con.geturl()
                        con.close()
-
+                       
                        # Return result if it isn't age restricted
-                       if ( result.find("verify-age-actions") == -1):
+                       if ( result.find("verify-actions") == -1 and 
result.find("verify-age-actions") == -1):
                                return ( result, 200 )
-                       
-                       # review this before 2.0 final
                        elif ( error < 10 ):
-                               
                                # We need login to verify age.       
                                if not login:
                                        if self.__dbg__:
                                                print self.__plugin__ + " 
_fetchPage age verification required, retrying with login"
                                        error = error + 0
                                        return self._fetchPage(link, api, auth, 
login = True, error = error)
-
-                               if self.__dbg__:
-                                       print self.__plugin__ + " _fetchPage 
Video age restricted, trying to verify for url: " + new_url
-
-                               # Fallback for missing confirm form.
-                               if result.find("confirm-age-form") == -1:
-                                       if self.__dbg__:
-                                               print self.__plugin__ + " 
_fetchPage: Sorry - you must be 18 or over to view this video or group"
-                                       return ( self.__language__( 30608 ) , 
303 )
-                                                               
-                               request = urllib2.Request(new_url)
-                               request.add_header('User-Agent', self.USERAGENT)
-                               request.add_header('Cookie', 'LOGIN_INFO=' + 
self._httpLogin(True) )
-
-                               # This really should be a regex, but the regex 
kept failing.
-                               temp = 
result[result.find("verify-age-actions"):(result.find("verify-age-actions") + 
600)]
-                               next_url = temp[( temp.find('"next_url" 
value="') + len('"next_url" value="')):]
-                               next_url = next_url[:next_url.find('"')] 
-                                       
-                               if self.__settings__.getSetting( "safe_search" 
) == "0":
-                                       confirmed = 1
-                               else:
-                                       confirmed = 0
-                       
-                               values = { "next_url": next_url, 
"action_confirm": confirmed }
-
-                               con = urllib2.urlopen(request, 
urllib.urlencode(values))
-                               result = con.read()
-                               con.close()
                                
                                if self.__dbg__:
-                                       print self.__plugin__ + " _fetchPage. 
Age should now be verified, calling _fetchPage again"
-                                       
-                               return self._fetchPage(link, api, auth, login = 
True, error = error + 1)
-
+                                       print self.__plugin__ + " _fetchPage 
verifying age"
+                               return self._verifyAge(result, new_url, link, 
api, auth, login, error) 
+                       
                        if self.__dbg__:
                                print self.__plugin__ + " _fetchPage. Too many 
errors"
                        return ( "", 500 )
@@ -741,7 +709,7 @@ class YouTubeCore(object):
                        err = str(e)
                        if self.__dbg__:
                                print self.__plugin__ + " _fetchPage HTTPError 
: " + err
-
+                       
                        # 400 (Bad request) - A 400 response code indicates 
that a request was poorly formed or contained invalid data. The API response 
content will explain the reason why the API returned a 400 response code.
                        if ( err.find("400") > -1 ):
                                return ( err, 303 )
@@ -784,6 +752,68 @@ class YouTubeCore(object):
                                                                                
                 , sys.exc_info()[2].tb_frame.f_code.co_name, 
sys.exc_info()[2].tb_lineno, sys.exc_info()[1])
                                
                        return ( "", 500 )
+                       
+       def _verifyAge(self, result, new_url, link, api = False, auth=False, 
login=False, error = 0):
+               login_info = self._httpLogin(True)
+               confirmed = "0"
+               if self.__settings__.getSetting( "safe_search" ) != "2":
+                       confirmed = "1"
+               
+               request = urllib2.Request(new_url)
+               request.add_header('User-Agent', self.USERAGENT)
+               request.add_header('Cookie', 'LOGIN_INFO=' + login_info)
+               con = urllib2.urlopen(request)
+               result = con.read()
+               
+               # Fallback for missing confirm form.
+               if result.find("confirm-age-form") == -1:
+                       if self.__dbg__ or True:
+                               print self.__plugin__ + " Failed trying to 
verify-age could find confirm age form."
+                               print self.__plugin__ + " html page given: " + 
repr(result)
+                       return ( self.__language__( 30600 ) , 303 )
+                                               
+               # get next_url
+               next_url_start = result.find('"next_url" value="') + 
len('"next_url" value="')
+               next_url_stop = result.find('">',next_url_start)
+               next_url = result[next_url_start:next_url_stop]
+               
+               if self.__dbg__:
+                       print self.__plugin__ + " next_url=" + next_url
+               
+               # get session token to get around the cross site scripting 
prevention
+               session_token_start = result.find("'XSRF_TOKEN': '") + 
len("'XSRF_TOKEN': '")
+               session_token_stop = result.find("',",session_token_start) 
+               session_token = result[session_token_start:session_token_stop]
+               
+               if self.__dbg__:
+                       print self.__plugin__ + " session_token=" + 
session_token
+               
+               # post collected information to the age verification page
+               request = urllib2.Request(new_url)
+               request.add_header('User-Agent', self.USERAGENT)
+               request.add_header('Cookie', 'LOGIN_INFO=' + login_info )
+               
request.add_header("Content-Type","application/x-www-form-urlencoded")
+               values = urllib.urlencode( { "next_url": next_url, 
"action_confirm": confirmed, "session_token":session_token })
+               
+               if self.__dbg__:
+                       print self.__plugin__ + " post page content: " + values
+               
+               con = urllib2.urlopen(request, values)
+               new_url = con.geturl()
+               result = con.read()
+               con.close()
+               
+               # If verification is successful the new url must look like: 
'http://www.youtube.com/index?has_verified=1'
+               if new_url.find("has_verified=1") > 0:
+                       if self.__dbg__:
+                               print self.__plugin__ + " Age Verification 
sucessfull " + new_url
+                       return self._fetchPage(link, api, auth, login = True, 
error = error + 1)
+               
+               # If verification failed we dump a shit load of info to the logs
+               print self.__plugin__ + " age verification failed with result: 
" + repr(result)
+               print self.__plugin__ + " result url: " + repr(new_url)
+               return (self.__language__(30600), 303)
+
                
        def _extractVariables(self, videoid):
                if self.__dbg__:
@@ -1136,7 +1166,6 @@ class YouTubeCore(object):
                                        infoString += "Date Uploaded: " + 
video['Date'][:video['Date'].find("T")] + ", "                                
                                infoString += "View count: " + 
str(video['count'])
                                video['Plot'] = infoString + "\n" + 
video['Plot']
-                               print "plot updated"
                                video['Genre'] = self._getNodeAttribute(node, 
"media:category", "label", "Unknown Genre").encode( "utf-8" )
 
                                if node.getElementsByTagName("link"):
@@ -1278,8 +1307,6 @@ class YouTubeCore(object):
                                                   'continue': cont})
 
                        # Login to Google
-                       #if self.__dbg__:
-                       #       print self.__plugin__ + " _httpLogin: step 2"
                        url = 
urllib2.Request('https://www.google.com/accounts/ServiceLoginAuth?service=youtube',
 params)
                        url.add_header('User-Agent', self.USERAGENT)
                
@@ -1289,10 +1316,6 @@ class YouTubeCore(object):
                        newurl = re.compile('<meta http-equiv="refresh" 
content="0; url=&#39;(.*)&#39;"></head>').findall(result)[0].replace("&amp;", 
"&")
                        url = urllib2.Request(newurl)
                        url.add_header('User-Agent', self.USERAGENT)
-                       
-                       # Login to youtube
-                       #if self.__dbg__:
-                       #       print self.__plugin__ + " _httpLogin: step 3"
                        con = urllib2.urlopen(newurl)
                        
                        # Save cookiefile in settings
diff --git a/plugin.video.youtube/YouTubeNavigation.py 
b/plugin.video.youtube/YouTubeNavigation.py
index 081b379..6a1340e 100644
--- a/plugin.video.youtube/YouTubeNavigation.py
+++ b/plugin.video.youtube/YouTubeNavigation.py
@@ -230,6 +230,7 @@ class YouTubeNavigation:
                                feed = feed % get("contact")
                        elif ( get("channel")):
                                feed = feed % get("channel")
+                               print "found channel " + feed
                        elif ( get("playlist")):
                                feed = feed % get("playlist")
                        elif ( get("feed") == "uploads" or get("feed") == 
"favorites" or  get("feed") == "playlists" or get("feed") == "subscriptions" or 
get("feed") == "newsubscriptions"):
@@ -352,7 +353,13 @@ class YouTubeNavigation:
                get = params.get
 
                feed = self.feeds[get("feed")]
-                                                                       
+               
+               if (get("channel")):
+                       feed = feed % get("channel")
+               
+               if (get("videoid")):
+                       feed = feed % get("videoid")
+               
                ( result, status ) = core.feeds(feed, params)
                if status != 200:
                        feed_label = ""
@@ -384,6 +391,7 @@ class YouTubeNavigation:
                        if (get("scraper") == "disco_top_artist" 
                                or get("scraper") == "shows"
                                or (get("scraper") == "movies" and not 
get("category"))
+                               or (get("scraper") == "movies" and 
get("subcategory"))
                                or (get("scraper") == "categories" and not 
get("category"))
                                ):
                                self.parseFolderList(params, results)
@@ -971,7 +979,7 @@ class YouTubeNavigation:
        def buildItemUrl(self, item_params = {}, url = ""):
                for k, v in item_params.items():
                        if (k != "path" and k != "thumbnail" and k!= 
"playlistId" and k!= "next" and k != "content" and k!= "editid"
-                               and k!= "summary" and k!= "published" and 
k!="Title" and k!= "Title" ):
+                               and k!= "summary" and k!= "published" and 
k!="Title"):
                                url += k + "=" + v + "&"
                return url
 
@@ -1003,13 +1011,13 @@ class YouTubeNavigation:
                        url_studio = urllib.quote_plus(studio)
                        
                        if (get("feed") != "subscriptions_favorites" and 
get("feed") != "subscriptions_uploads" and get("feed") != 
"subscriptions_playlists"):
-                               cm.append( ( self.__language__( 30516 ) % 
studio, 
"XBMC.Container.Update(%s?path=%s&login=true&feed=subscriptions_uploads&view_mode=subscriptions_uploads&channel=%s)"
 % ( sys.argv[0],  get("path"), url_studio ) ) )
+                               cm.append( ( self.__language__( 30516 ) % 
studio, 
"XBMC.Container.Update(%s?path=%s&feed=subscriptions_uploads&view_mode=subscriptions_uploads&channel=%s)"
 % ( sys.argv[0],  get("path"), url_studio ) ) )
                        
                        if (get("action") == "search_disco"):
                                cm.append( ( self.__language__( 30523 ) % 
title, "XBMC.Container.Update(%s?path=%s&action=search_disco&search=%s)" % ( 
sys.argv[0],  get("path"), url_title ) ) )
                        
                        cm.append( ( self.__language__( 30514 ), 
"XBMC.Container.Update(%s?path=%s&action=search&search=%s)" % ( sys.argv[0],  
get("path"), url_title ) ) )
-                       cm.append( ( self.__language__( 30529 ), 
"XBMC.Container.Update(%s?path=%s&action=list_related&videoid=%s)" % ( 
sys.argv[0],  get("path"), item("videoid") ) ) )
+                       cm.append( ( self.__language__( 30529 ), 
"XBMC.Container.Update(%s?path=%s&feed=list_related&videoid=%s)" % ( 
sys.argv[0],  get("path"), item("videoid") ) ) )
                        cm.append( ( self.__language__( 30527 ), 
"XBMC.ActivateWindow(VideoPlaylist)"))
                        cm.append( ( self.__language__( 30504 ), 
"XBMC.Action(Queue)", ) )
                        cm.append( ( self.__language__( 30502 ), 
"XBMC.Action(Info)", ) )
diff --git a/plugin.video.youtube/YouTubeScraperCore.py 
b/plugin.video.youtube/YouTubeScraperCore.py
index eed90a2..7f15c5e 100644
--- a/plugin.video.youtube/YouTubeScraperCore.py
+++ b/plugin.video.youtube/YouTubeScraperCore.py
@@ -32,19 +32,23 @@ class YouTubeScraperCore:
        
        urls = {}
        urls['categories'] = "http://www.youtube.com/videos";
-       urls['movies'] = "http://www.youtube.com/movies";
-       urls['shows'] = "http://www.youtube.com/shows";
-       urls['show_list'] = "http://www.youtube.com/show";
+       urls['current_trailers'] = 
"http://www.youtube.com/trailers?s=trit&p=%s&hl=en";
        urls['disco_main'] = "http://www.youtube.com/disco"; 
-       urls['disco_search'] = 
"http://www.youtube.com/disco?action_search=1&query=%s";
        urls['disco_mix_list'] = 
"http://www.youtube.com/watch?v=%s&feature=disco&playnext=1&list=%s";
+       urls['disco_search'] = 
"http://www.youtube.com/disco?action_search=1&query=%s";
+       urls['game_trailers'] = "http://www.youtube.com/trailers?s=gtcs";
+       urls['live'] = "http://www.youtube.com/live";
        urls['main'] = "http://www.youtube.com";
+       urls['movies'] = "http://www.youtube.com/ytmovies";
+       urls['popular_game_trailers'] = 
"http://www.youtube.com/trailers?s=gtp&p=%s&hl=en";
+       urls['popular_trailers'] = 
"http://www.youtube.com/trailers?s=trp&p=%s&hl=en";
+       urls['recommended'] = "http://www.youtube.com/videos?r=1&hl=en";
+       urls['show_list'] = "http://www.youtube.com/show";
+       urls['shows'] = "http://www.youtube.com/shows";
        urls['trailers'] = "http://www.youtube.com/trailers?s=tr";
-       urls['current_trailers'] = 
"http://www.youtube.com/trailers?s=trit&p=%s&hl=en";
+       urls['upcoming_game_trailers'] = 
"http://www.youtube.com/trailers?s=gtcs&p=%s&hl=en";
        urls['upcoming_trailers'] = 
"http://www.youtube.com/trailers?s=tros&p=%s&hl=en";
-       urls['popular_trailers'] = 
"http://www.youtube.com/trailers?s=trp&p=%s&hl=en";
-       urls['recommended'] = "http://www.youtube.com/videos?r=1";
-       urls['movies_list'] = ""
+       urls['watch_later'] = "http://www.youtube.com/my_watch_later_list";
 
 #=================================== Recommended 
============================================
        def scrapeRecommended(self, params = {}):
@@ -84,8 +88,6 @@ class YouTubeScraperCore:
                return (ytobjects, status)
        
        def _scrapeYouTubeData(self, url, retry = True):
-               if self.__dbg__:
-                       print self.__plugin__ + " _scrapeYouTubeData: " + url
                result = ""
 
                login_info = self.__settings__.getSetting( "login_info" )
@@ -97,34 +99,26 @@ class YouTubeScraperCore:
                url.add_header('User-Agent', self.USERAGENT)
                url.add_header('Cookie', 'LOGIN_INFO=' + login_info)
 
-               try:
+               try:            
                        con = urllib2.urlopen(url)
                        result = con.read()
                        con.close()
-
+       
                        videos = re.compile('<a 
href="/watch\?v=(.*)&amp;feature=grec_browse" class=').findall(result);
-
+               
                        if len(videos) == 0:
                                videos = re.compile('<div id="reco-(.*)" 
class=').findall(result);
-
+       
                        if ( len(videos) == 0 and retry ):
                                self.core._httpLogin()
                                videos = self._scrapeYouTubeData(url, False)
                        if self.__dbg__:
                                print self.__plugin__ + " _scrapeYouTubeData 
done"
-                       return ( videos, 200 )
-               except urllib2.HTTPError, e:
-                       if self.__dbg__:
-                               print self.__plugin__ + " _scrapeYouTubeData 
exception: " + str(e)
-                       return ( self.__language__(30619), "303" )
                except:
-                       if self.__dbg__:
-                               print self.__plugin__ + " _scrapeYouTubeData 
uncaught exception"
-                               print 'ERROR: %s::%s (%d) - %s' % 
(self.__class__.__name__
-                                                                  , 
sys.exc_info()[2].tb_frame.f_code.co_name, sys.exc_info()[2].tb_lineno, 
sys.exc_info()[1])
-                               print self.__plugin__ + " _scrapeYouTubeData 
result: " + repr(result)
-                       return ( "", 500 )
-
+                       print self.__plugin__ + "_scrapeYouTubeData failed"
+               
+               return ( videos, 200 )
+               
 #=================================== Trailers 
============================================
        def scrapeTrailersListFormat (self, page, params = {}):
                get = params.get                 
@@ -154,13 +148,12 @@ class YouTubeScraperCore:
                        return (yobjects, 500)
                
                return (yobjects, status)
+       
 #=================================== Categories  
============================================
-
        def scrapeCategoriesGrid(self, html, params = {}):
+               get = params.get
                if self.__dbg__:
                        print self.__plugin__ + " scrapeCategoriesGrid"
-                       
-               get = params.get
                
                next = "false"
                pager = SoupStrainer(name="div", attrs = 
{'class':"yt-uix-pager"})
@@ -170,7 +163,7 @@ class YouTubeScraperCore:
                        tmp = str(pagination)
                        if (tmp.find("Next") > 0):
                                next = "true"
-
+               
                list = SoupStrainer(name="div", id="browse-video-data")
                videos = BeautifulSoup(html, parseOnlyThese=list)
                
@@ -183,6 +176,19 @@ class YouTubeScraperCore:
                                        id = id[id.find("=") + 1:id.find("&")]
                                        items.append(id)
                                video = video.findNextSibling(name="div", attrs 
= {'class':"video-cell *vl"})
+               else:
+                       list = SoupStrainer(name="div", attrs = 
{'class':"most-viewed-list paginated"})
+                       videos = BeautifulSoup(html, parseOnlyThese=list)
+                       if (len(videos) > 0):
+                               video = 
videos.div.div.findNextSibling(name="div", attrs={'class':"video-cell"})
+                               while (video != None):
+                                       id = video.div.a["href"]
+                                       if (id.find("/watch?v=") != -1):
+                                               id = id[id.find("=") + 1:]
+                                       if (id.find("&") > 0):
+                                               id = id[:id.find("&")]
+                                       items.append(id)
+                                       video = 
video.findNextSibling(name="div", attrs = {'class':"video-cell"})
                
                if (items):
                        (results, status) = self.core._get_batch_details(items)
@@ -240,6 +246,7 @@ class YouTubeScraperCore:
                        print "Disco search url %s" % url
                page = self._fetchPage(url)
                if (page.find("list=") != -1):
+                       page = page.replace("\u0026", "&")
                        mix_list_id = page[page.find("list=") + 5:]
                        if (mix_list_id.find("&") != -1):
                                mix_list_id = 
mix_list_id[:mix_list_id.find("&")]
@@ -253,7 +260,7 @@ class YouTubeScraperCore:
                                                                                
                        page = self._fetchPage(url)
                        
-                       list = SoupStrainer(name="div", id ="quicklist")
+                       list = SoupStrainer(name="div", id ="playlist-bar")
                        mix_list = BeautifulSoup(page, parseOnlyThese=list)
                        if (len(mix_list) > 0):
                                match = 
mix_list.div["data-video-ids"].split(",")
@@ -274,12 +281,12 @@ class YouTubeScraperCore:
                page = self._fetchPage(url, params)
                list = SoupStrainer(name="div", attrs = 
{"class":"popular-message"})
                popular = BeautifulSoup(page, parseOnlyThese=list)
-               result = []
+               items = []
                if (len(popular) > 0):
                        videos = self.urls["main"] + popular.a["onclick"]
-                       if (videos.find("&quot;") > 0):
-                               videos = 
videos[videos.find("&quot;"):videos.rfind("])")]
-                               videos = videos.replace("&quot;","")
+                       if (videos.find("([") > 0):
+                               videos = videos[videos.find("([") + 
2:videos.rfind("])")]
+                               videos = videos.replace('"',"")
                                videos = videos.replace(" ","")
                                items = videos.split(",")
                                return self.core._get_batch_details(items)
@@ -344,7 +351,7 @@ class YouTubeScraperCore:
                
                return (ytobjects, status)
                
-               # If the show contains more than one season the function will 
returns a list of folder items,
+               # If the show contains more than one season the function will 
return a list of folder items,
                # otherwise a paginated list of video items is returned
        def scrapeShow(self, html, params = {}):
                get = params.get
@@ -372,10 +379,13 @@ class YouTubeScraperCore:
                                if (str(season).find("page not-selected") > 0):
                                        season_url = season["href"]
                                        
-                                       if (season_url.find("&amp;s=") > 0):
-                                               season_url = 
season_url[season_url.find("&amp;s=") + 7:]
-                                               if (season_url.find("&amp;") > 
0):
-                                                       season_url = 
season_url[:season_url.find("&amp;")]
+                                       if season_url:
+                                               season_url = 
season_url.replace("\u0026", "&")
+                                       
+                                       if (season_url.find("&s=") > 0):
+                                               season_url = 
season_url[season_url.find("&s=") + 3:]
+                                               if (season_url.find("&") > 0):
+                                                       season_url = 
season_url[:season_url.find("&")]
                                                item["Title"] = "Season " + 
season_url.encode("utf-8")
                                                item["season"] = 
season_url.encode("utf-8")
                                                item["thumbnail"] = "shows"
@@ -466,9 +476,78 @@ class YouTubeScraperCore:
                        return (self.__language__(30601), 303)
                
                yobjects[len(yobjects) -1]["next"] = next
-               
+                       
                return (yobjects, status)
 
+#=================================== Movies 
============================================               
+
+       def scrapeMovieSubCategory(self, html, params = {}):
+               get = params.get
+               ytobjects = []
+
+               list = SoupStrainer(name="div", attrs = {'class':"ytg-fl 
browse-content"})
+               categories = BeautifulSoup(html, parseOnlyThese=list)
+               if len(categories):
+                       categorylist = categories.findAll(name="div", attrs = 
{'class':"yt-uix-slider-head"})
+                       for category in categorylist:
+                               item = {}
+                               cat = category.div.button["href"]
+                               title = category.div.findNextSibling(name="div")
+                               title = title.h2.contents[0].strip()
+                               item['Title'] = title
+                               cat = cat.replace("/movies/", "")               
                                                                                
+                               cat = urllib.quote_plus(cat)
+                               item['category'] = cat
+                               item['scraper'] = "movies"
+                               item["thumbnail"] = "movies"
+                               ytobjects.append(item)
+               
+               params["folder"] = "true"
+               return (ytobjects, 200)
+       
+       def scrapeMoviesGrid(self, html, params = {}):
+               get = params.get
+               yobjects = []
+               next = "false"
+               
+               pager = SoupStrainer(name="div", attrs = 
{'class':"yt-uix-pager"})
+               pagination = BeautifulSoup(html, parseOnlyThese=pager)
+
+               if (len(pagination) > 0):
+                       tmp = str(pagination)
+                       if (tmp.find("Next") > 0):
+                               next = "true"
+                       
+               list = SoupStrainer(name="ul", attrs = 
{'class':"browse-item-list"})
+               movies = BeautifulSoup(html, parseOnlyThese=list)
+               
+               if (len(movies) > 0):
+                       movie = movies.li
+                                               
+                       items = []
+                       while ( movie != None ):
+                               videoid = ""
+                               video_info = 
movie.div.a.span.findNextSibling(name="span")
+                               if video_info:
+                                       videoid = video_info['data-video-ids']
+                                               
+                               if (videoid):                                   
+                                       items.append( (videoid, 
movie.div.a.span.img["data-thumb"]) )
+                               
+                               movie = movie.findNextSibling(name="li")
+                       
+                       (yobjects, result ) = 
self.core._get_batch_details_thumbnails(items)
+                       
+                       if result != 200:
+                               return (yobjects, result)
+
+               if (not yobjects):
+                       return (yobjects, 500)
+               
+               yobjects[len(yobjects)-1]['next'] = next
+
+               return (yobjects, 200)
+
 #=================================== Common 
============================================               
 
        def _fetchPage(self, feed, params = {}):
@@ -486,10 +565,13 @@ class YouTubeScraperCore:
                result = []
 
                if ( get("scraper") == "categories" and get("category")):
-                       scraper_per_page = 23
+                       if urllib.unquote_plus(get("category")).find("/") != -1:
+                               scraper_per_page = 23
+                       else:
+                               scraper_per_page = 36
                elif ( get("scraper") == "shows" and get("category")):
                        scraper_per_page = 44
-               elif ( get("scraper") == "movies" and get("category")):
+               elif ( get("scraper") == "movies" and get("category") and not 
get("subcategory")):
                        scraper_per_page = 60           
                elif (get("scraper") != "shows" and get("scraper") != "show" 
and get("scraper") != "categories" and get("scraper") != "movies" and 
get("scraper") in self.urls):
                        scraper_per_page = 40
@@ -509,14 +591,16 @@ class YouTubeScraperCore:
                        
                        params["page"] = str(begin_page)
                        url = self.createUrl(params)
-                       html = self._fetchPage(url, params)
                        if (self.__dbg__):
                                print "fetching url " + url
+                       html = self._fetchPage(url, params)
 
                        if (get("scraper") == "categories"):
                                (result, status) = 
self.scrapeCategoriesGrid(html, params)
                        elif (get("scraper") == "shows"):
                                (result, status) = self.scrapeShowsGrid(html, 
params)   
+                       elif (get("scraper") == "movies" and get("category")):
+                               (result, status) = self.scrapeMoviesGrid(html, 
params)
                        else:
                                (result, status) = self.scrapeGridFormat(html, 
params)
                        
@@ -538,7 +622,9 @@ class YouTubeScraperCore:
                                        if (get("scraper") == "categories"):
                                                (new_result, status) = 
self.scrapeCategoriesGrid(html, params)
                                        elif (get("scraper") == "shows"):
-                                               (new_result, status) = 
self.scrapeShowsGrid(html, params)       
+                                               (new_result, status) = 
self.scrapeShowsGrid(html, params)
+                                       elif (get("scraper") == "movies" and 
get("category")):
+                                               (result, status) = 
self.scrapeMoviesGrid(html, params)
                                        else:
                                                (new_result, status) = 
self.scrapeGridFormat(html, params)
                                        
@@ -554,7 +640,7 @@ class YouTubeScraperCore:
                                        i = i+1
                                        if (i > 9):     
                                                if (self.__dbg__):
-                                                       print "Scraper 
pagination failed, requested more than 10 pages which should never happen."
+                                                       print self.__plugin__ + 
" Scraper pagination failed, requested more than 10 pages which should never 
happen."
                                                return False
                                
                                if (next == "false" and len(result) > per_page):
@@ -588,7 +674,10 @@ class YouTubeScraperCore:
                                        return self.scrapeCategoryList(html, 
params, "shows")
                        elif (get("scraper") == "movies"):
                                if (get("category")):
-                                       return self.scrapeGridFormat(html, 
params)
+                                       if get("subcategory"):
+                                               return 
self.scrapeMovieSubCategory(html, params)
+                                       else:
+                                               return 
self.scrapeGridFormat(html, params)
                                else: 
                                        return self.scrapeCategoryList(html, 
params, "movies")
                        else:
@@ -605,18 +694,29 @@ class YouTubeScraperCore:
                                if (category.find("/") != -1):
                                        url = self.urls["main"] + category + 
"?hl=en" + "&p=" + page
                                else:
-                                       url = self.urls["main"] + "/videos" + 
category + "&hl=en" + "&p=" + page
+                                       url = self.urls["main"] + "/categories" 
+ category + "&hl=en" + "&p=" + page
                        else:
                                url = self.urls["categories"] + "?hl=en"
                
-               elif (get("scraper") == "shows" or get("scraper") == "movies"):
+               elif (get("scraper") == "shows"):
                        if (get("category")):
                                category = get("category")
                                category = urllib.unquote_plus(category)
-                               url = self.urls[get("scraper")] + "/" + 
category + "?p=" + page + "&hl=en"
+                               url = self.urls["shows"] + "/" + category + 
"?p=" + page + "&hl=en"
                        else:
-                               url = self.urls[get("scraper")] + "?hl=en"      
+                               url = self.urls["shows"] + "?hl=en"
                                
+               elif (get("scraper") == "movies"):
+                       if (get("category")):
+                               category = get("category")
+                               category = urllib.unquote_plus(category)
+                               if get("subcategory"):
+                                       url = self.urls["main"] + "/movies/" + 
category + "?hl=en"
+                               else:
+                                       url = self.urls["main"] + "/movies/" + 
category + "?p=" + page + "&hl=en"
+                       else:
+                               url = self.urls["movies"] + "?hl=en"
+
                elif (get("show")):                     
                        show = urllib.unquote_plus(get("show"))
                        if (show.find("p=") < 0):
@@ -631,7 +731,8 @@ class YouTubeScraperCore:
                                url = self.urls[get("scraper")]
                                url = url % page
                        else :
-                               url = self.urls["trailers"]
+                               if (get("scraper") == "latest_trailers"):       
                                
+                                       url = self.urls["trailers"]
                                
                return url
        
@@ -657,8 +758,8 @@ class YouTubeScraperCore:
                                trailer = trailers.div.div.div
                        
                        cell = "trailer-cell *vl"
-                       if (get("scraper") == "movies"):
-                               cell = "movie-cell *vl"
+                       if (get("scraper") == "categories"):
+                               cell = "video-cell"
                        
                        item = []
                        while ( trailer != None ):
@@ -684,20 +785,24 @@ class YouTubeScraperCore:
 
                return (yobjects, 200)
        
-       def scrapeCategoryList(self, html, params = {}, tag = ""):
+       def scrapeCategoryList(self, html = "", params = {}, tag = ""):
                get = params.get
                if self.__dbg__:
-                       print self.__plugin__ + "scrapeCategories " 
+                       print self.__plugin__ + " scrapeCategories " 
                scraper = "categories"
                thumbnail = "explore"
                
                if (tag):
                        scraper = tag
                        thumbnail = tag
-                               
+               
                list = SoupStrainer(name="div", attrs = 
{"class":"yt-uix-expander-body"})
                categories = BeautifulSoup(html, parseOnlyThese=list)
                
+               if len(categories) == 0:
+                       list = SoupStrainer(name="div", id = 
"browse-filter-menu")
+                       categories = BeautifulSoup(html, parseOnlyThese=list)
+               
                yobjects = []
                status = 200
                
@@ -718,13 +823,23 @@ class YouTubeScraperCore:
                                                                continue
                                                        if cat.find("?") != -1:
                                                                cat = 
cat[cat.find("?"):]
+                                                       if cat.find("comedy") > 
0:
+                                                               cat = "?c=23"
+                                                       if cat.find("gaming") > 
0:
+                                                               cat = "?c=20"
+                                               if get("scraper") == "movies":
+                                                       if cat.find("pt=nr") > 
0:
+                                                               category = 
category.findNextSibling(name = "li")
+                                                               continue
+                                                       elif cat == 
"indian-cinema":
+                                                               
item["subcategory"] = "true"
                                                
                                                cat = urllib.quote_plus(cat)
                                                item['category'] = cat
                                                item['scraper'] = scraper
                                                item["thumbnail"] = thumbnail
                                                if self.__dbg__:
-                                                       print self.__plugin__ + 
"adding item: " + item['Title'] + ", url: " + item['category']
+                                                       print self.__plugin__ + 
"adding item: " + repr(item['Title']) + ", url: " + item['category']
                                                yobjects.append(item)
                                        
                                        category = 
category.findNextSibling(name = "li")
diff --git a/plugin.video.youtube/addon.xml b/plugin.video.youtube/addon.xml
index 2283f05..369691a 100644
--- a/plugin.video.youtube/addon.xml
+++ b/plugin.video.youtube/addon.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <addon id="plugin.video.youtube"
        name="YouTube"
-       version="2.0.3"
+       version="2.0.4"
        provider-name="TheCollective">
   <requires>
     <import addon="xbmc.python" version="1.0"/>
diff --git a/plugin.video.youtube/changelog.txt 
b/plugin.video.youtube/changelog.txt
index 4c0c525..ed0eb75 100644
--- a/plugin.video.youtube/changelog.txt
+++ b/plugin.video.youtube/changelog.txt
@@ -1,11 +1,27 @@
 [B]Errata[/B]
 - [XBMC] Thumbnails sometimes turns into black box or Folder (XBMC not 
detecting when thumbnail is set and defaulting to icon?)
 - [XBMC] When sorting items, it's impossible to get them to return to their 
original order.
-- [XBMC] has Excessive Memory use after running the plugin for prolonged 
periods of time 
-- [YOUTUBE] API doesnt allow proper sorting of playlists and subscriptions 
(Video order when listing a playlists is broken and orderby when listing all 
subscriptions just doesn't work)
+- [XBMC] Has Excessive Memory use after running the plugin for prolonged 
periods of time 
+- [XBMC] Playback of RTMPe streams (flash streaming) is currently broken as 
youtube updated to use a newer protocol than what XBMC supports
+- [XBMC] There's an unknown issue with the version of urllib and openssl 
supplied by xbmc for python 2.4 on windows that causes severe login issues for 
windows users (ssl_ctx_new errors)
 - [YOUTUBE] Can't delete favorites (YouTube gives video id, not favorite id).
 - Download of rtmpe streams not supported.
 
+
+====================== DO NOT REMOVE THIS LINE UNTIL VERSION HAS BEEN UPDATED 
EVERYWHERE ===========================
+[B]Version 2.2.4[/B]
+- Verified Compatibility with Eden - pre and submitted to repo.
+
+[B]Version 2.0.4[/B]
+- Fixed YouTube character encoding changes broke disco scraper.
+- Fixed YouTube character encoding changes broke show scraper
+- Fixed Site changes broke Disco top 25 scraper
+- Fixed YouTube design changes broken categories scraper
+- Fixed Movies Scraper (backport of fix from new beta by chocol)
+- Fixed problems when downloading a file to a network drive (thanks to ToCsIc)
+- Fixed Age verification was broken, back working again.
+- Changed more videos by user, and related videos so you no longer have to be 
logged in for them to work.
+
 [B]Version 2.0.3[/B]
 - Fixed Context menus were "suddenly" missing on a lot of folder items 
(probably since pre v.2)
 - Fixed "New Subscriptions Videos" feed would show up on every subscriptions 
page this has been corrected
diff --git a/plugin.video.youtube/default.py b/plugin.video.youtube/default.py
index 0fa0941..8434976 100644
--- a/plugin.video.youtube/default.py
+++ b/plugin.video.youtube/default.py
@@ -19,13 +19,10 @@
 import sys, xbmc, xbmcaddon
 
 # plugin constants
-__version__ = "2.0.3"
+__version__ = "2.0.4"
 __plugin__ = "YouTube-" + __version__
 __author__ = "TheCollective"
 __url__ = "www.xbmc.com"
-__svn_url__ = ""
-__svn_revision__ = "$Revision$"
-__XBMC_Revision__ = "34731"
 __settings__ = xbmcaddon.Addon(id='plugin.video.youtube')
 __language__ = __settings__.getLocalizedString
 __dbg__ = __settings__.getSetting( "debug" ) == "true"

-----------------------------------------------------------------------

Summary of changes:
 plugin.video.youtube/YouTubeCore.py        |  119 +++++++++------
 plugin.video.youtube/YouTubeNavigation.py  |   16 ++-
 plugin.video.youtube/YouTubeScraperCore.py |  231 +++++++++++++++++++++-------
 plugin.video.youtube/addon.xml             |    2 +-
 plugin.video.youtube/changelog.txt         |   20 ++-
 plugin.video.youtube/default.py            |    5 +-
 6 files changed, 276 insertions(+), 117 deletions(-)


hooks/post-receive
-- 
Plugins

------------------------------------------------------------------------------
What Every C/C++ and Fortran developer Should Know!
Read this article and learn how Intel has extended the reach of its 
next-generation tools to help Windows* and Linux* C/C++ and Fortran 
developers boost performance applications - including clusters. 
http://p.sf.net/sfu/intel-dev2devmay
_______________________________________________
Xbmc-addons mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/xbmc-addons

Reply via email to