The branch dharma has been updated
       via  adbabd6d349f72de626769cb42990a9a8028577f (commit)
      from  82f3ff8fbbffd7d71b887dc91105835ee1e88c5b (commit)

- Log -----------------------------------------------------------------
http://xbmc.git.sourceforge.net/git/gitweb.cgi?p=xbmc/plugins;a=commit;h=adbabd6d349f72de626769cb42990a9a8028577f

commit adbabd6d349f72de626769cb42990a9a8028577f
Author: spiff <[email protected]>
Date:   Mon Oct 3 11:37:01 2011 +0200

    [plugin.video.dump] updated to version 0.3

diff --git a/plugin.video.dump/addon.xml b/plugin.video.dump/addon.xml
index 29f12be..32b2586 100644
--- a/plugin.video.dump/addon.xml
+++ b/plugin.video.dump/addon.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <addon id="plugin.video.dump"
        name="Dump.com"
-       version="0.2"
+       version="0.3"
        provider-name="Insayne">
   <requires>
     <import addon="xbmc.python" version="1.0"/>
diff --git a/plugin.video.dump/changelog.txt b/plugin.video.dump/changelog.txt
index a023916..1528092 100644
--- a/plugin.video.dump/changelog.txt
+++ b/plugin.video.dump/changelog.txt
@@ -1,5 +1,10 @@
+0.3

+- Added: Caching Engine for Archive View (0.010426s loadtime vs 15.7s)

+- Added: Icon for Archives 

+- Misc: Cleaned up code, commented a bit more and remove profanity from 
changelog

+

 0.2

-- Remove Shitty Website-style pagination and swapped it with loading entire 
month on the fly

+- Remove bad Website-style pagination and swapped it with loading entire month 
on the fly

 - Public Release! :P

 

 0.1

diff --git a/plugin.video.dump/default.py b/plugin.video.dump/default.py
index 8fd858e..b868403 100644
--- a/plugin.video.dump/default.py
+++ b/plugin.video.dump/default.py
@@ -9,11 +9,17 @@ __plugin__ = "Dump.com"
 __author__ = "Insayne"

 __url__ = "http://code.google.com/p/insayne-projects/"

 __svn_url__ = 
"https://insayne-projects.googlecode.com/svn/trunk/XBMC/Video/plugin.video.dump/"

-__version__ = "0.2"

+__version__ = "0.3"

 __svn_revision__ = "$Revision$"

 __XBMC_Revision__ = xbmc.getInfoLabel('System.BuildVersion')

 __settings__ = xbmcaddon.Addon(id='plugin.video.dump')

 

+# Global Variables

+addonpath = __settings__.getAddonInfo('path')

+addonsavepath = __settings__.getAddonInfo('profile')

+global cache_write

+cache_write = 0

+

 # Archive Listing

 def get_archives():

        url = 'http://www.dump.com/'

@@ -31,7 +37,10 @@ def get_archives():
                        link = link + "|" + posts

                        addDir(title,link,3,thumb)

                        

-# Page Generator       

+# Page Generator

+# This function Generates the title/date/link/thumb data per page

+# Dump.com usually has 5 entries per page

+# It also writes to the cache if necessary

 def Generate_Page(date,page):

        url = 'http://www.dump.com'

        page = int(page)

@@ -41,7 +50,7 @@ def Generate_Page(date,page):
                url = url + "/" + date

        url = url.replace('.com//', '.com/')

 

-   

+       global cache_write 

        req = urllib2.Request(url)

        response = urllib2.urlopen(req)

        html=response.read()

@@ -51,13 +60,51 @@ def Generate_Page(date,page):
        thumb = ''

        regex = '<h2><a href="(.+?)" rel="bookmark" 
title=".+?">(.+?)</a></h2>.+?<span class="date">(.+?)</span>.+?autostart: 
false,image: "(.+?)",flashplayer: "http://www.dump.com/player/player.swf"'

        videos = re.compile(regex).findall(html)

-

+       if cache_write == 1:

+               content = ""

        for link,title,date,thumb in videos:

-               title = "[" + date + "] " + cleanstring(remove_spaces(title))

-               addVideo(title,link,1, thumb)

-       

-       

+               titled = "[" + date + "] " + cleanstring(remove_spaces(title))

+               addVideo(titled,link,1, thumb)

+               

+               # If we are writing the cache, here we do it in the cache syntax

+               if cache_write == 1:

+                       content = content + "<date>" + date + "</date><title>" 
+ title + "</title><url>" + link + "</url><thumb>" + thumb + "</thumb>" + "\r\n"

+                       content = content.replace(" </title>", "</title>")

+                       content = content.replace("  ", " ")

+

+       # Here we do the writing to the file

+       if cache_write == 1:

+               global cachefilename

+               # Filecheck

+               filecheck = os.path.isfile(cachefilename)

+               if filecheck==False:

+                       # If it didn't, we just write to the file

+                       file = open(cachefilename, 'w')

+                       file.write(content)

+                       file.close()

+                       

+               else:

+                       # If it did, we append (A mode does not work in XBMC 
Dharma 10.1)

+                       file = open(cachefilename, 'r')

+                       content_old = file.read()

+                       file.close()

+                       

+                       filecontent = content_old + content

+                       

+                       file = open(cachefilename, 'w')

+                       file.write(filecontent)

+                       file.close()

+                       

 # Link Handler

+# This basically knows what to do with the links, and throws em at the right 
functions :P 

+# Its a small helper that lets me be incredibly lazy

+# For reference:

+# ARGV1 = Year (if it isnt Page param)

+# ARGV2 = Month (if #1 wasnt page param)

+# ARGV3 = Page Param (if #1 wasn't page already)

+# ARGV4 = Page Number

+

+

 def lh(url):

                link = url

                link = link.replace("http://www.dump.com/", "")

@@ -85,13 +132,16 @@ def lh(url):
                                                Generate_Page(date, page)

 

 # Month Generator

+# This is the function to generate the data for an entire Month

+# Only 5 entries per page is too little for XBMC users, or myself :P

+

 def Generate_Month(url,posts,mode):

        if url == "None":

                url = 'http://www.dump.com/'

        if posts == "None":

                posts = "None"

 

-       # Here we create the index Page (Bottom should have archives

+       # Here we create the index Page 

        if mode=="index":

                req = urllib2.Request(url)

                response = urllib2.urlopen(req)

@@ -129,10 +179,63 @@ def Generate_Month(url,posts,mode):
                pages = int(posts) / 5

                pages = pages + 1

                link = url

-               for page in range(1,pages):

-                       lhurl = link + "page/" + str(page) + "/"

-                       lh(lhurl)

+               

+               # Here we do the Caching Engine

+               

+               global cachefilename

+               global cache_write 

+               cachefilename = link.replace("http://www.dump.com/", "")

+               cachefilename = cachefilename.replace("/", "-") + ".txt"

+               cachefilename = cachefilename.replace("-.txt", ".txt")

+               cachefilename = xbmc.translatePath(os.path.join( addonsavepath, 
'cache', cachefilename ))

+               filecheck = os.path.isfile(cachefilename)

+               if filecheck==False:

+                       #start = time.clock()

+                       cache_write = 1

+                       for page in range(1,pages):

+                               lhurl = link + "page/" + str(page) + "/"

+                               lh(lhurl)

+                       #end = time.clock()

+                       ## Completed fetching data (Live) in %f seconds." % 
(end-start)

+

                        

+                       

+               else:

+                       cache_write = 0

+                       # Verify Cache has more entries"

+                       file = open(cachefilename, 'r')

+                       lines = len(file.readlines())

+                       file.close()

+

+                       if int(lines) == int(posts) or url == 
"http://www.dump.com/2010/11/":

+                               #start = time.clock()

+                               # Cache Data matches"

+                               # Here i should regex through the file and add 
the entries"

+                               file = open(cachefilename, 'r')

+                               content = file.read()

+                               file.close()

+                               content = content.replace("\n", "")

+                               content = content.replace("\r", "")

+                               regex = 
'<date>(.+?)</date><title>(.+?)</title><url>(.+?)</url><thumb>(.+?)</thumb>'

+                               videos = re.compile(regex).findall(content)

+                               for date,title,link,thumb in videos:

+                                       title = "[" + date + "] " + 
cleanstring(title)

+                                       addVideo(title,link,1, thumb)

+                               #end = time.clock()

+                               ## Completed fetching data (Cache) in %f 
seconds." % (end-start)

+

+                               

+                       else:

+                               # Cache Data mismatch - deleting and recreating 
cache"

+                               print url

+                               os.remove(cachefilename)

+                               cache_write = 1

+                               for page in range(1,pages):

+                                       lhurl = link + "page/" + str(page) + "/"

+                                       lh(lhurl)

+

+               cache_write = 0

+               # Cache Writing turned off"

 

                        

        

@@ -185,6 +288,11 @@ def cleanstring(string):
        string = string.replace("&amp;", "&")

        string = string.replace("&quot;", "\"")

        string = string.replace("[VIDEO]", "")

+       string = string.replace("&#8217;", "\'")

+       string = string.replace("&#8230;", "...")

+       string = string.replace("&#8220;", '"')

+       string = string.replace("&#8221;", '"')

+       

        return string

 

 # Remove double spaces, etc

@@ -231,9 +339,15 @@ except:
         pass

 

 if mode==None or url==None or len(url)<1:

+       # Verification our cache dir exists (required on startup)

+       path = xbmc.translatePath(os.path.join(addonsavepath , 'cache'))

+       if os.path.isdir(path)==False:

+               os.makedirs(path)

+               

        Generate_Month("None", "None", "index")

        #Archive Link

-       thumb = ''

+       thumb = xbmc.translatePath(os.path.join( addonpath, 'resources', 
'images', 'archives.png'))

+       

        link = "Empty"

        title = "[Archives]"

        addDir(title,link,2,thumb)

@@ -248,7 +362,6 @@ elif mode==2:
        get_archives()

 

 elif mode==3:

-

        # Prepare Month Data

        link = url.split("|")[0]

        posts = url.split("|")[1]


-----------------------------------------------------------------------

Summary of changes:
 plugin.video.dump/addon.xml                        |    2 +-
 plugin.video.dump/changelog.txt                    |    7 +-
 plugin.video.dump/default.py                       |  141 ++++++++++++++++++--
 .../resources/images/archives.png                  |  Bin 49407 -> 49407 bytes
 4 files changed, 134 insertions(+), 16 deletions(-)
 copy {plugin.video.wimp => plugin.video.dump}/resources/images/archives.png 
(100%)


hooks/post-receive
-- 
Plugins

------------------------------------------------------------------------------
All the data continuously generated in your IT infrastructure contains a
definitive record of customers, application performance, security
threats, fraudulent activity and more. Splunk takes this data and makes
sense of it. Business sense. IT sense. Common sense.
http://p.sf.net/sfu/splunk-d2dcopy1
_______________________________________________
Xbmc-addons mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/xbmc-addons

Reply via email to