[Zope-Checkins] SVN: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/ - replace nasty and ineffective hack for dynamic connection ids in ZSQL Method cache with code which is tested and works

2006-11-17 Thread Chris Withers
Log message for revision 71151:
  - replace nasty and ineffective hack for dynamic connection ids in ZSQL
    Method cache with code which is tested and works
  - simplify tests as a result
  - make sure the full chain of DA.__call__ and DA._cached_result is exercised

Changed:
  U   Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py
  U   Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py

-=-
Modified: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py
===
--- Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py   2006-11-17 12:30:35 UTC (rev 71150)
+++ Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py   2006-11-17 13:22:41 UTC (rev 71151)
@@ -352,11 +352,8 @@
 
     def _searchable_result_columns(self): return self._col
 
-    def _cached_result(self, DB__, query):
-        pure_query = query
-        # we need to munge the incoming query key in the cache
-        # so that the same request to a different db is returned
-        query = query + ('\nDBConnId: %s' % self.connection_hook, )
+    def _cached_result(self, DB__, query, max_rows, conn_id):
+        cache_key = query,max_rows,conn_id
 
         # Try to fetch from cache
         if hasattr(self,'_v_cache'): cache=self._v_cache
@@ -376,15 +373,15 @@
                 del cache[q]
             del keys[-1]
 
-        if cache.has_key(query):
-            k, r = cache[query]
+        if cache.has_key(cache_key):
+            k, r = cache[cache_key]
             if k > t: return r
 
         # call the pure query
-        result=apply(DB__.query, pure_query)
+        result=DB__.query(query,max_rows)
         if self.cache_time_ > 0:
-            tcache[int(now)]=query
-            cache[query]= now, result
+            tcache[int(now)]=cache_key
+            cache[cache_key]= now, result
 
         return result
 
@@ -450,8 +447,9 @@
         if src__: return query
 
         if self.cache_time_ > 0 and self.max_cache_ > 0:
-            result=self._cached_result(DB__, (query, self.max_rows_))
-        else: result=DB__.query(query, self.max_rows_)
+            result=self._cached_result(DB__, query, self.max_rows_, c)
+        else:
+            result=DB__.query(query, self.max_rows_)
 
         if hasattr(self, '_v_brain'): brain=self._v_brain
         else:

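The heart of rev 71151 is the new cache key: instead of munging the connection id into the query text, the key becomes the tuple (query, max_rows, conn_id). A minimal standalone sketch of the scheme, with invented names (ToyCache, db) rather than the real DA attributes:

    import time

    class ToyCache:
        # Illustrative stand-in for the reworked _cached_result keying;
        # the real code lives on the DA object and uses self._v_cache.
        def __init__(self, cache_time=10):
            self.cache_time = cache_time
            self._cache = {}  # (query, max_rows, conn_id) -> (when, result)

        def cached_query(self, db, query, max_rows, conn_id):
            # conn_id is part of the key, so identical SQL sent through a
            # different database connection can never pick up a cached
            # result that belongs to another database.
            key = (query, max_rows, conn_id)
            now = time.time()
            hit = self._cache.get(key)
            if hit is not None and hit[0] > now - self.cache_time:
                return hit[1]
            result = db.query(query, max_rows)
            self._cache[key] = (now, result)
            return result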
Modified: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py
===
--- Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py   2006-11-17 12:30:35 UTC (rev 71150)
+++ Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py   2006-11-17 13:22:41 UTC (rev 71151)
@@ -5,9 +5,13 @@
 class DummyDB:
 
     conn_num = 1
+
+    result = None
 
-    def query(self,*args):
-        return ('result for:',)+args
+    def query(self,query,max_rows):
+        if self.result:
+            return self.result
+        return 'result for ' + query
 
     def hook_method(self):
         conn_to_use = 'conn'+str(self.conn_num)
@@ -34,13 +38,13 @@
         self.da.cache_time_ = 10
         self.da.max_cache_ = 2
 
-    def _do_query(self,query,time):
+    def _do_query(self,query,t):
         try:
-            self.DA.time = DummyTime(time)
-            result = self.da._cached_result(DummyDB(),query)
+            self.DA.time = DummyTime(t)
+            result = self.da._cached_result(DummyDB(),query,1,'conn_id')
         finally:
             self.DA.time = time
-        self.assertEqual(result,('result for:',)+query)
+        self.assertEqual(result,'result for '+query)
 
     def _check_mapping(self,expected,actual):
         missing = []
@@ -101,10 +105,10 @@
         # query, but where the item returned is always in the cache
         self._check_cache({},{})
         for t in range(1,6):
-            self._do_query(('query',),t)
+            self._do_query('query',t)
         self._check_cache(
-            {('query', '\nDBConnId: None'): (1,('result for:', 'query'))},
-            {1: ('query', '\nDBConnId: None')}
+            {('query',1,'conn_id'): (1,'result for query')},
+            {1: ('query',1,'conn_id')}
             )
 
     def test_same_query_same_second(self):
@@ -115,10 +119,10 @@
         self._check_cache({},{})
         for t in range(11,16,1):
             t = float(t)/10
-            self._do_query(('query',),t)
+            self._do_query('query',t)
         self._check_cache(
-            {('query', '\nDBConnId: None'): (1.1,('result for:', 'query'))},
-            {1: ('query', '\nDBConnId: None')}
+            {('query',1,'conn_id'): (1.1,'result for query')},
+            {1: ('query',1,'conn_id')}
             )
 
     def test_different_queries_different_second(self):
@@ -128,49 +132,49 @@
         # dumped due to the replacement of Bucket with dict
         self._check_cache({},{})
         # one
-        self._do_query(('query1',),1.1)
+        self._do_query('query1',1.1)

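The tests above pin the clock rather than sleeping: _do_query swaps the time callable the DA module uses for a DummyTime and restores it in a finally block. A self-contained sketch of the pattern (the Cache class here is invented for the example):

    import time as _time

    class DummyTime:
        # a callable clock that always reports the same instant
        def __init__(self, t):
            self.t = float(t)
        def __call__(self):
            return self.t

    class Cache:
        # hypothetical example: the clock is read through an attribute,
        # so a test can inject a fake one and restore the real one after
        time = staticmethod(_time.time)
        def stamp(self):
            return self.time()

    c = Cache()
    real_time = c.time
    try:
        c.time = DummyTime(1.1)   # pin "now", as the tests pin self.DA.time
        assert c.stamp() == 1.1
    finally:
        c.time = real_time        # always restore the real clock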
[Zope-Checkins] SVN: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py more documentary tests:

2006-11-17 Thread Chris Withers
Log message for revision 71153:
  more documentary tests:
  - document 2 memory leaks, one in cache and one in tcache
  - test coverage for the patch Brian applied in Dec 2000

Changed:
  U   Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py

-=-
Modified: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py
===
--- Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py   2006-11-17 14:04:28 UTC (rev 71152)
+++ Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py   2006-11-17 15:32:54 UTC (rev 71153)
@@ -180,7 +180,7 @@
     def test_different_queries_same_second(self):
         # This tests different queries being fired into the cache
         # in the same second.
-        # XXX This demonstrates a memory leak in the cache code
+        # XXX This demonstrates 2 memory leaks in the cache code
         self._check_cache({},{})
         # one
         self._do_query('query1',1.0)
@@ -191,6 +192,7 @@
         # two
         self._do_query( 'query2',1.1)
         self._check_cache(
+            # XXX oops, query1 is in the cache but it'll never be purged.
             {('query1',1,'conn_id'): (1.0,'result for query1'),
              ('query2',1,'conn_id'): (1.1,'result for query2'),},
             {1.0: ('query2',1,'conn_id'),}
@@ -198,6 +199,7 @@
         # three
         self._do_query('query3',1.2)
         self._check_cache(
+            # XXX oops, query1 and query2 are in the cache but will never be purged
             {('query1',1,'conn_id'): (1,'result for query1'),
              ('query2',1,'conn_id'): (1.1,'result for query2'),
              ('query3',1,'conn_id'): (1.2,'result for query3'),},
@@ -206,7 +208,7 @@
         # four - now we drop our first cache entry, this is an off-by-one error
         self._do_query('query4',1.3)
         self._check_cache(
-            # XXX - oops, why is query1 here still?
+            # XXX - oops, why is query1 here still? see above ;-)
             {('query1',1,'conn_id'): (1,'result for query1'),
              ('query2',1,'conn_id'): (1.1,'result for query2'),
              ('query3',1,'conn_id'): (1.2,'result for query3'),
@@ -216,7 +218,7 @@
         # five - now we drop another cache entry
         self._do_query('query5',1.4)
         self._check_cache(
-            # XXX - oops, why are query1 and query2 here still?
+            # XXX - oops, why are query1 and query2 here still? see above ;-)
             {('query1',1,'conn_id'): (1,'result for query1'),
              ('query2',1,'conn_id'): (1.1,'result for query2'),
              ('query3',1,'conn_id'): (1.2,'result for query3'),
@@ -225,6 +227,69 @@
             {1: ('query5',1,'conn_id'),}
             )
 
+    def test_time_tcache_expires(self):
+        # the first query gets cached
+        self._do_query('query1',1)
+        self._check_cache(
+            {('query1',1,'conn_id'): (1,'result for query1')},
+            {1: ('query1',1,'conn_id')}
+            )
+        # the 2nd gets cached, the cache is still smaller than max_cache / 2
+        self._do_query('query2',12)
+        self._check_cache(
+            {('query1',1,'conn_id'): (1,'result for query1'),
+             ('query2',1,'conn_id'): (12,'result for query2')},
+            {1: ('query1',1,'conn_id'),
+             12:('query2',1,'conn_id')}
+            )
+        # the 3rd trips the max_cache/2 trigger, so both our old queries get
+        # dumped
+        self._do_query('query',23)
+        self._check_cache(
+            {('query',1,'conn_id'): (23,'result for query')},
+            {23:('query',1,'conn_id')}
+            )
+
+    def test_time_refreshed_cache(self):
+        # the first query gets cached
+        self._do_query('query1',1)
+        self._check_cache(
+            {('query1',1,'conn_id'): (1,'result for query1')},
+            {1: ('query1',1,'conn_id')}
+            )
+        # do the same query much later, so new one gets cached
+        # XXX memory leak as old tcache entry is left lying around
+        self._do_query('query1',12)
+        self._check_cache(
+            {('query1',1,'conn_id'): (12,'result for query1')},
+            {1: ('query1',1,'conn_id'), # XXX why is 1 still here?
+             12: ('query1',1,'conn_id')}
+            )
+
+    def test_cachetime_doesnt_match_tcache_time(self):
+        # get some old entries for one query in
+        # (the times are carefully picked to give out-of-order dict keys)
+        self._do_query('query1',4)
+        self._do_query('query1',19)
+        self._do_query('query1',44)
+        self._check_cache(
+            {('query1',1,'conn_id'): (44,'result for query1')},
+            {4: ('query1',1,'conn_id'),
+             19: ('query1',1,'conn_id'),
+             44: ('query1',1,'conn_id')}
+            )
+        # now do another query
+        self._do_query('query2',44.1)
+        # XXX whoops, because 

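The leaks these tests document are easy to reproduce outside Zope: tcache maps int(timestamp) to a single cache key, so a second store in the same second, or a re-store of the same key at a later time, strands cache entries that the purge loop can never reach. A standalone illustration with plain dicts (names invented for the example):

    tcache = {}   # int(timestamp) -> one cache key (the leaking design)
    cache = {}    # cache key -> (timestamp, result)

    def store(key, result, now):
        tcache[int(now)] = key    # clobbers any key stored this second
        cache[key] = (now, result)

    store(('query1', 1, 'conn_id'), 'r1', 1.0)
    store(('query2', 1, 'conn_id'), 'r2', 1.1)   # overwrites tcache[1]

    # query1 is still in 'cache', but nothing in 'tcache' points at it
    # any more, so the purge loop can never find it: a permanent leak.
    assert tcache == {1: ('query2', 1, 'conn_id')}
    assert len(cache) == 2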
[Zope-Checkins] SVN: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/Connection.py fixed logger calls

2006-11-17 Thread Andreas Jung
Log message for revision 71155:
  fixed logger calls
  

Changed:
  U   Zope/branches/2.9/lib/python/Shared/DC/ZRDB/Connection.py

-=-
Modified: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/Connection.py
===
--- Zope/branches/2.9/lib/python/Shared/DC/ZRDB/Connection.py   2006-11-17 15:50:39 UTC (rev 71154)
+++ Zope/branches/2.9/lib/python/Shared/DC/ZRDB/Connection.py   2006-11-17 15:55:05 UTC (rev 71155)
@@ -71,7 +71,7 @@
         try: self.connect(self.connection_string)
         except:
             logger.error('Error connecting to relational database.',
-                         exc_info=exc_info())
+                         exc_info=True)
 
     def title_and_id(self):
         s=Connection.inheritedAttribute('title_and_id')(self)
@@ -151,7 +151,7 @@
                 self._v_database_connection.close()
             except:
                 logger.error('Error closing relational database connection.',
-                             error=exc_info())
+                             exc_info=True)
         self._v_connected=''
         if REQUEST is not None:
             return self.manage_main(self, REQUEST)

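For reference, exc_info=True is the standard library logging idiom the fix switches to: the logging module captures and formats the active traceback itself, so there is no need to pass sys.exc_info() (and error= was never a valid keyword argument). A minimal sketch with an invented failing call:

    import logging

    logging.basicConfig()
    logger = logging.getLogger('Shared.DC.ZRDB')

    def connect():
        # hypothetical stand-in for the real connection attempt
        raise RuntimeError('database unreachable')

    try:
        connect()
    except Exception:
        # logging fetches and formats the current traceback itself
        logger.error('Error connecting to relational database.',
                     exc_info=True)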


[Zope-Checkins] SVN: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/ - stop rounding to ints, making memory leaks much less likely

2006-11-17 Thread Chris Withers
Log message for revision 71158:
  - stop rounding to ints, making memory leaks much less likely
  - document the slim chances of remaining memory leaks
  - remove pointless import try/except, it always fails now
  - adjust tests to make sure we still generate pathological dict orderings

Changed:
  U   Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py
  U   Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py

-=-
Modified: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py
===
--- Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py   2006-11-17 16:03:10 UTC (rev 71157)
+++ Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py   2006-11-17 16:54:02 UTC (rev 71158)
@@ -39,8 +39,7 @@
 from webdav.Resource import Resource
 from webdav.Lockable import ResourceLockedError
 from zExceptions import BadRequest
-try: from IOBTree import Bucket
-except: Bucket=lambda:{}
+Bucket=lambda:{}
 
 
 class DatabaseError(BadRequest):
@@ -369,7 +368,7 @@
                 key=keys[-1]
                 q=tcache[key]
                 del tcache[key]
-                if int(cache[q][0]) == key:
+                if cache[q][0] == key:
                     del cache[q]
                 del keys[-1]
 
@@ -380,7 +379,20 @@
         # call the pure query
         result=DB__.query(query,max_rows)
         if self.cache_time_ > 0:
-            tcache[int(now)]=cache_key
+            # When a ZSQL method is handled by one ZPublisher thread twice in
+            # less time than it takes for time.time() to return a different
+            # value, and the SQL generated is different, then this code will
+            # leak an entry in 'cache' for each time the ZSQL method generates
+            # different SQL until time.time() returns a different value.
+            #
+            # On Linux, you would need an extremely fast machine under extremely
+            # high load, making this extremely unlikely. On Windows, this is a
+            # little more likely, but still unlikely to be a problem.
+            #
+            # If it does become a problem, the values of the tcache mapping
+            # need to be turned into sets of cache keys rather than a single
+            # cache key.
+            tcache[now]=cache_key
             cache[cache_key]= now, result
 
         return result

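The mitigation the new comment sketches (not implemented in this commit) would make each tcache value a set of cache keys, so two keys stored at the same time.time() value cannot clobber each other. A hedged sketch of that design:

    tcache = {}   # timestamp -> set of cache keys (suggested design)
    cache = {}    # cache key -> (timestamp, result)

    def remember(now, cache_key, result):
        tcache.setdefault(now, set()).add(cache_key)
        cache[cache_key] = (now, result)

    def purge(timestamp):
        # every key recorded at this instant is purged together
        for cache_key in tcache.pop(timestamp, ()):
            cache.pop(cache_key, None)

    remember(1.0, ('q1', 1, 'conn_id'), 'r1')
    remember(1.0, ('q2', 1, 'conn_id'), 'r2')   # same instant, no clobber
    purge(1.0)
    assert not cache and not tcache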
Modified: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py
===
--- Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py   2006-11-17 16:03:10 UTC (rev 71157)
+++ Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py   2006-11-17 16:54:02 UTC (rev 71158)
@@ -122,7 +122,7 @@
         self._do_query('query',t)
         self._check_cache(
             {('query',1,'conn_id'): (1.1,'result for query')},
-            {1: ('query',1,'conn_id')}
+            {1.1: ('query',1,'conn_id')}
             )
 
     def test_different_queries_different_second(self):
@@ -135,15 +135,15 @@
         self._do_query('query1',1.1)
         self._check_cache(
             {('query1',1,'conn_id'): (1.1,'result for query1')},
-            {1: ('query1',1,'conn_id')}
+            {1.1: ('query1',1,'conn_id')}
             )
         # two
         self._do_query( 'query2',3.2)
         self._check_cache(
             {('query1',1,'conn_id'): (1.1,'result for query1'),
              ('query2',1,'conn_id'): (3.2,'result for query2'),},
-            {1: ('query1',1,'conn_id'),
-             3: ('query2',1,'conn_id'),}
+            {1.1: ('query1',1,'conn_id'),
+             3.2: ('query2',1,'conn_id'),}
             )
         # three
         self._do_query('query3',4.3)
@@ -151,80 +151,83 @@
             {('query1',1,'conn_id'): (1.1,'result for query1'),
              ('query2',1,'conn_id'): (3.2,'result for query2'),
              ('query3',1,'conn_id'): (4.3,'result for query3'),},
-            {1: ('query1',1,'conn_id'),
-             3: ('query2',1,'conn_id'),
-             4: ('query3',1,'conn_id'),}
+            {1.1: ('query1',1,'conn_id'),
+             3.2: ('query2',1,'conn_id'),
+             4.3: ('query3',1,'conn_id'),}
             )
         # four - now we drop our first cache entry, this is an off-by-one error
         self._do_query('query4',8.4)
+        # XXX oops - because dicts have an arbitrary ordering, we dumped the wrong key!
         self._check_cache(
             {('query2',1,'conn_id'): (3.2,'result for query2'),
-             ('query3',1,'conn_id'): (4.3,'result for query3'),
+             ('query1',1,'conn_id'): (1.1,'result for query1'),
              ('query4',1,'conn_id'): (8.4,'result for query4'),},
-            {3: ('query2',1,'conn_id'),
-             4: ('query3',1,'conn_id'),
-             8: ('query4',1,'conn_id'),}
+            {1.1: ('query1',1,'conn_id'),
+             3.2: ('query2',1,'conn_id'),
+             8.4: 

[Zope-Checkins] SVN: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/ - use an ordered mapping so we dump the oldest stuff from the cache first

2006-11-17 Thread Chris Withers
Log message for revision 71161:
  - use an ordered mapping so we dump the oldest stuff from the cache first
  - remove the pointless check for cache time being enabled inside
    _cached_result - _cached_result isn't actually called if caching isn't enabled
  - slight code tidy in DA.py
  - lots of comments in the test_caching.py and DA.py's _cached_result method

Changed:
  U   Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py
  U   Zope/branches/2.9/lib/python/Shared/DC/ZRDB/tests/test_caching.py

-=-
Modified: Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py
===
--- Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py   2006-11-17 17:23:22 UTC (rev 71160)
+++ Zope/branches/2.9/lib/python/Shared/DC/ZRDB/DA.py   2006-11-17 18:15:27 UTC (rev 71161)
@@ -39,7 +39,7 @@
 from webdav.Resource import Resource
 from webdav.Lockable import ResourceLockedError
 from zExceptions import BadRequest
-Bucket=lambda:{}
+from BTrees.OOBTree import OOBucket as Bucket
 
 
 class DatabaseError(BadRequest):
@@ -352,27 +352,52 @@
     def _searchable_result_columns(self): return self._col
 
     def _cached_result(self, DB__, query, max_rows, conn_id):
+        # Try to fetch a result from the cache.
+        # Compute and cache the result otherwise.
+        # Also maintains the cache and ensures stale entries
+        # are never returned and that the cache never gets too large.
+
+        # NB: Correct cache behavior is predicated on Bucket.keys()
+        # returning a sequence ordered from smallest number
+        # (ie: the oldest cache entry) to largest number
+        # (ie: the newest cache entry). Please be careful if you
+        # change the class instantiated below!
+
+        # get hold of a cache
+        caches = getattr(self,'_v_cache',None)
+        if caches is None:
+            caches = self._v_cache = {}, Bucket()
+        cache, tcache = caches
+
+        # the key for caching
         cache_key = query,max_rows,conn_id
-
-        # Try to fetch from cache
-        if hasattr(self,'_v_cache'): cache=self._v_cache
-        else: cache=self._v_cache={}, Bucket()
-        cache, tcache = cache
+        # the maximum number of result sets to cache
         max_cache=self.max_cache_
+        # the current time
         now=time()
+        # the oldest time which is not stale
         t=now-self.cache_time_
+
+        # if the cache is too big, we purge entries from it
         if len(cache) >= max_cache:
             keys=tcache.keys()
-            keys.reverse()
-            while keys and (len(keys) >= max_cache or keys[-1] < t):
-                key=keys[-1]
+            # We also hoover out any stale entries, as we're
+            # already doing cache minimisation.
+            # 'keys' is ordered, so we purge the oldest results
+            # until the cache is small enough and there are no
+            # stale entries in it
+            while keys and (len(keys) >= max_cache or keys[0] < t):
+                key=keys[0]
                 q=tcache[key]
                 del tcache[key]
                 del cache[q]
-                del keys[-1]
+                del keys[0]
 
+        # okay, now see if we have a cached result
         if cache.has_key(cache_key):
             k, r = cache[cache_key]
-            if k > t: return r
+            # the result may still be stale, as we only hoover out
+            # stale results above if the cache gets too large.
+            if k > t:
+                # yay! a cached result returned!
+                return r
+            else:
+                # delete stale cache entries
+                del cache[cache_key]
+                del tcache[k]
@@ -383,23 +408,23 @@
 
         # call the pure query
         result=DB__.query(query,max_rows)
-        if self.cache_time_ > 0:
-            # When a ZSQL method is handled by one ZPublisher thread twice in
-            # less time than it takes for time.time() to return a different
-            # value, and the SQL generated is different, then this code will
-            # leak an entry in 'cache' for each time the ZSQL method generates
-            # different SQL until time.time() returns a different value.
-            #
-            # On Linux, you would need an extremely fast machine under extremely
-            # high load, making this extremely unlikely. On Windows, this is a
-            # little more likely, but still unlikely to be a problem.
-            #
-            # If it does become a problem, the values of the tcache mapping
-            # need to be turned into sets of cache keys rather than a single
-            # cache key.
-            tcache[now]=cache_key
-            cache[cache_key]= now, result
 
+        # When a ZSQL method is handled by one ZPublisher thread twice in
+        # less time than it takes for time.time() to return a different
+        # value, and the SQL generated is different, then this code will
+        # leak an entry in 'cache' for each time the ZSQL method generates
+        # different SQL until time.time() returns a different value.
+        #
+

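The behavioural core of rev 71161 is purging from the oldest end of an ordered mapping: OOBucket.keys() returns keys in ascending order, so keys[0] is always the oldest entry. A minimal sketch of the purge, using sorted() over a plain dict as a stand-in for the Bucket (only the ordering property matters here):

    def purge(cache, tcache, max_cache, oldest_fresh):
        # tcache: timestamp -> cache key; cache: key -> (timestamp, result)
        keys = sorted(tcache)   # ascending, i.e. oldest timestamp first
        while keys and (len(keys) >= max_cache or keys[0] < oldest_fresh):
            key = keys.pop(0)                 # drop from the oldest end
            cache.pop(tcache.pop(key), None)

    tcache = {1.1: 'k1', 3.2: 'k2', 8.4: 'k3'}
    cache = {'k1': (1.1, 'r1'), 'k2': (3.2, 'r2'), 'k3': (8.4, 'r3')}
    purge(cache, tcache, max_cache=3, oldest_fresh=2.0)
    # k1 was both the oldest entry and stale; the newer results survive
    assert 'k1' not in cache and 'k2' in cache and 'k3' in cache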
[Zope-Checkins] SVN: Zope/branches/2.9/doc/CHANGES.txt change log for recent DA.py work.

2006-11-17 Thread Chris Withers
Log message for revision 71162:
  change log for recent DA.py work.

Changed:
  U   Zope/branches/2.9/doc/CHANGES.txt

-=-
Modified: Zope/branches/2.9/doc/CHANGES.txt
===
--- Zope/branches/2.9/doc/CHANGES.txt   2006-11-17 18:15:27 UTC (rev 71161)
+++ Zope/branches/2.9/doc/CHANGES.txt   2006-11-17 18:21:08 UTC (rev 71162)
@@ -9,6 +9,18 @@
 
     Bugs fixed
 
+      - Reworking of _cached_result in Shared.DC.ZRDB.DA.DA:
+
+        - fixed KeyError reported in Collector #2212
+
+        - fixed two memory leaks that occurred under high load
+
+        - fixed broken cache keys for people using the obscure
+          Shared.DC.ZRDB.DA.DA.connection_hook
+
+        - fixed incorrect cache ordering resulting in newer results
+          being dumped when the cache became too large.
+
       - Collector #2237: 'make' doesn't tell you to run 'make inplace'
         before running 'make instance'.
 



[Zope-Checkins] SVN: Zope/branches/2.10/lib/python/Shared/DC/ZRDB/ merge of DA.py caching work from 2.9 branch:

2006-11-17 Thread Chris Withers
Log message for revision 71163:
  merge of DA.py caching work from 2.9 branch:
  
- Reworking of _cached_result in Shared.DC.ZRDB.DA.DA:
  
  - fixed KeyError reported in Collector #2212
  
  - fixed two memory leaks that occurred under high load
  
  - fixed broken cache keys for people using the obscure 
Shared.DC.ZRDB.DA.DA.connection_hook
  
  - fixed incorrect cache ordering resulting in newer results
being dumped when the cache became too large.

Changed:
  U   Zope/branches/2.10/lib/python/Shared/DC/ZRDB/DA.py
  A   Zope/branches/2.10/lib/python/Shared/DC/ZRDB/tests/test_caching.py

-=-
Modified: Zope/branches/2.10/lib/python/Shared/DC/ZRDB/DA.py
===
--- Zope/branches/2.10/lib/python/Shared/DC/ZRDB/DA.py  2006-11-17 18:21:08 UTC (rev 71162)
+++ Zope/branches/2.10/lib/python/Shared/DC/ZRDB/DA.py  2006-11-17 18:27:15 UTC (rev 71163)
@@ -44,8 +44,7 @@
 from webdav.Resource import Resource
 from webdav.Lockable import ResourceLockedError
 from zExceptions import BadRequest
-try: from IOBTree import Bucket
-except: Bucket=lambda:{}
+from BTrees.OOBTree import OOBucket as Bucket
 
 
 class DatabaseError(BadRequest):
@@ -357,40 +356,80 @@
 
     def _searchable_result_columns(self): return self._col
 
-    def _cached_result(self, DB__, query):
-        pure_query = query
-        # we need to munge the incoming query key in the cache
-        # so that the same request to a different db is returned
-        query = query + ('\nDBConnId: %s' % self.connection_hook, )
-
-        # Try to fetch from cache
-        if hasattr(self,'_v_cache'): cache=self._v_cache
-        else: cache=self._v_cache={}, Bucket()
-        cache, tcache = cache
+    def _cached_result(self, DB__, query, max_rows, conn_id):
+        # Try to fetch a result from the cache.
+        # Compute and cache the result otherwise.
+        # Also maintains the cache and ensures stale entries
+        # are never returned and that the cache never gets too large.
+
+        # NB: Correct cache behavior is predicated on Bucket.keys()
+        # returning a sequence ordered from smallest number
+        # (ie: the oldest cache entry) to largest number
+        # (ie: the newest cache entry). Please be careful if you
+        # change the class instantiated below!
+
+        # get hold of a cache
+        caches = getattr(self,'_v_cache',None)
+        if caches is None:
+            caches = self._v_cache = {}, Bucket()
+        cache, tcache = caches
+
+        # the key for caching
+        cache_key = query,max_rows,conn_id
+        # the maximum number of result sets to cache
         max_cache=self.max_cache_
+        # the current time
         now=time()
+        # the oldest time which is not stale
         t=now-self.cache_time_
-        if len(cache) > max_cache / 2:
+
+        # if the cache is too big, we purge entries from it
+        if len(cache) >= max_cache:
             keys=tcache.keys()
-            keys.reverse()
-            while keys and (len(keys) > max_cache or keys[-1] < t):
-                key=keys[-1]
+            # We also hoover out any stale entries, as we're
+            # already doing cache minimisation.
+            # 'keys' is ordered, so we purge the oldest results
+            # until the cache is small enough and there are no
+            # stale entries in it
+            while keys and (len(keys) >= max_cache or keys[0] < t):
+                key=keys[0]
                 q=tcache[key]
                 del tcache[key]
-                if int(cache[q][0]) == key:
-                    del cache[q]
-                del keys[-1]
+                del cache[q]
+                del keys[0]
 
-        if cache.has_key(query):
-            k, r = cache[query]
-            if k > t: return r
+        # okay, now see if we have a cached result
+        if cache.has_key(cache_key):
+            k, r = cache[cache_key]
+            # the result may still be stale, as we only hoover out
+            # stale results above if the cache gets too large.
+            if k > t:
+                # yay! a cached result returned!
+                return r
+            else:
+                # delete stale cache entries
+                del cache[cache_key]
+                del tcache[k]
 
         # call the pure query
-        result=apply(DB__.query, pure_query)
-        if self.cache_time_ > 0:
-            tcache[int(now)]=query
-            cache[query]= now, result
+        result=DB__.query(query,max_rows)
 
+        # When a ZSQL method is handled by one ZPublisher thread twice in
+        # less time than it takes for time.time() to return a different
+        # value, and the SQL generated is different, then this code will
+        # leak an entry in 'cache' for each time the ZSQL method generates
+        # different SQL until time.time() returns a different value.
+

[Zope-Checkins] SVN: Zope/branches/2.10/doc/CHANGES.txt change log for recent DA.py work

2006-11-17 Thread Chris Withers
Log message for revision 71164:
  change log for recent DA.py work

Changed:
  U   Zope/branches/2.10/doc/CHANGES.txt

-=-
Modified: Zope/branches/2.10/doc/CHANGES.txt
===
--- Zope/branches/2.10/doc/CHANGES.txt  2006-11-17 18:27:15 UTC (rev 71163)
+++ Zope/branches/2.10/doc/CHANGES.txt  2006-11-17 18:29:50 UTC (rev 71164)
@@ -8,6 +8,18 @@
 
     Bugs fixed
 
+      - Reworking of _cached_result in Shared.DC.ZRDB.DA.DA:
+
+        - fixed KeyError reported in Collector #2212
+
+        - fixed two memory leaks that occurred under high load
+
+        - fixed broken cache keys for people using the obscure
+          Shared.DC.ZRDB.DA.DA.connection_hook
+
+        - fixed incorrect cache ordering resulting in newer results
+          being dumped when the cache became too large.
+
       - Collector #2232: Can't call DTML templates from Page Templates
 
       - Collector #2213: Can't edit old ZopePageTemplate instances.



[Zope-Checkins] SVN: Zope/trunk/lib/python/Shared/DC/ZRDB/ merge of DA.py caching work from 2.9 branch:

2006-11-17 Thread Chris Withers
Log message for revision 71165:
  merge of DA.py caching work from 2.9 branch:
  
- Reworking of _cached_result in Shared.DC.ZRDB.DA.DA:
  
  - fixed KeyError reported in Collector #2212
  
  - fixed two memory leaks that occurred under high load
  
  - fixed broken cache keys for people using the obscure 
Shared.DC.ZRDB.DA.DA.connection_hook
  
  - fixed incorrect cache ordering resulting in newer results
being dumped when the cache became too large.

Changed:
  U   Zope/trunk/lib/python/Shared/DC/ZRDB/DA.py
  A   Zope/trunk/lib/python/Shared/DC/ZRDB/tests/test_caching.py

-=-
Modified: Zope/trunk/lib/python/Shared/DC/ZRDB/DA.py
===
--- Zope/trunk/lib/python/Shared/DC/ZRDB/DA.py  2006-11-17 18:29:50 UTC (rev 71164)
+++ Zope/trunk/lib/python/Shared/DC/ZRDB/DA.py  2006-11-17 18:31:29 UTC (rev 71165)
@@ -44,8 +44,7 @@
 from webdav.Resource import Resource
 from webdav.Lockable import ResourceLockedError
 from zExceptions import BadRequest
-try: from IOBTree import Bucket
-except: Bucket=lambda:{}
+from BTrees.OOBTree import OOBucket as Bucket
 
 
 class DatabaseError(BadRequest):
@@ -357,40 +356,80 @@
 
     def _searchable_result_columns(self): return self._col
 
-    def _cached_result(self, DB__, query):
-        pure_query = query
-        # we need to munge the incoming query key in the cache
-        # so that the same request to a different db is returned
-        query = query + ('\nDBConnId: %s' % self.connection_hook, )
-
-        # Try to fetch from cache
-        if hasattr(self,'_v_cache'): cache=self._v_cache
-        else: cache=self._v_cache={}, Bucket()
-        cache, tcache = cache
+    def _cached_result(self, DB__, query, max_rows, conn_id):
+        # Try to fetch a result from the cache.
+        # Compute and cache the result otherwise.
+        # Also maintains the cache and ensures stale entries
+        # are never returned and that the cache never gets too large.
+
+        # NB: Correct cache behavior is predicated on Bucket.keys()
+        # returning a sequence ordered from smallest number
+        # (ie: the oldest cache entry) to largest number
+        # (ie: the newest cache entry). Please be careful if you
+        # change the class instantiated below!
+
+        # get hold of a cache
+        caches = getattr(self,'_v_cache',None)
+        if caches is None:
+            caches = self._v_cache = {}, Bucket()
+        cache, tcache = caches
+
+        # the key for caching
+        cache_key = query,max_rows,conn_id
+        # the maximum number of result sets to cache
         max_cache=self.max_cache_
+        # the current time
         now=time()
+        # the oldest time which is not stale
         t=now-self.cache_time_
-        if len(cache) > max_cache / 2:
+
+        # if the cache is too big, we purge entries from it
+        if len(cache) >= max_cache:
             keys=tcache.keys()
-            keys.reverse()
-            while keys and (len(keys) > max_cache or keys[-1] < t):
-                key=keys[-1]
+            # We also hoover out any stale entries, as we're
+            # already doing cache minimisation.
+            # 'keys' is ordered, so we purge the oldest results
+            # until the cache is small enough and there are no
+            # stale entries in it
+            while keys and (len(keys) >= max_cache or keys[0] < t):
+                key=keys[0]
                 q=tcache[key]
                 del tcache[key]
-                if int(cache[q][0]) == key:
-                    del cache[q]
-                del keys[-1]
+                del cache[q]
+                del keys[0]
 
-        if cache.has_key(query):
-            k, r = cache[query]
-            if k > t: return r
+        # okay, now see if we have a cached result
+        if cache.has_key(cache_key):
+            k, r = cache[cache_key]
+            # the result may still be stale, as we only hoover out
+            # stale results above if the cache gets too large.
+            if k > t:
+                # yay! a cached result returned!
+                return r
+            else:
+                # delete stale cache entries
+                del cache[cache_key]
+                del tcache[k]
 
         # call the pure query
-        result=apply(DB__.query, pure_query)
-        if self.cache_time_ > 0:
-            tcache[int(now)]=query
-            cache[query]= now, result
+        result=DB__.query(query,max_rows)
 
+        # When a ZSQL method is handled by one ZPublisher thread twice in
+        # less time than it takes for time.time() to return a different
+        # value, and the SQL generated is different, then this code will
+        # leak an entry in 'cache' for each time the ZSQL method generates
+        # different SQL until time.time() returns a different value.
+        #
+        # On Linux, you would 

[Zope-Checkins] SVN: Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/ revert ZCatalog.getobject() semantics not to mask traversal errors and not to fallback to .resolve_url() when the traversal result is None

2006-11-17 Thread Leonardo Rochael Almeida
Log message for revision 71167:
  revert ZCatalog.getobject() semantics not to mask traversal errors and not to 
fallback to .resolve_url() when the traversal result is None
  

Changed:
  U   Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/ZCatalog.py
  U   Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/tests/testCatalog.py

-=-
Modified: Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/ZCatalog.py
===
--- Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/ZCatalog.py  2006-11-17 18:32:00 UTC (rev 71166)
+++ Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/ZCatalog.py  2006-11-17 19:51:12 UTC (rev 71167)
@@ -615,12 +615,7 @@
     def getobject(self, rid, REQUEST=None):
         """Return a cataloged object given a 'data_record_id_'
         """
-        obj = self.aq_parent.unrestrictedTraverse(self.getpath(rid), None)
-        if obj is None:
-            if REQUEST is None:
-                REQUEST=self.REQUEST
-            obj = self.resolve_url(self.getpath(rid), REQUEST)
-        return obj
+        return self.aq_parent.unrestrictedTraverse(self.getpath(rid))
 
     def getMetadataForUID(self, uid):
         """return the correct metadata given the uid, usually the path

Modified: Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/tests/testCatalog.py
===
--- Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/tests/testCatalog.py  2006-11-17 18:32:00 UTC (rev 71166)
+++ Zope/branches/Zope-2_8-branch/lib/python/Products/ZCatalog/tests/testCatalog.py  2006-11-17 19:51:12 UTC (rev 71167)
@@ -177,15 +177,23 @@
     def __nonzero__(self):
         self.fail("__nonzero__() was called")
 
+class FakeTraversalError(KeyError):
+    """fake traversal exception for testing"""
+
 class fakeparent(Implicit):
     # fake parent mapping unrestrictedTraverse to
     # catalog.resolve_path as simulated by TestZCatalog
     def __init__(self, d):
         self.d = d
 
-    def unrestrictedTraverse(self, path, default=None):
-        return self.d.get(path, default)
+    marker = object()
 
+    def unrestrictedTraverse(self, path, default=marker):
+        result = self.d.get(path, default)
+        if result is self.marker:
+            raise FakeTraversalError(path)
+        return result
+
 class TestZCatalog(unittest.TestCase):
 
     def setUp(self):
@@ -283,7 +291,7 @@
         self._catalog.manage_catalogObject(None, myresponse(), 'URL1', urls=('11', '12'))
 
     def testBooleanEvalOn_refreshCatalog_getobject(self):
-        # wrap catalog under the fake parent
+        # wrap catalog under the fake parent providing unrestrictedTraverse()
         catalog = self._catalog.__of__(fakeparent(self.d))
         # replace entries to test refreshCatalog
         self.d['0'] = dummyLenFail(0, self.fail)
@@ -292,10 +300,27 @@
         catalog.refreshCatalog()
 
         for uid in ('0', '1'):
-            rid = self._catalog.getrid(uid)
+            rid = catalog.getrid(uid)
             # neither should these
             catalog.getobject(rid)
 
+    def test_getobject_doesntMaskTraversalErrorsAndDoesntDelegateTo_resolve_url(self):
+        # wrap catalog under the fake parent providing unrestrictedTraverse()
+        catalog = self._catalog.__of__(fakeparent(self.d))
+        # make resolve_url fail if ZCatalog falls back on it
+        def resolve_url(path, REQUEST):
+            self.fail(".resolve_url() should not be called by .getobject()")
+        catalog.resolve_url = resolve_url
+
+        # traversal should work at first
+        rid0 = catalog.getrid('0')
+        # let's set it up so the traversal fails
+        del self.d['0']
+        self.assertRaises(FakeTraversalError, catalog.getobject, rid0,
+                          REQUEST=object())
+        # and if there is a None at the traversal point, that's where it should return
+        self.d['0'] = None
+        self.assertEquals(catalog.getobject(rid0), None)
+
 class dummy(ExtensionClass.Base):
     att1 = 'att1'
     att2 = 'att2'

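The fakeparent helper above relies on the sentinel-default idiom: a unique module-level object() as the default distinguishes "path not present" (raise) from "path present but stores None" (return None), which is exactly the distinction the reverted getobject needs. A compact standalone version (Container is an invented name):

    _marker = object()   # unique sentinel, never equal to a stored value

    class Container:
        def __init__(self, d):
            self.d = d

        def unrestrictedTraverse(self, path, default=_marker):
            result = self.d.get(path, default)
            if result is _marker:
                # no fallback and no masking: the error surfaces
                raise KeyError(path)
            return result

    c = Container({'a': None})
    assert c.unrestrictedTraverse('a') is None  # stored None is a result
    try:
        c.unrestrictedTraverse('missing')
    except KeyError:
        pass                                    # missing path raises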


[Zope-Checkins] SVN: Zope/branches/2.9/lib/python/Products/ZCatalog/ revert ZCatalog.getobject() semantics not to mask traversal errors and not to fallback to .resolve_url() when the traversal result is None

2006-11-17 Thread Leonardo Rochael Almeida
Log message for revision 71168:
  revert ZCatalog.getobject() semantics not to mask traversal errors and not to 
fallback to .resolve_url() when the traversal result is None
  

Changed:
  U   Zope/branches/2.9/lib/python/Products/ZCatalog/ZCatalog.py
  U   Zope/branches/2.9/lib/python/Products/ZCatalog/tests/testCatalog.py

-=-
Modified: Zope/branches/2.9/lib/python/Products/ZCatalog/ZCatalog.py
===
--- Zope/branches/2.9/lib/python/Products/ZCatalog/ZCatalog.py  2006-11-17 19:51:12 UTC (rev 71167)
+++ Zope/branches/2.9/lib/python/Products/ZCatalog/ZCatalog.py  2006-11-17 20:01:22 UTC (rev 71168)
@@ -615,12 +615,7 @@
     def getobject(self, rid, REQUEST=None):
         """Return a cataloged object given a 'data_record_id_'
         """
-        obj = self.aq_parent.unrestrictedTraverse(self.getpath(rid), None)
-        if obj is None:
-            if REQUEST is None:
-                REQUEST=self.REQUEST
-            obj = self.resolve_url(self.getpath(rid), REQUEST)
-        return obj
+        return self.aq_parent.unrestrictedTraverse(self.getpath(rid))
 
     def getMetadataForUID(self, uid):
         """return the correct metadata given the uid, usually the path

Modified: Zope/branches/2.9/lib/python/Products/ZCatalog/tests/testCatalog.py
===
--- Zope/branches/2.9/lib/python/Products/ZCatalog/tests/testCatalog.py  2006-11-17 19:51:12 UTC (rev 71167)
+++ Zope/branches/2.9/lib/python/Products/ZCatalog/tests/testCatalog.py  2006-11-17 20:01:22 UTC (rev 71168)
@@ -177,15 +177,23 @@
     def __nonzero__(self):
         self.fail("__nonzero__() was called")
 
+class FakeTraversalError(KeyError):
+    """fake traversal exception for testing"""
+
 class fakeparent(Implicit):
     # fake parent mapping unrestrictedTraverse to
     # catalog.resolve_path as simulated by TestZCatalog
     def __init__(self, d):
         self.d = d
 
-    def unrestrictedTraverse(self, path, default=None):
-        return self.d.get(path, default)
+    marker = object()
 
+    def unrestrictedTraverse(self, path, default=marker):
+        result = self.d.get(path, default)
+        if result is self.marker:
+            raise FakeTraversalError(path)
+        return result
+
 class TestZCatalog(unittest.TestCase):
 
     def setUp(self):
@@ -283,7 +291,7 @@
         self._catalog.manage_catalogObject(None, myresponse(), 'URL1', urls=('11', '12'))
 
     def testBooleanEvalOn_refreshCatalog_getobject(self):
-        # wrap catalog under the fake parent
+        # wrap catalog under the fake parent providing unrestrictedTraverse()
         catalog = self._catalog.__of__(fakeparent(self.d))
         # replace entries to test refreshCatalog
         self.d['0'] = dummyLenFail(0, self.fail)
@@ -292,10 +300,27 @@
         catalog.refreshCatalog()
 
         for uid in ('0', '1'):
-            rid = self._catalog.getrid(uid)
+            rid = catalog.getrid(uid)
             # neither should these
             catalog.getobject(rid)
 
+    def test_getobject_doesntMaskTraversalErrorsAndDoesntDelegateTo_resolve_url(self):
+        # wrap catalog under the fake parent providing unrestrictedTraverse()
+        catalog = self._catalog.__of__(fakeparent(self.d))
+        # make resolve_url fail if ZCatalog falls back on it
+        def resolve_url(path, REQUEST):
+            self.fail(".resolve_url() should not be called by .getobject()")
+        catalog.resolve_url = resolve_url
+
+        # traversal should work at first
+        rid0 = catalog.getrid('0')
+        # let's set it up so the traversal fails
+        del self.d['0']
+        self.assertRaises(FakeTraversalError, catalog.getobject, rid0,
+                          REQUEST=object())
+        # and if there is a None at the traversal point, that's where it should return
+        self.d['0'] = None
+        self.assertEquals(catalog.getobject(rid0), None)
+
 class dummy(ExtensionClass.Base):
     att1 = 'att1'
     att2 = 'att2'



[Zope-Checkins] SVN: Zope/branches/2.10/lib/python/Products/ZCatalog/ revert ZCatalog.getobject() semantics not to mask traversal errors and not to fallback to .resolve_url() when the traversal result is None

2006-11-17 Thread Leonardo Rochael Almeida
Log message for revision 71169:
  revert ZCatalog.getobject() semantics not to mask traversal errors and not to 
fallback to .resolve_url() when the traversal result is None
  

Changed:
  U   Zope/branches/2.10/lib/python/Products/ZCatalog/ZCatalog.py
  U   Zope/branches/2.10/lib/python/Products/ZCatalog/tests/testCatalog.py

-=-
Modified: Zope/branches/2.10/lib/python/Products/ZCatalog/ZCatalog.py
===
--- Zope/branches/2.10/lib/python/Products/ZCatalog/ZCatalog.py 2006-11-17 20:01:22 UTC (rev 71168)
+++ Zope/branches/2.10/lib/python/Products/ZCatalog/ZCatalog.py 2006-11-17 20:11:22 UTC (rev 71169)
@@ -587,12 +587,7 @@
     def getobject(self, rid, REQUEST=None):
         """Return a cataloged object given a 'data_record_id_'
         """
-        obj = self.aq_parent.unrestrictedTraverse(self.getpath(rid), None)
-        if obj is None:
-            if REQUEST is None:
-                REQUEST=self.REQUEST
-            obj = self.resolve_url(self.getpath(rid), REQUEST)
-        return obj
+        return self.aq_parent.unrestrictedTraverse(self.getpath(rid))
 
     def getMetadataForUID(self, uid):
         """return the correct metadata given the uid, usually the path

Modified: Zope/branches/2.10/lib/python/Products/ZCatalog/tests/testCatalog.py
===
--- Zope/branches/2.10/lib/python/Products/ZCatalog/tests/testCatalog.py  2006-11-17 20:01:22 UTC (rev 71168)
+++ Zope/branches/2.10/lib/python/Products/ZCatalog/tests/testCatalog.py  2006-11-17 20:11:22 UTC (rev 71169)
@@ -177,15 +177,23 @@
     def __nonzero__(self):
         self.fail("__nonzero__() was called")
 
+class FakeTraversalError(KeyError):
+    """fake traversal exception for testing"""
+
 class fakeparent(Implicit):
     # fake parent mapping unrestrictedTraverse to
     # catalog.resolve_path as simulated by TestZCatalog
     def __init__(self, d):
         self.d = d
 
-    def unrestrictedTraverse(self, path, default=None):
-        return self.d.get(path, default)
+    marker = object()
 
+    def unrestrictedTraverse(self, path, default=marker):
+        result = self.d.get(path, default)
+        if result is self.marker:
+            raise FakeTraversalError(path)
+        return result
+
 class TestZCatalog(unittest.TestCase):
 
     def setUp(self):
@@ -283,7 +291,7 @@
         self._catalog.manage_catalogObject(None, myresponse(), 'URL1', urls=('11', '12'))
 
     def testBooleanEvalOn_refreshCatalog_getobject(self):
-        # wrap catalog under the fake parent
+        # wrap catalog under the fake parent providing unrestrictedTraverse()
         catalog = self._catalog.__of__(fakeparent(self.d))
         # replace entries to test refreshCatalog
         self.d['0'] = dummyLenFail(0, self.fail)
@@ -292,10 +300,27 @@
         catalog.refreshCatalog()
 
         for uid in ('0', '1'):
-            rid = self._catalog.getrid(uid)
+            rid = catalog.getrid(uid)
             # neither should these
             catalog.getobject(rid)
 
+    def test_getobject_doesntMaskTraversalErrorsAndDoesntDelegateTo_resolve_url(self):
+        # wrap catalog under the fake parent providing unrestrictedTraverse()
+        catalog = self._catalog.__of__(fakeparent(self.d))
+        # make resolve_url fail if ZCatalog falls back on it
+        def resolve_url(path, REQUEST):
+            self.fail(".resolve_url() should not be called by .getobject()")
+        catalog.resolve_url = resolve_url
+
+        # traversal should work at first
+        rid0 = catalog.getrid('0')
+        # let's set it up so the traversal fails
+        del self.d['0']
+        self.assertRaises(FakeTraversalError, catalog.getobject, rid0,
+                          REQUEST=object())
+        # and if there is a None at the traversal point, that's where it should return
+        self.d['0'] = None
+        self.assertEquals(catalog.getobject(rid0), None)
+
 class dummy(ExtensionClass.Base):
     att1 = 'att1'
     att2 = 'att2'



[Zope-Checkins] SVN: Zope/trunk/lib/python/Products/ZCatalog/ revert ZCatalog.getobject() semantics not to mask traversal errors and not to fallback to .resolve_url() when the traversal result is None

2006-11-17 Thread Leonardo Rochael Almeida
Log message for revision 71170:
  revert ZCatalog.getobject() semantics not to mask traversal errors and not to 
fallback to .resolve_url() when the traversal result is None
  

Changed:
  U   Zope/trunk/lib/python/Products/ZCatalog/ZCatalog.py
  U   Zope/trunk/lib/python/Products/ZCatalog/tests/testCatalog.py

-=-
Modified: Zope/trunk/lib/python/Products/ZCatalog/ZCatalog.py
===
--- Zope/trunk/lib/python/Products/ZCatalog/ZCatalog.py 2006-11-17 20:11:22 UTC (rev 71169)
+++ Zope/trunk/lib/python/Products/ZCatalog/ZCatalog.py 2006-11-17 20:17:28 UTC (rev 71170)
@@ -587,12 +587,7 @@
     def getobject(self, rid, REQUEST=None):
         """Return a cataloged object given a 'data_record_id_'
         """
-        obj = self.aq_parent.unrestrictedTraverse(self.getpath(rid), None)
-        if obj is None:
-            if REQUEST is None:
-                REQUEST=self.REQUEST
-            obj = self.resolve_url(self.getpath(rid), REQUEST)
-        return obj
+        return self.aq_parent.unrestrictedTraverse(self.getpath(rid))
 
     def getMetadataForUID(self, uid):
         """return the correct metadata given the uid, usually the path

Modified: Zope/trunk/lib/python/Products/ZCatalog/tests/testCatalog.py
===
--- Zope/trunk/lib/python/Products/ZCatalog/tests/testCatalog.py  2006-11-17 20:11:22 UTC (rev 71169)
+++ Zope/trunk/lib/python/Products/ZCatalog/tests/testCatalog.py  2006-11-17 20:17:28 UTC (rev 71170)
@@ -177,15 +177,23 @@
     def __nonzero__(self):
         self.fail("__nonzero__() was called")
 
+class FakeTraversalError(KeyError):
+    """fake traversal exception for testing"""
+
 class fakeparent(Implicit):
     # fake parent mapping unrestrictedTraverse to
     # catalog.resolve_path as simulated by TestZCatalog
     def __init__(self, d):
         self.d = d
 
-    def unrestrictedTraverse(self, path, default=None):
-        return self.d.get(path, default)
+    marker = object()
 
+    def unrestrictedTraverse(self, path, default=marker):
+        result = self.d.get(path, default)
+        if result is self.marker:
+            raise FakeTraversalError(path)
+        return result
+
 class TestZCatalog(unittest.TestCase):
 
     def setUp(self):
@@ -283,7 +291,7 @@
         self._catalog.manage_catalogObject(None, myresponse(), 'URL1', urls=('11', '12'))
 
     def testBooleanEvalOn_refreshCatalog_getobject(self):
-        # wrap catalog under the fake parent
+        # wrap catalog under the fake parent providing unrestrictedTraverse()
         catalog = self._catalog.__of__(fakeparent(self.d))
         # replace entries to test refreshCatalog
         self.d['0'] = dummyLenFail(0, self.fail)
@@ -292,10 +300,27 @@
         catalog.refreshCatalog()
 
         for uid in ('0', '1'):
-            rid = self._catalog.getrid(uid)
+            rid = catalog.getrid(uid)
             # neither should these
             catalog.getobject(rid)
 
+    def test_getobject_doesntMaskTraversalErrorsAndDoesntDelegateTo_resolve_url(self):
+        # wrap catalog under the fake parent providing unrestrictedTraverse()
+        catalog = self._catalog.__of__(fakeparent(self.d))
+        # make resolve_url fail if ZCatalog falls back on it
+        def resolve_url(path, REQUEST):
+            self.fail(".resolve_url() should not be called by .getobject()")
+        catalog.resolve_url = resolve_url
+
+        # traversal should work at first
+        rid0 = catalog.getrid('0')
+        # let's set it up so the traversal fails
+        del self.d['0']
+        self.assertRaises(FakeTraversalError, catalog.getobject, rid0,
+                          REQUEST=object())
+        # and if there is a None at the traversal point, that's where it should return
+        self.d['0'] = None
+        self.assertEquals(catalog.getobject(rid0), None)
+
 class dummy(ExtensionClass.Base):
     att1 = 'att1'
     att2 = 'att2'
