Author: toad
Date: 2008-12-19 18:44:57 +0000 (Fri, 19 Dec 2008)
New Revision: 24630

Modified:
   branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
Log:
Detect and work around db4o duplicate objects bug. :<


Modified: branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherSegment.java	2008-12-19 18:16:51 UTC (rev 24629)
+++ branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherSegment.java	2008-12-19 18:44:57 UTC (rev 24630)
@@ -425,6 +425,14 @@
                                for(int i=0;i<dataBuckets.length;i++) {
                                        // The FECCodec won't set them.
                                        // But they should be active.
+                                       if(dataBlockStatus[i] != dataBuckets[i]) {
+                                               long theirID = container.ext().getID(dataBlockStatus[i]);
+                                               long ourID = container.ext().getID(dataBuckets[i]);
+                                               if(theirID == ourID) {
+                                                       Logger.error(this, "DB4O BUG DETECTED IN DECODED SEGMENT!: our block: "+dataBuckets[i]+" block from decode "+dataBlockStatus[i]+" both have ID "+ourID+" = "+theirID);
+                                                       dataBuckets[i] = (MinimalSplitfileBlock) dataBlockStatus[i];
+                                               }
+                                       }
                                        if(logMINOR)
                                                Logger.minor(this, "Data block "+i+" is "+dataBuckets[i]);
                                        if(!container.ext().isStored(dataBuckets[i]))
_______________________________________________
cvs mailing list
[email protected]
http://emu.freenetproject.org/cgi-bin/mailman/listinfo/cvs

Reply via email to