Author: toad
Date: 2008-12-24 19:44:41 +0000 (Wed, 24 Dec 2008)
New Revision: 24786

Modified:
   branches/db4o/freenet/src/freenet/client/FECCodec.java
   branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
Log:
Pad or delete the last block prior to encode/decode.
Remove padding from FECCodec: the caller must now pad blocks for both encodes
and decodes. This is necessary so that we can removeFrom().
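
For context, the caller-side pattern the codec now expects looks roughly like the
sketch below. It reuses the BucketTools.pad() call visible in this diff and the
Bucket/BucketFactory interfaces from this tree; the padLastBlock helper, the class
around it and the import paths are illustrative assumptions, not part of this commit.

import java.io.IOException;

// Import paths assumed; adjust to wherever Bucket/BucketFactory/BucketTools live in this tree.
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
import freenet.support.io.BucketTools;

/**
 * Illustrative sketch only (not part of this commit): pad a short last data
 * block up to blockLength before handing the block array to FECCodec, which
 * now rejects any bucket that is not exactly blockLength bytes.
 */
public class PadLastBlockSketch {

	public static Bucket padLastBlock(Bucket lastBlock, int blockLength, BucketFactory bf)
			throws IOException {
		long sz = lastBlock.size();
		if(sz == blockLength)
			return lastBlock; // Already full size, nothing to do.
		if(sz > blockLength)
			throw new IllegalArgumentException("Too big: " + sz + " bigger than " + blockLength);
		// Same BucketTools.pad() call the old FECCodec code used and that
		// SplitFileFetcherSegment now makes before queueing the FECJob.
		Bucket padded = BucketTools.pad(lastBlock, blockLength, bf, (int) sz);
		lastBlock.free(); // The short original is no longer needed.
		return padded;
	}
}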


Modified: branches/db4o/freenet/src/freenet/client/FECCodec.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/FECCodec.java      2008-12-24 19:39:14 UTC (rev 24785)
+++ branches/db4o/freenet/src/freenet/client/FECCodec.java      2008-12-24 19:44:41 UTC (rev 24786)
@@ -156,20 +156,7 @@
                                        long sz = buckets[i].size();
                                        if(sz < blockLength) {
                                                if(i != dataBlockStatus.length - 1)
-                                                       throw new IllegalArgumentException("All buckets except the last must be the full size but data bucket " + i + " of " + dataBlockStatus.length + " (" + dataBlockStatus[i] + ") is " + sz + " not " + blockLength);
-                                               if(sz < blockLength) {
-                                                       // FIXME NOT FETCHING LAST BLOCK
-//                                                     buckets[i] = BucketTools.pad(buckets[i], blockLength, bf, (int) sz);
-                                                       buckets[i].free();
-                                                       buckets[i] = bf.makeBucket(blockLength);
-                                                       writers[i] = buckets[i].getOutputStream();
-                                                       if(logMINOR)
-                                                               Logger.minor(this, "writers[" + i + "] != null (NOT PADDING)");
-                                                       readers[i] = null;
-                                                       numberToDecode++;
-                                               }
-                                               else
-                                                       throw new IllegalArgumentException("Too big: " + sz + " bigger than " + blockLength);
+                                                       throw new IllegalArgumentException("All buckets must be the full size (caller must pad if needed) but data bucket " + i + " of " + dataBlockStatus.length + " (" + dataBlockStatus[i] + ") is " + sz + " not " + blockLength);
                                        } else {
                                                if(logMINOR)
                                                        Logger.minor(this, "writers[" + i + "] = null (already filled)");
@@ -284,13 +271,7 @@
                                buckets[i] = dataBlockStatus[i];
                                long sz = buckets[i].size();
                                if(sz < blockLength) {
-                                       if(i != dataBlockStatus.length - 1)
-                                               throw new IllegalArgumentException("All buckets except the last must be the full size");
-                                       if(sz < blockLength) {
-                                               buckets[i] = BucketTools.pad(buckets[i], blockLength, bf, (int) sz);
-                                               toFree = buckets[i];
-                                       } else
-                                               throw new IllegalArgumentException("Too big: " + sz + " bigger than " + blockLength);
+                                       throw new IllegalArgumentException("All buckets must be the full size: caller must pad the last one if needed");
                                }
                                readers[i] = new DataInputStream(buckets[i].getInputStream());
                        }

Modified: branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherSegment.java 2008-12-24 19:39:14 UTC (rev 24785)
+++ branches/db4o/freenet/src/freenet/client/async/SplitFileFetcherSegment.java 2008-12-24 19:44:41 UTC (rev 24786)
@@ -394,6 +394,29 @@
                        }
                        if(persistent)
                                container.activate(parent, 1);
+                       Bucket lastBlock = dataBuckets[dataBuckets.length-1].data;
+                       if(lastBlock != null) {
+                               if(persistent)
+                                       container.activate(lastBlock, 1);
+                               if(ignoreLastDataBlock) {
+                                       lastBlock.free();
+                                       if(persistent)
+                                               lastBlock.removeFrom(container);
+                                       dataBuckets[dataBuckets.length-1].data = null;
+                               } else if(lastBlock.size() != CHKBlock.DATA_LENGTH) {
+                                       try {
+                                               dataBuckets[dataBuckets.length-1].data =
+                                                       BucketTools.pad(lastBlock, CHKBlock.DATA_LENGTH, context.persistentBucketFactory, (int) lastBlock.size());
+                                               lastBlock.free();
+                                               if(persistent) {
+                                                       lastBlock.removeFrom(container);
+                                                       dataBuckets[dataBuckets.length-1].storeTo(container);
+                                               }
+                                       } catch (IOException e) {
+                                               fail(new FetchException(FetchException.BUCKET_ERROR, e), container, context, true);
+                                       }
+                               }
+                       }
                        if(codec == null)
                                codec = FECCodec.getCodec(splitfileType, dataKeys.length, checkKeys.length, context.mainExecutor);
                        FECJob job = new FECJob(codec, queue, dataBuckets, checkBuckets, CHKBlock.DATA_LENGTH, context.getBucketFactory(persistent), this, true, parent.getPriorityClass(), persistent);
@@ -517,6 +540,26 @@
                 * reconstructed and reinserted.
                 */
 
+               // FIXME don't heal if ignoreLastBlock.
+               Bucket lastBlock = dataBuckets[dataBuckets.length-1].data;
+               if(lastBlock != null) {
+                       if(persistent)
+                               container.activate(lastBlock, 1);
+                       if(lastBlock.size() != CHKBlock.DATA_LENGTH) {
+                               try {
+                                       dataBuckets[dataBuckets.length-1].data =
+                                               BucketTools.pad(lastBlock, CHKBlock.DATA_LENGTH, context.persistentBucketFactory, (int) lastBlock.size());
+                                       lastBlock.free();
+                                       if(persistent) {
+                                               lastBlock.removeFrom(container);
+                                               dataBuckets[dataBuckets.length-1].storeTo(container);
+                                       }
+                               } catch (IOException e) {
+                                       fail(new FetchException(FetchException.BUCKET_ERROR, e), container, context, true);
+                               }
+                       }
+               }
+               
                // Encode any check blocks we don't have
                try {
                codec.addToQueue(new FECJob(codec, context.fecQueue, dataBuckets, checkBuckets, 32768, context.getBucketFactory(persistent), this, false, parent.getPriorityClass(), persistent),
@@ -687,12 +730,14 @@
                                Bucket copy = context.tempBucketFactory.makeBucket(data.size());
                                BucketTools.copy(data, copy);
                                data.free();
-                               data.removeFrom(container);
+                               if(persistent)
+                                       data.removeFrom(container);
                                data = copy;
                        } catch (IOException e) {
                                Logger.normal(this, "Failed to copy data for healing: "+e, e);
                                data.free();
-                               data.removeFrom(container);
+                               if(persistent)
+                                       data.removeFrom(container);
                                return;
                        }
                }
