Author: bombe
Date: 2008-06-29 17:03:08 +0000 (Sun, 29 Jun 2008)
New Revision: 20868
Modified:
trunk/freenet/src/freenet/client/async/SingleFileInserter.java
trunk/freenet/src/freenet/support/io/BucketChainBucket.java
trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
trunk/freenet/src/freenet/support/io/BucketTools.java
Log:
fix backport of BucketChainBucket
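
The main functional change is in BucketTools.split(): when splitting a BucketChainBucket whose bucketSize already matches splitSize, the chunk array has to be captured via getBuckets() before clear() is called, otherwise freeData would empty the chain first and split() would hand back an empty array. The SingleFileInserter change simply passes ctx.persistentBucketFactory to Compressor.compress() directly instead of wrapping it in a BucketChainBucketFactory. A minimal sketch of the corrected ordering (same identifiers as in the BucketTools.java hunk below; surrounding method and imports omitted):

    // Corrected ordering when the chain is already chunked at splitSize:
    // capture the buckets first, because clear() empties the internal list
    // without freeing the underlying buckets.
    if(origData instanceof BucketChainBucket) {
        BucketChainBucket data = (BucketChainBucket) origData;
        if(data.bucketSize == splitSize) {
            Bucket[] buckets = data.getBuckets();
            if(freeData)
                data.clear(); // drop the chain, keep the chunks we return
            return buckets;
        }
    }

The else branch that logs the incompatible split size and falls through to the copy path is unchanged apart from bracing.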
Modified: trunk/freenet/src/freenet/client/async/SingleFileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleFileInserter.java 2008-06-29 16:23:27 UTC (rev 20867)
+++ trunk/freenet/src/freenet/client/async/SingleFileInserter.java 2008-06-29 17:03:08 UTC (rev 20868)
@@ -14,7 +14,6 @@
import freenet.keys.BaseClientKey;
import freenet.keys.CHKBlock;
import freenet.keys.FreenetURI;
-import freenet.keys.NodeCHK;
import freenet.keys.SSKBlock;
import freenet.support.Logger;
import freenet.support.OOMHandler;
@@ -22,7 +21,6 @@
import freenet.support.api.Bucket;
import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
-import freenet.support.io.BucketChainBucketFactory;
import freenet.support.io.BucketTools;
/**
@@ -172,7 +170,7 @@
ctx.eventProducer.produceEvent(new StartedCompressionEvent(i));
Compressor comp = Compressor.getCompressionAlgorithmByDifficulty(i);
Bucket result;
- result = comp.compress(origData, new BucketChainBucketFactory(ctx.persistentBucketFactory, NodeCHK.BLOCK_SIZE), origData.size());
+ result = comp.compress(origData, ctx.persistentBucketFactory, origData.size());
if(result.size() < oneBlockCompressedSize) {
bestCodec = comp;
if(bestCompressedData != null)
Modified: trunk/freenet/src/freenet/support/io/BucketChainBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketChainBucket.java 2008-06-29 16:23:27 UTC (rev 20867)
+++ trunk/freenet/src/freenet/support/io/BucketChainBucket.java 2008-06-29 17:03:08 UTC (rev 20868)
@@ -41,7 +41,7 @@
list[i].free();
}
}
-
+
/** Equivalent to free(), but don't free the underlying buckets. */
public void clear() {
synchronized(this) {
@@ -49,7 +49,7 @@
buckets.clear();
}
}
-
+
public synchronized Bucket[] getBuckets() {
return (Bucket[]) buckets.toArray(new Bucket[buckets.size()]);
}
Modified: trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java 2008-06-29 16:23:27 UTC (rev 20867)
+++ trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java 2008-06-29 17:03:08 UTC (rev 20868)
@@ -19,4 +19,4 @@
return new BucketChainBucket(blockSize, factory);
}
-}
\ No newline at end of file
+}
Modified: trunk/freenet/src/freenet/support/io/BucketTools.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketTools.java 2008-06-29 16:23:27 UTC (rev 20867)
+++ trunk/freenet/src/freenet/support/io/BucketTools.java 2008-06-29 17:03:08 UTC (rev 20868)
@@ -334,24 +334,27 @@
* and the data written to them.
*
* Note that this method will allocate a buffer of size splitSize.
- * @param freeData
+ * @param freeData
* @throws IOException If there is an error creating buckets, reading from
* the provided bucket, or writing to created buckets.
*/
public static Bucket[] split(Bucket origData, int splitSize, BucketFactory bf, boolean freeData) throws IOException {
if(origData instanceof FileBucket) {
- if(freeData)
+ if(freeData) {
Logger.error(BucketTools.class, "Asked to free
data when splitting a FileBucket ?!?!? Not freeing as this would clobber the
split result...");
+ }
return ((FileBucket)origData).split(splitSize);
}
if(origData instanceof BucketChainBucket) {
- BucketChainBucket data = (BucketChainBucket) origData;
+ BucketChainBucket data = (BucketChainBucket)origData;
if(data.bucketSize == splitSize) {
+ Bucket[] buckets = data.getBuckets();
if(freeData)
data.clear();
- return data.getBuckets();
- } else
- Logger.error(BucketTools.class, "Incompatible split size splitting a BucketChainBucket: his split size is " + data.bucketSize + " but mine is " + splitSize + " - we will copy the data, but this suggests a bug", new Exception("debug"));
+ return buckets;
+ } else {
+ Logger.error(BucketTools.class, "Incompatible split size splitting a BucketChainBucket: his split size is "+data.bucketSize+" but mine is "+splitSize+" - we will copy the data, but this suggests a bug", new Exception("debug"));
+ }
}
long length = origData.size();
if(length > ((long)Integer.MAX_VALUE) * splitSize)