Author: nextgens
Date: 2008-06-22 08:41:25 +0000 (Sun, 22 Jun 2008)
New Revision: 20620
Added:
trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
Modified:
trunk/freenet/src/freenet/client/async/SingleFileInserter.java
trunk/freenet/src/freenet/client/async/SplitFileInserter.java
trunk/freenet/src/freenet/support/io/BucketChainBucket.java
trunk/freenet/src/freenet/support/io/BucketTools.java
Log:
Untested backport of r20573 (Write compressed data directly into a chain of
buckets)
Please review & test it
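
For reviewers, this is roughly the call pattern the backport introduces (a sketch only, built from the classes touched in the diff below; the wrapper class, its method name and its throws clause are my assumptions, not code from this commit):

import java.io.IOException;

import freenet.keys.NodeCHK;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
import freenet.support.io.BucketChainBucketFactory;

public class ChainCompressSketch {

	/**
	 * Sketch: compress origData with the codec at the given difficulty,
	 * writing the compressed stream directly into a chain of
	 * NodeCHK.BLOCK_SIZE buckets allocated from the persistent factory,
	 * instead of into one large bucket.
	 */
	public static Bucket compressToChain(Bucket origData, BucketFactory persistentFactory, int difficulty)
			throws IOException, CompressionOutputSizeException {
		// Wrap the persistent factory: every bucket the compressor asks for
		// is now a BucketChainBucket made of block-sized segments.
		BucketFactory chainFactory = new BucketChainBucketFactory(persistentFactory, NodeCHK.BLOCK_SIZE);
		Compressor comp = Compressor.getCompressionAlgorithmByDifficulty(difficulty);
		// Same call shape as in SingleFileInserter below.
		return comp.compress(origData, chainFactory, origData.size());
	}
}

The point is that compressed output already lands in block-sized pieces, so SplitFileInserter can reuse those buckets directly instead of copying the data a second time when splitting.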
Modified: trunk/freenet/src/freenet/client/async/SingleFileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleFileInserter.java 2008-06-22 07:46:34 UTC (rev 20619)
+++ trunk/freenet/src/freenet/client/async/SingleFileInserter.java 2008-06-22 08:41:25 UTC (rev 20620)
@@ -14,6 +14,7 @@
import freenet.keys.BaseClientKey;
import freenet.keys.CHKBlock;
import freenet.keys.FreenetURI;
+import freenet.keys.NodeCHK;
import freenet.keys.SSKBlock;
import freenet.support.Logger;
import freenet.support.OOMHandler;
@@ -21,6 +22,7 @@
import freenet.support.api.Bucket;
import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
+import freenet.support.io.BucketChainBucketFactory;
import freenet.support.io.BucketTools;
/**
@@ -170,7 +172,7 @@
ctx.eventProducer.produceEvent(new StartedCompressionEvent(i));
Compressor comp = Compressor.getCompressionAlgorithmByDifficulty(i);
Bucket result;
- result = comp.compress(origData, ctx.persistentBucketFactory, origData.size());
+ result = comp.compress(origData, new BucketChainBucketFactory(ctx.persistentBucketFactory, NodeCHK.BLOCK_SIZE), origData.size());
if(result.size() < oneBlockCompressedSize) {
bestCodec = comp;
if(bestCompressedData != null)
Modified: trunk/freenet/src/freenet/client/async/SplitFileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileInserter.java 2008-06-22 07:46:34 UTC (rev 20619)
+++ trunk/freenet/src/freenet/client/async/SplitFileInserter.java 2008-06-22 08:41:25 UTC (rev 20620)
@@ -78,14 +78,13 @@
this.cb = cb;
this.ctx = ctx;
this.decompressedLength = decompressedLength;
+ this.dataLength = data.size();
Bucket[] dataBuckets;
try {
- dataBuckets = BucketTools.split(data, CHKBlock.DATA_LENGTH, ctx.persistentBucketFactory);
+ dataBuckets = BucketTools.split(data, CHKBlock.DATA_LENGTH, ctx.persistentBucketFactory, freeData);
} catch (IOException e) {
throw new InsertException(InsertException.BUCKET_ERROR, e, null);
}
- this.dataLength = data.size();
- if(freeData) data.free();
countDataBlocks = dataBuckets.length;
// Encoding is done by segments
if(bestCodec == null)
Modified: trunk/freenet/src/freenet/support/io/BucketChainBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketChainBucket.java 2008-06-22 07:46:34 UTC (rev 20619)
+++ trunk/freenet/src/freenet/support/io/BucketChainBucket.java 2008-06-22 08:41:25 UTC (rev 20620)
@@ -15,7 +15,7 @@
public class BucketChainBucket implements Bucket {
private final Vector buckets;
- private long bucketSize;
+ public final long bucketSize;
private long size;
private boolean freed;
private boolean readOnly;
@@ -42,6 +42,14 @@
}
}
+ /** Equivalent to free(), but don't free the underlying buckets. */
+ public void clear() {
+ synchronized(this) {
+ size = 0;
+ buckets.clear();
+ }
+ }
+
public synchronized Bucket[] getBuckets() {
return (Bucket[]) buckets.toArray(new Bucket[buckets.size()]);
}
Added: trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java (rev 0)
+++ trunk/freenet/src/freenet/support/io/BucketChainBucketFactory.java 2008-06-22 08:41:25 UTC (rev 20620)
@@ -0,0 +1,22 @@
+package freenet.support.io;
+
+import java.io.IOException;
+
+import freenet.support.api.Bucket;
+import freenet.support.api.BucketFactory;
+
+public class BucketChainBucketFactory implements BucketFactory {
+
+ final BucketFactory factory;
+ final int blockSize;
+
+ public BucketChainBucketFactory(BucketFactory bucketFactory, int block_size) {
+ this.factory = bucketFactory;
+ this.blockSize = block_size;
+ }
+
+ public Bucket makeBucket(long size) throws IOException {
+ return new BucketChainBucket(blockSize, factory);
+ }
+
+}
\ No newline at end of file
Modified: trunk/freenet/src/freenet/support/io/BucketTools.java
===================================================================
--- trunk/freenet/src/freenet/support/io/BucketTools.java 2008-06-22 07:46:34 UTC (rev 20619)
+++ trunk/freenet/src/freenet/support/io/BucketTools.java 2008-06-22 08:41:25 UTC (rev 20620)
@@ -334,13 +334,25 @@
* and the data written to them.
*
* Note that this method will allocate a buffer of size splitSize.
+ * @param freeData
* @throws IOException If there is an error creating buckets, reading from
* the provided bucket, or writing to created buckets.
*/
- public static Bucket[] split(Bucket origData, int splitSize, BucketFactory bf) throws IOException {
+ public static Bucket[] split(Bucket origData, int splitSize, BucketFactory bf, boolean freeData) throws IOException {
if(origData instanceof FileBucket) {
+ if(freeData)
+ Logger.error(BucketTools.class, "Asked to free data when splitting a FileBucket ?!?!? Not freeing as this would clobber the split result...");
return ((FileBucket)origData).split(splitSize);
}
+ if(origData instanceof BucketChainBucket) {
+ BucketChainBucket data = (BucketChainBucket) origData;
+ if(data.bucketSize == splitSize) {
+ if(freeData)
+ data.clear();
+ return data.getBuckets();
+ } else
+ Logger.error(BucketTools.class, "Incompatible split size splitting a BucketChainBucket: his split size is " + data.bucketSize + " but mine is " + splitSize + " - we will copy the data, but this suggests a bug", new Exception("debug"));
+ }
long length = origData.size();
if(length > ((long)Integer.MAX_VALUE) * splitSize)
throw new IllegalArgumentException("Way too big!: "+length+" for "+splitSize);
@@ -374,6 +386,8 @@
else
is.close();
}
+ if(freeData)
+ origData.free();
return buckets;
}