Author: toad
Date: 2008-06-21 14:38:38 +0000 (Sat, 21 Jun 2008)
New Revision: 20573
Added:
branches/db4o/freenet/src/freenet/support/io/BucketChainBucketFactory.java
Modified:
branches/db4o/freenet/src/freenet/client/async/InsertCompressor.java
branches/db4o/freenet/src/freenet/client/async/SplitFileInserter.java
branches/db4o/freenet/src/freenet/support/io/BucketChainBucket.java
branches/db4o/freenet/src/freenet/support/io/BucketTools.java
Log:
Write compressed data directly into a chain of buckets.
Saves copying, disk space, and time later on. (Currently this time is on the
database thread, because it's in the SplitFileFetcher constructor...).
BACKPORT.
Modified: branches/db4o/freenet/src/freenet/client/async/InsertCompressor.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/InsertCompressor.java	2008-06-21 14:23:52 UTC (rev 20572)
+++ branches/db4o/freenet/src/freenet/client/async/InsertCompressor.java	2008-06-21 14:38:38 UTC (rev 20573)
@@ -7,10 +7,12 @@
import com.db4o.query.Predicate;
import freenet.client.InsertException;
+import freenet.keys.NodeCHK;
import freenet.support.api.Bucket;
import freenet.support.api.BucketFactory;
import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
+import freenet.support.io.BucketChainBucketFactory;
import freenet.support.io.NativeThread;
/**
@@ -71,7 +73,7 @@
inserter.onStartCompression(i, null, context);
Compressor comp =
Compressor.getCompressionAlgorithmByDifficulty(i);
Bucket result;
- result = comp.compress(origData, bucketFactory, origData.size());
+ result = comp.compress(origData, new BucketChainBucketFactory(bucketFactory, NodeCHK.BLOCK_SIZE), origData.size());
if(result.size() < minSize) {
bestCodec = comp;
if(bestCompressedData != null)
Modified: branches/db4o/freenet/src/freenet/client/async/SplitFileInserter.java
===================================================================
--- branches/db4o/freenet/src/freenet/client/async/SplitFileInserter.java	2008-06-21 14:23:52 UTC (rev 20572)
+++ branches/db4o/freenet/src/freenet/client/async/SplitFileInserter.java	2008-06-21 14:38:38 UTC (rev 20573)
@@ -81,14 +81,13 @@
this.cb = cb;
this.ctx = ctx;
this.decompressedLength = decompressedLength;
+ this.dataLength = data.size();
Bucket[] dataBuckets;
try {
- dataBuckets = BucketTools.split(data, CHKBlock.DATA_LENGTH, ctx.persistentBucketFactory);
+ dataBuckets = BucketTools.split(data, CHKBlock.DATA_LENGTH, ctx.persistentBucketFactory, freeData);
} catch (IOException e) {
throw new InsertException(InsertException.BUCKET_ERROR, e, null);
}
- this.dataLength = data.size();
- if(freeData) data.free();
countDataBlocks = dataBuckets.length;
// Encoding is done by segments
if(bestCodec == null)
Modified: branches/db4o/freenet/src/freenet/support/io/BucketChainBucket.java
===================================================================
--- branches/db4o/freenet/src/freenet/support/io/BucketChainBucket.java	2008-06-21 14:23:52 UTC (rev 20572)
+++ branches/db4o/freenet/src/freenet/support/io/BucketChainBucket.java	2008-06-21 14:38:38 UTC (rev 20573)
@@ -17,7 +17,7 @@
public class BucketChainBucket implements Bucket {
private final Vector buckets;
- private long bucketSize;
+ public final long bucketSize;
private long size;
private boolean freed;
private boolean readOnly;
@@ -43,6 +43,14 @@
list[i].free();
}
}
+
+ /** Equivalent to free(), but don't free the underlying buckets. */
+ public void clear() {
+ synchronized(this) {
+ size = 0;
+ buckets.clear();
+ }
+ }
public synchronized Bucket[] getBuckets() {
return (Bucket[]) buckets.toArray(new Bucket[buckets.size()]);
Added:
branches/db4o/freenet/src/freenet/support/io/BucketChainBucketFactory.java
===================================================================
--- branches/db4o/freenet/src/freenet/support/io/BucketChainBucketFactory.java	(rev 0)
+++ branches/db4o/freenet/src/freenet/support/io/BucketChainBucketFactory.java	2008-06-21 14:38:38 UTC (rev 20573)
@@ -0,0 +1,22 @@
+package freenet.support.io;
+
+import java.io.IOException;
+
+import freenet.support.api.Bucket;
+import freenet.support.api.BucketFactory;
+
+public class BucketChainBucketFactory implements BucketFactory {
+
+ final BucketFactory factory;
+ final int blockSize;
+
+	public BucketChainBucketFactory(BucketFactory bucketFactory, int block_size) {
+ this.factory = bucketFactory;
+ this.blockSize = block_size;
+ }
+
+ public Bucket makeBucket(long size) throws IOException {
+ return new BucketChainBucket(blockSize, factory);
+ }
+
+}
Modified: branches/db4o/freenet/src/freenet/support/io/BucketTools.java
===================================================================
--- branches/db4o/freenet/src/freenet/support/io/BucketTools.java	2008-06-21 14:23:52 UTC (rev 20572)
+++ branches/db4o/freenet/src/freenet/support/io/BucketTools.java	2008-06-21 14:38:38 UTC (rev 20573)
@@ -334,13 +334,27 @@
* and the data written to them.
*
* Note that this method will allocate a buffer of size splitSize.
+ * @param freeData
* @throws IOException If there is an error creating buckets, reading from
* the provided bucket, or writing to created buckets.
*/
- public static Bucket[] split(Bucket origData, int splitSize, BucketFactory bf) throws IOException {
+ public static Bucket[] split(Bucket origData, int splitSize, BucketFactory bf, boolean freeData) throws IOException {
if(origData instanceof FileBucket) {
+ if(freeData) {
+ Logger.error(BucketTools.class, "Asked to free data when splitting a FileBucket ?!?!? Not freeing as this would clobber the split result...");
+ }
return ((FileBucket)origData).split(splitSize);
}
+ if(origData instanceof BucketChainBucket) {
+ BucketChainBucket data = (BucketChainBucket)origData;
+ if(data.bucketSize == splitSize) {
+ if(freeData)
+ data.clear();
+ return data.getBuckets();
+ } else {
+ Logger.error(BucketTools.class, "Incompatible split size splitting a BucketChainBucket: his split size is "+data.bucketSize+" but mine is "+splitSize+" - we will copy the data, but this suggests a bug", new Exception("debug"));
+ }
+ }
long length = origData.size();
if(length > ((long)Integer.MAX_VALUE) * splitSize)
throw new IllegalArgumentException("Way too big!: "+length+" for "+splitSize);
@@ -374,6 +388,8 @@
else
is.close();
}
+ if(freeData)
+ origData.free();
return buckets;
}