Author: toad
Date: 2006-03-03 20:49:45 +0000 (Fri, 03 Mar 2006)
New Revision: 8148
Modified:
trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
trunk/freenet/src/freenet/client/async/ClientGetter.java
trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
trunk/freenet/src/freenet/keys/Key.java
trunk/freenet/src/freenet/node/Version.java
trunk/freenet/src/freenet/node/fcp/ClientGet.java
trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java
trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java
trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java
trunk/freenet/src/freenet/support/compress/Compressor.java
trunk/freenet/src/freenet/support/compress/GzipCompressor.java
Log:
486:
Return data directly in the original file, if possible, on FCP get-to-file requests.
Also provides infrastructure for next bit...
Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
2006-03-03 19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
2006-03-03 20:49:45 UTC (rev 8148)
@@ -94,7 +94,7 @@
if(uri == null) throw new NullPointerException();
FetcherContext context = getFetcherContext();
FetchWaiter fw = new FetchWaiter();
- ClientGetter get = new ClientGetter(fw, node.fetchScheduler,
uri, context, priorityClass, this);
+ ClientGetter get = new ClientGetter(fw, node.fetchScheduler,
uri, context, priorityClass, this, null);
get.start();
return fw.waitForCompletion();
}
Modified: trunk/freenet/src/freenet/client/async/ClientGetter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/ClientGetter.java 2006-03-03
19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/client/async/ClientGetter.java 2006-03-03
20:49:45 UTC (rev 8148)
@@ -1,5 +1,6 @@
package freenet.client.async;
+import java.io.IOException;
import java.net.MalformedURLException;
import freenet.client.ArchiveContext;
@@ -9,6 +10,8 @@
import freenet.client.FetcherContext;
import freenet.client.events.SplitfileProgressEvent;
import freenet.keys.FreenetURI;
+import freenet.support.Bucket;
+import freenet.support.BucketTools;
import freenet.support.Logger;
/**
@@ -23,10 +26,25 @@
ClientGetState currentState;
private boolean finished;
private int archiveRestarts;
-
- public ClientGetter(ClientCallback client, ClientRequestScheduler
sched, FreenetURI uri, FetcherContext ctx, short priorityClass, Object
clientContext) {
+ /** If not null, Bucket to return the data in */
+ final Bucket returnBucket;
+
+ /**
+ * Fetch a key.
+ * @param client
+ * @param sched
+ * @param uri
+ * @param ctx
+ * @param priorityClass
+ * @param clientContext The context object (can be anything). Used for
round-robin query balancing.
+ * @param returnBucket The bucket to return the data in. Can be null.
If not null, the ClientGetter must either
+ * write the data directly to the bucket, or copy it and free the
original temporary bucket. Preferably the
+ * former, obviously!
+ */
+ public ClientGetter(ClientCallback client, ClientRequestScheduler
sched, FreenetURI uri, FetcherContext ctx, short priorityClass, Object
clientContext, Bucket returnBucket) {
super(priorityClass, sched, clientContext);
this.client = client;
+ this.returnBucket = returnBucket;
this.uri = uri;
this.ctx = ctx;
this.finished = false;
@@ -36,7 +54,7 @@
public void start() throws FetchException {
try {
- currentState = new SingleFileFetcher(this, this, new
ClientMetadata(), uri, ctx, actx, ctx.maxNonSplitfileRetries, 0, false, null,
true);
+ currentState = new SingleFileFetcher(this, this, new
ClientMetadata(), uri, ctx, actx, ctx.maxNonSplitfileRetries, 0, false, null,
true, returnBucket);
currentState.schedule();
} catch (MalformedURLException e) {
throw new FetchException(FetchException.INVALID_URI, e);
@@ -46,6 +64,20 @@
public void onSuccess(FetchResult result, ClientGetState state) {
finished = true;
currentState = null;
+ if(result.asBucket() != returnBucket) {
+ Bucket from = result.asBucket();
+ Bucket to = returnBucket;
+ try {
+ Logger.minor(this, "Copying - returnBucket not
respected by client.async");
+ BucketTools.copy(from, to);
+ } catch (IOException e) {
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR), state /* not strictly to blame,
but we're not ako ClientGetState... */);
+ }
+ result = new FetchResult(result, to);
+ } else {
+ if(returnBucket != null)
+ Logger.minor(this, "client.async returned data
in returnBucket");
+ }
client.onSuccess(result, this);
}
Modified: trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
2006-03-03 19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/client/async/SingleFileFetcher.java
2006-03-03 20:49:45 UTC (rev 8148)
@@ -22,6 +22,7 @@
import freenet.node.LowLevelGetException;
import freenet.node.Node;
import freenet.support.Bucket;
+import freenet.support.BucketTools;
import freenet.support.Logger;
import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
@@ -48,16 +49,17 @@
private final boolean dontTellClientGet;
private boolean cancelled;
private Object token;
+ private final Bucket returnBucket;
-
/** Create a new SingleFileFetcher and register self.
* Called when following a redirect, or direct from ClientGet.
* @param token
* @param dontTellClientGet
*/
- public SingleFileFetcher(ClientGetter get, GetCompletionCallback cb,
ClientMetadata metadata, ClientKey key, LinkedList metaStrings, FetcherContext
ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean
dontTellClientGet, Object token, boolean isEssential) throws FetchException {
+ public SingleFileFetcher(ClientGetter get, GetCompletionCallback cb,
ClientMetadata metadata, ClientKey key, LinkedList metaStrings, FetcherContext
ctx, ArchiveContext actx, int maxRetries, int recursionLevel, boolean
dontTellClientGet, Object token, boolean isEssential, Bucket returnBucket)
throws FetchException {
Logger.minor(this, "Creating SingleFileFetcher for "+key);
this.cancelled = false;
+ this.returnBucket = returnBucket;
this.dontTellClientGet = dontTellClientGet;
this.token = token;
this.parent = get;
@@ -83,14 +85,16 @@
}
/** Called by ClientGet. */
- public SingleFileFetcher(ClientGetter get, GetCompletionCallback cb,
ClientMetadata metadata, FreenetURI uri, FetcherContext ctx, ArchiveContext
actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, Object
token, boolean isEssential) throws MalformedURLException, FetchException {
- this(get, cb, metadata, ClientKey.getBaseKey(uri),
uri.listMetaStrings(), ctx, actx, maxRetries, recursionLevel,
dontTellClientGet, token, isEssential);
+ public SingleFileFetcher(ClientGetter get, GetCompletionCallback cb,
ClientMetadata metadata, FreenetURI uri, FetcherContext ctx, ArchiveContext
actx, int maxRetries, int recursionLevel, boolean dontTellClientGet, Object
token, boolean isEssential, Bucket returnBucket) throws MalformedURLException,
FetchException {
+ this(get, cb, metadata, ClientKey.getBaseKey(uri),
uri.listMetaStrings(), ctx, actx, maxRetries, recursionLevel,
dontTellClientGet, token, isEssential, returnBucket);
}
- /** Copy constructor, modifies a few given fields, don't call
schedule() */
+ /** Copy constructor, modifies a few given fields, don't call
schedule().
+ * Used for things like slave fetchers for MultiLevelMetadata,
therefore does not remember returnBucket. */
public SingleFileFetcher(SingleFileFetcher fetcher, Metadata newMeta,
GetCompletionCallback callback, FetcherContext ctx2) throws FetchException {
Logger.minor(this, "Creating SingleFileFetcher for
"+fetcher.key);
this.token = fetcher.token;
+ this.returnBucket = null;
this.dontTellClientGet = fetcher.dontTellClientGet;
this.actx = fetcher.actx;
this.ah = fetcher.ah;
@@ -196,7 +200,7 @@
while(!decompressors.isEmpty()) {
Compressor c = (Compressor)
decompressors.removeLast();
try {
- data = c.decompress(data,
ctx.bucketFactory, Math.max(ctx.maxTempLength, ctx.maxOutputLength));
+ data = c.decompress(data,
ctx.bucketFactory, Math.max(ctx.maxTempLength, ctx.maxOutputLength),
decompressors.isEmpty() ? returnBucket : null);
} catch (IOException e) {
onFailure(new
FetchException(FetchException.BUCKET_ERROR, e));
return;
@@ -264,8 +268,23 @@
throw new
FetchException(FetchException.NOT_ENOUGH_METASTRINGS);
Bucket dataBucket = ah.get((String)
metaStrings.removeFirst(), actx, null, recursionLevel+1, true);
if(dataBucket != null) {
+ // The client may free it, which is
bad, or it may hang on to it for so long that it gets
+ // freed by us, which is also bad.
+ // So copy it.
+ // FIXME this is stupid, reconsider how
we determine when to free buckets; refcounts maybe?
+ Bucket out;
+ try {
+ if(returnBucket != null)
+ out = returnBucket;
+ else
+ out =
ctx.bucketFactory.makeBucket(dataBucket.size());
+ BucketTools.copy(dataBucket,
out);
+ } catch (IOException e) {
+ onFailure(new
FetchException(FetchException.BUCKET_ERROR));
+ return;
+ }
// Return the data
- onSuccess(new
FetchResult(this.clientMetadata, dataBucket));
+ onSuccess(new
FetchResult(this.clientMetadata, out));
return;
} else {
// Metadata cannot contain pointers to
files which don't exist.
@@ -308,7 +327,7 @@
metaStrings.addFirst(o);
}
- SingleFileFetcher f = new
SingleFileFetcher(parent, rcb, clientMetadata, key, metaStrings, ctx, actx,
maxRetries, recursionLevel, false, null, true);
+ SingleFileFetcher f = new
SingleFileFetcher(parent, rcb, clientMetadata, key, metaStrings, ctx, actx,
maxRetries, recursionLevel, false, null, true, returnBucket);
if(metadata.isCompressed()) {
Compressor codec =
Compressor.getCompressionAlgorithmByMetadataID(metadata.getCompressionCodec());
f.addDecompressor(codec);
@@ -330,7 +349,7 @@
}
SplitFileFetcher sf = new
SplitFileFetcher(metadata, rcb, parent, ctx,
- decompressors, clientMetadata,
actx, recursionLevel);
+ decompressors, clientMetadata,
actx, recursionLevel, returnBucket);
sf.schedule();
rcb.onBlockSetFinished(this);
// SplitFile will now run.
@@ -414,7 +433,7 @@
parent.currentState = SingleFileFetcher.this;
try {
metadata =
Metadata.construct(result.asBucket());
- SingleFileFetcher f = new
SingleFileFetcher(parent, rcb, clientMetadata, key, metaStrings, ctx, actx,
maxRetries, recursionLevel, dontTellClientGet, null, true);
+ SingleFileFetcher f = new
SingleFileFetcher(parent, rcb, clientMetadata, key, metaStrings, ctx, actx,
maxRetries, recursionLevel, dontTellClientGet, null, true, returnBucket);
f.metadata = metadata;
f.handleMetadata();
} catch (MetadataParseException e) {
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
2006-03-03 19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcher.java
2006-03-03 20:49:45 UTC (rev 8148)
@@ -55,12 +55,15 @@
private final long overrideLength;
/** Accept non-full splitfile chunks? */
private final boolean splitUseLengths;
+ /** Preferred bucket to return data in */
+ private final Bucket returnBucket;
private boolean finished;
public SplitFileFetcher(Metadata metadata, GetCompletionCallback rcb,
ClientGetter parent,
FetcherContext newCtx, LinkedList decompressors,
ClientMetadata clientMetadata,
- ArchiveContext actx, int recursionLevel) throws
FetchException, MetadataParseException {
+ ArchiveContext actx, int recursionLevel, Bucket
returnBucket) throws FetchException, MetadataParseException {
this.finished = false;
+ this.returnBucket = returnBucket;
this.fetchContext = newCtx;
this.archiveContext = actx;
this.decompressors = decompressors;
@@ -141,7 +144,10 @@
OutputStream os = null;
Bucket output;
try {
- output =
fetchContext.bucketFactory.makeBucket(finalLength);
+ if(returnBucket != null && decompressors.isEmpty())
+ output = returnBucket;
+ else
+ output =
fetchContext.bucketFactory.makeBucket(finalLength);
os = output.getOutputStream();
for(int i=0;i<segments.length;i++) {
SplitFileFetcherSegment s = segments[i];
@@ -198,7 +204,9 @@
while(!decompressors.isEmpty()) {
Compressor c = (Compressor)
decompressors.removeLast();
try {
- data = c.decompress(data,
fetchContext.bucketFactory, Math.max(fetchContext.maxTempLength,
fetchContext.maxOutputLength));
+ Bucket out = returnBucket;
+ if(!decompressors.isEmpty()) out = null;
+ data = c.decompress(data,
fetchContext.bucketFactory, Math.max(fetchContext.maxTempLength,
fetchContext.maxOutputLength), out);
} catch (IOException e) {
cb.onFailure(new
FetchException(FetchException.BUCKET_ERROR, e), this);
return;
Modified: trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
2006-03-03 19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/client/async/SplitFileFetcherSegment.java
2006-03-03 20:49:45 UTC (rev 8148)
@@ -322,12 +322,12 @@
try {
for(int i=0;i<dataBlocks.length;i++) {
dataBlockStatus[i] =
- new
SingleFileFetcher(parentFetcher.parent, this, null, dataBlocks[i],
blockFetchContext, archiveContext, blockFetchContext.maxSplitfileBlockRetries,
recursionLevel, true, new Integer(i), true);
+ new
SingleFileFetcher(parentFetcher.parent, this, null, dataBlocks[i],
blockFetchContext, archiveContext, blockFetchContext.maxSplitfileBlockRetries,
recursionLevel, true, new Integer(i), true, null);
dataBlockStatus[i].schedule();
}
for(int i=0;i<checkBlocks.length;i++) {
checkBlockStatus[i] =
- new
SingleFileFetcher(parentFetcher.parent, this, null, checkBlocks[i],
blockFetchContext, archiveContext, blockFetchContext.maxSplitfileBlockRetries,
recursionLevel, true, new Integer(dataBlocks.length+i), false);
+ new
SingleFileFetcher(parentFetcher.parent, this, null, checkBlocks[i],
blockFetchContext, archiveContext, blockFetchContext.maxSplitfileBlockRetries,
recursionLevel, true, new Integer(dataBlocks.length+i), false, null);
checkBlockStatus[i].schedule();
}
} catch (MalformedURLException e) {
Modified: trunk/freenet/src/freenet/keys/Key.java
===================================================================
--- trunk/freenet/src/freenet/keys/Key.java 2006-03-03 19:33:35 UTC (rev
8147)
+++ trunk/freenet/src/freenet/keys/Key.java 2006-03-03 20:49:45 UTC (rev
8148)
@@ -111,7 +111,7 @@
Compressor decompressor =
Compressor.getCompressionAlgorithmByMetadataID(compressionAlgorithm);
Bucket inputBucket = new SimpleReadOnlyArrayBucket(output,
shortLength?2:4, output.length-(shortLength?2:4));
try {
- return decompressor.decompress(inputBucket, bf,
maxLength);
+ return decompressor.decompress(inputBucket, bf,
maxLength, null);
} catch (CompressionOutputSizeException e) {
throw new CHKDecodeException("Too big");
}
Modified: trunk/freenet/src/freenet/node/Version.java
===================================================================
--- trunk/freenet/src/freenet/node/Version.java 2006-03-03 19:33:35 UTC (rev
8147)
+++ trunk/freenet/src/freenet/node/Version.java 2006-03-03 20:49:45 UTC (rev
8148)
@@ -20,7 +20,7 @@
public static final String protocolVersion = "1.0";
/** The build number of the current revision */
- private static final int buildNumber = 485;
+ private static final int buildNumber = 486;
/** Oldest build of Fred we will talk to */
private static final int lastGoodBuild = 475;
Modified: trunk/freenet/src/freenet/node/fcp/ClientGet.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientGet.java 2006-03-03 19:33:35 UTC
(rev 8147)
+++ trunk/freenet/src/freenet/node/fcp/ClientGet.java 2006-03-03 20:49:45 UTC
(rev 8148)
@@ -23,6 +23,7 @@
import freenet.support.Fields;
import freenet.support.Logger;
import freenet.support.SimpleFieldSet;
+import freenet.support.io.FileBucket;
/**
* A simple client fetch. This can of course fetch arbitrarily large
@@ -46,6 +47,8 @@
private final File targetFile;
private final File tempFile;
final String clientToken;
+ /** Bucket passed in to the ClientGetter to return data in. Null unless
returntype=disk */
+ private final Bucket returnBucket;
// Verbosity bitmasks
private int VERBOSITY_SPLITFILE_PROGRESS = 1;
@@ -92,12 +95,19 @@
this.verbosity = message.verbosity;
// FIXME do something with verbosity !!
// Has already been checked
- this.returnType = message.returnType;
fctx.maxOutputLength = message.maxSize;
fctx.maxTempLength = message.maxTempSize;
- this.targetFile = message.diskFile;
- this.tempFile = message.tempFile;
- getter = new ClientGetter(this, client.node.fetchScheduler,
uri, fctx, priorityClass, client);
+ this.returnType = message.returnType;
+ if(returnType == ClientGetMessage.RETURN_TYPE_DISK) {
+ this.targetFile = message.diskFile;
+ this.tempFile = message.tempFile;
+ returnBucket = new FileBucket(message.tempFile, false,
false, false, false);
+ } else {
+ returnBucket = null;
+ targetFile = null;
+ tempFile = null;
+ }
+ getter = new ClientGetter(this, client.node.fetchScheduler,
uri, fctx, priorityClass, client, returnBucket);
}
/**
@@ -153,8 +163,12 @@
getFailedMessage = new
GetFailedMessage(fs.subset("GetFailed"), false);
}
}
-
- getter = new ClientGetter(this, client.node.fetchScheduler,
uri, fctx, priorityClass, client);
+ if(returnType == ClientGetMessage.RETURN_TYPE_DISK) {
+ returnBucket = new FileBucket(tempFile, false, false,
false, false);
+ } else
+ returnBucket = null;
+
+ getter = new ClientGetter(this, client.node.fetchScheduler,
uri, fctx, priorityClass, client, returnBucket);
start();
}
@@ -195,9 +209,16 @@
} else if(returnType ==
ClientGetMessage.RETURN_TYPE_DISK) {
// Write to temp file, then rename over filename
FileOutputStream fos = null;
+ boolean closed = false;
try {
- fos = new FileOutputStream(tempFile);
- BucketTools.copyTo(data, fos,
data.size());
+ if(data != returnBucket) {
+ fos = new
FileOutputStream(tempFile);
+ BucketTools.copyTo(data, fos,
data.size());
+ if(fos != null) {
+ fos.close(); // must be
closed before rename
+ closed = true;
+ }
+ }
if(!tempFile.renameTo(targetFile)) {
postFetchProtocolErrorMessage =
new ProtocolErrorMessage(ProtocolErrorMessage.COULD_NOT_RENAME_FILE, false,
null, identifier);
trySendDataFoundOrGetFailed();
@@ -209,7 +230,7 @@
postFetchProtocolErrorMessage = new
ProtocolErrorMessage(ProtocolErrorMessage.COULD_NOT_WRITE_FILE, false, null,
identifier);
}
try {
- if(fos != null)
+ if(fos != null && !closed)
fos.close();
} catch (IOException e) {
Logger.error(this, "Caught "+e+"
closing file "+tempFile, e);
Modified: trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java 2006-03-03
19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/node/fcp/ClientGetMessage.java 2006-03-03
20:49:45 UTC (rev 8148)
@@ -171,10 +171,6 @@
} else if(persistenceString.equalsIgnoreCase("forever")) {
// Same as reboot but saved to disk, persists forever.
persistenceType = ClientRequest.PERSIST_FOREVER;
- // FIXME for now we only support returntype=disk if
persistenceType=forever.
- if(returnType != RETURN_TYPE_DISK) {
- throw new
MessageInvalidException(ProtocolErrorMessage.NOT_SUPPORTED,
"PersistenceType=forever implies ReturnType=disk", identifier);
- }
} else {
throw new
MessageInvalidException(ProtocolErrorMessage.ERROR_PARSING_NUMBER, "Error
parsing Persistence field: "+persistenceString, identifier);
}
Modified: trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java 2006-03-03
19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/node/fcp/GetFailedMessage.java 2006-03-03
20:49:45 UTC (rev 8148)
@@ -72,7 +72,7 @@
* or another node).
*/
public SimpleFieldSet getFieldSet(boolean verbose) {
- SimpleFieldSet sfs = new SimpleFieldSet(false);
+ SimpleFieldSet sfs = new SimpleFieldSet(true);
sfs.put("Code", Integer.toString(code));
if(verbose)
sfs.put("CodeDescription", codeDescription);
Modified: trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java
===================================================================
--- trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java 2006-03-03
19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/node/fcp/PutFailedMessage.java 2006-03-03
20:49:45 UTC (rev 8148)
@@ -70,7 +70,7 @@
}
public SimpleFieldSet getFieldSet(boolean verbose) {
- SimpleFieldSet fs = new SimpleFieldSet(false);
+ SimpleFieldSet fs = new SimpleFieldSet(true);
fs.put("Identifier", identifier);
fs.put("Code", Integer.toString(code));
if(verbose)
Modified: trunk/freenet/src/freenet/support/compress/Compressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/Compressor.java 2006-03-03
19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/support/compress/Compressor.java 2006-03-03
20:49:45 UTC (rev 8148)
@@ -16,7 +16,17 @@
public abstract Bucket compress(Bucket data, BucketFactory bf, long
maxLength) throws IOException, CompressionOutputSizeException;
- public abstract Bucket decompress(Bucket data, BucketFactory
bucketFactory, long maxLength) throws IOException,
CompressionOutputSizeException;
+ /**
+ * Decompress data.
+ * @param data The data to decompress.
+ * @param bucketFactory A BucketFactory to create a new Bucket with if
necessary.
+ * @param maxLength The maximum length to decompress (we throw if more
is present).
+ * @param preferred A Bucket to use instead. If null, we allocate one
from the BucketFactory.
+ * @return
+ * @throws IOException
+ * @throws CompressionOutputSizeException
+ */
+ public abstract Bucket decompress(Bucket data, BucketFactory
bucketFactory, long maxLength, Bucket preferred) throws IOException,
CompressionOutputSizeException;
public short codecNumberForMetadata() {
return Metadata.COMPRESS_GZIP;
Modified: trunk/freenet/src/freenet/support/compress/GzipCompressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/GzipCompressor.java
2006-03-03 19:33:35 UTC (rev 8147)
+++ trunk/freenet/src/freenet/support/compress/GzipCompressor.java
2006-03-03 20:49:45 UTC (rev 8148)
@@ -5,10 +5,8 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.util.zip.DataFormatException;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
-import java.util.zip.Inflater;
import freenet.support.Bucket;
import freenet.support.BucketFactory;
@@ -46,8 +44,12 @@
return output;
}
- public Bucket decompress(Bucket data, BucketFactory bf, long maxLength)
throws IOException, CompressionOutputSizeException {
- Bucket output = bf.makeBucket(-1);
+ public Bucket decompress(Bucket data, BucketFactory bf, long maxLength,
Bucket preferred) throws IOException, CompressionOutputSizeException {
+ Bucket output;
+ if(preferred != null)
+ output = preferred;
+ else
+ output = bf.makeBucket(-1);
InputStream is = data.getInputStream();
OutputStream os = output.getOutputStream();
decompress(is, os, maxLength);