Author: toad
Date: 2005-11-10 16:10:49 +0000 (Thu, 10 Nov 2005)
New Revision: 7517
Added:
trunk/freenet/src/freenet/support/SimpleReadOnlyArrayBucket.java
trunk/freenet/src/freenet/support/compress/CompressionOutputSizeException.java
Modified:
trunk/freenet/src/freenet/client/FetchException.java
trunk/freenet/src/freenet/client/Fetcher.java
trunk/freenet/src/freenet/client/FetcherContext.java
trunk/freenet/src/freenet/client/FileInserter.java
trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
trunk/freenet/src/freenet/keys/CHKBlock.java
trunk/freenet/src/freenet/keys/ClientCHKBlock.java
trunk/freenet/src/freenet/keys/ClientKey.java
trunk/freenet/src/freenet/keys/KeyBlock.java
trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java
trunk/freenet/src/freenet/node/Version.java
trunk/freenet/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
trunk/freenet/src/freenet/support/compress/Compressor.java
trunk/freenet/src/freenet/support/compress/GzipCompressor.java
Log:
153:
Fix insertion of 8MB of zeros as one key.
Make encode/decode use Buckets, and a 4-byte compressed length field.
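
For reference, a minimal sketch of the 4-byte big-endian length prefix that the new encode/decode paths put in front of compressed CHK data (illustrative only; the class and method names below are not part of the tree):

    // Sketch of the 4-byte big-endian length prefix used for compressed CHK data.
    // ClientCHKBlock.encode() writes the pre-compression length in the first four
    // bytes; CHKBlock.decompress() reads it back before decompressing the rest.
    final class LengthPrefixSketch {

        // Prepend the original (uncompressed) length as 4 big-endian bytes.
        static byte[] prependLength(byte[] compressed, long sourceLength) {
            byte[] out = new byte[compressed.length + 4];
            out[0] = (byte) ((sourceLength >> 24) & 0xff);
            out[1] = (byte) ((sourceLength >> 16) & 0xff);
            out[2] = (byte) ((sourceLength >> 8) & 0xff);
            out[3] = (byte) (sourceLength & 0xff);
            System.arraycopy(compressed, 0, out, 4, compressed.length);
            return out;
        }

        // Recover the original length from the first 4 bytes.
        static int readLength(byte[] block) {
            return ((block[0] & 0xff) << 24) | ((block[1] & 0xff) << 16)
                    | ((block[2] & 0xff) << 8) | (block[3] & 0xff);
        }
    }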
Modified: trunk/freenet/src/freenet/client/FetchException.java
===================================================================
--- trunk/freenet/src/freenet/client/FetchException.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/client/FetchException.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -87,6 +87,8 @@
return "Splitfile error";
case INVALID_URI:
return "Invalid URI";
+ case TOO_BIG:
+ return "Too big";
default:
return "Unknown fetch error code: "+mode;
}
@@ -134,4 +136,8 @@
public static final int SPLITFILE_ERROR = 19;
/** Invalid URI. */
public static final int INVALID_URI = 20;
+ /** Too big */
+ public static final int TOO_BIG = 21;
+ /** Metadata too big */
+ public static final int TOO_BIG_METADATA = 22;
}
Modified: trunk/freenet/src/freenet/client/Fetcher.java
===================================================================
--- trunk/freenet/src/freenet/client/Fetcher.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/client/Fetcher.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -15,6 +15,7 @@
import freenet.support.Bucket;
import freenet.support.BucketTools;
import freenet.support.Logger;
+import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
/** Class that does the actual fetching. Does not have to have a user friendly
@@ -134,28 +135,34 @@
ctx.eventProducer.produceEvent(new GotBlockEvent(key));
- byte[] data;
+ Bucket data;
try {
- data = block.decode(key);
+ data = block.decode(key, ctx.bucketFactory, (int) (Math.min(ctx.maxTempLength, Integer.MAX_VALUE)));
} catch (KeyDecodeException e1) {
throw new FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage());
+ } catch (IOException e) {
+ Logger.error(this, "Could not capture data - disk full?: "+e, e);
+ throw new FetchException(FetchException.BUCKET_ERROR);
}
ctx.eventProducer.produceEvent(new DecodedBlockEvent(key));
if(!key.isMetadata()) {
// Just return the data
- try {
- return new FetchResult(dm, BucketTools.makeImmutableBucket(ctx.bucketFactory, data));
- } catch (IOException e) {
- Logger.error(this, "Could not capture data - disk full?: "+e, e);
- }
+ return new FetchResult(dm, data);
}
// Otherwise we need to parse the metadata
+
+ if(data.size() > ctx.maxMetadataSize)
+ throw new FetchException(FetchException.TOO_BIG_METADATA);
+ Metadata metadata;
+ try {
+ metadata = Metadata.construct(BucketTools.toByteArray(data));
+ } catch (IOException e) {
+ throw new FetchException(FetchException.BUCKET_ERROR);
+ }
- Metadata metadata = Metadata.construct(data);
-
ctx.eventProducer.produceEvent(new FetchedMetadataEvent());
FetchResult result = runMetadata(dm, recursionLevel, key, metaStrings, metadata, null, key.getURI(), dontEnterImplicitArchives);
@@ -273,6 +280,8 @@
output = codec.decompress(data, ctx.bucketFactory, maxLen);
} catch (IOException e) {
throw new FetchException(FetchException.BUCKET_ERROR, e);
+ } catch (CompressionOutputSizeException e) {
+ throw new FetchException(FetchException.TOO_BIG);
}
return new FetchResult(fr, output);
}
@@ -309,6 +318,8 @@
sfResult = codec.decompress(sfResult, ctx.bucketFactory, maxLen);
} catch (IOException e) {
throw new FetchException(FetchException.BUCKET_ERROR, e);
+ } catch (CompressionOutputSizeException e) {
+ throw new FetchException(FetchException.TOO_BIG);
}
}
return new FetchResult(dm, sfResult);
Modified: trunk/freenet/src/freenet/client/FetcherContext.java
===================================================================
--- trunk/freenet/src/freenet/client/FetcherContext.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/client/FetcherContext.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -31,9 +31,10 @@
/** Whether to allow non-full blocks, or blocks which are not direct CHKs, in splitfiles.
* Set by the splitfile metadata and the mask constructor, so we don't need to pass it in. */
final boolean splitfileUseLengths;
+ final int maxMetadataSize;
public FetcherContext(SimpleLowLevelClient client, long curMaxLength,
- long curMaxTempLength, int maxRecursionLevel, int maxArchiveRestarts,
+ long curMaxTempLength, int maxMetadataSize, int maxRecursionLevel, int maxArchiveRestarts,
boolean dontEnterImplicitArchives, int maxSplitfileThreads,
int maxSplitfileBlockRetries, int maxNonSplitfileRetries,
boolean allowSplitfiles, boolean followRedirects, boolean localRequestOnly,
@@ -42,6 +43,7 @@
this.client = client;
this.maxOutputLength = curMaxLength;
this.maxTempLength = curMaxTempLength;
+ this.maxMetadataSize = maxMetadataSize;
this.archiveManager = archiveManager;
this.bucketFactory = bucketFactory;
this.maxRecursionLevel = maxRecursionLevel;
@@ -62,6 +64,7 @@
if(maskID == SPLITFILE_DEFAULT_BLOCK_MASK) {
this.client = ctx.client;
this.maxOutputLength = ctx.maxOutputLength;
+ this.maxMetadataSize = ctx.maxMetadataSize;
this.maxTempLength = ctx.maxTempLength;
this.archiveManager = ctx.archiveManager;
this.bucketFactory = ctx.bucketFactory;
@@ -81,6 +84,7 @@
this.client = ctx.client;
this.maxOutputLength = ctx.maxOutputLength;
this.maxTempLength = ctx.maxTempLength;
+ this.maxMetadataSize = ctx.maxMetadataSize;
this.archiveManager = ctx.archiveManager;
this.bucketFactory = ctx.bucketFactory;
this.maxRecursionLevel = ctx.maxRecursionLevel;
@@ -99,6 +103,7 @@
this.client = ctx.client;
this.maxOutputLength = ctx.maxOutputLength;
this.maxTempLength = ctx.maxTempLength;
+ this.maxMetadataSize = ctx.maxMetadataSize;
this.archiveManager = ctx.archiveManager;
this.bucketFactory = ctx.bucketFactory;
this.maxRecursionLevel = ctx.maxRecursionLevel;
Modified: trunk/freenet/src/freenet/client/FileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/FileInserter.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/client/FileInserter.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -11,6 +11,7 @@
import freenet.support.Bucket;
import freenet.support.BucketTools;
import freenet.support.Logger;
+import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
/**
@@ -56,7 +57,7 @@
for(int i=0;i<algos;i++) {
Compressor comp = Compressor.getCompressionAlgorithmByDifficulty(i);
Bucket result;
- result = comp.compress(data, ctx.bf);
+ result = comp.compress(data, ctx.bf, Long.MAX_VALUE);
if(result.size() < NodeCHK.BLOCK_SIZE) {
bestCodec = comp;
data = result;
@@ -75,6 +76,9 @@
}
} catch (IOException e) {
throw new InserterException(InserterException.BUCKET_ERROR, e);
+ } catch (CompressionOutputSizeException e) {
+ // Impossible
+ throw new Error(e);
}
}
Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -19,6 +19,7 @@
private final ClientEventProducer globalEventProducer;
private long curMaxLength;
private long curMaxTempLength;
+ private int curMaxMetadataLength;
private final RandomSource random;
static final int MAX_RECURSION = 10;
static final int MAX_ARCHIVE_RESTARTS = 2;
@@ -51,6 +52,9 @@
random = r;
this.globalEventProducer = new SimpleEventProducer();
globalEventProducer.addEventListener(new EventLogger(Logger.MINOR));
+ curMaxLength = Long.MAX_VALUE;
+ curMaxTempLength = Long.MAX_VALUE;
+ curMaxMetadataLength = 1024 * 1024;
}
public void setMaxLength(long maxLength) {
@@ -66,7 +70,7 @@
*/
public FetchResult fetch(FreenetURI uri) throws FetchException {
if(uri == null) throw new NullPointerException();
- FetcherContext context = new FetcherContext(client, curMaxLength, curMaxTempLength,
+ FetcherContext context = new FetcherContext(client, curMaxLength, curMaxTempLength, curMaxMetadataLength,
MAX_RECURSION, MAX_ARCHIVE_RESTARTS, DONT_ENTER_IMPLICIT_ARCHIVES,
SPLITFILE_THREADS, SPLITFILE_BLOCK_RETRIES, NON_SPLITFILE_RETRIES,
FETCH_SPLITFILES, FOLLOW_REDIRECTS, LOCAL_REQUESTS_ONLY,
Modified: trunk/freenet/src/freenet/keys/CHKBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/CHKBlock.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/keys/CHKBlock.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -1,5 +1,6 @@
package freenet.keys;
+import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
@@ -9,7 +10,14 @@
import freenet.crypt.UnsupportedCipherException;
import freenet.crypt.ciphers.Rijndael;
import freenet.node.Node;
+import freenet.support.ArrayBucket;
+import freenet.support.ArrayBucketFactory;
+import freenet.support.Bucket;
+import freenet.support.BucketFactory;
+import freenet.support.BucketTools;
import freenet.support.Logger;
+import freenet.support.SimpleReadOnlyArrayBucket;
+import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
import freenet.support.compress.DecompressException;
@@ -25,7 +33,7 @@
final byte[] header;
final short hashIdentifier;
final NodeCHK chk;
- public static final int MAX_LENGTH_BEFORE_COMPRESSION = 1024 * 1024;
+ public static final int MAX_LENGTH_BEFORE_COMPRESSION = Integer.MAX_VALUE;
final static int HASH_SHA1 = 1;
public String toString() {
@@ -80,17 +88,31 @@
// Otherwise it checks out
}
- public byte[] decode(ClientKey key) throws KeyDecodeException {
+ /**
+ * Decode into RAM, if short.
+ * @throws CHKDecodeException
+ */
+ public byte[] memoryDecode(ClientCHK chk) throws CHKDecodeException {
+ try {
+ ArrayBucket a = (ArrayBucket) decode(chk, new ArrayBucketFactory(), 32*1024);
+ return BucketTools.toByteArray(a); // FIXME
+ } catch (IOException e) {
+ throw new Error(e);
+ }
+ }
+
+ public Bucket decode(ClientKey key, BucketFactory bf, int maxLength) throws KeyDecodeException, IOException {
if(!(key instanceof ClientCHK))
throw new KeyDecodeException("Not a CHK!: "+key);
- return decode((ClientCHK)key);
+ return decode((ClientCHK)key, bf, maxLength);
}
/**
* Decode the CHK and recover the original data
* @return the original data
+ * @throws IOException If there is a bucket error.
*/
- public byte[] decode(ClientCHK key) throws CHKDecodeException {
+ public Bucket decode(ClientCHK key, BucketFactory bf, int maxLength) throws CHKDecodeException, IOException {
// Overall hash already verified, so first job is to decrypt.
if(key.cryptoAlgorithm != ClientCHK.ALGO_AES_PCFB_256)
throw new UnsupportedOperationException();
@@ -136,33 +158,35 @@
int size = ((hbuf[32] & 0xff) << 8) + (hbuf[33] & 0xff);
if(size > 32768 || size < 0)
throw new CHKDecodeException("Invalid size: "+size);
+ byte[] output = new byte[size];
+ // No particular reason to check the padding, is there?
+ System.arraycopy(dbuf, 0, output, 0, size);
+ return decompress(key, output, bf, maxLength);
+ }
+
+ private Bucket decompress(ClientCHK key, byte[] output, BucketFactory bf, int maxLength) throws CHKDecodeException, IOException {
if(key.isCompressed()) {
Logger.minor(this, "Decompressing in decode: "+key.getURI()+" with codec "+key.compressionAlgorithm);
- if(size < 4) throw new CHKDecodeException("No bytes to decompress");
+ if(output.length < 5) throw new CHKDecodeException("No bytes to decompress");
// Decompress
// First get the length
- int len = ((((dbuf[0] & 0xff) << 8) + (dbuf[1] & 0xff)) << 8) +
- (dbuf[2] & 0xff);
+ int len = ((((((output[0] & 0xff) << 8) + (output[1] & 0xff)) << 8) + (output[2] & 0xff)) << 8) +
+ (output[3] & 0xff);
if(len > MAX_LENGTH_BEFORE_COMPRESSION)
throw new CHKDecodeException("Invalid precompressed size: "+len);
- byte[] output = new byte[len];
Compressor decompressor = Compressor.getCompressionAlgorithmByMetadataID(key.compressionAlgorithm);
+ Bucket inputBucket = new SimpleReadOnlyArrayBucket(output, 4, output.length-4);
try {
- int x = decompressor.decompress(dbuf, 3, dbuf.length-3, output);
- if(x != len)
- throw new CHKDecodeException("Decompression failed: got "+x+" bytes, needed "+len+" bytes");
- } catch (DecompressException e) {
- throw new CHKDecodeException(e.getMessage());
+ return decompressor.decompress(inputBucket, bf, maxLength);
+ } catch (CompressionOutputSizeException e) {
+ throw new CHKDecodeException("Too big");
}
- return output;
+ } else {
+ return BucketTools.makeImmutableBucket(bf, output);
}
- byte[] output = new byte[size];
- // No particular reason to check the padding, is there?
- System.arraycopy(dbuf, 0, output, 0, size);
- return output;
- }
+ }
- public Key getKey() {
+ public Key getKey() {
return chk;
}
}
Modified: trunk/freenet/src/freenet/keys/ClientCHKBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientCHKBlock.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/keys/ClientCHKBlock.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -14,7 +14,9 @@
import freenet.support.ArrayBucketFactory;
import freenet.support.Bucket;
import freenet.support.BucketTools;
+import freenet.support.compress.CompressionOutputSizeException;
import freenet.support.compress.Compressor;
+import freenet.support.compress.DecompressException;
/**
@@ -24,7 +26,7 @@
*/
public class ClientCHKBlock extends CHKBlock {
- public static final long MAX_COMPRESSED_DATA_LENGTH = NodeCHK.BLOCK_SIZE - 3;
+ public static final long MAX_COMPRESSED_DATA_LENGTH = NodeCHK.BLOCK_SIZE - 4;
final ClientCHK key;
public String toString() {
@@ -52,15 +54,17 @@
}
/**
- * Encode a block of data to a CHKBlock.
- * @param sourceData The data to encode.
+ * Encode a Bucket of data to a CHKBlock.
+ * @param sourceData The bucket of data to encode. Can be arbitrarily large.
* @param asMetadata Is this a metadata key?
* @param dontCompress If set, don't even try to compress.
* @param alreadyCompressedCodec If !dontCompress, and this is >=0, then the
* data is already compressed, and this is the algorithm.
+ * @throws CHKEncodeException
+ * @throws IOException If there is an error reading from the Bucket.
*/
-
- static public ClientCHKBlock encode(byte[] sourceData, boolean asMetadata, boolean dontCompress, short alreadyCompressedCodec, int sourceLength) throws CHKEncodeException {
+ static public ClientCHKBlock encode(Bucket sourceData, boolean asMetadata, boolean dontCompress, short alreadyCompressedCodec, long sourceLength) throws CHKEncodeException, IOException {
+ byte[] finalData = null;
byte[] data;
byte[] header;
ClientCHK key;
@@ -68,32 +72,36 @@
// Try to compress it - even if it fits into the block,
// because compressing it improves its entropy.
boolean compressed = false;
- if(sourceData.length > MAX_LENGTH_BEFORE_COMPRESSION)
+ if(sourceData.size() > MAX_LENGTH_BEFORE_COMPRESSION)
throw new CHKEncodeException("Too big");
if(!dontCompress) {
byte[] cbuf = null;
if(alreadyCompressedCodec >= 0) {
+ if(sourceData.size() > MAX_COMPRESSED_DATA_LENGTH)
+ throw new CHKEncodeException("Too big (precompressed)");
compressionAlgorithm = alreadyCompressedCodec;
- cbuf = sourceData;
+ cbuf = BucketTools.toByteArray(sourceData);
+ if(sourceLength > MAX_LENGTH_BEFORE_COMPRESSION)
+ throw new CHKEncodeException("Too big");
} else {
- if (sourceData.length > NodeCHK.BLOCK_SIZE) {
+ if (sourceData.size() > NodeCHK.BLOCK_SIZE) {
// Determine the best algorithm
- Bucket bucket = new ArrayBucket(sourceData);
- bucket.setReadOnly();
for (int i = 0; i < Compressor.countCompressAlgorithms(); i++) {
Compressor comp = Compressor
.getCompressionAlgorithmByDifficulty(i);
ArrayBucket compressedData;
try {
compressedData = (ArrayBucket) comp.compress(
- bucket, new ArrayBucketFactory());
+ sourceData, new ArrayBucketFactory(), NodeCHK.BLOCK_SIZE);
} catch (IOException e) {
throw new Error(e);
+ } catch (CompressionOutputSizeException e) {
+ continue;
}
if (compressedData.size() <= MAX_COMPRESSED_DATA_LENGTH) {
compressionAlgorithm = comp
.codecNumberForMetadata();
- sourceLength = sourceData.length;
+ sourceLength = sourceData.size();
try {
cbuf = BucketTools.toByteArray(compressedData);
// FIXME provide a method in ArrayBucket
@@ -109,17 +117,24 @@
if(cbuf != null) {
// Use it
int compressedLength = cbuf.length;
- sourceData = new byte[compressedLength+3];
- System.arraycopy(cbuf, 0, sourceData, 3, compressedLength);
- sourceData[0] = (byte) ((sourceLength >> 16) & 0xff);
- sourceData[1] = (byte) ((sourceLength >> 8) & 0xff);
- sourceData[2] = (byte) ((sourceLength) & 0xff);
+ finalData = new byte[compressedLength+4];
+ System.arraycopy(cbuf, 0, finalData, 4, compressedLength);
+ finalData[0] = (byte) ((sourceLength >> 24) & 0xff);
+ finalData[1] = (byte) ((sourceLength >> 16) & 0xff);
+ finalData[2] = (byte) ((sourceLength >> 8) & 0xff);
+ finalData[3] = (byte) ((sourceLength) & 0xff);
compressed = true;
}
}
- if(sourceData.length > NodeCHK.BLOCK_SIZE) {
- throw new CHKEncodeException("Too big");
+ if(finalData == null) {
+ if(sourceData.size() > NodeCHK.BLOCK_SIZE) {
+ throw new CHKEncodeException("Too big");
+ }
+ finalData = BucketTools.toByteArray(sourceData);
}
+
+ // Now do the actual encode
+
MessageDigest md160;
try {
md160 = MessageDigest.getInstance("SHA-1");
@@ -136,10 +151,10 @@
throw new Error(e1);
}
// First pad it
- if(sourceData.length != 32768) {
+ if(finalData.length != 32768) {
// Hash the data
- if(sourceData.length != 0)
- md256.update(sourceData);
+ if(finalData.length != 0)
+ md256.update(finalData);
byte[] digest = md256.digest();
// Turn digest into a seed array for the MT
int[] seed = new int[8]; // 32/4=8
@@ -152,12 +167,12 @@
}
MersenneTwister mt = new MersenneTwister(seed);
data = new byte[32768];
- System.arraycopy(sourceData, 0, data, 0, sourceData.length);
- byte[] randomBytes = new byte[32768-sourceData.length];
+ System.arraycopy(finalData, 0, data, 0, finalData.length);
+ byte[] randomBytes = new byte[32768-finalData.length];
mt.nextBytes(randomBytes);
- System.arraycopy(randomBytes, 0, data, sourceData.length, 32768-sourceData.length);
+ System.arraycopy(randomBytes, 0, data, finalData.length, 32768-finalData.length);
} else {
- data = sourceData;
+ data = finalData;
}
// Now make the header
byte[] encKey = md256.digest(data);
@@ -168,8 +183,8 @@
header[0] = (byte)(CHKBlock.HASH_SHA1 >> 8);
header[1] = (byte)(CHKBlock.HASH_SHA1 & 0xff);
System.arraycopy(plainIV, 0, header, 2, plainIV.length);
- header[plainIV.length+2] = (byte)(sourceData.length >> 8);
- header[plainIV.length+3] = (byte)(sourceData.length & 0xff);
+ header[plainIV.length+2] = (byte)(finalData.length >> 8);
+ header[plainIV.length+3] = (byte)(finalData.length & 0xff);
// GRRR, java 1.4 does not have any symmetric crypto
// despite exposing asymmetric and hashes!
@@ -200,6 +215,23 @@
throw new Error(e3);
}
}
+
+ /**
+ * Encode a block of data to a CHKBlock.
+ * @param sourceData The data to encode.
+ * @param asMetadata Is this a metadata key?
+ * @param dontCompress If set, don't even try to compress.
+ * @param alreadyCompressedCodec If !dontCompress, and this is >=0, then the
+ * data is already compressed, and this is the algorithm.
+ */
+ static public ClientCHKBlock encode(byte[] sourceData, boolean asMetadata, boolean dontCompress, short alreadyCompressedCodec, int sourceLength) throws CHKEncodeException {
+ try {
+ return encode(new ArrayBucket(sourceData), asMetadata, dontCompress, alreadyCompressedCodec, sourceLength);
+ } catch (IOException e) {
+ // Can't happen
+ throw new Error(e);
+ }
+ }
/**
* @return The ClientCHK for this key.
@@ -207,4 +239,5 @@
public ClientCHK getClientKey() {
return key;
}
+
}
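
A minimal usage sketch of the new Bucket-based encode (illustrative only; the wrapper class below is not part of the tree). Passing -1 for alreadyCompressedCodec means the data is not precompressed and the encoder picks a codec itself:

    import freenet.keys.ClientCHKBlock;
    import freenet.support.ArrayBucket;
    import freenet.support.Bucket;

    // Illustrative only: encode an arbitrary-size Bucket as a single CHK block.
    // asMetadata=false, dontCompress=false, alreadyCompressedCodec=-1 (not precompressed).
    class EncodeSketch {
        static ClientCHKBlock encodeBytes(byte[] data) throws Exception {
            Bucket source = new ArrayBucket(data);
            return ClientCHKBlock.encode(source, false, false, (short) -1, data.length);
        }
    }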
Modified: trunk/freenet/src/freenet/keys/ClientKey.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientKey.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/keys/ClientKey.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -11,7 +11,7 @@
public static ClientKey getBaseKey(FreenetURI origURI) throws MalformedURLException {
if(origURI.getKeyType().equals("CHK"))
return new ClientCHK(origURI);
- throw new UnsupportedOperationException();
+ throw new UnsupportedOperationException("Unknown keytype from "+origURI);
}
/**
Modified: trunk/freenet/src/freenet/keys/KeyBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/KeyBlock.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/keys/KeyBlock.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -1,11 +1,20 @@
package freenet.keys;
+import java.io.IOException;
+
+import freenet.support.Bucket;
+import freenet.support.BucketFactory;
+
/**
* Interface for fetched blocks. Can be decoded with a key.
*/
public interface KeyBlock {
- /** Decode with the key */
- byte[] decode(ClientKey key) throws KeyDecodeException;
+ /** Decode with the key
+ * @param key The ClientKey to use to decode the block.
+ * @param factory The BucketFactory to use to create the Bucket to return the data in.
+ * @param maxLength The maximum size of the returned data in bytes.
+ */
+ Bucket decode(ClientKey key, BucketFactory factory, int maxLength) throws KeyDecodeException, IOException;
}
Modified: trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java
===================================================================
--- trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -173,7 +173,7 @@
byte[] encData = block.getData();
byte[] encHeaders = block.getHeader();
ClientCHKBlock newBlock = new ClientCHKBlock(encData, encHeaders, chk, true);
- Logger.error(RealNodeRequestInsertTest.class, "Decoded: "+new String(newBlock.decode(chk)));
+ Logger.error(RealNodeRequestInsertTest.class, "Decoded: "+new String(newBlock.memoryDecode(chk)));
Logger.error(RealNodeRequestInsertTest.class,"CHK: "+chk.getURI());
Logger.error(RealNodeRequestInsertTest.class,"Headers: "+HexUtil.bytesToHex(block.getHeader()));
randomNode.putCHK(block);
@@ -190,7 +190,7 @@
Logger.error(RealNodeRequestInsertTest.class, "Fetch FAILED from "+node2);
requestsAvg.report(0.0);
} else {
- byte[] results = block.decode(chk);
+ byte[] results = block.memoryDecode(chk);
requestsAvg.report(1.0);
if(Arrays.equals(results, data)) {
Logger.error(RealNodeRequestInsertTest.class, "Fetch succeeded: "+new String(results));
Modified: trunk/freenet/src/freenet/node/Version.java
===================================================================
--- trunk/freenet/src/freenet/node/Version.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/node/Version.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -20,10 +20,10 @@
public static final String protocolVersion = "1.0";
/** The build number of the current revision */
- public static final int buildNumber = 152;
+ public static final int buildNumber = 153;
/** Oldest build of Fred we will talk to */
- public static final int lastGoodBuild = 152;
+ public static final int lastGoodBuild = 153;
/** The highest reported build of fred */
public static int highestSeenBuild = buildNumber;
Modified: trunk/freenet/src/freenet/support/PaddedEphemerallyEncryptedBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/PaddedEphemerallyEncryptedBucket.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/support/PaddedEphemerallyEncryptedBucket.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -191,7 +191,7 @@
while(true) {
if(max < 0)
throw new Error("Impossible size: "+size+" -
min="+min+", max="+max);
- if(size > min && size < max) return max;
+ if(size >= min && size <= max) return max;
min = max;
max = max << 1;
}
Added: trunk/freenet/src/freenet/support/SimpleReadOnlyArrayBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/SimpleReadOnlyArrayBucket.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/support/SimpleReadOnlyArrayBucket.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -0,0 +1,50 @@
+package freenet.support;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * Simple read-only array bucket. Just an adapter class to save some RAM.
+ * Not the same as ArrayBucket, which can't take a (byte[], offset, len) in
+ * constructor (unless we waste some RAM there by using an object to store these
+ * instead of storing the byte[]'s directly).
+ */
+public class SimpleReadOnlyArrayBucket implements Bucket {
+
+ final byte[] buf;
+ final int offset;
+ final int length;
+
+ public SimpleReadOnlyArrayBucket(byte[] buf, int offset, int length) {
+ this.buf = buf;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ public OutputStream getOutputStream() throws IOException {
+ throw new IOException("Read only");
+ }
+
+ public InputStream getInputStream() throws IOException {
+ return new ByteArrayInputStream(buf, offset, length);
+ }
+
+ public String getName() {
+ return "SimpleReadOnlyArrayBucket: len="+length+"
"+super.toString();
+ }
+
+ public long size() {
+ return length;
+ }
+
+ public boolean isReadOnly() {
+ return true;
+ }
+
+ public void setReadOnly() {
+ // Already read-only
+ }
+
+}
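
A usage sketch for the new adapter (illustrative only; the wrapper class below is not part of the tree): wrap a slice of an existing byte[] without copying, as CHKBlock.decompress() now does before handing the compressed payload to the decompressor.

    import java.io.IOException;
    import java.io.InputStream;
    import freenet.support.Bucket;
    import freenet.support.SimpleReadOnlyArrayBucket;

    // Illustrative only: expose bytes 4..end of a block as a read-only Bucket.
    class SliceSketch {
        static Bucket wrapCompressedPayload(byte[] block) throws IOException {
            Bucket b = new SimpleReadOnlyArrayBucket(block, 4, block.length - 4);
            InputStream in = b.getInputStream(); // reads only the selected slice
            in.close();
            return b; // b.getOutputStream() would throw IOException("Read only")
        }
    }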
Added: trunk/freenet/src/freenet/support/compress/CompressionOutputSizeException.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/CompressionOutputSizeException.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/support/compress/CompressionOutputSizeException.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -0,0 +1,8 @@
+package freenet.support.compress;
+
+/**
+ * The output was too big for the buffer.
+ */
+public class CompressionOutputSizeException extends Exception {
+
+}
Modified: trunk/freenet/src/freenet/support/compress/Compressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/Compressor.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/support/compress/Compressor.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -14,9 +14,9 @@
public static Compressor gzip = new GzipCompressor();
- public abstract Bucket compress(Bucket data, BucketFactory bf) throws IOException;
+ public abstract Bucket compress(Bucket data, BucketFactory bf, long maxLength) throws IOException, CompressionOutputSizeException;
- public abstract Bucket decompress(Bucket data, BucketFactory bucketFactory, long maxLength) throws IOException;
+ public abstract Bucket decompress(Bucket data, BucketFactory bucketFactory, long maxLength) throws IOException, CompressionOutputSizeException;
public short codecNumberForMetadata() {
return Metadata.COMPRESS_GZIP;
@@ -48,8 +48,9 @@
* @param j Number of bytes to read.
* @param output Output buffer.
* @throws DecompressException
+ * @throws CompressionOutputSizeException
* @returns The number of bytes actually written.
*/
- public abstract int decompress(byte[] dbuf, int i, int j, byte[] output) throws DecompressException;
+ public abstract int decompress(byte[] dbuf, int i, int j, byte[] output) throws CompressionOutputSizeException;
}
Modified: trunk/freenet/src/freenet/support/compress/GzipCompressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/GzipCompressor.java 2005-11-10 14:59:04 UTC (rev 7516)
+++ trunk/freenet/src/freenet/support/compress/GzipCompressor.java 2005-11-10 16:10:49 UTC (rev 7517)
@@ -15,24 +15,30 @@
public class GzipCompressor extends Compressor {
- public Bucket compress(Bucket data, BucketFactory bf) throws IOException {
+ public Bucket compress(Bucket data, BucketFactory bf, long maxLength) throws IOException, CompressionOutputSizeException {
Bucket output = bf.makeBucket(-1);
InputStream is = data.getInputStream();
OutputStream os = output.getOutputStream();
GZIPOutputStream gos = new GZIPOutputStream(os);
+ long written = 0;
byte[] buffer = new byte[4096];
while(true) {
- int x = is.read(buffer);
+ int l = (int) Math.min(buffer.length, maxLength - written);
+ int x = is.read(buffer, 0, buffer.length);
+ if(l < x) {
+ throw new CompressionOutputSizeException();
+ }
if(x <= -1) break;
if(x == 0) throw new IOException("Returned zero from read()");
gos.write(buffer, 0, x);
+ written += x;
}
is.close();
gos.close();
return output;
}
- public Bucket decompress(Bucket data, BucketFactory bf, long maxLength) throws IOException {
+ public Bucket decompress(Bucket data, BucketFactory bf, long maxLength) throws IOException, CompressionOutputSizeException {
Bucket output = bf.makeBucket(-1);
InputStream is = data.getInputStream();
OutputStream os = output.getOutputStream();
@@ -42,15 +48,16 @@
return output;
}
- private long decompress(InputStream is, OutputStream os, long maxLength) throws IOException {
+ private long decompress(InputStream is, OutputStream os, long maxLength) throws IOException, CompressionOutputSizeException {
GZIPInputStream gis = new GZIPInputStream(is);
long written = 0;
byte[] buffer = new byte[4096];
while(true) {
int l = (int) Math.min(buffer.length, maxLength - written);
- if(l <= 0)
- return written;
- int x = gis.read(buffer, 0, l);
+ int x = gis.read(buffer, 0, 4096);
+ if(l < x) {
+ throw new CompressionOutputSizeException();
+ }
if(x <= -1) return written;
if(x == 0) throw new IOException("Returned zero from read()");
os.write(buffer, 0, x);
@@ -58,7 +65,7 @@
}
}
- public int decompress(byte[] dbuf, int i, int j, byte[] output) throws DecompressException {
+ public int decompress(byte[] dbuf, int i, int j, byte[] output) throws CompressionOutputSizeException {
// Didn't work with Inflater.
// FIXME fix sometimes to use Inflater - format issue?
ByteArrayInputStream bais = new ByteArrayInputStream(dbuf, i, j);
@@ -67,7 +74,7 @@
try {
bytes = (int)decompress(bais, baos, output.length);
} catch (IOException e) {
- throw new DecompressException("Got IOException: "+e.getMessage());
+ throw new Error("Got IOException: "+e.getMessage());
}
byte[] buf = baos.toByteArray();
System.arraycopy(buf, 0, output, 0, bytes);
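
Finally, a usage sketch of the size-limited Compressor API above (illustrative only; the main() wrapper is not part of the tree): compress and decompress through Buckets with an upper bound, treating CompressionOutputSizeException as "output would exceed maxLength", which the callers above map to TOO_BIG.

    import freenet.support.ArrayBucket;
    import freenet.support.ArrayBucketFactory;
    import freenet.support.Bucket;
    import freenet.support.compress.CompressionOutputSizeException;
    import freenet.support.compress.Compressor;

    // Illustrative only: round-trip some data through the gzip codec with a 32K cap.
    public class CompressorUsageSketch {
        public static void main(String[] args) throws Exception {
            Bucket input = new ArrayBucket("some data to compress".getBytes());
            ArrayBucketFactory bf = new ArrayBucketFactory();
            try {
                Bucket gz = Compressor.gzip.compress(input, bf, 32768);
                Bucket back = Compressor.gzip.decompress(gz, bf, 32768);
                System.out.println("round-tripped " + back.size() + " bytes");
            } catch (CompressionOutputSizeException e) {
                // The (de)compressed output would have exceeded the 32768-byte limit.
            }
        }
    }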