Author: toad
Date: 2005-11-08 16:02:24 +0000 (Tue, 08 Nov 2005)
New Revision: 7500
Added:
trunk/freenet/src/freenet/client/SplitInserter.java
trunk/freenet/src/freenet/support/ArrayBucket.java
trunk/freenet/src/freenet/support/ArrayBucketFactory.java
trunk/freenet/src/freenet/support/compress/
trunk/freenet/src/freenet/support/compress/Compressor.java
trunk/freenet/src/freenet/support/compress/DecompressException.java
trunk/freenet/src/freenet/support/compress/GzipCompressor.java
Removed:
trunk/freenet/src/freenet/client/Compressor.java
trunk/freenet/src/freenet/client/GzipCompressor.java
Modified:
trunk/freenet/src/freenet/client/FileInserter.java
trunk/freenet/src/freenet/client/HighLevelSimpleClient.java
trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
trunk/freenet/src/freenet/client/InserterException.java
trunk/freenet/src/freenet/client/Metadata.java
trunk/freenet/src/freenet/keys/CHKBlock.java
trunk/freenet/src/freenet/keys/ClientCHK.java
trunk/freenet/src/freenet/keys/ClientCHKBlock.java
trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java
trunk/freenet/src/freenet/node/TextModeClientInterface.java
trunk/freenet/src/freenet/node/Version.java
Log:
Build 144:
Support for multiple compressors, even in single blocks.
The compression ID is now part of the URI for a simple CHK.
Also lots of work on inserting.
In theory inserting should work, in practice it probably won't.
Deleted: trunk/freenet/src/freenet/client/Compressor.java
===================================================================
--- trunk/freenet/src/freenet/client/Compressor.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/client/Compressor.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -1,22 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-
-import freenet.support.Bucket;
-import freenet.support.BucketFactory;
-
-/**
- * A data compressor. Contains methods to get all data compressors.
- * This is for single-file compression (gzip, bzip2) as opposed to archives.
- */
-public abstract class Compressor {
-
- public static Compressor gzip = new GzipCompressor();
-
- public abstract Bucket compress(Bucket data, BucketFactory bf) throws
IOException;
-
- public int codecNumberForMetadata() {
- return Metadata.COMPRESS_GZIP;
- }
-
-}
Modified: trunk/freenet/src/freenet/client/FileInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/FileInserter.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/client/FileInserter.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -9,6 +9,8 @@
import freenet.node.LowLevelPutException;
import freenet.support.Bucket;
import freenet.support.BucketTools;
+import freenet.support.Logger;
+import freenet.support.compress.Compressor;
/**
* Class that does high-level inserts.
@@ -39,65 +41,64 @@
Bucket data = block.data;
ClientCHKBlock chk;
-
- int compressionCodec = -1; // no compression
- int bestCompressionCodec = -1; // no compression
- Bucket bestCompressedData;
+ Compressor bestCodec = null;
+ Bucket bestCompressedData = null;
if(data.size() > NodeCHK.BLOCK_SIZE && (!ctx.dontCompress)) {
// Try to compress the data.
// Try each algorithm, starting with the fastest and
weakest.
// Stop when run out of algorithms, or the compressed
data fits in a single block.
- int algos = Metadata.countCompressAlgorithms();
- for(int i=0;i<algos;i++) {
- Compressor comp =
Metadata.getCompressionAlgorithmByDifficulty(i);
- Bucket result = comp.compress(data, ctx.bf);
- if(result.size() < NodeCHK.BLOCK_SIZE) {
- compressionCodec = -1;
- data = result;
- if(bestCompressedData != null)
+ int algos = Compressor.countCompressAlgorithms();
+ try {
+ for(int i=0;i<algos;i++) {
+ Compressor comp =
Compressor.getCompressionAlgorithmByDifficulty(i);
+ Bucket result;
+ result = comp.compress(data, ctx.bf);
+ if(result.size() < NodeCHK.BLOCK_SIZE) {
+ bestCodec = comp;
+ data = result;
+ if(bestCompressedData != null)
+
ctx.bf.freeBucket(bestCompressedData);
+ break;
+ }
+ if(bestCompressedData != null &&
result.size() < bestCompressedData.size()) {
ctx.bf.freeBucket(bestCompressedData);
- break;
+ bestCompressedData = result;
+ bestCodec = comp;
+ } else if(bestCompressedData == null &&
result.size() < data.size()) {
+ bestCompressedData = result;
+ bestCodec = comp;
+ }
}
- if(bestCompressedData != null && result.size()
< bestCompressedData.size()) {
- ctx.bf.freeBucket(bestCompressedData);
- bestCompressedData = result;
- bestCompressionCodec =
comp.codecNumberForMetadata();
- } else if(bestCompressedData == null &&
result.size() < data.size()) {
- bestCompressedData = result;
- bestCompressionCodec =
comp.codecNumberForMetadata();
- }
+ } catch (IOException e) {
+ throw new
InserterException(InserterException.BUCKET_ERROR, e);
}
- if(compressionCodec == -1) {
- compressionCodec = bestCompressionCodec;
- if(compressionCodec != -1) {
- data = bestCompressedData;
- }
- }
}
if(data.size() <= NodeCHK.BLOCK_SIZE) {
- if(compressionCodec == -1) {
- chk =
ClientCHKBlock.encode(BucketTools.toByteArray(data), metadata, true);
+ byte[] array;
+ try {
+ array = BucketTools.toByteArray(data);
+ } catch (IOException e) {
+ throw new
InserterException(InserterException.BUCKET_ERROR, e);
}
- }
-
- if(data.size() <= NodeCHK.BLOCK_SIZE ||
- data.size() <=
ClientCHKBlock.MAX_LENGTH_BEFORE_COMPRESSION) {
try {
- chk =
ClientCHKBlock.encode(BucketTools.toByteArray(data), metadata);
- return simplePutCHK(chk, block.clientMetadata);
+ if(bestCodec == null) {
+ chk = ClientCHKBlock.encode(array,
metadata, true, (short)-1);
+ } else {
+ chk = ClientCHKBlock.encode(array,
metadata, false, bestCodec.codecNumberForMetadata());
+ }
} catch (CHKEncodeException e) {
- // Too big! Encode to a splitfile, below.
- } catch (IOException e) {
- throw new
InserterException(InserterException.BUCKET_ERROR);
+ Logger.error(this, "Unexpected error: "+e, e);
+ throw new
InserterException(InserterException.INTERNAL_ERROR);
}
+ return simplePutCHK(chk, block.clientMetadata);
}
// Too big, encode to a splitfile
SplitInserter splitInsert = new SplitInserter(data,
block.clientMetadata);
- splitInsert.run();
+ return splitInsert.run();
}
/**
@@ -106,8 +107,9 @@
* @param clientMetadata The client metadata. If this is non-trivial,
we will have to
* create a redirect document just to put the metadata in.
* @return The URI of the resulting CHK.
+ * @throws InserterException If there was an error inserting the block.
*/
- private FreenetURI simplePutCHK(ClientCHKBlock chk, ClientMetadata
clientMetadata) {
+ private FreenetURI simplePutCHK(ClientCHKBlock chk, ClientMetadata
clientMetadata) throws InserterException {
try {
ctx.client.putCHK(chk);
} catch (LowLevelPutException e) {
@@ -124,6 +126,20 @@
}
}
+ private void translateException(LowLevelPutException e) throws
InserterException {
+ switch(e.code) {
+ case LowLevelPutException.INTERNAL_ERROR:
+ throw new
InserterException(InserterException.INTERNAL_ERROR);
+ case LowLevelPutException.REJECTED_OVERLOAD:
+ throw new
InserterException(InserterException.REJECTED_OVERLOAD);
+ case LowLevelPutException.ROUTE_NOT_FOUND:
+ throw new
InserterException(InserterException.ROUTE_NOT_FOUND);
+ default:
+ Logger.error(this, "Unknown LowLevelPutException code:
"+e.code+" on "+this);
+ throw new
InserterException(InserterException.INTERNAL_ERROR);
+ }
+ }
+
/** Put a metadata CHK
* @throws InserterException If the insert fails.
*/
@@ -138,5 +154,4 @@
InsertBlock block = new InsertBlock(bucket, null,
FreenetURI.EMPTY_CHK_URI);
return run(block, true);
}
-
}
Deleted: trunk/freenet/src/freenet/client/GzipCompressor.java
===================================================================
--- trunk/freenet/src/freenet/client/GzipCompressor.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/client/GzipCompressor.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -1,29 +0,0 @@
-package freenet.client;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.zip.GZIPOutputStream;
-
-import freenet.support.Bucket;
-import freenet.support.BucketFactory;
-
-public class GzipCompressor extends Compressor {
-
- public Bucket compress(Bucket data, BucketFactory bf) throws
IOException {
- Bucket output = bf.makeBucket(-1);
- InputStream is = data.getInputStream();
- OutputStream os = output.getOutputStream();
- GZIPOutputStream gos = new GZIPOutputStream(os);
- byte[] buffer = new byte[4096];
- while(true) {
- int x = is.read(buffer);
- if(x <= -1) break;
- if(x == 0) throw new IOException("Returned zero from
read()");
- gos.write(buffer, 0, x);
- }
- gos.close();
- return output;
- }
-
-}
Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClient.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClient.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClient.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -22,6 +22,7 @@
/**
* Blocking insert of a URI
+ * @throws InserterException If there is an error inserting the data
*/
- public FreenetURI insert(InsertBlock insert);
+ public FreenetURI insert(InsertBlock insert) throws InserterException;
}
Modified: trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java
===================================================================
--- trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/client/HighLevelSimpleClientImpl.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -64,10 +64,10 @@
return f.run();
}
- public FreenetURI insert(InsertBlock insert) {
- InserterContext context = new InserterContext(client);
+ public FreenetURI insert(InsertBlock insert) throws InserterException {
+ InserterContext context = new InserterContext(client,
bucketFactory);
FileInserter i = new FileInserter(context);
- return i.run(insert);
+ return i.run(insert, false);
}
}
Modified: trunk/freenet/src/freenet/client/InserterException.java
===================================================================
--- trunk/freenet/src/freenet/client/InserterException.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/client/InserterException.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -25,5 +25,10 @@
public static final int INVALID_URI = 1;
/** Failed to read from or write to a bucket; a kind of internal error
*/
public static final int BUCKET_ERROR = 2;
-
+ /** Internal error of some sort */
+ public static final int INTERNAL_ERROR = 3;
+ /** Downstream node was overloaded */
+ public static final int REJECTED_OVERLOAD = 4;
+ /** Couldn't find enough nodes to send the data to */
+ public static final int ROUTE_NOT_FOUND = 5;
}
Modified: trunk/freenet/src/freenet/client/Metadata.java
===================================================================
--- trunk/freenet/src/freenet/client/Metadata.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/client/Metadata.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -14,6 +14,7 @@
import freenet.keys.FreenetURI;
import freenet.support.Bucket;
import freenet.support.Logger;
+import freenet.support.compress.Compressor;
/** Metadata parser/writer class. */
public class Metadata {
@@ -396,7 +397,7 @@
/** Compressed splitfile codec */
short compressionCodec;
- static final short COMPRESS_GZIP = 0; // for future use
+ static public final short COMPRESS_GZIP = 0; // for future use
static final short COMPRESS_BZIP2 = 1; // FIXME for future use
/** The length of the splitfile */
@@ -642,17 +643,4 @@
public FreenetURI[] getSplitfileCheckKeys() {
return splitfileCheckKeys;
}
-
- /** Count the number of distinct compression algorithms currently
supported. */
- public static int countCompressAlgorithms() {
- // FIXME we presently only support gzip. This should change in
future.
- return 1;
- }
-
- public static Compressor getCompressionAlgorithmByDifficulty(int i) {
- if(i == 0)
- return Compressor.gzip;
- // FIXME when we get more compression algos, put them here.
- return null;
- }
}
Added: trunk/freenet/src/freenet/client/SplitInserter.java
===================================================================
--- trunk/freenet/src/freenet/client/SplitInserter.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/client/SplitInserter.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -0,0 +1,25 @@
+package freenet.client;
+
+import freenet.keys.FreenetURI;
+import freenet.support.Bucket;
+
+/**
+ * Insert a splitfile.
+ */
+public class SplitInserter {
+
+ public SplitInserter(Bucket data, ClientMetadata clientMetadata) {
+ // TODO Auto-generated constructor stub
+ }
+
+ /**
+ * Inserts the splitfile.
+ * @return The URI of the resulting file.
+ */
+ public FreenetURI run() {
+ throw new UnsupportedOperationException();
+ // TODO Auto-generated method stub
+
+ }
+
+}
Modified: trunk/freenet/src/freenet/keys/CHKBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/CHKBlock.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/keys/CHKBlock.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -11,6 +11,8 @@
import freenet.crypt.UnsupportedCipherException;
import freenet.crypt.ciphers.Rijndael;
import freenet.node.Node;
+import freenet.support.compress.Compressor;
+import freenet.support.compress.DecompressException;
/**
* @author amphibian
@@ -135,7 +137,7 @@
int size = ((hbuf[32] & 0xff) << 8) + (hbuf[33] & 0xff);
if(size > 32768 || size < 0)
throw new CHKDecodeException("Invalid size: "+size);
- if(key.compressed) {
+ if(key.isCompressed()) {
if(size < 4) throw new CHKDecodeException("No bytes to
decompress");
// Decompress
// First get the length
@@ -144,15 +146,14 @@
if(len > MAX_LENGTH_BEFORE_COMPRESSION)
throw new CHKDecodeException("Invalid precompressed size:
"+len);
byte[] output = new byte[len];
- Inflater decompressor = new Inflater();
- decompressor.setInput(dbuf, 3, size-3);
+ Compressor decompressor =
Compressor.getCompressionAlgorithmByMetadataID(key.compressionAlgorithm);
try {
- int resultLength = decompressor.inflate(output);
- if(resultLength != len)
- throw new CHKDecodeException("Wanted "+len+" but got
"+resultLength+" bytes from decompression");
- } catch (DataFormatException e2) {
- throw new CHKDecodeException(e2);
- }
+ int x = decompressor.decompress(dbuf, 3, dbuf.length-3, output);
+ if(x != len)
+ throw new
CHKDecodeException("Decompression failed: got "+x+" bytes, needed "+len+"
bytes");
+ } catch (DecompressException e) {
+ throw new CHKDecodeException(e.getMessage());
+ }
return output;
}
byte[] output = new byte[size];
Modified: trunk/freenet/src/freenet/keys/ClientCHK.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientCHK.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/keys/ClientCHK.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -13,8 +13,28 @@
*/
public class ClientCHK extends ClientKey {
+ /** Lazily constructed: the NodeCHK */
NodeCHK nodeKey;
+ /** Routing key */
+ final byte[] routingKey;
+ /** Decryption key */
+ final byte[] cryptoKey;
+ /** Is the data a control document? */
+ final boolean controlDocument;
+ /** Encryption algorithm */
+ final short cryptoAlgorithm;
+ /** Compression algorithm, negative means uncompressed */
+ final short compressionAlgorithm;
+ /* We use EXTRA_LENGTH above for consistency, rather than dis.read etc.
Some code depends on this
+ * being accurate. Change those uses if you like. */
+ /** The length of the "extra" bytes in the key */
+ static final short EXTRA_LENGTH = 5;
+ /** The length of the decryption key */
+ static final short CRYPTO_KEY_LENGTH = 32;
+ /** Code for 256-bit AES with PCFB */
+ static final short ALGO_AES_PCFB_256 = 1;
+
/**
* @param routingKey The routing key. This is the overall hash of the
* header and content of the key.
@@ -27,13 +47,13 @@
* @param algo The encryption algorithm's identifier. See ALGO_* for
* values.
*/
- public ClientCHK(byte[] routingKey, byte[] encKey, boolean isCompressed,
- boolean isControlDocument, short algo) {
+ public ClientCHK(byte[] routingKey, byte[] encKey,
+ boolean isControlDocument, short algo, short compressionAlgorithm)
{
this.routingKey = routingKey;
this.cryptoKey = encKey;
- this.compressed = isCompressed;
this.controlDocument = isControlDocument;
this.cryptoAlgorithm = algo;
+ this.compressionAlgorithm = compressionAlgorithm;
}
/**
@@ -45,13 +65,13 @@
routingKey = uri.getRoutingKey();
cryptoKey = uri.getCryptoKey();
byte[] extra = uri.getExtra();
- if(extra == null || extra.length < 3)
+ if(extra == null || extra.length < 5)
throw new MalformedURLException();
cryptoAlgorithm = (short)(((extra[0] & 0xff) << 8) + (extra[1] &
0xff));
if(cryptoAlgorithm != ALGO_AES_PCFB_256)
throw new MalformedURLException("Invalid crypto
algorithm");
- compressed = (extra[2] & 0x01) != 0;
controlDocument = (extra[2] & 0x02) != 0;
+ compressionAlgorithm = (short)(((extra[3] & 0xff) << 8) + (extra[4] &
0xff));
}
/**
@@ -65,7 +85,7 @@
cryptoAlgorithm = (short)(((extra[0] & 0xff) << 8) + (extra[1] &
0xff));
if(cryptoAlgorithm != ALGO_AES_PCFB_256)
throw new MalformedURLException("Invalid crypto
algorithm");
- compressed = (extra[2] & 0x01) != 0;
+ compressionAlgorithm = (short)(((extra[3] & 0xff) << 8) + (extra[4] &
0xff));
controlDocument = (extra[2] & 0x02) != 0;
routingKey = new byte[NodeCHK.KEY_LENGTH];
dis.readFully(routingKey);
@@ -78,30 +98,27 @@
* @throws IOException If a write failed.
*/
public void writeRawBinaryKey(DataOutputStream dos) throws IOException {
+ dos.write(getExtra());
+ dos.write(routingKey);
+ dos.write(cryptoKey);
+ }
+
+ public byte[] getExtra() {
byte[] extra = new byte[EXTRA_LENGTH];
extra[0] = (byte) (cryptoAlgorithm >> 8);
extra[1] = (byte) cryptoAlgorithm;
- extra[2] = (byte) ((compressed ? 1 : 0) + (controlDocument ? 2
: 0));
- dos.write(extra);
- dos.write(routingKey);
- dos.write(cryptoKey);
+ extra[2] = (byte) (controlDocument ? 2 : 0);
+ extra[3] = (byte) (compressionAlgorithm >> 8);
+ extra[4] = (byte) compressionAlgorithm;
+ return extra;
}
-
- byte[] routingKey;
- byte[] cryptoKey;
- boolean compressed;
- boolean controlDocument;
- short cryptoAlgorithm;
-
+
public String toString() {
return super.toString()+":"+Base64.encode(routingKey)+","+
- Base64.encode(cryptoKey)+","+compressed+","+controlDocument+
+
Base64.encode(cryptoKey)+","+compressionAlgorithm+","+controlDocument+
","+cryptoAlgorithm;
}
- static final short EXTRA_LENGTH = 3;
- static final short CRYPTO_KEY_LENGTH = 32;
- static final short ALGO_AES_PCFB_256 = 1;
/**
* @return a NodeCHK corresponding to this key. Basically keep the
@@ -117,11 +134,7 @@
* @return URI form of this key.
*/
public FreenetURI getURI() {
- byte[] extra = new byte[3];
- extra[0] = (byte)((cryptoAlgorithm >> 8) & 0xff);
- extra[1] = (byte)(cryptoAlgorithm & 0xff);
- extra[2] =
- (byte)((compressed ? 1 : 0) + (controlDocument ? 2 : 0));
+ byte[] extra = getExtra();
return new FreenetURI("CHK", "", routingKey, cryptoKey, extra);
}
@@ -136,4 +149,8 @@
public boolean isMetadata() {
return controlDocument;
}
+
+ public boolean isCompressed() {
+ return compressionAlgorithm > 0;
+ }
}
Modified: trunk/freenet/src/freenet/keys/ClientCHKBlock.java
===================================================================
--- trunk/freenet/src/freenet/keys/ClientCHKBlock.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/keys/ClientCHKBlock.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -1,5 +1,6 @@
package freenet.keys;
+import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.zip.Deflater;
@@ -10,6 +11,11 @@
import freenet.crypt.PCFBMode;
import freenet.crypt.UnsupportedCipherException;
import freenet.crypt.ciphers.Rijndael;
+import freenet.support.ArrayBucket;
+import freenet.support.ArrayBucketFactory;
+import freenet.support.Bucket;
+import freenet.support.BucketTools;
+import freenet.support.compress.Compressor;
/**
@@ -19,7 +25,8 @@
*/
public class ClientCHKBlock extends CHKBlock {
- final ClientCHK key;
+ public static final long MAX_COMPRESSED_DATA_LENGTH = NodeCHK.BLOCK_SIZE -
3;
+ final ClientCHK key;
public String toString() {
return super.toString()+",key="+key;
@@ -50,35 +57,63 @@
* @param sourceData The data to encode.
* @param asMetadata Is this a metadata key?
* @param dontCompress If set, don't even try to compress.
+ * @param alreadyCompressedCodec If !dontCompress, and this is >=0, then
the
+ * data is already compressed, and this is the algorithm.
*/
- static public ClientCHKBlock encode(byte[] sourceData, boolean asMetadata,
boolean dontCompress) throws CHKEncodeException {
+ static public ClientCHKBlock encode(byte[] sourceData, boolean asMetadata,
boolean dontCompress, short alreadyCompressedCodec) throws CHKEncodeException {
byte[] data;
byte[] header;
ClientCHK key;
+ short compressionAlgorithm = -1;
// Try to compress it - even if it fits into the block,
// because compressing it improves its entropy.
boolean compressed = false;
if(sourceData.length > MAX_LENGTH_BEFORE_COMPRESSION)
throw new CHKEncodeException("Too big");
- if(sourceData.length > NodeCHK.BLOCK_SIZE && !dontCompress) {
- int sourceLength = sourceData.length;
- byte[] cbuf = new byte[32768+1024];
- Deflater compressor = new Deflater();
- compressor.setInput(sourceData);
- compressor.finish();
- int compressedLength = compressor.deflate(cbuf);
- if(compressedLength+2 < sourceData.length) {
- // Yay
+ if(!dontCompress) {
+ byte[] cbuf = null;
+ if(alreadyCompressedCodec >= 0) {
+ compressionAlgorithm = alreadyCompressedCodec;
+ cbuf = sourceData;
+ } else {
+ // Determine the best algorithm
+ Bucket bucket = new ArrayBucket(sourceData);
+ bucket.setReadOnly();
+ for(int i=0;i<Compressor.countCompressAlgorithms();i++) {
+ Compressor comp =
Compressor.getCompressionAlgorithmByDifficulty(i);
+ ArrayBucket compressedData;
+ try {
+ compressedData = (ArrayBucket)
comp.compress(bucket, new ArrayBucketFactory());
+ } catch (IOException e) {
+ throw new Error(e);
+ }
+ if(compressedData.size() <= MAX_COMPRESSED_DATA_LENGTH)
{
+ compressionAlgorithm =
comp.codecNumberForMetadata();
+ try {
+ cbuf =
BucketTools.toByteArray(compressedData);
+ // FIXME provide a method in
ArrayBucket
+ } catch (IOException e) {
+ throw new Error(e);
+ }
+ break;
+ }
+ }
+
+ }
+ if(cbuf != null) {
+ // Use it
+ int sourceLength = sourceData.length;
+ int compressedLength = cbuf.length;
sourceData = new byte[compressedLength+3];
System.arraycopy(cbuf, 0, sourceData, 3, compressedLength);
sourceData[0] = (byte) ((sourceLength >> 16) & 0xff);
sourceData[1] = (byte) ((sourceLength >> 8) & 0xff);
sourceData[2] = (byte) ((sourceLength) & 0xff);
compressed = true;
- }
+ }
}
- if(sourceData.length > 32768) {
+ if(sourceData.length > NodeCHK.BLOCK_SIZE) {
throw new CHKEncodeException("Too big");
}
MessageDigest md160;
@@ -152,7 +187,7 @@
byte[] finalHash = md160.digest(data);
// Now convert it into a ClientCHK
- key = new ClientCHK(finalHash, encKey, compressed, asMetadata,
ClientCHK.ALGO_AES_PCFB_256);
+ key = new ClientCHK(finalHash, encKey, asMetadata,
ClientCHK.ALGO_AES_PCFB_256, compressionAlgorithm);
try {
return new ClientCHKBlock(data, header, key, false);
Modified: trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java
===================================================================
--- trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/node/RealNodeRequestInsertTest.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -168,7 +168,7 @@
Logger.error(RealNodeRequestInsertTest.class,"Inserting:
\""+dataString+"\" to "+node1);
byte[] data = dataString.getBytes();
ClientCHKBlock block;
- block = ClientCHKBlock.encode(data);
+ block = ClientCHKBlock.encode(data, false, false, (short)-1);
ClientCHK chk = block.getClientKey();
byte[] encData = block.getData();
byte[] encHeaders = block.getHeader();
Modified: trunk/freenet/src/freenet/node/TextModeClientInterface.java
===================================================================
--- trunk/freenet/src/freenet/node/TextModeClientInterface.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/node/TextModeClientInterface.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -196,7 +196,7 @@
byte[] data = content.getBytes();
ClientCHKBlock block;
try {
- block = ClientCHKBlock.encode(data);
+ block = ClientCHKBlock.encode(data, false, false, (short)-1);
} catch (CHKEncodeException e) {
Logger.error(this, "Couldn't encode: "+e, e);
return;
@@ -230,7 +230,7 @@
System.out.println("Inserting...");
ClientCHKBlock block;
try {
- block = ClientCHKBlock.encode(data);
+ block = ClientCHKBlock.encode(data, false, false,
(short)-1);
} catch (CHKEncodeException e) {
System.out.println("Couldn't encode: "+e.getMessage());
Logger.error(this, "Couldn't encode: "+e, e);
Modified: trunk/freenet/src/freenet/node/Version.java
===================================================================
--- trunk/freenet/src/freenet/node/Version.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/node/Version.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -20,10 +20,10 @@
public static final String protocolVersion = "1.0";
/** The build number of the current revision */
- public static final int buildNumber = 143;
+ public static final int buildNumber = 144;
/** Oldest build of Fred we will talk to */
- public static final int lastGoodBuild = 139;
+ public static final int lastGoodBuild = 144;
/** The highest reported build of fred */
public static int highestSeenBuild = buildNumber;
Added: trunk/freenet/src/freenet/support/ArrayBucket.java
===================================================================
--- trunk/freenet/src/freenet/support/ArrayBucket.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/support/ArrayBucket.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -0,0 +1,174 @@
+package freenet.support;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+
+/**
+ * A bucket that stores data in the memory.
+ *
+ * @author oskar
+ */
+public class ArrayBucket implements Bucket {
+
+ private ArrayList data;
+ private boolean reset;
+ private String name;
+ private boolean readOnly;
+
+ public ArrayBucket() {
+ this("ArrayBucket");
+ }
+
+ public ArrayBucket(byte[] initdata) {
+ this("ArrayBucket");
+ data.add(initdata);
+ }
+
+ public ArrayBucket(String name) {
+ data = new ArrayList();
+ this.name = name;
+ }
+
+ public OutputStream getOutputStream() throws IOException {
+ if(readOnly) throw new IOException("Read only");
+ return new ArrayBucketOutputStream(reset);
+ }
+
+ public InputStream getInputStream() {
+ return new ArrayBucketInputStream();
+ }
+
+ public String toString() {
+ StringBuffer s = new StringBuffer(250);
+ for (Iterator i = data.iterator(); i.hasNext();) {
+ byte[] b = (byte[]) i.next();
+ s.append(new String(b));
+ }
+ return new String(s);
+ }
+
+ public void read(InputStream in) throws IOException {
+ OutputStream out = new ArrayBucketOutputStream(reset);
+ int i;
+ byte[] b = new byte[8 * 1024];
+ while ((i = in.read(b)) != -1) {
+ out.write(b, 0, i);
+ }
+ out.close();
+ }
+
+ public long size() {
+ long size = 0;
+ for (Iterator i = data.iterator(); i.hasNext();) {
+ byte[] b = (byte[]) i.next();
+ size += b.length;
+ }
+ return size;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ private class ArrayBucketOutputStream extends ByteArrayOutputStream {
+
+ private boolean reset;
+
+ public ArrayBucketOutputStream(boolean reset) {
+ super();
+ this.reset = reset;
+ }
+
+ public void close() throws IOException {
+ if (reset) {
+ data.clear();
+ data.trimToSize();
+ }
+ reset = false;
+ data.add(toByteArray());
+ if(readOnly) throw new IOException("Read only");
+ // FIXME maybe we should throw on write instead? :)
+ }
+ }
+
+ private class ArrayBucketInputStream extends InputStream {
+
+ private Iterator i;
+ private ByteArrayInputStream in;
+
+ public ArrayBucketInputStream() {
+ i = data.iterator();
+ }
+
+ public int read() {
+ return priv_read();
+ }
+
+ private int priv_read() {
+ if (in == null) {
+ if (i.hasNext()) {
+ in = new ByteArrayInputStream((byte[])
i.next());
+ } else {
+ return -1;
+ }
+ }
+ int i = in.read();
+ if (i == -1) {
+ in = null;
+ return priv_read();
+ } else {
+ return i;
+ }
+ }
+
+ public int read(byte[] b) {
+ return priv_read(b, 0, b.length);
+ }
+
+ public int read(byte[] b, int off, int len) {
+ return priv_read(b, off, len);
+ }
+
+ private int priv_read(byte[] b, int off, int len) {
+ if (in == null) {
+ if (i.hasNext()) {
+ in = new ByteArrayInputStream((byte[])
i.next());
+ } else {
+ return -1;
+ }
+ }
+ int i = in.read(b, off, len);
+ if (i == -1) {
+ in = null;
+ return priv_read(b, off, len);
+ } else {
+ return i;
+ }
+ }
+
+ public int available() {
+ if (in == null) {
+ if (i.hasNext()) {
+ in = new ByteArrayInputStream((byte[])
i.next());
+ } else {
+ return 0;
+ }
+ }
+ return in.available();
+ }
+
+ }
+
+ public boolean isReadOnly() {
+ return readOnly;
+ }
+
+ public void setReadOnly() {
+ readOnly = true;
+ }
+}
Added: trunk/freenet/src/freenet/support/ArrayBucketFactory.java
===================================================================
--- trunk/freenet/src/freenet/support/ArrayBucketFactory.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/support/ArrayBucketFactory.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -0,0 +1,15 @@
+package freenet.support;
+
+import java.io.IOException;
+
+public class ArrayBucketFactory implements BucketFactory {
+
+ public Bucket makeBucket(long size) throws IOException {
+ return new ArrayBucket();
+ }
+
+ public void freeBucket(Bucket b) throws IOException {
+ // Do nothing
+ }
+
+}
Copied: trunk/freenet/src/freenet/support/compress/Compressor.java (from rev 7495, trunk/freenet/src/freenet/client/Compressor.java)
===================================================================
--- trunk/freenet/src/freenet/client/Compressor.java 2005-11-07 23:51:07 UTC (rev 7495)
+++ trunk/freenet/src/freenet/support/compress/Compressor.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -0,0 +1,53 @@
+package freenet.support.compress;
+
+import java.io.IOException;
+
+import freenet.client.Metadata;
+import freenet.support.Bucket;
+import freenet.support.BucketFactory;
+
+/**
+ * A data compressor. Contains methods to get all data compressors.
+ * This is for single-file compression (gzip, bzip2) as opposed to archives.
+ */
+public abstract class Compressor {
+
+ public static Compressor gzip = new GzipCompressor();
+
+ public abstract Bucket compress(Bucket data, BucketFactory bf) throws
IOException;
+
+ public short codecNumberForMetadata() {
+ return Metadata.COMPRESS_GZIP;
+ }
+
+ /** Count the number of distinct compression algorithms currently
supported. */
+ public static int countCompressAlgorithms() {
+ // FIXME we presently only support gzip. This should change in
future.
+ return 1;
+ }
+
+ public static Compressor getCompressionAlgorithmByDifficulty(int i) {
+ if(i == 0)
+ return Compressor.gzip;
+ // FIXME when we get more compression algos, put them here.
+ return null;
+ }
+
+ public static Compressor getCompressionAlgorithmByMetadataID(short
algo) {
+ if(algo == Metadata.COMPRESS_GZIP)
+ return gzip;
+ // FIXME when we get more compression algos, put them here.
+ return null;
+ }
+
+ /** Decompress in RAM only.
+ * @param dbuf Input buffer.
+ * @param i Offset to start reading from.
+ * @param j Number of bytes to read.
+ * @param output Output buffer.
+ * @throws DecompressException
+ * @returns The number of bytes actually written.
+ */
+ public abstract int decompress(byte[] dbuf, int i, int j, byte[]
output) throws DecompressException;
+
+}
Added: trunk/freenet/src/freenet/support/compress/DecompressException.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/DecompressException.java 2005-11-08 00:36:19 UTC (rev 7499)
+++ trunk/freenet/src/freenet/support/compress/DecompressException.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -0,0 +1,12 @@
+package freenet.support.compress;
+
+/**
+ * Exception thrown when there is a permanent failure in decompression due to
e.g. a format error.
+ */
+public class DecompressException extends Exception {
+
+ public DecompressException(String msg) {
+ super(msg);
+ }
+
+}
Copied: trunk/freenet/src/freenet/support/compress/GzipCompressor.java (from rev 7495, trunk/freenet/src/freenet/client/GzipCompressor.java)
===================================================================
--- trunk/freenet/src/freenet/client/GzipCompressor.java 2005-11-07 23:51:07 UTC (rev 7495)
+++ trunk/freenet/src/freenet/support/compress/GzipCompressor.java 2005-11-08 16:02:24 UTC (rev 7500)
@@ -0,0 +1,45 @@
+package freenet.support.compress;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.zip.DataFormatException;
+import java.util.zip.GZIPOutputStream;
+import java.util.zip.Inflater;
+
+import freenet.keys.CHKDecodeException;
+import freenet.support.Bucket;
+import freenet.support.BucketFactory;
+
+public class GzipCompressor extends Compressor {
+
+ public Bucket compress(Bucket data, BucketFactory bf) throws
IOException {
+ Bucket output = bf.makeBucket(-1);
+ InputStream is = data.getInputStream();
+ OutputStream os = output.getOutputStream();
+ GZIPOutputStream gos = new GZIPOutputStream(os);
+ byte[] buffer = new byte[4096];
+ while(true) {
+ int x = is.read(buffer);
+ if(x <= -1) break;
+ if(x == 0) throw new IOException("Returned zero from
read()");
+ gos.write(buffer, 0, x);
+ }
+ gos.close();
+ return output;
+ }
+
+ public int decompress(byte[] dbuf, int i, int j, byte[] output) throws
DecompressException {
+ Inflater decompressor = new Inflater();
+ decompressor.setInput(dbuf, i, j);
+ try {
+ int resultLength = decompressor.inflate(output);
+ return resultLength;
+ } catch (DataFormatException e) {
+ throw new DecompressException("Invalid data: "+e);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ throw new DecompressException("Invalid data: "+e);
+ }
+ }
+
+}