Author: toad
Date: 2008-10-27 20:46:44 +0000 (Mon, 27 Oct 2008)
New Revision: 23112
Removed:
trunk/freenet/src/SevenZip/
trunk/freenet/src/freenet/support/compress/Bzip2Compressor.java
trunk/freenet/src/freenet/support/compress/LZMACompressor.java
trunk/freenet/src/net/contrapunctus/
trunk/freenet/test/freenet/support/compress/Bzip2CompressorTest.java
Modified:
trunk/freenet/src/freenet/client/ArchiveManager.java
trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java
trunk/freenet/src/freenet/support/compress/Compressor.java
Log:
Remove the new compression methods so that I can get build 1166 out. It appears
that the DNS NPE is a serious problem preventing nodes from bootstrapping, so
we need a new build *now*.
Nextgens: Please move the compression code to ext/. I have deleted what I can
here, but I don't know how to remove the external, so the tar and bzip2 code
will stay in the tree even though we no longer use it.
Modified: trunk/freenet/src/freenet/client/ArchiveManager.java
===================================================================
--- trunk/freenet/src/freenet/client/ArchiveManager.java 2008-10-27 20:26:54 UTC (rev 23111)
+++ trunk/freenet/src/freenet/client/ArchiveManager.java 2008-10-27 20:46:44 UTC (rev 23112)
@@ -23,8 +23,6 @@
import freenet.support.io.Closer;
import java.io.InputStream;
import java.util.zip.GZIPInputStream;
-import net.contrapunctus.lzma.LzmaInputStream;
-import org.apache.tools.bzip2.CBZip2InputStream;
import org.apache.tools.tar.TarEntry;
import org.apache.tools.tar.TarInputStream;
@@ -43,8 +41,7 @@
private static boolean logMINOR;
public enum ARCHIVE_TYPE {
- ZIP((short)0, new String[] { "application/zip", "application/x-zip" }), /* eventually get rid of ZIP support at some point */
- TAR((short)1, new String[] { "application/x-tar" });
+ ZIP((short)0, new String[] { "application/zip", "application/x-zip" });
public final short metadataID;
public final String[] mimeTypes;
@@ -91,7 +88,7 @@
}
public final static ARCHIVE_TYPE getDefault() {
- return TAR;
+ return ZIP;
}
}
@@ -215,7 +212,7 @@
/**
* Extract data to cache. Call synchronized on ctx.
* @param key The key the data was fetched from.
- * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP | Metadata.ARCHIVE_TAR.
+ * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP.
* @param data The actual data fetched.
* @param archiveContext The context for the whole fetch process.
* @param ctx The ArchiveStoreContext for this key.
@@ -266,21 +263,13 @@
if(ctype == null) {
if(logMINOR) Logger.minor(this, "No compression");
is = data.getInputStream();
- } else if(ctype == COMPRESSOR_TYPE.BZIP2) {
- if(logMINOR) Logger.minor(this, "dealing with BZIP2");
- is = new CBZip2InputStream(data.getInputStream());
} else if(ctype == COMPRESSOR_TYPE.GZIP) {
if(logMINOR) Logger.minor(this, "dealing with GZIP");
is = new GZIPInputStream(data.getInputStream());
- } else if(ctype == COMPRESSOR_TYPE.LZMA) {
- if(logMINOR) Logger.minor(this, "dealing with LZMA");
- is = new LzmaInputStream(data.getInputStream());
}
if(ARCHIVE_TYPE.ZIP == archiveType)
handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit);
- else if(ARCHIVE_TYPE.TAR == archiveType)
- handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit);
else
throw new ArchiveFailureException("Unknown or unsupported archive algorithm " + archiveType);
} catch (IOException ioe) {
@@ -290,75 +279,6 @@
}
}
- private void handleTARArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit) throws ArchiveFailureException, ArchiveRestartException {
- if(logMINOR) Logger.minor(this, "Handling a TAR Archive");
- TarInputStream tarIS = null;
- try {
- tarIS = new TarInputStream(data);
-
- // MINOR: Assumes the first entry in the tarball is a directory.
- TarEntry entry;
-
- byte[] buf = new byte[32768];
- HashSet names = new HashSet();
- boolean gotMetadata = false;
-
-outerTAR: while(true) {
- entry = tarIS.getNextEntry();
- if(entry == null) break;
- if(entry.isDirectory()) continue;
- String name = entry.getName();
- if(names.contains(name)) {
- Logger.error(this, "Duplicate key "+name+" in archive "+key);
- continue;
- }
- long size = entry.getSize();
- if(size > maxArchivedFileSize) {
- addErrorElement(ctx, key, name, "File too big: "+maxArchivedFileSize+" greater than current archived file size limit "+maxArchivedFileSize);
- } else {
- // Read the element
- long realLen = 0;
- Bucket output = tempBucketFactory.makeBucket(size);
- OutputStream out = output.getOutputStream();
-
- int readBytes;
- while((readBytes = tarIS.read(buf)) > 0) {
- out.write(buf, 0, readBytes);
- readBytes += realLen;
- if(readBytes > maxArchivedFileSize) {
- addErrorElement(ctx, key, name, "File too big: "+maxArchivedFileSize+" greater than current archived file size limit "+maxArchivedFileSize);
- out.close();
- output.free();
- continue outerTAR;
- }
- }
-
- out.close();
- if(name.equals(".metadata"))
- gotMetadata = true;
- addStoreElement(ctx, key, name, output, gotElement, element, callback);
- names.add(name);
- trimStoredData();
- }
- }
-
- // If no metadata, generate some
- if(!gotMetadata) {
- generateMetadata(ctx, key, names, gotElement, element, callback);
- trimStoredData();
- }
- if(throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");
-
- if((!gotElement.value) && element != null)
- callback.notInArchive();
-
- } catch (IOException e) {
- throw new ArchiveFailureException("Error reading archive: "+e.getMessage(), e);
- } finally {
- Closer.close(tarIS);
- }
- }
-
private void handleZIPArchive(ArchiveStoreContext ctx, FreenetURI key, InputStream data, String element, ArchiveExtractCallback callback, MutableBoolean gotElement, boolean throwAtExit) throws ArchiveFailureException, ArchiveRestartException {
if(logMINOR) Logger.minor(this, "Handling a ZIP Archive");
ZipInputStream zis = null;
Modified: trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java
===================================================================
--- trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java 2008-10-27 20:26:54 UTC (rev 23111)
+++ trunk/freenet/src/freenet/client/async/SimpleManifestPutter.java 2008-10-27 20:46:44 UTC (rev 23112)
@@ -25,8 +25,6 @@
import freenet.support.Logger;
import freenet.support.api.Bucket;
import freenet.support.io.BucketTools;
-import org.apache.tools.tar.TarEntry;
-import org.apache.tools.tar.TarOutputStream;
public class SimpleManifestPutter extends BaseClientPutter implements PutCompletionCallback {
// Only implements PutCompletionCallback for the final metadata insert
@@ -427,9 +425,7 @@
Bucket outputBucket = ctx.bf.makeBucket(baseMetadata.dataLength());
// TODO: try both ? - maybe not worth it
archiveType = ARCHIVE_TYPE.getDefault();
- String mimeType = (archiveType == ARCHIVE_TYPE.TAR ?
- createTarBucket(bucket, outputBucket) :
- createZipBucket(bucket, outputBucket));
+ String mimeType = createZipBucket(bucket, outputBucket);
if(logMINOR) Logger.minor(this, "We are using "+archiveType);
@@ -459,40 +455,6 @@
}
}
- private String createTarBucket(Bucket inputBucket, Bucket outputBucket) throws IOException {
- if(logMINOR) Logger.minor(this, "Create a TAR Bucket");
-
- OutputStream os = new BufferedOutputStream(outputBucket.getOutputStream());
- TarOutputStream tarOS = new TarOutputStream(os);
- TarEntry ze;
-
- for(PutHandler ph : elementsToPutInArchive) {
- ze = new TarEntry(ph.targetInArchive);
- ze.setModTime(0);
- long size = ph.data.size();
- ze.setSize(size);
- tarOS.putNextEntry(ze);
- BucketTools.copyTo(ph.data, tarOS, size);
- tarOS.closeEntry();
- }
-
- // Add .metadata - after the rest.
- ze = new TarEntry(".metadata");
- ze.setModTime(0); // -1 = now, 0 = 1970.
- long size = inputBucket.size();
- ze.setSize(size);
- tarOS.putNextEntry(ze);
- BucketTools.copyTo(inputBucket, tarOS, size);
-
- tarOS.closeEntry();
- // Both finish() and close() are necessary.
- tarOS.finish();
- tarOS.flush();
- tarOS.close();
-
- return ARCHIVE_TYPE.TAR.mimeTypes[0];
- }
-
private String createZipBucket(Bucket inputBucket, Bucket outputBucket) throws IOException {
if(logMINOR) Logger.minor(this, "Create a ZIP Bucket");
Deleted: trunk/freenet/src/freenet/support/compress/Bzip2Compressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/Bzip2Compressor.java 2008-10-27 20:26:54 UTC (rev 23111)
+++ trunk/freenet/src/freenet/support/compress/Bzip2Compressor.java 2008-10-27 20:46:44 UTC (rev 23112)
@@ -1,117 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
-* Public License, version 2 (or at your option any later version). See
-* http://www.gnu.org/ for further details of the GPL. */
-package freenet.support.compress;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import freenet.support.Logger;
-import freenet.support.api.Bucket;
-import freenet.support.api.BucketFactory;
-import org.apache.tools.bzip2.CBZip2InputStream;
-import org.apache.tools.bzip2.CBZip2OutputStream;
-
-public class Bzip2Compressor implements Compressor {
-
- public Bucket compress(Bucket data, BucketFactory bf, long maxLength) throws IOException, CompressionOutputSizeException {
- if(maxLength <= 0)
- throw new IllegalArgumentException();
- Bucket output = bf.makeBucket(maxLength);
- InputStream is = null;
- OutputStream os = null;
- CBZip2OutputStream bz2os = null;
- try {
- is = data.getInputStream();
- os = output.getOutputStream();
- bz2os = new CBZip2OutputStream(os);
- long written = 0;
- // Bigger input buffer, so can compress all at once.
- // Won't hurt on I/O either, although most OSs will only return a page at a time.
- byte[] buffer = new byte[32768];
- while(true) {
- int l = (int) Math.min(buffer.length, maxLength - written);
- int x = is.read(buffer, 0, buffer.length);
- if(l < x) {
- throw new CompressionOutputSizeException();
- }
- if(x <= -1) break;
- if(x == 0) throw new IOException("Returned zero from read()");
- bz2os.write(buffer, 0, x);
- written += x;
- }
- bz2os.flush();
- bz2os.close();
- os = null;
- } finally {
- if(is != null) is.close();
- if(os != null) os.close();
- }
- return output;
- }
-
- public Bucket decompress(Bucket data, BucketFactory bf, long maxLength, long maxCheckSizeLength, Bucket preferred) throws IOException, CompressionOutputSizeException {
- Bucket output;
- if(preferred != null)
- output = preferred;
- else
- output = bf.makeBucket(maxLength);
- InputStream is = data.getInputStream();
- OutputStream os = output.getOutputStream();
- decompress(is, os, maxLength, maxCheckSizeLength);
- os.close();
- is.close();
- return output;
- }
-
- private long decompress(InputStream is, OutputStream os, long maxLength, long maxCheckSizeBytes) throws IOException, CompressionOutputSizeException {
- CBZip2InputStream bz2is = new CBZip2InputStream(is);
- long written = 0;
- byte[] buffer = new byte[4096];
- while(true) {
- int l = (int) Math.min(buffer.length, maxLength - written);
- // We can over-read to determine whether we have over-read.
- // We enforce maximum size this way.
- // FIXME there is probably a better way to do this!
- int x = bz2is.read(buffer, 0, buffer.length);
- if(l < x) {
- Logger.normal(this, "l="+l+", x="+x+", written="+written+", maxLength="+maxLength+" throwing a CompressionOutputSizeException");
- if(maxCheckSizeBytes > 0) {
- written += x;
- while(true) {
- l = (int) Math.min(buffer.length, maxLength + maxCheckSizeBytes - written);
- x = bz2is.read(buffer, 0, l);
- if(x <= -1) throw new CompressionOutputSizeException(written);
- if(x == 0) throw new IOException("Returned zero from read()");
- written += x;
- }
- }
- throw new CompressionOutputSizeException();
- }
- if(x <= -1) return written;
- if(x == 0) throw new IOException("Returned zero from read()");
- os.write(buffer, 0, x);
- written += x;
- }
- }
-
- public int decompress(byte[] dbuf, int i, int j, byte[] output) throws CompressionOutputSizeException {
- // Didn't work with Inflater.
- // FIXME fix sometimes to use Inflater - format issue?
- ByteArrayInputStream bais = new ByteArrayInputStream(dbuf, i, j);
- ByteArrayOutputStream baos = new ByteArrayOutputStream(output.length);
- int bytes = 0;
- try {
- bytes = (int)decompress(bais, baos, output.length, -1);
- } catch (IOException e) {
- // Impossible
- throw new Error("Got IOException: " + e.getMessage(), e);
- }
- byte[] buf = baos.toByteArray();
- System.arraycopy(buf, 0, output, 0, bytes);
- return bytes;
- }
-}
Modified: trunk/freenet/src/freenet/support/compress/Compressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/Compressor.java 2008-10-27 20:26:54 UTC (rev 23111)
+++ trunk/freenet/src/freenet/support/compress/Compressor.java 2008-10-27 20:46:44 UTC (rev 23112)
@@ -16,9 +16,7 @@
public enum COMPRESSOR_TYPE implements Compressor {
// They will be tried in order: put the less resource consuming first
- GZIP("GZIP", new GzipCompressor(), (short) 0),
- BZIP2("BZIP2", new Bzip2Compressor(), (short) 1),
- LZMA("LZMA", new LZMACompressor(), (short)2);
+ GZIP("GZIP", new GzipCompressor(), (short) 0);
public final String name;
public final Compressor compressor;
Deleted: trunk/freenet/src/freenet/support/compress/LZMACompressor.java
===================================================================
--- trunk/freenet/src/freenet/support/compress/LZMACompressor.java 2008-10-27 20:26:54 UTC (rev 23111)
+++ trunk/freenet/src/freenet/support/compress/LZMACompressor.java 2008-10-27 20:46:44 UTC (rev 23112)
@@ -1,117 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
-* Public License, version 2 (or at your option any later version). See
-* http://www.gnu.org/ for further details of the GPL. */
-package freenet.support.compress;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import freenet.support.Logger;
-import freenet.support.api.Bucket;
-import freenet.support.api.BucketFactory;
-import net.contrapunctus.lzma.LzmaInputStream;
-import net.contrapunctus.lzma.LzmaOutputStream;
-
-public class LZMACompressor implements Compressor {
-
- public Bucket compress(Bucket data, BucketFactory bf, long maxLength) throws IOException, CompressionOutputSizeException {
- if(maxLength <= 0)
- throw new IllegalArgumentException();
- Bucket output = bf.makeBucket(maxLength);
- InputStream is = null;
- OutputStream os = null;
- LzmaOutputStream lzmaOS = null;
- try {
- is = data.getInputStream();
- os = output.getOutputStream();
- lzmaOS = new LzmaOutputStream(os);
- long written = 0;
- // Bigger input buffer, so can compress all at once.
- // Won't hurt on I/O either, although most OSs will only return a page at a time.
- byte[] buffer = new byte[32768];
- while(true) {
- int l = (int) Math.min(buffer.length, maxLength - written);
- int x = is.read(buffer, 0, buffer.length);
- if(l < x) {
- throw new CompressionOutputSizeException();
- }
- if(x <= -1) break;
- if(x == 0) throw new IOException("Returned zero from read()");
- lzmaOS.write(buffer, 0, x);
- written += x;
- }
- lzmaOS.flush();
- lzmaOS.close();
- os = null;
- } finally {
- if(is != null) is.close();
- if(os != null) os.close();
- }
- return output;
- }
-
- public Bucket decompress(Bucket data, BucketFactory bf, long maxLength, long maxCheckSizeLength, Bucket preferred) throws IOException, CompressionOutputSizeException {
- Bucket output;
- if(preferred != null)
- output = preferred;
- else
- output = bf.makeBucket(maxLength);
- InputStream is = data.getInputStream();
- OutputStream os = output.getOutputStream();
- decompress(is, os, maxLength, maxCheckSizeLength);
- os.close();
- is.close();
- return output;
- }
-
- private long decompress(InputStream is, OutputStream os, long maxLength, long maxCheckSizeBytes) throws IOException, CompressionOutputSizeException {
- LzmaInputStream lzmaIS = new LzmaInputStream(is);
- long written = 0;
- byte[] buffer = new byte[4096];
- while(true) {
- int l = (int) Math.min(buffer.length, maxLength - written);
- // We can over-read to determine whether we have over-read.
- // We enforce maximum size this way.
- // FIXME there is probably a better way to do this!
- int x = lzmaIS.read(buffer, 0, buffer.length);
- if(l < x) {
- Logger.normal(this, "l="+l+", x="+x+", written="+written+", maxLength="+maxLength+" throwing a CompressionOutputSizeException");
- if(maxCheckSizeBytes > 0) {
- written += x;
- while(true) {
- l = (int) Math.min(buffer.length, maxLength + maxCheckSizeBytes - written);
- x = lzmaIS.read(buffer, 0, l);
- if(x <= -1) throw new CompressionOutputSizeException(written);
- if(x == 0) throw new IOException("Returned zero from read()");
- written += x;
- }
- }
- throw new CompressionOutputSizeException();
- }
- if(x <= -1) return written;
- if(x == 0) throw new IOException("Returned zero from read()");
- os.write(buffer, 0, x);
- written += x;
- }
- }
-
- public int decompress(byte[] dbuf, int i, int j, byte[] output) throws CompressionOutputSizeException {
- // Didn't work with Inflater.
- // FIXME fix sometimes to use Inflater - format issue?
- ByteArrayInputStream bais = new ByteArrayInputStream(dbuf, i, j);
- ByteArrayOutputStream baos = new ByteArrayOutputStream(output.length);
- int bytes = 0;
- try {
- bytes = (int)decompress(bais, baos, output.length, -1);
- } catch (IOException e) {
- // Impossible
- throw new Error("Got IOException: " + e.getMessage(), e);
- }
- byte[] buf = baos.toByteArray();
- System.arraycopy(buf, 0, output, 0, bytes);
- return bytes;
- }
-}
Deleted: trunk/freenet/test/freenet/support/compress/Bzip2CompressorTest.java
===================================================================
--- trunk/freenet/test/freenet/support/compress/Bzip2CompressorTest.java 2008-10-27 20:26:54 UTC (rev 23111)
+++ trunk/freenet/test/freenet/support/compress/Bzip2CompressorTest.java 2008-10-27 20:46:44 UTC (rev 23112)
@@ -1,200 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
-* Public License, version 2 (or at your option any later version). See
-* http://www.gnu.org/ for further details of the GPL. */
-package freenet.support.compress;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import junit.framework.TestCase;
-import freenet.support.api.Bucket;
-import freenet.support.api.BucketFactory;
-import freenet.support.io.ArrayBucket;
-import freenet.support.io.ArrayBucketFactory;
-
-/**
- * Test case for {@link freenet.support.compress.Bzip2Compressor} class.
- */
-public class Bzip2CompressorTest extends TestCase {
-
- private static final String UNCOMPRESSED_DATA_1 = GzipCompressorTest.UNCOMPRESSED_DATA_1;
-
- private static final byte[] COMPRESSED_DATA_1 = {
- 104,57,49,65,89,38,83,89,-18,-87,-99,-74,0,0,33,-39,-128,0,8,16,
- 0,58,64,52,-7,-86,0,48,0,-69,65,76,38,-102,3,76,65,-92,-12,-43,
- 61,71,-88,-51,35,76,37,52,32,19,-44,67,74,-46,-9,17,14,-35,55,
- 100,-10,73,-75,121,-34,83,56,-125,15,32,-118,35,66,124,-120,-39,
- 119,-104,-108,66,101,-56,94,-71,-41,-43,68,51,65,19,-44,-118,4,
- -36,-117,33,-101,-120,-49,-10,17,-51,-19,28,76,-57,-112,-68,-50,
- -66,-60,-43,-81,127,-51,-10,58,-92,38,18,45,102,117,-31,-116,
- -114,-6,-87,-59,-43,-106,41,-30,-63,-34,-39,-117,-104,-114,100,
- -115,36,-112,23,104,-110,71,-45,-116,-23,-85,-36,-24,-61,14,32,
- 105,55,-105,-31,-4,93,-55,20,-31,66,67,-70,-90,118,-40
- };
-
- /**
- * test BZIP2 compressor's identity and functionality
- */
- public void testBzip2Compressor() {
- Compressor.COMPRESSOR_TYPE bz2compressor = Compressor.COMPRESSOR_TYPE.BZIP2;
- Compressor compressorZero = Compressor.COMPRESSOR_TYPE.getCompressorByMetadataID((short)1);
-
- // check BZIP2 is the second compressor
- assertEquals(bz2compressor, compressorZero);
- }
-
- public void testCompress() {
-
- // do bzip2 compression
- byte[] compressedData = doCompress(UNCOMPRESSED_DATA_1.getBytes());
-
- // output size same as expected?
- //assertEquals(compressedData.length, COMPRESSED_DATA_1.length);
-
- // check each byte is exactly as expected
- for (int i = 0; i < compressedData.length; i++) {
- assertEquals(COMPRESSED_DATA_1[i], compressedData[i]);
- }
- }
-
- public void testBucketDecompress() {
-
- byte[] compressedData = COMPRESSED_DATA_1;
-
- // do bzip2 decompression with buckets
- byte[] uncompressedData = doBucketDecompress(compressedData);
-
- // is the (round-tripped) uncompressed string the same as the original?
- String uncompressedString = new String(uncompressedData);
- assertEquals(uncompressedString, UNCOMPRESSED_DATA_1);
- }
-
- public void testByteArrayDecompress() {
-
- // build 5k array
- byte[] originalUncompressedData = new byte[5 * 1024];
- for(int i = 0; i < originalUncompressedData.length; i++) {
- originalUncompressedData[i] = 1;
- }
-
- byte[] compressedData = doCompress(originalUncompressedData);
- byte[] outUncompressedData = new byte[5 * 1024];
-
- int writtenBytes = 0;
-
- try {
- writtenBytes = Compressor.COMPRESSOR_TYPE.BZIP2.decompress(compressedData, 0, compressedData.length, outUncompressedData);
- } catch (CompressionOutputSizeException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- }
-
- assertEquals(writtenBytes, originalUncompressedData.length);
- assertEquals(originalUncompressedData.length, outUncompressedData.length);
-
- // check each byte is exactly as expected
- for (int i = 0; i < outUncompressedData.length; i++) {
- assertEquals(originalUncompressedData[i], outUncompressedData[i]);
- }
- }
-
- public void testCompressException() {
-
- byte[] uncompressedData = UNCOMPRESSED_DATA_1.getBytes();
- Bucket inBucket = new ArrayBucket(uncompressedData);
- BucketFactory factory = new ArrayBucketFactory();
-
- try {
- Compressor.COMPRESSOR_TYPE.BZIP2.compress(inBucket, factory, 32);
- } catch (IOException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- } catch (CompressionOutputSizeException e) {
- // expect this
- }
- }
-
- public void testDecompressException() {
-
- // build 5k array
- byte[] uncompressedData = new byte[5 * 1024];
- for(int i = 0; i < uncompressedData.length; i++) {
- uncompressedData[i] = 1;
- }
-
- byte[] compressedData = doCompress(uncompressedData);
-
- Bucket inBucket = new ArrayBucket(compressedData);
- BucketFactory factory = new ArrayBucketFactory();
-
- try {
- Compressor.COMPRESSOR_TYPE.BZIP2.decompress(inBucket, factory, 4096 + 10, 4096 + 20, null);
- } catch (IOException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- } catch (CompressionOutputSizeException e) {
- // expect this
- }
- }
-
- private byte[] doBucketDecompress(byte[] compressedData) {
-
- Bucket inBucket = new ArrayBucket(compressedData);
- BucketFactory factory = new ArrayBucketFactory();
- Bucket outBucket = null;
-
- try {
- outBucket = Compressor.COMPRESSOR_TYPE.BZIP2.decompress(inBucket, factory, 32768, 32768 * 2, null);
- } catch (IOException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- } catch (CompressionOutputSizeException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- }
-
- InputStream in = null;
-
- try {
- in = outBucket.getInputStream();
- } catch (IOException e1) {
- fail("unexpected exception thrown : " +
e1.getMessage());
- }
- long size = outBucket.size();
- byte[] outBuf = new byte[(int) size];
-
- try {
- in.read(outBuf);
- } catch (IOException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- }
-
- return outBuf;
- }
-
- private byte[] doCompress(byte[] uncompressedData) {
- Bucket inBucket = new ArrayBucket(uncompressedData);
- BucketFactory factory = new ArrayBucketFactory();
- Bucket outBucket = null;
-
- try {
- outBucket = Compressor.COMPRESSOR_TYPE.BZIP2.compress(inBucket, factory, 32768);
- } catch (IOException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- } catch (CompressionOutputSizeException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- }
-
- InputStream in = null;
- try {
- in = outBucket.getInputStream();
- } catch (IOException e1) {
- fail("unexpected exception thrown : " +
e1.getMessage());
- }
- long size = outBucket.size();
- byte[] outBuf = new byte[(int) size];
-
- try {
- in.read(outBuf);
- } catch (IOException e) {
- fail("unexpected exception thrown : " + e.getMessage());
- }
-
- return outBuf;
- }
-}