Author: slebresne
Date: Mon Aug 8 14:16:30 2011
New Revision: 1154969
URL: http://svn.apache.org/viewvc?rev=1154969&view=rev
Log:
Fix OutOfBounds with compression
patch by slebresne; reviewed by stuhood for CASSANDRA-2994
Modified:
cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedRandomAccessReader.java
cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
Modified:
cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedRandomAccessReader.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedRandomAccessReader.java?rev=1154969&r1=1154968&r2=1154969&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedRandomAccessReader.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedRandomAccessReader.java Mon Aug 8 14:16:30 2011
@@ -135,7 +135,7 @@ public class CompressedRandomAccessReade
{
super(new File(dataFilePath), metadata.chunkLength, skipIOCache);
this.metadata = metadata;
- compressed = new byte[metadata.chunkLength];
+ compressed = new byte[Snappy.maxCompressedLength(metadata.chunkLength)];
// can't use super.read(...) methods
// that is why we are allocating special InputStream to read data from disk
// from already open file descriptor
Modified:
cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java?rev=1154969&r1=1154968&r2=1154969&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/io/compress/CompressedSequentialWriter.java Mon Aug 8 14:16:30 2011
@@ -53,7 +53,7 @@ public class CompressedSequentialWriter
super(file, CHUNK_LENGTH, skipIOCache);
// buffer for compression should be the same size as buffer itself
- compressed = new byte[buffer.length];
+ compressed = new byte[Snappy.maxCompressedLength(buffer.length)];
/* Index File (-CompressionInfo.db component) and it's header */
metadataWriter = new CompressionMetadata.Writer(indexFilePath);
@@ -85,6 +85,7 @@ public class CompressedSequentialWriter
chunkCount++;
// write data itself
+ assert compressedLength <= compressed.length;
out.write(compressed, 0, compressedLength);
// next chunk should be written right after current