Pass correct uncompressed data size to BytesInput ctor in ComplexParquetReader


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/4216e0e2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/4216e0e2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/4216e0e2

Branch: refs/heads/master
Commit: 4216e0e2c60cf17caa678cee685cbfc2ca4e819a
Parents: ed72c13
Author: Parth Chandra <pchan...@maprtech.com>
Authored: Sat Aug 23 21:48:04 2014 -0700
Committer: Jacques Nadeau <jacq...@apache.org>
Committed: Sun Aug 24 08:28:34 2014 -0700

----------------------------------------------------------------------
 .../src/main/java/parquet/hadoop/ColumnChunkIncReadStore.java      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/4216e0e2/exec/java-exec/src/main/java/parquet/hadoop/ColumnChunkIncReadStore.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/parquet/hadoop/ColumnChunkIncReadStore.java b/exec/java-exec/src/main/java/parquet/hadoop/ColumnChunkIncReadStore.java
index 379d3e6..516be0e 100644
--- a/exec/java-exec/src/main/java/parquet/hadoop/ColumnChunkIncReadStore.java
+++ b/exec/java-exec/src/main/java/parquet/hadoop/ColumnChunkIncReadStore.java
@@ -147,7 +147,7 @@ public class ColumnChunkIncReadStore implements PageReadStore {
               ByteBuffer buffer = buf.nioBuffer(0, pageHeader.compressed_page_size);
               CompatibilityUtil.getBuf(in, buffer, pageHeader.compressed_page_size);
               return new Page(
-                      decompressor.decompress(BytesInput.from(buffer, 0, pageHeader.compressed_page_size), pageHeader.compressed_page_size),
+                      decompressor.decompress(BytesInput.from(buffer, 0, pageHeader.compressed_page_size), pageHeader.getUncompressed_page_size()),
                       pageHeader.data_page_header.num_values,
                       pageHeader.uncompressed_page_size,
                       parquetMetadataConverter.fromParquetStatistics(pageHeader.data_page_header.statistics, columnDescriptor.getType()),

Reply via email to