eadwright removed a comment on pull request #902:
URL: https://github.com/apache/parquet-mr/pull/902#issuecomment-846560922


   Work in progress looks like this (not committed) - adjust `PATH` to point to 
the file I uploaded.
   
   ```
   package org.apache.parquet.hadoop;
   
   import org.apache.hadoop.conf.Configuration;
   import org.apache.hadoop.fs.Path;
   import org.apache.parquet.ParquetReadOptions;
   import org.apache.parquet.column.page.PageReadStore;
   import org.apache.parquet.example.data.Group;
   import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
   import org.apache.parquet.hadoop.util.HadoopInputFile;
   import org.apache.parquet.io.ColumnIOFactory;
   import org.apache.parquet.io.MessageColumnIO;
   import org.apache.parquet.io.RecordReader;
   import org.apache.parquet.schema.MessageType;
   import org.apache.parquet.schema.PrimitiveType;
   import org.junit.Test;
   
   import java.io.IOException;
   
   import static 
org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY;
   import static org.apache.parquet.schema.Type.Repetition.REQUIRED;
   
   public class TestParquetReaderLargeColumn {
     // Local path to the sample parquet file; adjust to wherever the uploaded file lives.
     private static final Path PATH = new Path("/Volumes/HDD/random6.parquet");

     /**
      * Opens the parquet file, iterates every row group, and materializes the
      * "string" column of every record, exercising the reader against a column
      * with large values.
      *
      * @throws IOException if the file cannot be opened or a page cannot be read
      */
     @Test
     public void test() throws IOException {
       Configuration configuration = new Configuration();
       ParquetReadOptions options = ParquetReadOptions.builder().build();

       MessageType messageType = buildSchema();

       try (ParquetFileReader reader =
           new ParquetFileReader(HadoopInputFile.fromPath(PATH, configuration), options)) {
         PageReadStore pages;
         while ((pages = reader.readNextRowGroup()) != null) {
           MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(messageType);
           RecordReader<Group> recordReader =
               columnIO.getRecordReader(pages, new GroupRecordConverter(messageType));
           long rowCount = pages.getRowCount();

           // Iterate the full row count with a long counter: the original
           // `int i = 0; i < rowCount - 1` skipped the last record of every row
           // group, and an int counter could overflow for very large row groups.
           for (long i = 0; i < rowCount; i++) {
             Group group = recordReader.read();
             group.getString("string", 0);
           }
         }
       }
     }

     /** Schema matching the test file: a single required BINARY field named "string". */
     private static MessageType buildSchema() {
       return new MessageType("AvroString",
         new PrimitiveType(REQUIRED, BINARY, "string"));
     }
   }
   ```
   
   Alas, when I run it I get this exception; I'm not sure why yet:
   
   ```
   java.lang.NoSuchMethodError: 
org.apache.parquet.format.LogicalType.getSetField()Lshaded/parquet/org/apache/thrift/TFieldIdEnum;
   
        at 
org.apache.parquet.format.converter.ParquetMetadataConverter.getLogicalTypeAnnotation(ParquetMetadataConverter.java:1066)
        at 
org.apache.parquet.format.converter.ParquetMetadataConverter.buildChildren(ParquetMetadataConverter.java:1569)
        at 
org.apache.parquet.format.converter.ParquetMetadataConverter.fromParquetSchema(ParquetMetadataConverter.java:1524)
        at 
org.apache.parquet.format.converter.ParquetMetadataConverter.fromParquetMetadata(ParquetMetadataConverter.java:1399)
        at 
org.apache.parquet.format.converter.ParquetMetadataConverter.readParquetMetadata(ParquetMetadataConverter.java:1370)
        at 
org.apache.parquet.hadoop.ParquetFileReader.readFooter(ParquetFileReader.java:583)
        at 
org.apache.parquet.hadoop.ParquetFileReader.<init>(ParquetFileReader.java:777)
        at 
org.apache.parquet.hadoop.TestParquetReaderLargeColumn.test(TestParquetReaderLargeColumn.java:64)
   ```
   
   
   
   
   
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to