danielxjd commented on a change in pull request #12223:
URL: https://github.com/apache/beam/pull/12223#discussion_r481465744


##########
File path: sdks/java/io/parquet/src/main/java/org/apache/beam/sdk/io/parquet/ParquetIO.java
##########
@@ -229,15 +290,258 @@ public void populateDisplayData(DisplayData.Builder builder) {
     public ReadFiles withAvroDataModel(GenericData model) {
       return toBuilder().setAvroDataModel(model).build();
     }
+    /** Enable the Splittable reading. */
+    public ReadFiles withSplit() {
+      return toBuilder().setSplittable(true).build();
+    }
 
     @Override
     public PCollection<GenericRecord> expand(PCollection<FileIO.ReadableFile> input) {
       checkNotNull(getSchema(), "Schema can not be null");
+      if (isSplittable()) {
+        return input
+            .apply(ParDo.of(new SplitReadFn(getAvroDataModel())))
+            .setCoder(AvroCoder.of(getSchema()));
+      }
       return input
           .apply(ParDo.of(new ReadFn(getAvroDataModel())))
           .setCoder(AvroCoder.of(getSchema()));
     }
 
+    @DoFn.BoundedPerElement
+    static class SplitReadFn extends DoFn<FileIO.ReadableFile, GenericRecord> {
+      private Class<? extends GenericData> modelClass;
+      private static final Logger LOG = LoggerFactory.getLogger(SplitReadFn.class);
+      // Default initial splitting the file into blocks of 64MB. Unit of SPLIT_LIMIT is byte.
+      private static final long SPLIT_LIMIT = 64000000;
+
+      SplitReadFn(GenericData model) {
+
+        this.modelClass = model != null ? model.getClass() : null;
+      }
+
+      ParquetFileReader getParquetFileReader(FileIO.ReadableFile file) throws Exception {
+        ParquetReadOptions options = HadoopReadOptions.builder(getConfWithModelClass()).build();
+        return ParquetFileReader.open(new BeamParquetInputFile(file.openSeekable()), options);
+      }
+
+      @ProcessElement
+      public void processElement(
+          @Element FileIO.ReadableFile file,
+          RestrictionTracker<OffsetRange, Long> tracker,
+          OutputReceiver<GenericRecord> outputReceiver)
+          throws Exception {
+        LOG.debug(
+            "start "
+                + tracker.currentRestriction().getFrom()
+                + " to "
+                + tracker.currentRestriction().getTo());
+        ParquetReadOptions options = HadoopReadOptions.builder(getConfWithModelClass()).build();
+        ParquetFileReader reader =

Review comment:
   The debug message comes from the Hadoop Parquet reader itself, which is why Hadoop categorizes it at the debug level. That suggests they may intend to allow corrupted records to be skipped.
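
The diff snippet is cut off at the line the comment anchors to, so the rest of the splittable-DoFn plumbing is not visible in this hunk. For orientation only, here is a minimal sketch of what the restriction methods around this @ProcessElement could look like, assuming the OffsetRange counts row-group indices and that SPLIT_LIMIT caps the byte size of each sub-range; both are assumptions read off the constants above, not code confirmed by this hunk.

      // Sketch only: assumes the restriction ranges over row-group indices.
      // Imports assumed: org.apache.beam.sdk.io.range.OffsetRange,
      // org.apache.parquet.hadoop.metadata.BlockMetaData, java.util.List.
      @GetInitialRestriction
      public OffsetRange getInitialRestriction(@Element FileIO.ReadableFile file) throws Exception {
        try (ParquetFileReader reader = getParquetFileReader(file)) {
          // One restriction unit per row group in the file.
          return new OffsetRange(0, reader.getRowGroups().size());
        }
      }

      @SplitRestriction
      public void splitRestriction(
          @Element FileIO.ReadableFile file,
          @Restriction OffsetRange restriction,
          OutputReceiver<OffsetRange> out)
          throws Exception {
        try (ParquetFileReader reader = getParquetFileReader(file)) {
          List<BlockMetaData> rowGroups = reader.getRowGroups();
          long start = restriction.getFrom();
          long bytesInSplit = 0;
          for (long i = restriction.getFrom(); i < restriction.getTo(); i++) {
            bytesInSplit += rowGroups.get((int) i).getTotalByteSize();
            // Emit a sub-range once it holds roughly SPLIT_LIMIT bytes of row groups.
            if (bytesInSplit >= SPLIT_LIMIT) {
              out.output(new OffsetRange(start, i + 1));
              start = i + 1;
              bytesInSplit = 0;
            }
          }
          if (start < restriction.getTo()) {
            out.output(new OffsetRange(start, restriction.getTo()));
          }
        }
      }

Since OffsetRange implements HasDefaultTracker, Beam can supply the RestrictionTracker<OffsetRange, Long> seen in the processElement signature without an explicit @NewTracker method.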
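For completeness, wiring the new flag into a pipeline would presumably look like the following. FileIO.match(), FileIO.readMatches(), and ParquetIO.readFiles(schema) are existing Beam APIs; withSplit() is the method added in this hunk, and the file pattern is a placeholder.

    PCollection<GenericRecord> records =
        pipeline
            .apply(FileIO.match().filepattern("/path/to/input-*.parquet"))
            .apply(FileIO.readMatches())
            .apply(ParquetIO.readFiles(schema).withSplit());

Because withSplit() only flips setSplittable(true), the expand() in the hunk routes the same input through SplitReadFn instead of ReadFn, so the two code paths stay interchangeable from the caller's side.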