Github user ppadma commented on a diff in the pull request: https://github.com/apache/drill/pull/789#discussion_r108693574 --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/BatchReader.java --- @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.drill.exec.store.parquet.columnreaders; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import com.google.common.base.Stopwatch; +import com.google.common.collect.Lists; + +/** + * Base strategy for reading a batch of Parquet records. 
+ */ +public abstract class BatchReader { + + protected final ReadState readState; + + public BatchReader(ReadState readState) { + this.readState = readState; + } + + public int readBatch() throws Exception { + ColumnReader<?> firstColumnStatus = readState.getFirstColumnStatus(); + long recordsToRead = Math.min(getReadCount(firstColumnStatus), readState.getRecordsToRead()); + int readCount = readRecords(firstColumnStatus, recordsToRead); + readState.fillNullVectors(readCount); + return readCount; + } + + protected abstract long getReadCount(ColumnReader<?> firstColumnStatus); + + protected abstract int readRecords(ColumnReader<?> firstColumnStatus, long recordsToRead) throws Exception; + + protected void readAllFixedFields(long recordsToRead) throws Exception { + Stopwatch timer = Stopwatch.createStarted(); + if(readState.useAsyncColReader()){ + readAllFixedFieldsParallel(recordsToRead); + } else { + readAllFixedFieldsSerial(recordsToRead); + } + readState.parquetReaderStats.timeFixedColumnRead.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS)); + } + + protected void readAllFixedFieldsSerial(long recordsToRead) throws IOException { + for (ColumnReader<?> crs : readState.getReaders()) { + crs.processPages(recordsToRead); + } + } + + protected void readAllFixedFieldsParallel(long recordsToRead) throws Exception { + ArrayList<Future<Long>> futures = Lists.newArrayList(); + for (ColumnReader<?> crs : readState.getReaders()) { + Future<Long> f = crs.processPagesAsync(recordsToRead); + futures.add(f); + } + Exception exception = null; + for(Future<Long> f: futures){ + if (exception != null) { + f.cancel(true); + } else { + try { + f.get(); + } catch (Exception e) { + f.cancel(true); + exception = e; + } + } + } + if (exception != null) { + throw exception; + } + } + + /** + * Strategy for reading mock records. (What are these?) 
+ */ + + public static class MockBatchReader extends BatchReader { + + public MockBatchReader(ReadState readState) { + super(readState); + } + + @Override + protected long getReadCount(ColumnReader<?> firstColumnStatus) { + if (readState.mockRecordsRead == readState.schema().getGroupRecordCount()) { + return 0; --- End diff -- How about moving mockRecordsRead to this class instead of keeping it in readState?
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastruct...@apache.org or file a JIRA ticket with INFRA. ---