Github user jackylk commented on a diff in the pull request:

    https://github.com/apache/incubator-carbondata/pull/265#discussion_r90249260
  
    --- Diff: core/src/main/java/org/apache/carbondata/core/carbon/datastore/chunk/reader/measure/CompressedMeasureChunkFileReader2.java ---
    @@ -0,0 +1,232 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing,
    + * software distributed under the License is distributed on an
    + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    + * KIND, either express or implied.  See the License for the
    + * specific language governing permissions and limitations
    + * under the License.
    + */
    +package org.apache.carbondata.core.carbon.datastore.chunk.reader.measure;
    +
    +import java.util.ArrayList;
    +import java.util.BitSet;
    +import java.util.List;
    +
    +import org.apache.carbondata.core.carbon.datastore.chunk.MeasureColumnDataChunk;
    +import org.apache.carbondata.core.carbon.metadata.blocklet.BlockletInfo;
    +import org.apache.carbondata.core.carbon.metadata.blocklet.datachunk.PresenceMeta;
    +import org.apache.carbondata.core.datastorage.store.FileHolder;
    +import org.apache.carbondata.core.datastorage.store.compression.SnappyCompression.SnappyByteCompression;
    +import org.apache.carbondata.core.datastorage.store.compression.ValueCompressionModel;
    +import org.apache.carbondata.core.datastorage.store.compression.ValueCompressonHolder.UnCompressValue;
    +import org.apache.carbondata.core.datastorage.store.dataholder.CarbonReadDataHolder;
    +import org.apache.carbondata.core.metadata.ValueEncoderMeta;
    +import org.apache.carbondata.core.util.CarbonUtil;
    +import org.apache.carbondata.format.DataChunk2;
    +
    +/**
    + * Class to read the measure column data for version 2
    + */
    +public class CompressedMeasureChunkFileReader2 extends AbstractMeasureChunkReader {
    +
    +  /**
    +   * measure column chunks offset
    +   */
    +  private List<Long> measureColumnChunkOffsets;
    +
    +  /**
    +   * measure column chunks length
    +   */
    +  private List<Short> measureColumnChunkLength;
    +
    +  /**
    +   * Constructor to get minimum parameter to create instance of this class
    +   *
    +   * @param blockletInfo BlockletInfo
    +   * @param filePath     file from which data will be read
    +   */
    +  public CompressedMeasureChunkFileReader2(final BlockletInfo blockletInfo,
    +      final String filePath) {
    +    super(filePath);
    +    this.measureColumnChunkOffsets = blockletInfo.getMeasureChunkOffsets();
    +    this.measureColumnChunkLength = blockletInfo.getMeasureChunksLength();
    +  }
    +
    +  /**
    +   * Below method will be used to convert the thrift presence meta to wrapper
    +   * presence meta
    +   *
    +   * @param presentMetadataThrift thrift presence meta
    +   * @return wrapper presence meta
    +   */
    +  private static PresenceMeta getPresenceMeta(
    +      org.apache.carbondata.format.PresenceMeta presentMetadataThrift) {
    +    PresenceMeta presenceMeta = new PresenceMeta();
    +    presenceMeta.setRepresentNullValues(presentMetadataThrift.isRepresents_presence());
    +    presenceMeta.setBitSet(BitSet.valueOf(
    +        SnappyByteCompression.INSTANCE.unCompress(presentMetadataThrift.getPresent_bit_stream())));
    +    return presenceMeta;
    +  }
    +
    +  /**
    +   * Below method will be used to read the chunks based on block indexes.
    +   * Reading logic of this method:
    +   * except for the last column, all the column chunks can be read in groups,
    +   * so if a range does not end at the last column, the data of all the columns
    +   * in that block index range is read together and then processed.
    +   * The last column is read and processed separately.
    +   *
    +   * @param fileReader   file reader to read the blocks from file
    +   * @param blockIndexes blocks range to be read
    +   * @return measure column chunks
    +   */
    +  public MeasureColumnDataChunk[] readMeasureChunks(FileHolder fileReader, int[][] blockIndexes) {
    +    // read the column chunk based on block index and add
    +    MeasureColumnDataChunk[] dataChunks =
    +        new MeasureColumnDataChunk[measureColumnChunkOffsets.size()];
    +    if (blockIndexes.length == 0) {
    +      return dataChunks;
    +    }
    +    MeasureColumnDataChunk[] groupChunk = null;
    +    int index = 0;
    +    for (int i = 0; i < blockIndexes.length - 1; i++) {
    +      index = 0;
    +      groupChunk = readMeasureChunksInGroup(fileReader, blockIndexes[i][0], blockIndexes[i][1]);
    +      for (int j = blockIndexes[i][0]; j <= blockIndexes[i][1]; j++) {
    +        dataChunks[j] = groupChunk[index++];
    +      }
    +    }
    +    if (blockIndexes[blockIndexes.length - 1][0] == measureColumnChunkOffsets.size() - 1) {
    +      dataChunks[blockIndexes[blockIndexes.length - 1][0]] =
    +          readMeasureChunk(fileReader, blockIndexes[blockIndexes.length - 1][0]);
    +    } else {
    +      groupChunk = readMeasureChunksInGroup(fileReader, blockIndexes[blockIndexes.length - 1][0],
    +          blockIndexes[blockIndexes.length - 1][1]);
    +      index = 0;
    +      for (int j = blockIndexes[blockIndexes.length - 1][0];
    +           j <= blockIndexes[blockIndexes.length - 1][1]; j++) {
    +        dataChunks[j] = groupChunk[index++];
    +      }
    +    }
    +    return dataChunks;
    +  }
    +
    +  /**
    +   * Method to read the blocks data based on block index
    +   *
    +   * @param fileReader file reader to read the blocks
    +   * @param blockIndex block to be read
    +   * @return measure data chunk
    +   */
    +  @Override public MeasureColumnDataChunk readMeasureChunk(FileHolder fileReader, int blockIndex) {
    +    MeasureColumnDataChunk datChunk = new MeasureColumnDataChunk();
    +    DataChunk2 measureColumnChunk = null;
    +    byte[] measureDataChunk = null;
    +    byte[] data = null;
    +    byte[] dataPage = null;
    +    if (measureColumnChunkOffsets.size() - 1 == blockIndex) {
    +      measureDataChunk = fileReader
    --- End diff --
    
    move fileReader to next line
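
    For example, a minimal sketch of the suggested formatting (the call after
    fileReader is cut off at the end of the diff above, so the readByteArray
    call and its arguments here are assumptions for illustration):

        // current: the receiver dangles at the end of the assignment
        measureDataChunk = fileReader
            .readByteArray(filePath, measureColumnChunkOffsets.get(blockIndex),
                measureColumnChunkLength.get(blockIndex));

        // suggested: move fileReader to the next line together with the call
        measureDataChunk =
            fileReader.readByteArray(filePath, measureColumnChunkOffsets.get(blockIndex),
                measureColumnChunkLength.get(blockIndex));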

