GSharayu commented on a change in pull request #6876:
URL: https://github.com/apache/incubator-pinot/pull/6876#discussion_r626743217
##########
File path: pinot-core/src/test/java/org/apache/pinot/queries/CompressionCodecQueriesTest.java
##########
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.queries;
+
+import java.io.File;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.pinot.core.operator.blocks.IntermediateResultsBlock;
+import org.apache.pinot.core.operator.query.SelectionOnlyOperator;
+import org.apache.pinot.segment.local.indexsegment.immutable.ImmutableSegmentLoader;
+import org.apache.pinot.segment.local.segment.creator.impl.SegmentIndexCreationDriverImpl;
+import org.apache.pinot.segment.local.segment.index.loader.IndexLoadingConfig;
+import org.apache.pinot.segment.local.segment.readers.GenericRowRecordReader;
+import org.apache.pinot.segment.spi.ImmutableSegment;
+import org.apache.pinot.segment.spi.IndexSegment;
+import org.apache.pinot.segment.spi.creator.SegmentGeneratorConfig;
+import org.apache.pinot.spi.config.table.FieldConfig;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.data.readers.GenericRow;
+import org.apache.pinot.spi.data.readers.RecordReader;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+
+/**
+ * Functional tests for compression type feature.
+ * The tests use three kinds of input data
+ * (1) string
+ * (2) integer
+ * (3) long
+ */
+public class CompressionCodecQueriesTest extends BaseQueriesTest {
+  private static final File INDEX_DIR = new File(FileUtils.getTempDirectory(), "CompressionCodecQueriesTest");
+  private static final String TABLE_NAME = "MyTable";
+  private static final String SEGMENT_NAME = "testSegment";
+
+  private static final String SNAPPY_STRING = "SNAPPY_STRING";
+  private static final String PASS_THROUGH_STRING = "PASS_THROUGH_STRING";
+  private static final String ZSTANDARD_STRING = "ZSTANDARD_STRING";
+
+  private static final String SNAPPY_LONG = "SNAPPY_LONG";
+  private static final String PASS_THROUGH_LONG = "PASS_THROUGH_LONG";
+  private static final String ZSTANDARD_LONG = "ZSTANDARD_LONG";
+
+  private static final String SNAPPY_INTEGER = "SNAPPY_INTEGER";
+  private static final String PASS_THROUGH_INTEGER = "PASS_THROUGH_INTEGER";
+  private static final String ZSTANDARD_INTEGER = "ZSTANDARD_INTEGER";
+
+  private static final List<String> RAW_SNAPPY_INDEX_COLUMNS = Arrays
+      .asList(SNAPPY_STRING, SNAPPY_LONG, SNAPPY_INTEGER);
+
+  private static final List<String> RAW_ZSTANDARD_INDEX_COLUMNS = Arrays
+      .asList(ZSTANDARD_STRING, ZSTANDARD_LONG, ZSTANDARD_INTEGER);
+
+  private static final List<String> RAW_PASS_THROUGH_INDEX_COLUMNS = Arrays
+      .asList(PASS_THROUGH_STRING, PASS_THROUGH_LONG, PASS_THROUGH_INTEGER);
+
+  private final List<GenericRow> _rows = new ArrayList<>();
+
+  private IndexSegment _indexSegment;
+  private List<IndexSegment> _indexSegments;
+  private List<GenericRow> rows;
+
+  @Override
+  protected String getFilter() {
+    return "";
+  }
+
+  @Override
+  protected IndexSegment getIndexSegment() {
+    return _indexSegment;
+  }
+
+  @Override
+  protected List<IndexSegment> getIndexSegments() {
+    return _indexSegments;
+  }
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    FileUtils.deleteQuietly(INDEX_DIR);
+
+    buildSegment();
+    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig();
+    Set<String> indexColumns = new HashSet<>();
+    indexColumns.addAll(RAW_SNAPPY_INDEX_COLUMNS);
+    indexColumns.addAll(RAW_PASS_THROUGH_INDEX_COLUMNS);
+    indexColumns.addAll(RAW_ZSTANDARD_INDEX_COLUMNS);
+
+    indexLoadingConfig.getNoDictionaryColumns().addAll(indexColumns);
+    ImmutableSegment immutableSegment =
+        ImmutableSegmentLoader.load(new File(INDEX_DIR, SEGMENT_NAME), indexLoadingConfig);
+    _indexSegment = immutableSegment;
+    _indexSegments = Arrays.asList(immutableSegment, immutableSegment);
+  }
+
+  @AfterClass
+  public void tearDown() {
+    _indexSegment.destroy();
+    FileUtils.deleteQuietly(INDEX_DIR);
+  }
+
+  private void buildSegment()
+      throws Exception {
+    rows = createTestData();
+
+    List<FieldConfig> fieldConfigs = new ArrayList<>(
+        RAW_SNAPPY_INDEX_COLUMNS.size() + RAW_ZSTANDARD_INDEX_COLUMNS.size() + RAW_PASS_THROUGH_INDEX_COLUMNS.size());
+    for (String indexColumn : RAW_SNAPPY_INDEX_COLUMNS) {
+      fieldConfigs.add(new FieldConfig(indexColumn, FieldConfig.EncodingType.RAW, null, null,
+          FieldConfig.NoDictionaryColumnCompressorCodec.SNAPPY));
+    }
+
+    for (String indexColumn : RAW_ZSTANDARD_INDEX_COLUMNS) {
+      fieldConfigs.add(new FieldConfig(indexColumn, FieldConfig.EncodingType.RAW, null, null,
+          FieldConfig.NoDictionaryColumnCompressorCodec.ZSTANDARD));
+    }
+
+    for (String indexColumn : RAW_PASS_THROUGH_INDEX_COLUMNS) {
+      fieldConfigs.add(new FieldConfig(indexColumn, FieldConfig.EncodingType.RAW, null, null,
+          FieldConfig.NoDictionaryColumnCompressorCodec.PASS_THROUGH));
+    }
+
+    List<String> _noDictionaryColumns = new ArrayList<>();
+    _noDictionaryColumns.addAll(RAW_SNAPPY_INDEX_COLUMNS);
+    _noDictionaryColumns.addAll(RAW_ZSTANDARD_INDEX_COLUMNS);
+    _noDictionaryColumns.addAll(RAW_PASS_THROUGH_INDEX_COLUMNS);
+
+    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
+        .setNoDictionaryColumns(_noDictionaryColumns)
+        .setFieldConfigList(fieldConfigs).build();
+    Schema schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
+        .addSingleValueDimension(SNAPPY_STRING, FieldSpec.DataType.STRING)
+        .addSingleValueDimension(PASS_THROUGH_STRING, FieldSpec.DataType.STRING)
+        .addSingleValueDimension(ZSTANDARD_STRING, FieldSpec.DataType.STRING)
+        .addSingleValueDimension(SNAPPY_INTEGER, FieldSpec.DataType.INT)
+        .addSingleValueDimension(ZSTANDARD_INTEGER, FieldSpec.DataType.INT)
+        .addSingleValueDimension(PASS_THROUGH_INTEGER, FieldSpec.DataType.INT)
+        .addSingleValueDimension(SNAPPY_LONG, FieldSpec.DataType.LONG)
+        .addSingleValueDimension(ZSTANDARD_LONG, FieldSpec.DataType.LONG)
+        .addSingleValueDimension(PASS_THROUGH_LONG, FieldSpec.DataType.LONG)
+        .build();
+    SegmentGeneratorConfig config = new SegmentGeneratorConfig(tableConfig, schema);
+    config.setOutDir(INDEX_DIR.getPath());
+    config.setTableName(TABLE_NAME);
+    config.setSegmentName(SEGMENT_NAME);
+    SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
+    try (RecordReader recordReader = new GenericRowRecordReader(rows)) {
+      driver.init(config, recordReader);
+      driver.build();
+    }
+  }
+
+  private List<GenericRow> createTestData()
+      throws Exception {
+    List<GenericRow> rows = new ArrayList<>();
+
+    // Generate random data
+    int rowLength = 1000;
+    Random random = new Random();
+    String[] tempStringRows = new String[rowLength];
+    Integer[] tempIntRows = new Integer[rowLength];
+    Long[] tempLongRows = new Long[rowLength];
+
+    for (int i = 0; i < rowLength; i++) {
+      tempStringRows[i] = RandomStringUtils.random(random.nextInt(100), true, true);
+      tempIntRows[i] = RandomUtils.nextInt();
+      tempLongRows[i] = RandomUtils.nextLong();
+    }
+
+    for (int i = 0; i < rowLength; i++) {
+      GenericRow row = new GenericRow();
+      row.putValue(SNAPPY_STRING, tempStringRows[i]);
+      row.putValue(ZSTANDARD_STRING, tempStringRows[i]);
+      row.putValue(PASS_THROUGH_STRING, tempStringRows[i]);
+      row.putValue(SNAPPY_INTEGER, tempIntRows[i]);
+      row.putValue(ZSTANDARD_INTEGER, tempIntRows[i]);
+      row.putValue(PASS_THROUGH_INTEGER, tempIntRows[i]);
+      row.putValue(SNAPPY_LONG, tempLongRows[i]);
+      row.putValue(ZSTANDARD_LONG, tempLongRows[i]);
+      row.putValue(PASS_THROUGH_LONG, tempLongRows[i]);
+      rows.add(row);
+    }
+    return rows;
+  }
+
+  /**
+   * Tests for basic compression codec queries.
+   */
+  @Test
+  public void testQueriesWithSnappyCompressionCodec()
+      throws Exception {
+
+    String query =
+        "SELECT SNAPPY_STRING, ZSTANDARD_STRING, PASS_THROUGH_STRING, SNAPPY_INTEGER, ZSTANDARD_INTEGER, PASS_THROUGH_INTEGER, "
+            + "SNAPPY_LONG, ZSTANDARD_LONG, PASS_THROUGH_LONG FROM MyTable LIMIT 1000";
+    ArrayList<Serializable[]> expected = new ArrayList<>();
+
+    for (GenericRow row : rows) {
+      expected.add(new Serializable[]{
+          String.valueOf(row.getValue(SNAPPY_STRING)), String.valueOf(row.getValue(ZSTANDARD_STRING)),
+          String.valueOf(row.getValue(PASS_THROUGH_STRING)),
+          (Integer) row.getValue(SNAPPY_INTEGER), (Integer) row.getValue(ZSTANDARD_INTEGER),
+          (Integer) row.getValue(PASS_THROUGH_INTEGER),
+          (Long) row.getValue(SNAPPY_LONG), (Long) row.getValue(ZSTANDARD_LONG),
+          (Long) row.getValue(PASS_THROUGH_LONG),
+      });
+    }
+    testSelectQueryHelper(query, expected.size(), expected);
+  }
+
+  /*
+   * Helper methods for tests
+   */
+  private void testSelectQueryHelper(String query, int expectedResultSize, List<Serializable[]> expectedResults)
+      throws Exception {
+    SelectionOnlyOperator operator = getOperatorForPqlQuery(query);

Review comment: done!

##########
File path: pinot-segment-local/src/main/java/org/apache/pinot/segment/local/utils/TableConfigUtils.java
##########
@@ -573,6 +573,9 @@ private static void validateFieldConfigList(@Nullable List<FieldConfig> fieldCon
           Preconditions.checkArgument(!noDictionaryColumns.contains(columnName),
               "FieldConfig encoding type is different from indexingConfig for column: " + columnName);
         }
+        Preconditions.checkArgument(fieldConfig.getNoDictionaryColumnCompressorCodec() == null,
+            "FieldConfig column compression codec is only supported for single value raw encoding type");

Review comment: done!
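As an illustration of the check added above (not code from this PR): a field config along the following lines should now be rejected, because the column keeps its dictionary (it is not listed in noDictionaryColumns) yet still asks for a compression codec. The column name is made up, the constructor call mirrors the test file above, and it is only an assumption that validation is driven through TableConfigUtils.validate(tableConfig, schema).

import java.util.Collections;
import org.apache.pinot.spi.config.table.FieldConfig;
import org.apache.pinot.spi.config.table.TableConfig;
import org.apache.pinot.spi.config.table.TableType;
import org.apache.pinot.spi.utils.builder.TableConfigBuilder;

public class CompressionCodecValidationSketch {
  public static void main(String[] args) {
    // Hypothetical dictionary-encoded column that still requests a compression codec.
    FieldConfig fieldConfig = new FieldConfig("myDictColumn", FieldConfig.EncodingType.DICTIONARY, null, null,
        FieldConfig.NoDictionaryColumnCompressorCodec.ZSTANDARD);
    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("MyTable")
        .setFieldConfigList(Collections.singletonList(fieldConfig)).build();
    // Assumption: TableConfigUtils.validate(tableConfig, schema) fails this config with
    // "FieldConfig column compression codec is only supported for single value raw encoding type".
  }
}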
##########
File path: pinot-spi/src/main/java/org/apache/pinot/spi/config/table/FieldConfig.java
##########
@@ -69,6 +72,10 @@ public FieldConfig(@JsonProperty(value = "name", required = true) String name,
     INVERTED, SORTED, TEXT, FST, H3
   }
 
+  public enum NoDictionaryColumnCompressorCodec {

Review comment: done!
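Before the decompressor hunk below, a small standalone round trip through the zstd-jni ByteBuffer API may help explain the flip() call it ends with: Zstd.compress and Zstd.decompress write into the destination buffer and advance its position, so the buffer has to be flipped before the bytes can be read back. The class name, buffer sizes, and compression level here are illustrative only.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import com.github.luben.zstd.Zstd;

public class ZstdRoundTripSketch {
  public static void main(String[] args) {
    byte[] input = "hello zstandard".getBytes(StandardCharsets.UTF_8);

    // zstd-jni's ByteBuffer overloads expect direct buffers.
    ByteBuffer src = ByteBuffer.allocateDirect(input.length);
    src.put(input);
    src.flip();

    ByteBuffer compressed = ByteBuffer.allocateDirect((int) Zstd.compressBound(input.length));
    Zstd.compress(compressed, src, 3); // level 3 chosen arbitrarily for this sketch
    compressed.flip();                 // make the compressed bytes readable

    ByteBuffer decompressed = ByteBuffer.allocateDirect(input.length);
    int decompressedSize = Zstd.decompress(decompressed, compressed);
    decompressed.flip();               // same idea as the flip() in ZstandardDecompressor

    byte[] out = new byte[decompressedSize];
    decompressed.get(out);
    System.out.println(new String(out, StandardCharsets.UTF_8));
  }
}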
##########
File path: pinot-segment-local/src/main/java/org/apache/pinot/segment/local/io/compression/ZstandardDecompressor.java
##########
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.segment.local.io.compression;
+
+import com.github.luben.zstd.Zstd;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import org.apache.pinot.segment.spi.compression.ChunkDecompressor;
+
+/**
+ * Implementation of {@link ChunkDecompressor} using Zstandard (Zstd).
+ */
+public class ZstandardDecompressor implements ChunkDecompressor {
+  @Override
+  public int decompress(ByteBuffer compressedInput, ByteBuffer decompressedOutput)
+      throws IOException {
+    int decompressedSize = Zstd.decompress(decompressedOutput, compressedInput);
+
+    // Make the output ByteBuffer ready for read.
+    decompressedOutput.flip();

Review comment: updated!


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
