vvivekiyer commented on code in PR #9454:
URL: https://github.com/apache/pinot/pull/9454#discussion_r982966903
##########
pinot-segment-local/src/test/java/org/apache/pinot/segment/local/segment/index/loader/SegmentPreProcessorTest.java:
##########
@@ -450,6 +482,15 @@ private void checkIndexCreation(ColumnIndexType indexType, String column, int ca
SegmentDirectory.Reader reader = segmentDirectory1.createReader()) {
assertTrue(reader.hasIndexFor(column, indexType));
assertTrue(reader.hasIndexFor(column, ColumnIndexType.FORWARD_INDEX));
+
+ // Check if the raw forward index compressionType is correct.
Review Comment:
Added validation for index_map to check that exactly one entry exists. It's
not possible to check whether newOffset > oldOffset because the index_map is
rewritten in alphabetical order when segmentDirectory.close() is called.
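For reference, a minimal sketch of the kind of index_map check described above
(a hypothetical standalone helper, not the code added in this PR; it assumes the
v3 index_map file stores one `<column>.forward_index.startOffset = ...` line per
entry):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public final class IndexMapCheck {
  // Counts forward index entries for a column by scanning the v3 index_map file.
  // Assumes one "<column>.forward_index.startOffset = ..." line per entry.
  static long countForwardIndexEntries(Path indexMapFile, String column)
      throws IOException {
    List<String> lines = Files.readAllLines(indexMapFile, StandardCharsets.UTF_8);
    return lines.stream()
        .filter(line -> line.trim().startsWith(column + ".forward_index.startOffset"))
        .count();
  }

  public static void main(String[] args) throws IOException {
    long entries = countForwardIndexEntries(Path.of(args[0]), args[1]);
    if (entries != 1) {
      throw new IllegalStateException(
          "Expected exactly one forward index entry for " + args[1] + ", found " + entries);
    }
    System.out.println("index_map has exactly one forward index entry for " + args[1]);
  }
}
```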
##########
pinot-segment-local/src/test/java/org/apache/pinot/segment/local/segment/index/loader/SegmentPreProcessorTest.java:
##########
@@ -304,6 +306,28 @@ public void testEnableFSTIndexOnExistingColumnDictEncoded()
checkFSTIndexCreation(EXISTING_STRING_COL_DICT, 9, 4, _newColumnsSchemaWithFST, false, false, 26);
}
+ @Test
+ public void testForwardIndexHandler()
+ throws Exception {
+ Map<String, ChunkCompressionType> compressionConfigs = new HashMap<>();
+ ChunkCompressionType newCompressionType = ChunkCompressionType.ZSTANDARD;
+ compressionConfigs.put(EXISTING_STRING_COL_RAW, newCompressionType);
+ _indexLoadingConfig.setCompressionConfigs(compressionConfigs);
+ _indexLoadingConfig.setNoDictionaryColumns(new HashSet<String>() {{
+ add(EXISTING_STRING_COL_RAW);
+ }});
+
+ // Test1: Rewriting forward index will be a no-op for v1 segments. Default LZ4 compressionType will be retained.
+ constructV1Segment();
+ checkForwardIndexCreation(EXISTING_STRING_COL_RAW, 5, 3, _schema, false, false, false, 0, ChunkCompressionType.LZ4);
+
+ // Convert the segment to V3.
+ new SegmentV1V2ToV3FormatConverter().convert(_indexDir);
+
+ // Test2: Now forward index will be rewritten with ZSTANDARD compressionType.
Review Comment:
Added these tests. We verify various metadata after segmentPreprocessor.process(),
which confirms that the existing handlers work alongside ForwardIndexHandler.
However, the test doesn't issue queries to validate the rewritten data. If you feel
that's needed, I can add a new test in OfflineClusterIntegrationTest that wires up
querying and result checking.
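For what it's worth, a rough sketch of the query-level check such an integration
test could wire up (the broker address, table, column, and filter value below are
placeholders; this goes through the broker's standard /query/sql HTTP endpoint
rather than the integration-test helpers):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public final class QueryValidationSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder broker address; an integration test would use the cluster it spins up.
    String brokerUrl = "http://localhost:8099/query/sql";
    // Placeholder query against a value known to exist before the compression change.
    String body = "{\"sql\": \"SELECT COUNT(*) FROM myTable WHERE myColumn = 'testRow'\"}";
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(brokerUrl))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build();
    HttpResponse<String> response =
        HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
    // A real test would parse the JSON response and assert the count matches the value
    // observed before the forward index was rewritten.
    System.out.println(response.body());
  }
}
```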
##########
pinot-segment-local/src/test/java/org/apache/pinot/segment/local/segment/index/loader/ForwardIndexHandlerTest.java:
##########
@@ -0,0 +1,432 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.segment.local.segment.index.loader;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import org.apache.commons.io.FileUtils;
+import org.apache.pinot.segment.local.segment.creator.impl.SegmentIndexCreationDriverImpl;
+import org.apache.pinot.segment.local.segment.readers.GenericRowRecordReader;
+import org.apache.pinot.segment.local.segment.readers.PinotSegmentColumnReader;
+import org.apache.pinot.segment.local.segment.store.SegmentLocalFSDirectory;
+import org.apache.pinot.segment.spi.ColumnMetadata;
+import org.apache.pinot.segment.spi.V1Constants;
+import org.apache.pinot.segment.spi.compression.ChunkCompressionType;
+import org.apache.pinot.segment.spi.creator.IndexCreatorProvider;
+import org.apache.pinot.segment.spi.creator.SegmentGeneratorConfig;
+import org.apache.pinot.segment.spi.index.IndexingOverrides;
+import org.apache.pinot.segment.spi.index.metadata.SegmentMetadataImpl;
+import org.apache.pinot.segment.spi.index.reader.ForwardIndexReader;
+import org.apache.pinot.segment.spi.store.SegmentDirectory;
+import org.apache.pinot.spi.config.table.FieldConfig;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.data.readers.GenericRow;
+import org.apache.pinot.spi.data.readers.RecordReader;
+import org.apache.pinot.spi.utils.ReadMode;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.assertEquals;
+
+
+public class ForwardIndexHandlerTest {
+ private static final File INDEX_DIR = new File(FileUtils.getTempDirectory(), "ForwardIndexHandlerTest");
+ private static final String TABLE_NAME = "myTable";
+ private static final String SEGMENT_NAME = "testSegment";
+
+ // TODO:
+ // 1. Add other datatypes (double, float, bigdecimal, bytes). Also add MV columns.
+ // 2. Add text index and other index types for raw columns.
+ private static final String SNAPPY_STRING = "SNAPPY_STRING";
+ private static final String PASS_THROUGH_STRING = "PASS_THROUGH_STRING";
+ private static final String ZSTANDARD_STRING = "ZSTANDARD_STRING";
+ private static final String LZ4_STRING = "LZ4_STRING";
+
+ private static final String SNAPPY_LONG = "SNAPPY_LONG";
+ private static final String PASS_THROUGH_LONG = "PASS_THROUGH_LONG";
+ private static final String ZSTANDARD_LONG = "ZSTANDARD_LONG";
+ private static final String LZ4_LONG = "LZ4_LONG";
+
+ private static final String SNAPPY_INTEGER = "SNAPPY_INTEGER";
+ private static final String PASS_THROUGH_INTEGER = "PASS_THROUGH_INTEGER";
+ private static final String ZSTANDARD_INTEGER = "ZSTANDARD_INTEGER";
+ private static final String LZ4_INTEGER = "LZ4_INTEGER";
+
+ private static final String DICT_INTEGER = "DICT_INTEGER";
+ private static final String DICT_STRING = "DICT_STRING";
+ private static final String DICT_LONG = "DICT_LONG";
+
+ private static final List<String> RAW_SNAPPY_INDEX_COLUMNS =
+ Arrays.asList(SNAPPY_STRING, SNAPPY_LONG, SNAPPY_INTEGER);
+
+ private static final List<String> RAW_ZSTANDARD_INDEX_COLUMNS =
+ Arrays.asList(ZSTANDARD_STRING, ZSTANDARD_LONG, ZSTANDARD_INTEGER);
+
+ private static final List<String> RAW_PASS_THROUGH_INDEX_COLUMNS =
+ Arrays.asList(PASS_THROUGH_STRING, PASS_THROUGH_LONG, PASS_THROUGH_INTEGER);
+
+ private static final List<String> RAW_LZ4_INDEX_COLUMNS = Arrays.asList(LZ4_STRING, LZ4_LONG, LZ4_INTEGER);
+
+ private final List<String> _noDictionaryColumns = new ArrayList<>();
+ TableConfig _tableConfig;
+ Schema _schema;
+ private SegmentMetadataImpl _existingSegmentMetadata;
+ private SegmentDirectory.Writer _writer;
+ private List<FieldConfig.CompressionCodec> _allCompressionTypes =
+ Arrays.asList(FieldConfig.CompressionCodec.values());
+
+ @BeforeClass
+ public void setUp()
+ throws Exception {
+ // Delete the index directory if it already exists.
+ FileUtils.deleteQuietly(INDEX_DIR);
+
+ buildSegment();
+ }
+
+ private void buildSegment()
+ throws Exception {
+ List<GenericRow> rows = createTestData();
+
+ List<FieldConfig> fieldConfigs = new ArrayList<>(
+ RAW_SNAPPY_INDEX_COLUMNS.size() + RAW_ZSTANDARD_INDEX_COLUMNS.size() + RAW_PASS_THROUGH_INDEX_COLUMNS.size()
+ + RAW_LZ4_INDEX_COLUMNS.size());
+
+ for (String indexColumn : RAW_SNAPPY_INDEX_COLUMNS) {
+ fieldConfigs.add(new FieldConfig(indexColumn, FieldConfig.EncodingType.RAW, Collections.emptyList(),
+ FieldConfig.CompressionCodec.SNAPPY, null));
+ }
+
+ for (String indexColumn : RAW_ZSTANDARD_INDEX_COLUMNS) {
+ fieldConfigs.add(new FieldConfig(indexColumn, FieldConfig.EncodingType.RAW, Collections.emptyList(),
+ FieldConfig.CompressionCodec.ZSTANDARD, null));
+ }
+
+ for (String indexColumn : RAW_PASS_THROUGH_INDEX_COLUMNS) {
+ fieldConfigs.add(new FieldConfig(indexColumn, FieldConfig.EncodingType.RAW, Collections.emptyList(),
+ FieldConfig.CompressionCodec.PASS_THROUGH, null));
+ }
+
+ for (String indexColumn : RAW_LZ4_INDEX_COLUMNS) {
+ fieldConfigs.add(new FieldConfig(indexColumn, FieldConfig.EncodingType.RAW, Collections.emptyList(),
+ FieldConfig.CompressionCodec.LZ4, null));
+ }
+
+ _noDictionaryColumns.addAll(RAW_SNAPPY_INDEX_COLUMNS);
+ _noDictionaryColumns.addAll(RAW_ZSTANDARD_INDEX_COLUMNS);
+ _noDictionaryColumns.addAll(RAW_PASS_THROUGH_INDEX_COLUMNS);
+ _noDictionaryColumns.addAll(RAW_LZ4_INDEX_COLUMNS);
+
+ _tableConfig =
+ new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setNoDictionaryColumns(_noDictionaryColumns)
+ .setFieldConfigList(fieldConfigs).build();
+ _schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
+ .addSingleValueDimension(SNAPPY_STRING, FieldSpec.DataType.STRING)
+ .addSingleValueDimension(PASS_THROUGH_STRING, FieldSpec.DataType.STRING)
+ .addSingleValueDimension(ZSTANDARD_STRING, FieldSpec.DataType.STRING)
+ .addSingleValueDimension(LZ4_STRING, FieldSpec.DataType.STRING)
+ .addSingleValueDimension(SNAPPY_INTEGER, FieldSpec.DataType.INT)
+ .addSingleValueDimension(ZSTANDARD_INTEGER, FieldSpec.DataType.INT)
+ .addSingleValueDimension(PASS_THROUGH_INTEGER, FieldSpec.DataType.INT)
+ .addSingleValueDimension(LZ4_INTEGER, FieldSpec.DataType.INT)
+ .addSingleValueDimension(SNAPPY_LONG, FieldSpec.DataType.LONG)
+ .addSingleValueDimension(ZSTANDARD_LONG, FieldSpec.DataType.LONG)
+ .addSingleValueDimension(PASS_THROUGH_LONG, FieldSpec.DataType.LONG)
+ .addSingleValueDimension(LZ4_LONG, FieldSpec.DataType.LONG)
+ .addSingleValueDimension(DICT_INTEGER, FieldSpec.DataType.INT)
+ .addSingleValueDimension(DICT_LONG, FieldSpec.DataType.LONG)
+ .addSingleValueDimension(DICT_STRING, FieldSpec.DataType.STRING).build();
+
+ SegmentGeneratorConfig config = new SegmentGeneratorConfig(_tableConfig, _schema);
+ config.setOutDir(INDEX_DIR.getPath());
+ config.setTableName(TABLE_NAME);
+ config.setSegmentName(SEGMENT_NAME);
+ SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
+ try (RecordReader recordReader = new GenericRowRecordReader(rows)) {
+ driver.init(config, recordReader);
+ driver.build();
+ }
+
+ File segmentDirectory = new File(INDEX_DIR, driver.getSegmentName());
+ _existingSegmentMetadata = new SegmentMetadataImpl(segmentDirectory);
+ _writer = new SegmentLocalFSDirectory(segmentDirectory, _existingSegmentMetadata, ReadMode.mmap).createWriter();
+ }
+
+ private List<GenericRow> createTestData() {
+ List<GenericRow> rows = new ArrayList<>();
+
+ //Generate random data
+ int rowLength = 1000;
+ Random random = new Random();
+ String[] tempStringRows = new String[rowLength];
+ Integer[] tempIntRows = new Integer[rowLength];
+ Long[] tempLongRows = new Long[rowLength];
+
+ for (int i = 0; i < rowLength; i++) {
+ //Adding a fixed value to check for filter queries
+ if (i % 10 == 0) {
+ tempStringRows[i] = "testRow";
+ tempIntRows[i] = 1001;
+ tempLongRows[i] = 1001L;
+ } else {
+ tempStringRows[i] = "n" + i;
+ tempIntRows[i] = i;
+ tempLongRows[i] = (long) i;
+ }
+ }
+
+ for (int i = 0; i < rowLength; i++) {
+ GenericRow row = new GenericRow();
+
+ // Raw String columns
+ row.putValue(SNAPPY_STRING, tempStringRows[i]);
+ row.putValue(ZSTANDARD_STRING, tempStringRows[i]);
+ row.putValue(PASS_THROUGH_STRING, tempStringRows[i]);
+ row.putValue(LZ4_STRING, tempStringRows[i]);
+
+ // Raw integer columns
+ row.putValue(SNAPPY_INTEGER, tempIntRows[i]);
+ row.putValue(ZSTANDARD_INTEGER, tempIntRows[i]);
+ row.putValue(PASS_THROUGH_INTEGER, tempIntRows[i]);
+ row.putValue(LZ4_INTEGER, tempIntRows[i]);
+
+ // Raw long columns
+ row.putValue(SNAPPY_LONG, tempLongRows[i]);
+ row.putValue(ZSTANDARD_LONG, tempLongRows[i]);
+ row.putValue(PASS_THROUGH_LONG, tempLongRows[i]);
+ row.putValue(LZ4_LONG, tempLongRows[i]);
+
+ // Dictionary columns
+ row.putValue(DICT_INTEGER, tempIntRows[i]);
+ row.putValue(DICT_LONG, tempLongRows[i]);
+ row.putValue(DICT_STRING, tempStringRows[i]);
+
+ rows.add(row);
+ }
+ return rows;
+ }
+
+ @Test
+ public void testComputeOperation() throws Exception {
+ // TEST1 : Validate with zero changes.
+ IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
+ ForwardIndexHandler fwdIndexHandler =
+ new ForwardIndexHandler(_existingSegmentMetadata, indexLoadingConfig);
+ Map<String, ForwardIndexHandler.Operation> operationMap = new HashMap<>();
+ operationMap = fwdIndexHandler.computeOperation(_writer);
+ assertEquals(operationMap, Collections.EMPTY_MAP);
+
+ // TEST2: Enable dictionary for a RAW_ZSTANDARD_INDEX_COLUMN
+ indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
+ indexLoadingConfig.getNoDictionaryColumns().remove(ZSTANDARD_STRING);
+ fwdIndexHandler = new ForwardIndexHandler(_existingSegmentMetadata, indexLoadingConfig);
+ operationMap = fwdIndexHandler.computeOperation(_writer);
+ assertEquals(operationMap, Collections.EMPTY_MAP);
+
+ // TEST3: Disable dictionary
+ indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
+ indexLoadingConfig.getNoDictionaryColumns().add(DICT_INTEGER);
+ fwdIndexHandler = new ForwardIndexHandler(_existingSegmentMetadata, indexLoadingConfig);
+ operationMap = fwdIndexHandler.computeOperation(_writer);
+ assertEquals(operationMap, Collections.EMPTY_MAP);
+
+ // TEST4: Add random index
+ indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
+ indexLoadingConfig.getTextIndexColumns().add(DICT_INTEGER);
+ indexLoadingConfig.getTextIndexColumns().add(LZ4_INTEGER);
+ fwdIndexHandler = new ForwardIndexHandler(_existingSegmentMetadata, indexLoadingConfig);
+ operationMap = fwdIndexHandler.computeOperation(_writer);
+ assertEquals(operationMap, Collections.EMPTY_MAP);
Review Comment:
Done.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]