zhangyue19921010 commented on code in PR #13365:
URL: https://github.com/apache/hudi/pull/13365#discussion_r2124195103
##########
hudi-hadoop-common/src/test/java/org/apache/hudi/parquet/io/TestHoodieParquetFileRewriter.java:
##########
@@ -0,0 +1,459 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.parquet.io;
+
+import org.apache.hudi.io.storage.rewrite.HoodieFileMetadataMerger;
+import org.apache.hudi.storage.StoragePath;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.parquet.HadoopReadOptions;
+import org.apache.parquet.Version;
+import org.apache.parquet.column.ParquetProperties;
+import org.apache.parquet.example.data.Group;
+import org.apache.parquet.example.data.simple.SimpleGroup;
+import org.apache.parquet.format.DataPageHeader;
+import org.apache.parquet.format.DataPageHeaderV2;
+import org.apache.parquet.format.PageHeader;
+import org.apache.parquet.format.converter.ParquetMetadataConverter;
+import org.apache.parquet.hadoop.ParquetFileReader;
+import org.apache.parquet.hadoop.ParquetReader;
+import org.apache.parquet.hadoop.example.GroupReadSupport;
+import org.apache.parquet.hadoop.metadata.BlockMetaData;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.hadoop.metadata.CompressionCodecName;
+import org.apache.parquet.hadoop.metadata.FileMetaData;
+import org.apache.parquet.hadoop.metadata.ParquetMetadata;
+import org.apache.parquet.hadoop.util.CompressionConverter.TransParquetFileReader;
+import org.apache.parquet.hadoop.util.HadoopInputFile;
+import org.apache.parquet.internal.column.columnindex.ColumnIndex;
+import org.apache.parquet.internal.column.columnindex.OffsetIndex;
+import org.apache.parquet.schema.GroupType;
+import org.apache.parquet.schema.MessageType;
+import org.apache.parquet.schema.PrimitiveType;
+import org.apache.parquet.schema.Type;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.hudi.common.model.HoodieRecord.FILENAME_METADATA_FIELD;
+import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY;
+import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64;
+import static org.apache.parquet.schema.Type.Repetition.OPTIONAL;
+import static org.apache.parquet.schema.Type.Repetition.REPEATED;
+import static org.apache.parquet.schema.Type.Repetition.REQUIRED;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+public class TestHoodieParquetFileRewriter {
+
+  private final int numRecord = 1;
+  private Configuration conf = new Configuration();
+  private List<TestFile> inputFiles = null;
+  private String outputFile = null;
+  private HoodieParquetFileRewriter rewriter = null;
+
+  @BeforeEach
+  public void setUp() {
+    outputFile = TestFileBuilder.createTempFile("test");
+  }
+
+  @AfterEach
+  public void after() {
+    if (outputFile != null) {
+      TestFileBuilder.deleteTempFile(outputFile);
+    }
+    if (inputFiles != null) {
+      inputFiles.stream().map(TestFile::getFileName).forEach(TestFileBuilder::deleteTempFile);
+    }
+  }
+
+  @Test
+  public void testBasic() throws Exception {
+    MessageType schema = createSchema();
+    inputFiles = new ArrayList<>();
+    inputFiles.add(makeTestFile(schema, "GZIP"));
+    inputFiles.add(makeTestFile(schema, "GZIP"));
+
+    rewriter = parquetFileRewriter(schema, "GZIP");
+    rewriter.rewrite();
+    rewriter.close();
+
+    // Verify the schema is not changed
+    ParquetMetadata pmd = ParquetFileReader.readFooter(conf, new Path(outputFile), ParquetMetadataConverter.NO_FILTER);
+    MessageType fileSchema = pmd.getFileMetaData().getSchema();
+    assertEquals(schema, fileSchema);
+    validateSchema(fileSchema);
+
+    // Verify codec
+    verifyCodec(outputFile, CompressionCodecName.GZIP);
+
+    // Verify the merged data are not changed
+    validateColumnData();
+
+    // Verify the page index
+    validatePageIndex(0, 1, 2, 3, 4);
+
+    // Verify original.created.by is preserved
+    validateCreatedBy();
+  }
+
+  @Test
+  public void testTranslateCodec() throws Exception {
+    MessageType schema = createSchema();
+    inputFiles = new ArrayList<>();
+    inputFiles.add(makeTestFile(schema, "GZIP"));
+    inputFiles.add(makeTestFile(schema, "UNCOMPRESSED"));
+
+    rewriter = parquetFileRewriter(schema, "ZSTD");
+    rewriter.rewrite();
+    rewriter.close();
+
+    // Verify the schema is not changed for the columns not pruned
+    ParquetMetadata pmd = ParquetFileReader.readFooter(conf, new Path(outputFile), ParquetMetadataConverter.NO_FILTER);
+    MessageType fileSchema = pmd.getFileMetaData().getSchema();
+    assertEquals(schema, fileSchema);
+    validateSchema(fileSchema);
+
+    // Verify codec has been translated
+    verifyCodec(outputFile, CompressionCodecName.ZSTD);
+
+    // Verify the data are not changed for the columns not pruned
+    validateColumnData();
+
+    // Verify the page index
+    validatePageIndex(0, 1, 2, 3, 4);
+
+    // Verify original.created.by is preserved
+    validateCreatedBy();
+  }
+
+  @Test
+  public void testDifferentSchema() throws Exception {
+    MessageType schema1 = new MessageType("schema",
+        new PrimitiveType(OPTIONAL, INT64, "DocId"),
+        new PrimitiveType(REQUIRED, BINARY, "Name"),
+        new PrimitiveType(OPTIONAL, BINARY, "Gender"),
+        new GroupType(OPTIONAL, "Links",
+            new PrimitiveType(REPEATED, BINARY, "Backward"),
+            new PrimitiveType(REPEATED, BINARY, "Forward")));
+    MessageType schema2 = new MessageType("schema",
+        new PrimitiveType(OPTIONAL, INT64, "DocId"),
+        new PrimitiveType(REQUIRED, BINARY, "Name"),
+        new PrimitiveType(OPTIONAL, BINARY, "Gender"));
+    inputFiles = new ArrayList<>();
+    inputFiles.add(makeTestFile(schema1, "UNCOMPRESSED"));
+    inputFiles.add(makeTestFile(schema2, "UNCOMPRESSED"));
+
+    rewriter = parquetFileRewriter(schema1, "UNCOMPRESSED");
+    rewriter.rewrite();
+    rewriter.close();
+
+    // Verify the schema is not changed for the columns not pruned
+    ParquetMetadata pmd = ParquetFileReader.readFooter(conf, new Path(outputFile), ParquetMetadataConverter.NO_FILTER);
+    MessageType schema = pmd.getFileMetaData().getSchema();
+    validateSchema(schema);
+
+    // Verify codec
+    verifyCodec(outputFile, CompressionCodecName.UNCOMPRESSED);
+
+    // Verify the data are not changed
+    validateColumnData();
+
+    // Verify the page index
+    validatePageIndex(0, 1, 2);
+
+    // Verify original.created.by is preserved
+    validateCreatedBy();
+  }
+
+  @Test
+  public void testHoodieMetaColumn() throws Exception {
+    MessageType schema = new MessageType("schema",
+        new PrimitiveType(OPTIONAL, BINARY, FILENAME_METADATA_FIELD),
+        new PrimitiveType(OPTIONAL, INT64, "DocId"),
+        new PrimitiveType(REQUIRED, BINARY, "Name"),
+        new PrimitiveType(OPTIONAL, BINARY, "Gender"),
+        new GroupType(OPTIONAL, "Links",
+            new PrimitiveType(REPEATED, BINARY, "Backward"),
+            new PrimitiveType(REPEATED, BINARY, "Forward")));
+    inputFiles = new ArrayList<>();
+    inputFiles.add(makeTestFile(schema, "GZIP"));
+    inputFiles.add(makeTestFile(schema, "GZIP"));
+
+    rewriter = parquetFileRewriter(schema, "GZIP");
+    rewriter.rewrite();
+    rewriter.close();
+
+    // Verify the schema is not changed
+    ParquetMetadata pmd = ParquetFileReader.readFooter(conf, new Path(outputFile), ParquetMetadataConverter.NO_FILTER);
+    MessageType fileSchema = pmd.getFileMetaData().getSchema();
+    assertEquals(schema, fileSchema);
+
+    // Verify codec
+    verifyCodec(outputFile, CompressionCodecName.GZIP);
+
+    // Verify the data are not changed
+    validateColumnData();

Review Comment:
   _hoodie_file_name will be changed to the new parquet file name, but _hoodie_commit_time will not be changed. This behavior is consistent with traditional clustering.
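   For illustration, a minimal sketch (not part of this PR) of how that contract could be asserted on the rewritten file, assuming the test schema included both meta columns. Here originalCommitTime is a hypothetical value that would have to be captured from the input files, COMMIT_TIME_METADATA_FIELD is the _hoodie_commit_time constant from HoodieRecord, and the reader types are the ones this test already imports:

   // Hypothetical check: every row of the rewritten file should carry the new
   // file name, while the commit time stays untouched.
   try (ParquetReader<Group> reader =
       ParquetReader.builder(new GroupReadSupport(), new Path(outputFile)).withConf(conf).build()) {
     String newFileName = new Path(outputFile).getName();
     Group row;
     while ((row = reader.read()) != null) {
       // _hoodie_file_name is rewritten to point at the new parquet file
       assertEquals(newFileName, row.getString(FILENAME_METADATA_FIELD, 0));
       // _hoodie_commit_time keeps the value written by the original commit
       assertEquals(originalCommitTime, row.getString(COMMIT_TIME_METADATA_FIELD, 0));
     }
   }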
