pvary commented on code in PR #14435:
URL: https://github.com/apache/iceberg/pull/14435#discussion_r2665140281


##########
parquet/src/test/java/org/apache/iceberg/parquet/TestParquetFileMerger.java:
##########
@@ -0,0 +1,931 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg.parquet;
+
+import static org.apache.iceberg.parquet.Parquet.writeData;
+import static org.apache.iceberg.parquet.ParquetWritingTestUtils.createTempFile;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.DataFiles;
+import org.apache.iceberg.FileFormat;
+import org.apache.iceberg.Files;
+import org.apache.iceberg.MetadataColumns;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.TestTables;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.data.parquet.GenericParquetReaders;
+import org.apache.iceberg.data.parquet.GenericParquetWriter;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.io.FileIO;
+import org.apache.iceberg.io.InputFile;
+import org.apache.iceberg.io.OutputFile;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.types.Types;
+import org.apache.parquet.column.page.PageReadStore;
+import org.apache.parquet.hadoop.ParquetFileReader;
+import org.apache.parquet.io.ColumnIOFactory;
+import org.apache.parquet.io.MessageColumnIO;
+import org.apache.parquet.io.RecordReader;
+import org.apache.parquet.io.api.Converter;
+import org.apache.parquet.io.api.GroupConverter;
+import org.apache.parquet.io.api.PrimitiveConverter;
+import org.apache.parquet.schema.MessageType;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+public class TestParquetFileMerger {
+
+  private static final Schema SCHEMA =
+      new Schema(
+          Types.NestedField.required(1, "id", Types.IntegerType.get()),
+          Types.NestedField.optional(2, "data", Types.StringType.get()));
+
+  private static final long DEFAULT_ROW_GROUP_SIZE =
+      TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT;
+
+  @TempDir private Path temp;
+
+  private FileIO fileIO;
+
+  @BeforeEach
+  public void setupFileIO() {
+    this.fileIO = new TestTables.LocalFileIO();
+  }
+
+  @Test
+  public void testCanMergeThrowsForEmptyList() {
+    assertThatThrownBy(() -> ParquetFileMerger.canMergeAndGetSchema(Collections.emptyList()))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("inputFiles cannot be null or empty");
+  }
+
+  @Test
+  public void testCanMergeThrowsForNullInput() {
+    assertThatThrownBy(() -> ParquetFileMerger.canMergeAndGetSchema(null))
+        .isInstanceOf(IllegalArgumentException.class)
+        .hasMessageContaining("inputFiles cannot be null or empty");
+  }
+
+  @Test
+  public void testCanMergeReturnsFalseForNonParquetFile() throws IOException {
+    // Create a non-Parquet file (just a text file)
+    File textFile = createTempFile(temp);
+    textFile.getParentFile().mkdirs(); // Ensure directory exists
+    java.nio.file.Files.write(textFile.toPath(), "This is not a Parquet file".getBytes());
+
+    InputFile inputFile = Files.localInput(textFile);
+    List<InputFile> inputFiles = Lists.newArrayList(inputFile);
+
+    // Should return null because file is not valid Parquet
+    MessageType result = ParquetFileMerger.canMergeAndGetSchema(inputFiles);
+    assertThat(result).isNull();
+  }
+
+  @Test
+  public void testCanMergeReturnsFalseForDifferentSchemas() throws IOException {
+    // Create first Parquet file with schema1
+    Schema icebergSchema1 = SCHEMA;
+
+    File parquetFile1 = createTempFile(temp);
+    OutputFile outputFile1 = Files.localOutput(parquetFile1);
+    createParquetFileWithData(outputFile1, icebergSchema1, Collections.emptyList());
+
+    // Create second Parquet file with different schema
+    Schema icebergSchema2 =
+        new Schema(
+            Types.NestedField.required(1, "id", Types.IntegerType.get()),
+            Types.NestedField.optional(3, "other", Types.LongType.get())); // Different field
+
+    File parquetFile2 = createTempFile(temp);
+    OutputFile outputFile2 = Files.localOutput(parquetFile2);
+    createParquetFileWithData(outputFile2, icebergSchema2, Collections.emptyList());
+
+    // Try to validate - should return null due to different schemas
+    InputFile inputFile1 = Files.localInput(parquetFile1);
+    InputFile inputFile2 = Files.localInput(parquetFile2);
+    List<InputFile> inputFiles = Arrays.asList(inputFile1, inputFile2);
+
+    MessageType result = ParquetFileMerger.canMergeAndGetSchema(inputFiles);
+    assertThat(result).isNull();
+  }
+
+  @Test
+  public void testCanMergeReturnsTrueForIdenticalSchemas() throws IOException {
+    // Create two Parquet files with the same schema and some data
+    File parquetFile1 = createTempFile(temp);
+    writeRecordsToFile(parquetFile1, Arrays.asList(createRecord(1, "a")));
+
+    File parquetFile2 = createTempFile(temp);
+    writeRecordsToFile(parquetFile2, Arrays.asList(createRecord(2, "b")));
+
+    // Should return non-null MessageType for identical schemas
+    InputFile inputFile1 = Files.localInput(parquetFile1);
+    InputFile inputFile2 = Files.localInput(parquetFile2);
+    List<InputFile> inputFiles = Arrays.asList(inputFile1, inputFile2);
+
+    MessageType result = ParquetFileMerger.canMergeAndGetSchema(inputFiles);
+    assertThat(result).isNotNull();
+  }
+
+  @Test
+  public void testMergeFilesSynthesizesRowLineageColumns() throws IOException {
+    // Test that merging files with virtual row lineage synthesizes physical _row_id and
+    // _last_updated_sequence_number columns
+
+    // Create two files with test data
+    File file1 = createTempFile(temp);
+    writeRecordsToFile(
+        file1, Arrays.asList(createRecord(1, "a"), createRecord(2, "b"), createRecord(3, "c")));
+
+    File file2 = createTempFile(temp);
+    writeRecordsToFile(file2, Arrays.asList(createRecord(4, "d"), createRecord(5, "e")));
+
+    // Create DataFiles with row lineage metadata
+    List<DataFile> dataFiles =
+        Arrays.asList(
+            createDataFile(file1.getAbsolutePath(), 100L, 5L),
+            createDataFile(file2.getAbsolutePath(), 103L, 5L));
+
+    File mergedFile = createTempFile(temp);
+    OutputFile mergedOutput = Files.localOutput(mergedFile);
+
+    // Perform merge
+    mergeFilesHelper(
+        dataFiles, mergedOutput, DEFAULT_ROW_GROUP_SIZE, PartitionSpec.unpartitioned(), null);
+
+    // Verify the merged file has both row lineage columns
+    InputFile mergedInput = Files.localInput(mergedFile);
+    MessageType mergedSchema;
+    try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(mergedInput))) {
+      mergedSchema = reader.getFooter().getFileMetaData().getSchema();
+    }
+    assertThat(mergedSchema.containsField(MetadataColumns.ROW_ID.name()))
+        .as("Merged file should have _row_id column")
+        .isTrue();
+    assertThat(mergedSchema.containsField(MetadataColumns.LAST_UPDATED_SEQUENCE_NUMBER.name()))
+        .as("Merged file should have _last_updated_sequence_number column")
+        .isTrue();
+
+    // Verify row lineage values are correct
+    List<RowLineageRecord> records = readRowLineageData(mergedInput);
+    assertThat(records).hasSize(5);
+
+    // Verify _row_id values are sequential starting from firstRowId
+    // File1 records: 100, 101, 102
+    assertThat(records.get(0).rowId).isEqualTo(100L);
+    assertThat(records.get(1).rowId).isEqualTo(101L);
+    assertThat(records.get(2).rowId).isEqualTo(102L);
+
+    // File2 records: 103, 104
+    assertThat(records.get(3).rowId).isEqualTo(103L);
+    assertThat(records.get(4).rowId).isEqualTo(104L);
+
+    // Verify _last_updated_sequence_number is constant (5L) for all records
+    for (RowLineageRecord record : records) {
+      assertThat(record.seqNum).isEqualTo(5L);
+    }
+
+    // Verify original data is preserved
+    assertThat(records.get(0).id).isEqualTo(1);
+    assertThat(records.get(0).data).isEqualTo("a");
+    assertThat(records.get(1).id).isEqualTo(2);
+    assertThat(records.get(4).data).isEqualTo("e");
+  }
+
+  @Test
+  public void testMergeFilesWithMultipleRowGroups() throws IOException {
+    // Test row lineage synthesis works correctly across multiple row groups
+
+    // Create file with multiple row groups by setting small row group size
+    File file1 = createTempFile(temp);
+    OutputFile output1 = Files.localOutput(file1);
+
+    // Create enough records to span multiple row groups
+    List<Record> records = Lists.newArrayList();
+    for (int i = 0; i < 100; i++) {
+      records.add(createRecord(i, "data" + i));
+    }
+    createParquetFileWithData(output1, SCHEMA, records, 1024); // Small row group size

Review Comment:
   nit: newline



