This is an automated email from the ASF dual-hosted git repository.

mchades pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/gravitino.git


The following commit(s) were added to refs/heads/main by this push:
     new b6bdf8bc1f [#7788] fix(core): fix duplicate column when load table 
(#7789)
b6bdf8bc1f is described below

commit b6bdf8bc1f8da7a9bf027bb963dbb04cba490f4e
Author: Tianhang <[email protected]>
AuthorDate: Tue Aug 26 20:44:46 2025 +0800

    [#7788] fix(core): fix duplicate column when load table (#7789)
    
    ### What changes were proposed in this pull request?
    
    Fixed duplicate columns issue in HiveTableConverter.getColumns() method
    by filtering out partition keys that have the same names as regular
    columns. When duplicates exist, regular columns from
    StorageDescriptor.getCols() take precedence over partition keys.
    
    ### Why are the changes needed?
    
    When loading tables from external catalogs (e.g., Hive), duplicate
    columns can crash loading with an `IllegalStateException: Duplicate key`.
    This commonly occurs when:
    
    - HiveTableConverter.getColumns() includes a partition field (e.g.,
      log_date) from table.getSd().getCols()
    - The same field also exists in table.getPartitionKeys()
    - This results in duplicate columns in the final table schema
    
    Fix: #7788
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    Test added
    
    ---------
    
    Co-authored-by: teo <[email protected]>
---
 .../hive/converter/HiveTableConverter.java         |   9 +
 .../hive/converter/TestHiveTableConverter.java     | 212 +++++++++++++++++++++
 2 files changed, 221 insertions(+)

diff --git 
a/catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/converter/HiveTableConverter.java
 
b/catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/converter/HiveTableConverter.java
index 03cb233d4f..f58080c85b 100644
--- 
a/catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/converter/HiveTableConverter.java
+++ 
b/catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/converter/HiveTableConverter.java
@@ -22,6 +22,8 @@ import static 
org.apache.gravitino.rel.expressions.transforms.Transforms.identit
 
 import java.time.Instant;
 import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.gravitino.connector.BaseColumn;
 import org.apache.gravitino.meta.AuditInfo;
@@ -34,6 +36,7 @@ import 
org.apache.gravitino.rel.expressions.sorts.SortDirection;
 import org.apache.gravitino.rel.expressions.sorts.SortOrder;
 import org.apache.gravitino.rel.expressions.sorts.SortOrders;
 import org.apache.gravitino.rel.expressions.transforms.Transform;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 
@@ -88,6 +91,10 @@ public class HiveTableConverter {
           BUILDER extends BaseColumn.BaseColumnBuilder<BUILDER, COLUMN>, 
COLUMN extends BaseColumn>
       Column[] getColumns(Table table, BUILDER columnBuilder) {
     StorageDescriptor sd = table.getSd();
+    // Collect column names from sd.getCols() to check for duplicates
+    Set<String> columnNames =
+        
sd.getCols().stream().map(FieldSchema::getName).collect(Collectors.toSet());
+
     return Stream.concat(
             sd.getCols().stream()
                 .map(
@@ -98,6 +105,8 @@ public class HiveTableConverter {
                             .withComment(f.getComment())
                             .build()),
             table.getPartitionKeys().stream()
+                // Filter out partition keys that already exist in sd.getCols()
+                .filter(p -> !columnNames.contains(p.getName()))
                 .map(
                     p ->
                         columnBuilder
diff --git 
a/catalogs/hive-metastore-common/src/test/java/org/apache/gravitino/hive/converter/TestHiveTableConverter.java
 
b/catalogs/hive-metastore-common/src/test/java/org/apache/gravitino/hive/converter/TestHiveTableConverter.java
new file mode 100644
index 0000000000..f0882e35d9
--- /dev/null
+++ 
b/catalogs/hive-metastore-common/src/test/java/org/apache/gravitino/hive/converter/TestHiveTableConverter.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.hive.converter;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.util.Arrays;
+import java.util.List;
+import org.apache.gravitino.connector.BaseColumn;
+import org.apache.gravitino.rel.Column;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.junit.jupiter.api.Test;
+
+public class TestHiveTableConverter {
+
+  @Test
+  public void testGetColumnsWithNoDuplicates() {
+    // Create a table with columns and partition keys that don't overlap
+    Table table = new Table();
+    StorageDescriptor sd = new StorageDescriptor();
+
+    // Regular columns
+    List<FieldSchema> cols =
+        Arrays.asList(
+            new FieldSchema("id", "int", "ID column"),
+            new FieldSchema("name", "string", "Name column"),
+            new FieldSchema("age", "int", "Age column"));
+    sd.setCols(cols);
+    table.setSd(sd);
+
+    // Partition keys (no overlap with regular columns)
+    List<FieldSchema> partitionKeys =
+        Arrays.asList(
+            new FieldSchema("year", "int", "Year partition"),
+            new FieldSchema("month", "int", "Month partition"));
+    table.setPartitionKeys(partitionKeys);
+
+    // Get columns using a test column builder
+    TestColumnBuilder builder = new TestColumnBuilder();
+    Column[] columns = HiveTableConverter.getColumns(table, builder);
+
+    // Should have all 5 columns (3 regular + 2 partition)
+    assertEquals(5, columns.length);
+    assertEquals("id", columns[0].name());
+    assertEquals("name", columns[1].name());
+    assertEquals("age", columns[2].name());
+    assertEquals("year", columns[3].name());
+    assertEquals("month", columns[4].name());
+  }
+
+  @Test
+  public void testGetColumnsWithDuplicates() {
+    // Create a table with columns and partition keys that have duplicates
+    Table table = new Table();
+    StorageDescriptor sd = new StorageDescriptor();
+
+    // Regular columns
+    List<FieldSchema> cols =
+        Arrays.asList(
+            new FieldSchema("id", "int", "ID column"),
+            new FieldSchema("name", "string", "Name column"),
+            new FieldSchema("date", "string", "Date column"));
+    sd.setCols(cols);
+    table.setSd(sd);
+
+    // Partition keys (with duplicates from regular columns)
+    List<FieldSchema> partitionKeys =
+        Arrays.asList(
+            new FieldSchema("date", "string", "Date partition"), // Duplicate!
+            new FieldSchema("name", "string", "Name partition"), // Duplicate!
+            new FieldSchema("region", "string", "Region partition") // New 
column
+            );
+    table.setPartitionKeys(partitionKeys);
+
+    // Get columns using a test column builder
+    TestColumnBuilder builder = new TestColumnBuilder();
+    Column[] columns = HiveTableConverter.getColumns(table, builder);
+
+    // Should have only 4 columns (3 regular + 1 unique partition)
+    // The duplicates (date and name) should not be added again
+    assertEquals(4, columns.length);
+    assertEquals("id", columns[0].name());
+    assertEquals("name", columns[1].name());
+    assertEquals("date", columns[2].name());
+    assertEquals("region", columns[3].name());
+  }
+
+  @Test
+  public void testGetColumnsWithAllDuplicates() {
+    // Create a table where all partition keys are duplicates
+    Table table = new Table();
+    StorageDescriptor sd = new StorageDescriptor();
+
+    // Regular columns
+    List<FieldSchema> cols =
+        Arrays.asList(
+            new FieldSchema("col1", "int", "Column 1"),
+            new FieldSchema("col2", "string", "Column 2"),
+            new FieldSchema("col3", "double", "Column 3"));
+    sd.setCols(cols);
+    table.setSd(sd);
+
+    // Partition keys (all duplicates from regular columns)
+    List<FieldSchema> partitionKeys =
+        Arrays.asList(
+            new FieldSchema("col1", "int", "Column 1 partition"),
+            new FieldSchema("col2", "string", "Column 2 partition"),
+            new FieldSchema("col3", "double", "Column 3 partition"));
+    table.setPartitionKeys(partitionKeys);
+
+    // Get columns using a test column builder
+    TestColumnBuilder builder = new TestColumnBuilder();
+    Column[] columns = HiveTableConverter.getColumns(table, builder);
+
+    // Should have only 3 columns (all from regular columns, no duplicates 
from partition keys)
+    assertEquals(3, columns.length);
+    assertEquals("col1", columns[0].name());
+    assertEquals("col2", columns[1].name());
+    assertEquals("col3", columns[2].name());
+  }
+
+  @Test
+  public void testGetColumnsWithEmptyPartitionKeys() {
+    // Create a table with no partition keys
+    Table table = new Table();
+    StorageDescriptor sd = new StorageDescriptor();
+
+    // Regular columns
+    List<FieldSchema> cols =
+        Arrays.asList(
+            new FieldSchema("id", "int", "ID column"),
+            new FieldSchema("name", "string", "Name column"));
+    sd.setCols(cols);
+    table.setSd(sd);
+
+    // Empty partition keys
+    table.setPartitionKeys(Arrays.asList());
+
+    // Get columns using a test column builder
+    TestColumnBuilder builder = new TestColumnBuilder();
+    Column[] columns = HiveTableConverter.getColumns(table, builder);
+
+    // Should have only 2 columns from regular columns
+    assertEquals(2, columns.length);
+    assertEquals("id", columns[0].name());
+    assertEquals("name", columns[1].name());
+  }
+
+  // Test column implementation for testing
+  static class TestColumn extends BaseColumn {
+    private TestColumn() {}
+
+    static class Builder extends BaseColumn.BaseColumnBuilder<Builder, 
TestColumn> {
+      @Override
+      protected TestColumn internalBuild() {
+        TestColumn column = new TestColumn();
+        // Use reflection to set protected fields
+        try {
+          java.lang.reflect.Field nameField = 
BaseColumn.class.getDeclaredField("name");
+          nameField.setAccessible(true);
+          nameField.set(column, name);
+
+          java.lang.reflect.Field commentField = 
BaseColumn.class.getDeclaredField("comment");
+          commentField.setAccessible(true);
+          commentField.set(column, comment);
+
+          java.lang.reflect.Field dataTypeField = 
BaseColumn.class.getDeclaredField("dataType");
+          dataTypeField.setAccessible(true);
+          dataTypeField.set(column, dataType);
+
+          java.lang.reflect.Field nullableField = 
BaseColumn.class.getDeclaredField("nullable");
+          nullableField.setAccessible(true);
+          nullableField.set(column, nullable);
+
+          java.lang.reflect.Field autoIncrementField =
+              BaseColumn.class.getDeclaredField("autoIncrement");
+          autoIncrementField.setAccessible(true);
+          autoIncrementField.set(column, autoIncrement);
+
+          java.lang.reflect.Field defaultValueField =
+              BaseColumn.class.getDeclaredField("defaultValue");
+          defaultValueField.setAccessible(true);
+          defaultValueField.set(column, defaultValue);
+        } catch (Exception e) {
+          throw new RuntimeException("Failed to build TestColumn", e);
+        }
+        return column;
+      }
+    }
+  }
+
+  // Test column builder helper
+  static class TestColumnBuilder extends TestColumn.Builder {}
+}

Reply via email to