This is an automated email from the ASF dual-hosted git repository.

lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new 3f604f51e [flink] Support add hive catalog table properties (#1984)
3f604f51e is described below

commit 3f604f51e2680fd8ef17ecabe58b378684afa71e
Author: monster <[email protected]>
AuthorDate: Thu Sep 14 18:19:34 2023 +0800

    [flink] Support add hive catalog table properties (#1984)
---
 docs/content/how-to/creating-catalogs.md           |  7 +++
 .../java/org/apache/paimon/hive/HiveCatalog.java   | 19 ++++++--
 .../org/apache/paimon/hive/HiveCatalogTest.java    | 55 ++++++++++++++++++++++
 3 files changed, 77 insertions(+), 4 deletions(-)

diff --git a/docs/content/how-to/creating-catalogs.md 
b/docs/content/how-to/creating-catalogs.md
index 11f2d7925..4e57afda6 100644
--- a/docs/content/how-to/creating-catalogs.md
+++ b/docs/content/how-to/creating-catalogs.md
@@ -163,3 +163,10 @@ You can set location in the properties of table/database 
by the config of `locat
 By default, Paimon does not synchronize newly created partitions into Hive 
metastore. Users will see an unpartitioned table in Hive. Partition push-down 
will be carried out by filter push-down instead.
 
 If you want to see a partitioned table in Hive and also synchronize newly 
created partitions into Hive metastore, please set the table property 
`metastore.partitioned-table` to true. Also see [CoreOptions]({{< ref 
"maintenance/configurations#CoreOptions" >}}).
+
+### Adding Parameters to a Hive Table
+
+Using table options makes it convenient to define Hive table 
parameters. 
+Parameters prefixed with `hive.` will be automatically defined in the 
`TBLPROPERTIES` of the Hive table. 
+For instance, using the option `hive.table.owner=Jon` will automatically add 
the parameter `table.owner=Jon` to the table properties during the creation 
process.
+
diff --git 
a/paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/hive/HiveCatalog.java
 
b/paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/hive/HiveCatalog.java
index b408214ca..e075d7e21 100644
--- 
a/paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/hive/HiveCatalog.java
+++ 
b/paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/hive/HiveCatalog.java
@@ -97,7 +97,8 @@ public class HiveCatalog extends AbstractCatalog {
     private static final String SERDE_CLASS_NAME = 
"org.apache.paimon.hive.PaimonSerDe";
     private static final String STORAGE_HANDLER_CLASS_NAME =
             "org.apache.paimon.hive.PaimonStorageHandler";
-
+    private static final String HIVE_PREFIX = "hive.";
+    private static final int HIVE_PREFIX_LENGTH = HIVE_PREFIX.length();
     public static final String HIVE_SITE_FILE = "hive-site.xml";
 
     private final HiveConf hiveConf;
@@ -324,7 +325,17 @@ public class HiveCatalog extends AbstractCatalog {
                             + " to underlying files.",
                     e);
         }
-        Table table = newHmsTable(identifier);
+        Table table =
+                newHmsTable(
+                        identifier,
+                        tableSchema.options().entrySet().stream()
+                                .filter(entry -> 
entry.getKey().startsWith(HIVE_PREFIX))
+                                .collect(
+                                        Collectors.toMap(
+                                                entry ->
+                                                        entry.getKey()
+                                                                
.substring(HIVE_PREFIX_LENGTH),
+                                                Map.Entry::getValue)));
         try {
             updateHmsTable(table, identifier, tableSchema);
             client.createTable(table);
@@ -482,7 +493,7 @@ public class HiveCatalog extends AbstractCatalog {
         return database;
     }
 
-    private Table newHmsTable(Identifier identifier) {
+    private Table newHmsTable(Identifier identifier, Map<String, String> 
tableParameters) {
         long currentTimeMillis = System.currentTimeMillis();
         TableType tableType =
                 OptionsUtils.convertToEnum(
@@ -499,7 +510,7 @@ public class HiveCatalog extends AbstractCatalog {
                         Integer.MAX_VALUE,
                         null,
                         Collections.emptyList(),
-                        new HashMap<>(),
+                        tableParameters,
                         null,
                         null,
                         tableType.toString().toUpperCase(Locale.ROOT) + 
"_TABLE");
diff --git 
a/paimon-hive/paimon-hive-catalog/src/test/java/org/apache/paimon/hive/HiveCatalogTest.java
 
b/paimon-hive/paimon-hive-catalog/src/test/java/org/apache/paimon/hive/HiveCatalogTest.java
index ae57695e5..a45130735 100644
--- 
a/paimon-hive/paimon-hive-catalog/src/test/java/org/apache/paimon/hive/HiveCatalogTest.java
+++ 
b/paimon-hive/paimon-hive-catalog/src/test/java/org/apache/paimon/hive/HiveCatalogTest.java
@@ -21,14 +21,23 @@ package org.apache.paimon.hive;
 import org.apache.paimon.catalog.CatalogTestBase;
 import org.apache.paimon.catalog.Identifier;
 import org.apache.paimon.hive.utils.CommonTestUtils;
+import org.apache.paimon.schema.Schema;
+import org.apache.paimon.types.DataField;
+import org.apache.paimon.types.DataTypes;
+
+import org.apache.paimon.shade.guava30.com.google.common.collect.Lists;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.mockito.MockedStatic;
 import org.mockito.Mockito;
 
+import java.lang.reflect.Field;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -37,6 +46,7 @@ import java.util.UUID;
 import static 
org.apache.hadoop.hive.conf.HiveConf.ConfVars.METASTORECONNECTURLKEY;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /** Tests for {@link HiveCatalog}. */
 public class HiveCatalogTest extends CatalogTestBase {
@@ -148,4 +158,49 @@ public class HiveCatalogTest extends CatalogTestBase {
         HiveConf hiveConf = HiveCatalog.createHiveConf(null, null);
         assertThat(hiveConf.get("hive.metastore.uris")).isEqualTo("dummy-hms");
     }
+
+    @Test
+    public void testAddHiveTableParameters() {
+        try {
+            // Create a new database for the test
+            String databaseName = "test_db";
+            catalog.createDatabase(databaseName, false);
+
+            // Create a new table with Hive table parameters
+            String tableName = "new_table";
+            Map<String, String> options = new HashMap<>();
+            options.put("hive.table.owner", "Jon");
+            options.put("hive.storage.format", "ORC");
+            options.put("snapshot.num-retained.min", "5");
+            options.put("snapshot.time-retained", "1h");
+
+            Schema addHiveTableParametersSchema =
+                    new Schema(
+                            Lists.newArrayList(
+                                    new DataField(0, "pk", DataTypes.INT()),
+                                    new DataField(1, "col1", 
DataTypes.STRING()),
+                                    new DataField(2, "col2", 
DataTypes.STRING())),
+                            Collections.emptyList(),
+                            Collections.emptyList(),
+                            options,
+                            "");
+
+            catalog.createTable(
+                    Identifier.create(databaseName, tableName),
+                    addHiveTableParametersSchema,
+                    false);
+
+            Field clientField = HiveCatalog.class.getDeclaredField("client");
+            clientField.setAccessible(true);
+            IMetaStoreClient client = (IMetaStoreClient) 
clientField.get(catalog);
+            Table table = client.getTable(databaseName, tableName);
+            Map<String, String> tableProperties = table.getParameters();
+
+            // Verify the transformed parameters
+            assertThat(tableProperties).containsEntry("table.owner", "Jon");
+            assertThat(tableProperties).containsEntry("storage.format", "ORC");
+        } catch (Exception e) {
+            fail("Test failed due to exception: " + e.getMessage());
+        }
+    }
 }

Reply via email to