This is an automated email from the ASF dual-hosted git repository.

lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git


The following commit(s) were added to refs/heads/master by this push:
     new 80b68c225d [hive] Make HiveMetastoreClient.addPartition thread safe (#4669)
80b68c225d is described below

commit 80b68c225d1580d7461fdee1fb9da9d09dd542f6
Author: Jingsong Lee <[email protected]>
AuthorDate: Tue Dec 10 10:02:49 2024 +0800

    [hive] Make HiveMetastoreClient.addPartition thread safe (#4669)
---
 .../metastore/AddPartitionCommitCallback.java      | 14 ------------
 .../apache/paimon/hive/HiveMetastoreClient.java    | 25 +++++++++-------------
 2 files changed, 10 insertions(+), 29 deletions(-)
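
For context: the removed code checked for the partition with getPartition and only called add_partition when it got NoSuchObjectException, so two concurrent committers could both pass the check and the slower one would fail on add_partition. The patched version skips the check and simply ignores AlreadyExistsException, letting the metastore enforce uniqueness; it also drops the extra getPartition round trip on the common path. A minimal, self-contained sketch of that pattern follows. IMetaStoreClient.add_partition and AlreadyExistsException are real Hive metastore APIs; the class and method names below are illustrative and not part of this commit.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class IdempotentPartitionAdd {

        // Safe under concurrency: the metastore rejects duplicates atomically,
        // so a losing writer just sees AlreadyExistsException and moves on.
        static void addPartitionIdempotently(IMetaStoreClient client, Partition partition)
                throws Exception {
            try {
                client.add_partition(partition);
            } catch (AlreadyExistsException ignore) {
                // another writer created the partition first; nothing to do
            }
        }
    }
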

diff --git a/paimon-core/src/main/java/org/apache/paimon/metastore/AddPartitionCommitCallback.java b/paimon-core/src/main/java/org/apache/paimon/metastore/AddPartitionCommitCallback.java
index 06002161a6..599f88e512 100644
--- a/paimon-core/src/main/java/org/apache/paimon/metastore/AddPartitionCommitCallback.java
+++ b/paimon-core/src/main/java/org/apache/paimon/metastore/AddPartitionCommitCallback.java
@@ -72,20 +72,6 @@ public class AddPartitionCommitCallback implements CommitCallback {
         addPartitions(partitions);
     }
 
-    private void addPartition(BinaryRow partition) {
-        try {
-            boolean added = cache.get(partition, () -> false);
-            if (added) {
-                return;
-            }
-
-            client.addPartition(partition);
-            cache.put(partition, true);
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
     private void addPartitions(Set<BinaryRow> partitions) {
         try {
             List<BinaryRow> newPartitions = new ArrayList<>();
diff --git a/paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/hive/HiveMetastoreClient.java b/paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/hive/HiveMetastoreClient.java
index cb70e01911..3793c86f82 100644
--- a/paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/hive/HiveMetastoreClient.java
+++ b/paimon-hive/paimon-hive-catalog/src/main/java/org/apache/paimon/hive/HiveMetastoreClient.java
@@ -31,6 +31,7 @@ import org.apache.paimon.utils.PartitionPathUtils;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
@@ -92,21 +93,15 @@ public class HiveMetastoreClient implements MetastoreClient {
 
     @Override
     public void addPartition(LinkedHashMap<String, String> partitionSpec) throws Exception {
-        List<String> partitionValues = new ArrayList<>(partitionSpec.values());
-        try {
-            clients.execute(
-                    client ->
-                            client.getPartition(
-                                    identifier.getDatabaseName(),
-                                    identifier.getTableName(),
-                                    partitionValues));
-            // do nothing if the partition already exists
-        } catch (NoSuchObjectException e) {
-            // partition not found, create new partition
-            Partition hivePartition =
-                    toHivePartition(partitionSpec, (int) (System.currentTimeMillis() / 1000));
-            clients.execute(client -> client.add_partition(hivePartition));
-        }
+        Partition hivePartition =
+                toHivePartition(partitionSpec, (int) (System.currentTimeMillis() / 1000));
+        clients.execute(
+                client -> {
+                    try {
+                        client.add_partition(hivePartition);
+                    } catch (AlreadyExistsException ignore) {
+                    }
+                });
     }
 
     @Override
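
A small concurrency sketch of why ignoring AlreadyExistsException is enough. FakeMetastore below is a stand-in that enforces uniqueness the way the Hive metastore does; it is not Paimon or Hive code, and all names besides AlreadyExistsException are made up for illustration.

    import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.CountDownLatch;

    public class ConcurrentAddPartitionSketch {

        // Minimal stand-in: addPartition succeeds once per key and rejects
        // duplicates, mirroring the metastore's uniqueness guarantee.
        static class FakeMetastore {
            private final Set<String> partitions = ConcurrentHashMap.newKeySet();

            void addPartition(String partitionPath) throws AlreadyExistsException {
                if (!partitions.add(partitionPath)) {
                    throw new AlreadyExistsException("Partition exists: " + partitionPath);
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            FakeMetastore metastore = new FakeMetastore();
            int writers = 8;
            CountDownLatch start = new CountDownLatch(1);
            CountDownLatch done = new CountDownLatch(writers);

            for (int i = 0; i < writers; i++) {
                new Thread(
                                () -> {
                                    try {
                                        start.await();
                                        try {
                                            metastore.addPartition("dt=2024-12-10");
                                        } catch (AlreadyExistsException ignore) {
                                            // expected for every writer but the first
                                        }
                                    } catch (InterruptedException e) {
                                        Thread.currentThread().interrupt();
                                    } finally {
                                        done.countDown();
                                    }
                                })
                        .start();
            }

            start.countDown();
            done.await();
            // Every writer returns normally; the partition was created exactly once.
            System.out.println("All writers finished without errors.");
        }
    }
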
