diqiu50 commented on code in PR #9460:
URL: https://github.com/apache/gravitino/pull/9460#discussion_r2610578935


##########
catalogs/catalog-lakehouse-hudi/src/main/java/org/apache/gravitino/catalog/lakehouse/hudi/backend/hms/HudiHMSBackendOps.java:
##########
@@ -45,54 +44,53 @@
 import org.apache.gravitino.exceptions.TableAlreadyExistsException;
 import org.apache.gravitino.hive.CachedClientPool;
 import org.apache.gravitino.rel.Column;
+import org.apache.gravitino.rel.Table;
 import org.apache.gravitino.rel.TableChange;
 import org.apache.gravitino.rel.expressions.distributions.Distribution;
 import org.apache.gravitino.rel.expressions.sorts.SortOrder;
 import org.apache.gravitino.rel.expressions.transforms.Transform;
 import org.apache.gravitino.rel.indexes.Index;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class HudiHMSBackendOps implements HudiCatalogBackendOps {
   private static final Logger LOG = 
LoggerFactory.getLogger(HudiHMSBackendOps.class);
   // Mapping from Gravitino config to Hive config
   private static final Map<String, String> CONFIG_CONVERTER =
-      ImmutableMap.of(URI, HiveConf.ConfVars.METASTOREURIS.varname);
+      ImmutableMap.of(URI, HIVE_METASTORE_URIS);
 
   private static final String HUDI_PACKAGE_PREFIX = "org.apache.hudi";
 
   @VisibleForTesting CachedClientPool clientPool;
 
-  public static final String GRAVITINO_KEYTAB_FORMAT = 
"keytabs/gravitino-lakehouse-hudi-%s-keytab";
-
   @Override
   public void initialize(Map<String, String> properties) {
-    HiveConf hiveConf = buildHiveConfAndInitKerberosAuth(properties);
-    this.clientPool = new CachedClientPool(hiveConf, properties);
+    Properties clientProperties = new Properties();
+
+    Map<String, String> byPassConfigs = Maps.newHashMap();
+    Map<String, String> convertedConfigs = Maps.newHashMap();
+    properties.forEach(
+        (key, value) -> {
+          if (key.startsWith(CATALOG_BYPASS_PREFIX)) {
+            byPassConfigs.put(key.substring(CATALOG_BYPASS_PREFIX.length()), 
value);
+          } else if (CONFIG_CONVERTER.containsKey(key)) {
+            convertedConfigs.put(CONFIG_CONVERTER.get(key), value);
+          } else {
+            convertedConfigs.put(key, value);
+          }
+        });
+    byPassConfigs.forEach(clientProperties::setProperty);
+    convertedConfigs.forEach(clientProperties::setProperty);
+    String catalogKey = "hudi-" + 
properties.getOrDefault(CatalogUtils.CATALOG_ID_KEY, "0");
+    this.clientPool = new CachedClientPool(catalogKey, clientProperties, 
properties);
+    LOG.info("Hudi HMS Backend Ops initialized with properties: {}", 
properties);
   }
 
   @Override
   public HudiSchema loadSchema(NameIdentifier schemaIdent) throws 
NoSuchSchemaException {
     try {
-      Database database = clientPool.run(client -> 
client.getDatabase(schemaIdent.name()));
-      return HudiHMSSchema.builder().withBackendSchema(database).build();
-
-    } catch (NoSuchObjectException | UnknownDBException e) {
-      throw new NoSuchSchemaException(
-          e, "Hudi schema (database) does not exist: %s in Hive Metastore", 
schemaIdent.name());
-
-    } catch (TException e) {
-      throw new RuntimeException(
-          "Failed to load Hudi schema (database) " + schemaIdent.name() + " 
from Hive Metastore",
-          e);
-
+      Schema database = clientPool.run(client -> client.getDatabase("", 
schemaIdent.name()));

Review Comment:
   No, it can also access Hive 2 and Hive 3 HMS.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to