Copilot commented on code in PR #9416:
URL: https://github.com/apache/gravitino/pull/9416#discussion_r2600956070


##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/converter/HiveDatabaseConverter.java:
##########
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.hive.converter;
+
+import static org.apache.gravitino.catalog.hive.HiveConstants.LOCATION;
+
+import avro.shaded.com.google.common.collect.Maps;
+import com.google.common.base.Preconditions;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.gravitino.hive.HiveSchema;
+import org.apache.gravitino.meta.AuditInfo;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+
+public class HiveDatabaseConverter {
+  public static HiveSchema fromHiveDB(Database db) {
+    Preconditions.checkArgument(db != null, "Database cannot be null");
+
+    Map<String, String> properties = buildSchemaProperties(db);
+
+    // Get audit info from Hive's Database object. Because Hive's database 
doesn't store create
+    // time, last modifier and last modified time, we only get creator from 
Hive's database.
+    AuditInfo.Builder auditInfoBuilder = AuditInfo.builder();
+    
Optional.ofNullable(db.getOwnerName()).ifPresent(auditInfoBuilder::withCreator);
+
+    String catalogName = null;
+    try {
+      java.lang.reflect.Method getCatalogNameMethod = 
db.getClass().getMethod("getCatalogName");
+      catalogName = (String) getCatalogNameMethod.invoke(db);
+    } catch (Exception e) {
+      // Hive2 doesn't have getCatalogName method, catalogName will be null
+    }
+
+    HiveSchema hiveSchema =
+        HiveSchema.builder()
+            .withName(db.getName())
+            .withComment(db.getDescription())
+            .withProperties(properties)
+            .withAuditInfo(auditInfoBuilder.build())
+            .withCatalogName(catalogName)
+            .build();
+    return hiveSchema;
+  }
+  /**
+   * Add a comment on lines L57 to L65Add diff commentMarkdown input: edit mode
+   * selected.WritePreviewHeadingBoldItalicQuoteCodeLinkUnordered listNumbered 
listTask
+   * listMentionReferenceSaved repliesAdd FilesPaste, drop, or click to add 
filesCancelCommentStart
+   * a reviewReturn to code Build schema properties from a Hive Database 
object.
+   *
+   * @param database The Hive Database object.
+   * @return A map of schema properties.
+   */

Review Comment:
   Malformed documentation comment. This appears to be accidentally pasted 
content from a UI that includes HTML elements and UI instructions ("Add diff 
commentMarkdown input: edit mode selected.Write..."). This should be replaced 
with proper JavaDoc documentation.



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/HiveClientPool.java:
##########
@@ -18,119 +18,57 @@
  */
 package org.apache.gravitino.hive;
 
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.gravitino.hive.dyn.DynMethods;
-import org.apache.gravitino.hive.dyn.DynMethods.StaticMethod;
+import java.util.Properties;
+import org.apache.gravitino.exceptions.GravitinoRuntimeException;
+import org.apache.gravitino.hive.client.HiveClient;
+import org.apache.gravitino.hive.client.HiveClientFactory;
 import org.apache.gravitino.utils.ClientPoolImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.thrift.TException;
-import org.apache.thrift.transport.TTransportException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 // hive-metastore/src/main/java/org/apache/iceberg/hive/HiveClientPool.java
 
 /** Represents a client pool for managing connections to an Apache Hive 
Metastore service. */
-public class HiveClientPool extends ClientPoolImpl<IMetaStoreClient, 
TException> {
+public class HiveClientPool extends ClientPoolImpl<HiveClient, 
GravitinoRuntimeException> {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(HiveClientPool.class);
-  private static final StaticMethod GET_CLIENT =
-      DynMethods.builder("getProxy")
-          .impl(
-              RetryingMetaStoreClient.class,
-              HiveConf.class,
-              HiveMetaHookLoader.class,
-              String.class) // Hive 1 and 2
-          .impl(
-              RetryingMetaStoreClient.class,
-              Configuration.class,
-              HiveMetaHookLoader.class,
-              String.class) // Hive 3
-          .buildStatic();
-
-  private final HiveConf hiveConf;
+  private final HiveClientFactory clientFactory;
 
   /**
    * Creates a new HiveClientPool with the specified pool size and 
configuration.
    *
    * @param poolSize The number of clients in the pool.
-   * @param conf The configuration used to initialize the Hive Metastore 
clients.
+   * @param properties The configuration used to initialize the Hive Metastore 
clients.
    */
-  public HiveClientPool(int poolSize, Configuration conf) {
+  public HiveClientPool(int poolSize, Properties properties) {
     // Do not allow retry by default as we rely on RetryingHiveClient
-    super(poolSize, TTransportException.class, false);
-    this.hiveConf = new HiveConf(conf, HiveClientPool.class);
-    this.hiveConf.addResource(conf);
+    super(poolSize, GravitinoRuntimeException.class, false);
+    this.clientFactory = new HiveClientFactory(properties, "xxx");

Review Comment:
   Hardcoded string "xxx" used as an ID parameter. This should be replaced with 
a meaningful value or passed as a constructor parameter to allow different pool 
instances to have distinct IDs.



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/kerberos/AuthenticationConfig.java:
##########
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.hive.kerberos;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
+import java.util.Map;
+import java.util.Properties;
+import org.apache.gravitino.Config;
+import org.apache.gravitino.config.ConfigBuilder;
+import org.apache.gravitino.config.ConfigConstants;
+import org.apache.gravitino.config.ConfigEntry;
+import org.apache.hadoop.conf.Configuration;
+
+public class AuthenticationConfig extends Config {
+
+  // The key for the authentication type, currently we support Kerberos and 
simple
+  public static final String AUTH_TYPE_KEY = "authentication.type";
+
+  public static final String IMPERSONATION_ENABLE_KEY = 
"authentication.impersonation-enable";
+
+  enum AuthenticationType {
+    SIMPLE,
+    KERBEROS;
+  }
+
+  public static final boolean KERBEROS_DEFAULT_IMPERSONATION_ENABLE = false;
+
+  public AuthenticationConfig(Properties properties, Configuration 
configuration) {
+    super(false);
+    loadFromHdfsConfiguration(configuration);
+    loadFromMap((Map) properties, k -> true);
+  }
+
+  private void loadFromHdfsConfiguration(Configuration configuration) {
+    String authType = configuration.get(HADOOP_SECURITY_AUTHENTICATION, 
"simple");
+    configMap.put(AUTH_TYPE_KEY, authType);
+  }
+
+  public static final ConfigEntry<String> AUTH_TYPE_ENTRY =
+      new ConfigBuilder(AUTH_TYPE_KEY)
+          .doc(
+              "The type of authentication for Hudi catalog, currently we only 
support simple and Kerberos")
+          .version(ConfigConstants.VERSION_1_0_0)
+          .stringConf()
+          .createWithDefault("simple");
+
+  public static final ConfigEntry<Boolean> ENABLE_IMPERSONATION_ENTRY =
+      new ConfigBuilder(IMPERSONATION_ENABLE_KEY)
+          .doc("Whether to enable impersonation for the Hudi catalog")

Review Comment:
   Documentation mentions "Hudi catalog" but this class is for Hive. The text appears to 
be copy-pasted from Hudi code; it should reference "Hive catalog" instead.
   ```suggestion
                 "The type of authentication for Hive catalog, currently we 
only support simple and Kerberos")
             .version(ConfigConstants.VERSION_1_0_0)
             .stringConf()
             .createWithDefault("simple");
   
     public static final ConfigEntry<Boolean> ENABLE_IMPERSONATION_ENTRY =
         new ConfigBuilder(IMPERSONATION_ENABLE_KEY)
             .doc("Whether to enable impersonation for the Hive catalog")
   ```



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/kerberos/KerberosClient.java:
##########
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.hive.kerberos;
+
+import static 
org.apache.gravitino.catalog.hive.HiveConstants.HIVE_METASTORE_TOKEN_SIGNATURE;
+import static org.apache.gravitino.hive.kerberos.KerberosConfig.PRINCIPAL_KEY;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Splitter;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.gravitino.hive.client.HiveClient;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.thrift.DelegationTokenIdentifier;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class KerberosClient implements java.io.Closeable {
+  private static final Logger LOG = 
LoggerFactory.getLogger(KerberosClient.class);
+
+  private volatile ScheduledThreadPoolExecutor checkTgtExecutor;
+  private final Properties conf;
+  private final Configuration hadoopConf;
+  private final boolean refreshCredentials;
+  private volatile UserGroupInformation realLoginUgi;
+  private final String keytabFilePath;
+  private volatile HiveClient hiveClient = null;
+
+  public KerberosClient(
+      Properties properties,
+      Configuration hadoopConf,
+      boolean refreshCredentials,
+      String keytabFilePath)
+      throws IOException {
+    this.conf = properties;
+    this.hadoopConf = hadoopConf;
+    this.refreshCredentials = refreshCredentials;
+    File keyTabFile = saveKeyTabFileFromUri(keytabFilePath);
+    this.keytabFilePath = keyTabFile.getAbsolutePath();
+  }
+
+  public UserGroupInformation loginProxyUser(String currentUser) {
+    try {
+      if (currentUser.equals(realLoginUgi.getUserName()) || hiveClient == 
null) {
+        return realLoginUgi;
+      }
+
+      String tokenSignature = conf.getProperty(HIVE_METASTORE_TOKEN_SIGNATURE, 
"");
+      String principal = conf.getProperty(PRINCIPAL_KEY, "");
+      @SuppressWarnings("null")
+      List<String> principalComponents = 
Splitter.on('@').splitToList(principal);
+      Preconditions.checkArgument(
+          principalComponents.size() == 2, "The principal has the wrong 
format");

Review Comment:
   The `@SuppressWarnings("null")` annotation on line 77 suggests potential 
null pointer issues. The code should handle the case where 
`Splitter.on('@').splitToList(principal)` might not produce exactly 2 elements 
gracefully rather than relying on suppression.
   ```suggestion
         Preconditions.checkArgument(
             StringUtils.isNotBlank(principal),
             "Kerberos principal is not set or is blank in configuration");
         List<String> principalComponents = 
Splitter.on('@').splitToList(principal);
         Preconditions.checkArgument(
             principalComponents.size() == 2, "The principal has the wrong 
format: " + principal);
   ```



##########
catalogs/hive-metastore-common/src/test/java/org/apache/gravitino/hive/TestCachedClientPool.java:
##########
@@ -40,7 +41,8 @@ public void testClientPoolCleaner() throws 
InterruptedException {
             "1",
             HiveConstants.CLIENT_POOL_CACHE_EVICTION_INTERVAL_MS,
             "5000");
-    CachedClientPool clientPool = new 
CachedClientPool(MiniHiveMetastoreService.hiveConf, props);
+    // todo yuhui

Review Comment:
   TODO comment `// todo yuhui` should be removed or expanded with a proper 
description of what needs to be done.
   ```suggestion
   
   ```



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/converter/HiveTableConverter.java:
##########
@@ -99,21 +325,65 @@ Column[] getColumns(Table table, BUILDER columnBuilder) {
             sd.getCols().stream()
                 .map(
                     f ->
-                        columnBuilder
-                            .withName(f.getName())
-                            
.withType(HiveDataTypeConverter.CONVERTER.toGravitino(f.getType()))
-                            .withComment(f.getComment())
-                            .build()),
+                        buildColumn(
+                            f.getName(),
+                            
HiveDataTypeConverter.CONVERTER.toGravitino(f.getType()),
+                            f.getComment())),
             table.getPartitionKeys().stream()
                 // Filter out partition keys that already exist in sd.getCols()
                 .filter(p -> !columnNames.contains(p.getName()))
                 .map(
                     p ->
-                        columnBuilder
-                            .withName(p.getName())
-                            
.withType(HiveDataTypeConverter.CONVERTER.toGravitino(p.getType()))
-                            .withComment(p.getComment())
-                            .build()))
+                        buildColumn(
+                            p.getName(),
+                            
HiveDataTypeConverter.CONVERTER.toGravitino(p.getType()),
+                            p.getComment())))
         .toArray(Column[]::new);
   }
+
+  private static Column buildColumn(String name, Type type, String comment) {
+    HiveColumn.Builder builder =
+        HiveColumn.builder().withName(name).withType(type).withNullable(true);
+    if (comment != null) {
+      builder.withComment(comment);
+    }
+    return builder.build();
+  }
+
+  public static HivePartition fromHivePartition(
+      HiveTable table, org.apache.hadoop.hive.metastore.api.Partition 
partition) {
+    Preconditions.checkArgument(table != null, "Table cannot be null");
+    Preconditions.checkArgument(partition != null, "Partition cannot be null");
+    List<String> partitionColumns = table.partitionFieldNames();
+    String partitionName = FileUtils.makePartName(partitionColumns, 
partition.getValues());
+    // todo: support partition properties metadata to get more necessary 
information
+    return HivePartition.identity(partitionName, partition.getParameters());
+  }
+
+  public static org.apache.hadoop.hive.metastore.api.Partition toHivePartition(
+      String dbName, HiveTable table, HivePartition partition) {
+    Preconditions.checkArgument(dbName != null, "Database name cannot be 
null");
+    Preconditions.checkArgument(table != null, "Table cannot be null");
+    Preconditions.checkArgument(partition != null, "Partition cannot be null");
+    org.apache.hadoop.hive.metastore.api.Partition hivePartition =
+        new org.apache.hadoop.hive.metastore.api.Partition();
+    hivePartition.setDbName(dbName);
+    hivePartition.setTableName(table.name());
+
+    List<FieldSchema> partitionFields =
+        table.partitionFieldNames().stream()
+            .map(fieldName -> buildPartitionKeyField(fieldName, table))
+            .collect(Collectors.toList());
+    // todo: support custom serde and location if necessary

Review Comment:
   TODO comment should be addressed or converted to a proper JIRA issue 
reference regarding custom serde and location support for partitions.
   ```suggestion
       // TODO(GRAVITINO-XXX): Support custom serde and location for Hive 
partitions if necessary
   ```



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/converter/HiveDatabaseConverter.java:
##########
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.hive.converter;
+
+import static org.apache.gravitino.catalog.hive.HiveConstants.LOCATION;
+
+import avro.shaded.com.google.common.collect.Maps;

Review Comment:
   Incorrect import: using `avro.shaded.com.google.common.collect.Maps` instead 
of `com.google.common.collect.Maps`. This uses a shaded/relocated version from 
an Avro dependency which can cause classloading issues. Import the standard 
Guava version instead.
   ```suggestion
   import com.google.common.collect.Maps;
   ```



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/converter/HiveTableConverter.java:
##########
@@ -99,21 +325,65 @@ Column[] getColumns(Table table, BUILDER columnBuilder) {
             sd.getCols().stream()
                 .map(
                     f ->
-                        columnBuilder
-                            .withName(f.getName())
-                            
.withType(HiveDataTypeConverter.CONVERTER.toGravitino(f.getType()))
-                            .withComment(f.getComment())
-                            .build()),
+                        buildColumn(
+                            f.getName(),
+                            
HiveDataTypeConverter.CONVERTER.toGravitino(f.getType()),
+                            f.getComment())),
             table.getPartitionKeys().stream()
                 // Filter out partition keys that already exist in sd.getCols()
                 .filter(p -> !columnNames.contains(p.getName()))
                 .map(
                     p ->
-                        columnBuilder
-                            .withName(p.getName())
-                            
.withType(HiveDataTypeConverter.CONVERTER.toGravitino(p.getType()))
-                            .withComment(p.getComment())
-                            .build()))
+                        buildColumn(
+                            p.getName(),
+                            
HiveDataTypeConverter.CONVERTER.toGravitino(p.getType()),
+                            p.getComment())))
         .toArray(Column[]::new);
   }
+
+  private static Column buildColumn(String name, Type type, String comment) {
+    HiveColumn.Builder builder =
+        HiveColumn.builder().withName(name).withType(type).withNullable(true);
+    if (comment != null) {
+      builder.withComment(comment);
+    }
+    return builder.build();
+  }
+
+  public static HivePartition fromHivePartition(
+      HiveTable table, org.apache.hadoop.hive.metastore.api.Partition 
partition) {
+    Preconditions.checkArgument(table != null, "Table cannot be null");
+    Preconditions.checkArgument(partition != null, "Partition cannot be null");
+    List<String> partitionColumns = table.partitionFieldNames();
+    String partitionName = FileUtils.makePartName(partitionColumns, 
partition.getValues());
+    // todo: support partition properties metadata to get more necessary 
information

Review Comment:
   TODO comment should be addressed or converted to a proper JIRA issue 
reference. Consider whether partition properties metadata support is planned 
for a future release.
   ```suggestion
       // TODO(GRAVITINO-1234): Support partition properties metadata to get 
more necessary information.
   ```



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/CachedClientPool.java:
##########
@@ -154,6 +147,5 @@ public void close() {
     // class loader is closed.
     clientPoolCache.asMap().forEach((key, value) -> value.close());
     clientPoolCache.invalidateAll();

Review Comment:
   The `scheduler.shutdownNow()` call has been removed from the `close()` 
method. This will leak the scheduled executor thread and prevent proper 
cleanup. The scheduler should still be shut down when closing the pool.
   ```suggestion
       clientPoolCache.invalidateAll();
       if (scheduler != null) {
         scheduler.shutdownNow();
       }
   ```



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/kerberos/FetchFileUtils.java:
##########
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.hive.kerberos;
+
+import java.io.File;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.file.Files;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class FetchFileUtils {
+
+  private FetchFileUtils() {}
+
+  public static void fetchFileFromUri(
+      String fileUri, File destFile, int timeout, Configuration conf) throws 
java.io.IOException {
+    try {
+      URI uri = new URI(fileUri);
+      String scheme = 
java.util.Optional.ofNullable(uri.getScheme()).orElse("file");
+
+      switch (scheme) {
+        case "http":
+        case "https":
+        case "ftp":
+          FileUtils.copyURLToFile(uri.toURL(), destFile, timeout * 1000, 
timeout * 1000);
+          break;
+
+        case "file":
+          Files.createSymbolicLink(destFile.toPath(), new 
File(uri.getPath()).toPath());

Review Comment:
   Creating a symbolic link for "file:" scheme URIs could be a security 
concern. If the keytab file path is controlled by user input, this could create 
links to arbitrary files on the filesystem. Consider copying the file instead 
or validating the source path.
   ```suggestion
             FileUtils.copyFile(new File(uri.getPath()), destFile);
   ```



##########
catalogs/hive-metastore-common/src/main/java/org/apache/gravitino/hive/kerberos/AuthenticationConfig.java:
##########
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.gravitino.hive.kerberos;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
+import java.util.Map;
+import java.util.Properties;
+import org.apache.gravitino.Config;
+import org.apache.gravitino.config.ConfigBuilder;
+import org.apache.gravitino.config.ConfigConstants;
+import org.apache.gravitino.config.ConfigEntry;
+import org.apache.hadoop.conf.Configuration;
+
+public class AuthenticationConfig extends Config {
+
+  // The key for the authentication type, currently we support Kerberos and 
simple
+  public static final String AUTH_TYPE_KEY = "authentication.type";
+
+  public static final String IMPERSONATION_ENABLE_KEY = 
"authentication.impersonation-enable";
+
+  enum AuthenticationType {
+    SIMPLE,
+    KERBEROS;
+  }
+
+  public static final boolean KERBEROS_DEFAULT_IMPERSONATION_ENABLE = false;
+
+  public AuthenticationConfig(Properties properties, Configuration 
configuration) {
+    super(false);
+    loadFromHdfsConfiguration(configuration);
+    loadFromMap((Map) properties, k -> true);
+  }
+
+  private void loadFromHdfsConfiguration(Configuration configuration) {
+    String authType = configuration.get(HADOOP_SECURITY_AUTHENTICATION, 
"simple");
+    configMap.put(AUTH_TYPE_KEY, authType);
+  }
+
+  public static final ConfigEntry<String> AUTH_TYPE_ENTRY =
+      new ConfigBuilder(AUTH_TYPE_KEY)
+          .doc(
+              "The type of authentication for Hudi catalog, currently we only 
support simple and Kerberos")
+          .version(ConfigConstants.VERSION_1_0_0)
+          .stringConf()
+          .createWithDefault("simple");
+
+  public static final ConfigEntry<Boolean> ENABLE_IMPERSONATION_ENTRY =
+      new ConfigBuilder(IMPERSONATION_ENABLE_KEY)
+          .doc("Whether to enable impersonation for the Hudi catalog")

Review Comment:
   Documentation mentions "Hudi catalog" but this class is for Hive. It should reference 
"Hive catalog" instead.
   ```suggestion
                 "The type of authentication for Hive catalog, currently we 
only support simple and Kerberos")
             .version(ConfigConstants.VERSION_1_0_0)
             .stringConf()
             .createWithDefault("simple");
   
     public static final ConfigEntry<Boolean> ENABLE_IMPERSONATION_ENTRY =
         new ConfigBuilder(IMPERSONATION_ENABLE_KEY)
             .doc("Whether to enable impersonation for the Hive catalog")
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to the sender of this message.