Copilot commented on code in PR #6464:
URL: https://github.com/apache/hive/pull/6464#discussion_r3198710069


##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreDriver.java:
##########
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.sql.Connection;
+import java.sql.Driver;
+import java.sql.DriverManager;
+import java.sql.DriverPropertyInfo;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Properties;
+import java.util.logging.Logger;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.math.NumberUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+import org.slf4j.LoggerFactory;
+
+import static java.sql.DriverManager.registerDriver;
+
+public class MetastoreDriver implements Driver {
+  private static final org.slf4j.Logger LOG = 
LoggerFactory.getLogger(MetastoreDriver.class);
+  private static final String URL_PREFIX = "jdbc:metastore://";
+  private static final Configuration configuration;
+  private static int majorVersion = -1;
+  private static int minorVersion = -1;
+  private static volatile Driver delegateDriver;
+  static {
+    try {
+      registerDriver(new MetastoreDriver());
+      String versionString = MetastoreVersionInfo.getVersion();
+      String[] versionNums = versionString.split("\\.");
+      if (NumberUtils.isNumber(versionNums[0])) {
+        majorVersion = Integer.parseInt(versionNums[0]);
+      }
+      if (versionNums.length >1 && NumberUtils.isNumber(versionNums[1])) {
+        minorVersion = Integer.parseInt(versionNums[1]);
+      }
+      configuration = MetastoreConf.newMetastoreConf();
+    } catch (Exception e) {
+      throw new RuntimeException("Failed to register Metastore driver", e);
+    }
+  }
+
+  private synchronized static Driver
+      findRegisteredDriver(String jdbcUrl, String driverClassName) throws 
SQLException {
+    List<Driver> candidates = new ArrayList<>();
+    for (Enumeration<Driver> drivers = DriverManager.getDrivers(); 
drivers.hasMoreElements();) {
+      Driver driver = drivers.nextElement();
+      try {
+        if (driver.acceptsURL(jdbcUrl)) {
+          candidates.add(driver);
+        }
+      } catch (Exception e) {
+      }
+    }
+
+    if (candidates.isEmpty()) {
+      Class<Driver> driverClz = tryLoadDriver(driverClassName, 
Thread.currentThread().getContextClassLoader(),
+          MetastoreDriver.class.getClassLoader());
+      if (driverClz != null) {
+        try {
+          Driver driver = driverClz.getDeclaredConstructor().newInstance();
+          if (!driver.acceptsURL(jdbcUrl)) {
+            throw new RuntimeException("Driver " + driverClassName + " cannot 
accept jdbcUrl");
+          }
+          candidates.add(driver);
+        } catch (Exception e) {
+          LOG.warn("Failed to create instance of driver class {}", 
driverClassName, e);
+        }
+      }
+    }
+    delegateDriver = candidates.isEmpty() ? DriverManager.getDriver(jdbcUrl) : 
candidates.getFirst();

Review Comment:
   `List#getFirst()` is only available on newer Java versions 
(`SequencedCollection`, Java 21+). If this project targets Java 8/11/17 (as Hive 
commonly does), this will not compile. The surrounding ternary already guarantees 
`candidates` is non-empty on this branch, so `candidates.get(0)` is a safe, 
source-compatible replacement.
   



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreStatement.java:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.Statement;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.logSlowExecution;
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.isSlowExecution;
+
+@SuppressWarnings("unchecked")
+public final class MetastoreStatement implements InvocationHandler {
+  private static final Logger LOG = 
LoggerFactory.getLogger(MetastoreStatement.class);
+  private static final ThreadLocal<HMSHandlerContext.CallCtx> CALL_CTX = new 
ThreadLocal<>();
+  static final String EXEC_HOOK = "metastore.jdbc.execution.hook";
+
+  private final String rawSql;
+  private final Statement delegate;
+  private final Configuration configuration;
+  private final MetastoreStatementHook hook;
+
+  private MetastoreStatement(Configuration conf, Statement statement, String 
rawSql) {
+    this.configuration = Objects.requireNonNull(conf);
+    this.rawSql = rawSql;
+    this.delegate = Objects.requireNonNull(statement);
+    String className = conf.get(EXEC_HOOK, "");
+    if (StringUtils.isEmpty(className)) {
+      hook = new JdbcProfilerUtils(conf);
+    } else {
+      try {
+        hook = JavaUtils.newInstance(JavaUtils.getClass(className.trim(), 
MetastoreStatementHook.class),
+            new Class[] { Configuration.class}, new Object[] {conf});
+      } catch (MetaException e) {
+        throw new RuntimeException(e.getMessage(), e);
+      }
+    }
+  }
+
+  public static <T extends Statement> T getProxyStatement(Configuration 
configuration, T delegate, String rawSql) {
+    MetastoreStatement handler = new MetastoreStatement(configuration, 
delegate, rawSql);
+    return (T) Proxy.newProxyInstance(JavaUtils.getClassLoader(),
+        ClassUtils.getAllInterfaces(delegate.getClass()).toArray(new 
Class[0]), handler);
+  }
+
+  private void logSummary(boolean monitor) {
+    Optional<HMSHandlerContext.CallCtx> ctxCall = 
HMSHandlerContext.getCallCtx();
+    HMSHandlerContext.CallCtx previousCall = CALL_CTX.get();
+    if (ctxCall.isPresent()) {
+      if (previousCall == null) {
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        }
+      } else if (!ctxCall.get().equals(previousCall)) {
+        // we approach the end of previous thrift call
+        long totalSpent = previousCall.totalTime().get();
+        LOG.debug("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        if (isSlowExecution(configuration, totalSpent)) {
+          LOG.warn("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        }
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        } else {
+          CALL_CTX.remove();
+        }
+      }
+    }
+  }
+
+  @Override
+  public Object invoke(Object proxy, Method method, Object[] args) throws 
Throwable {
+    Timer.Context ctx = null;
+    try {
+      boolean monitor = hook.profile(rawSql, method, args);
+      logSummary(monitor);
+      if (Metrics.getRegistry() != null && monitor) {
+        String metricName = hook.getMetricName(method, args);
+        Timer timer = Metrics.getOrCreateTimer(metricName);
+        if (timer != null) {
+          ctx = timer.time();
+        }
+      }
+      long start = System.currentTimeMillis();
+      hook.preRun(method, args);
+      Object result = method.invoke(delegate, args);
+      hook.postRun(method, args, result);
+      long timeSpent = System.currentTimeMillis() - start;

Review Comment:
   Two issues here: (1) elapsed timing should use `System.nanoTime()` rather 
than `currentTimeMillis()` to avoid negative/incorrect durations if the system 
clock changes; (2) `throw e.getCause()` can throw `NullPointerException` if 
`getCause()` is null (possible for `UndeclaredThrowableException` depending on 
how it’s constructed). Prefer rethrowing `e.getCause()` when non-null, 
otherwise rethrow `e`.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/DbCPDataSourceProvider.java:
##########
@@ -127,10 +119,16 @@ public DataSource create(Configuration hdpConfig, int 
maxPoolSize) throws SQLExc
         objectPool.setSoftMinEvictableIdleDuration(Duration.ofMillis(600 * 
1000));
       }
     }
-    String stmt = dbProduct.getPrepareTxnStmt();
-    if (stmt != null) {
-      
poolableConnFactory.setConnectionInitSql(Collections.singletonList(stmt));
-    }
+    StringBuilder connectionProperties = new StringBuilder();
+    DataSourceProvider.preparePool(hdpConfig,
+        stmt -> 
poolableConnFactory.setConnectionInitSql(Collections.singletonList(stmt)),
+        kv -> {
+          if (!connectionProperties.isEmpty()) {
+            connectionProperties.append(';');
+          }
+          
connectionProperties.append(kv.getKey()).append('=').append(kv.getValue());
+          dbcpDs.setConnectionProperties(connectionProperties.toString());
+        });

Review Comment:
   `StringBuilder#isEmpty()` (inherited as a default method from 
`CharSequence`) was only added in Java 15, so this won't compile under Java 8 or 
11 build JDKs. Replace the check with `connectionProperties.length() > 0`, which 
works on every supported Java version.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreStatement.java:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.Statement;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.logSlowExecution;
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.isSlowExecution;
+
+@SuppressWarnings("unchecked")
+public final class MetastoreStatement implements InvocationHandler {
+  private static final Logger LOG = 
LoggerFactory.getLogger(MetastoreStatement.class);
+  private static final ThreadLocal<HMSHandlerContext.CallCtx> CALL_CTX = new 
ThreadLocal<>();
+  static final String EXEC_HOOK = "metastore.jdbc.execution.hook";
+
+  private final String rawSql;
+  private final Statement delegate;
+  private final Configuration configuration;
+  private final MetastoreStatementHook hook;
+
+  private MetastoreStatement(Configuration conf, Statement statement, String 
rawSql) {
+    this.configuration = Objects.requireNonNull(conf);
+    this.rawSql = rawSql;
+    this.delegate = Objects.requireNonNull(statement);
+    String className = conf.get(EXEC_HOOK, "");
+    if (StringUtils.isEmpty(className)) {
+      hook = new JdbcProfilerUtils(conf);
+    } else {
+      try {
+        hook = JavaUtils.newInstance(JavaUtils.getClass(className.trim(), 
MetastoreStatementHook.class),
+            new Class[] { Configuration.class}, new Object[] {conf});
+      } catch (MetaException e) {
+        throw new RuntimeException(e.getMessage(), e);
+      }
+    }
+  }
+
+  public static <T extends Statement> T getProxyStatement(Configuration 
configuration, T delegate, String rawSql) {
+    MetastoreStatement handler = new MetastoreStatement(configuration, 
delegate, rawSql);
+    return (T) Proxy.newProxyInstance(JavaUtils.getClassLoader(),
+        ClassUtils.getAllInterfaces(delegate.getClass()).toArray(new 
Class[0]), handler);
+  }
+
+  private void logSummary(boolean monitor) {
+    Optional<HMSHandlerContext.CallCtx> ctxCall = 
HMSHandlerContext.getCallCtx();
+    HMSHandlerContext.CallCtx previousCall = CALL_CTX.get();
+    if (ctxCall.isPresent()) {
+      if (previousCall == null) {
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        }
+      } else if (!ctxCall.get().equals(previousCall)) {
+        // we approach the end of previous thrift call
+        long totalSpent = previousCall.totalTime().get();
+        LOG.debug("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        if (isSlowExecution(configuration, totalSpent)) {
+          LOG.warn("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        }
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        } else {
+          CALL_CTX.remove();
+        }
+      }
+    }
+  }
+
+  @Override
+  public Object invoke(Object proxy, Method method, Object[] args) throws 
Throwable {
+    Timer.Context ctx = null;
+    try {
+      boolean monitor = hook.profile(rawSql, method, args);
+      logSummary(monitor);
+      if (Metrics.getRegistry() != null && monitor) {
+        String metricName = hook.getMetricName(method, args);
+        Timer timer = Metrics.getOrCreateTimer(metricName);
+        if (timer != null) {
+          ctx = timer.time();
+        }
+      }
+      long start = System.currentTimeMillis();
+      hook.preRun(method, args);
+      Object result = method.invoke(delegate, args);
+      hook.postRun(method, args, result);
+      long timeSpent = System.currentTimeMillis() - start;
+      if (monitor) {
+        String statement = rawSql != null ? rawSql : (args != null && 
args.length > 0 ? (String) args[0] : "no sql found");
+        LOG.debug("Jdbc query: {} completed in {} ms", statement, timeSpent);
+        if (CALL_CTX.get() != null) {
+          CALL_CTX.get().totalTime().addAndGet(timeSpent);
+        }
+      }
+      logSlowExecution(timeSpent, configuration, rawSql, method, args);
+      return result;
+    } catch (InvocationTargetException | UndeclaredThrowableException e) {
+      throw e.getCause();
+    } finally {
+      if (ctx != null) {
+        ctx.stop();
+      }
+    }
+  }
+
+  public interface MetastoreStatementHook {
+    /**
+     * Whether should monitor the current call, this method gives a chance to 
profile a specific pattern of queries.
+     * For example, we use {@link JdbcProfilerUtils} to profile the queries 
originated from a set of specific APIs.
+     * @param sql The sql being executed, it might be null for {@link 
Statement#execute}, for this case
+     *            need to obtain the sql from args, the method input.
+     * @param method Method which is being called
+     * @param args The method input
+     * @return true for profiling this call, false otherwise
+     */
+    boolean profile(String sql, Method method, Object[] args);
+
+    String getMetricName(Method method, Object[] args);
+    /**
+     * Invoked before the method call
+     * @param method Method which is being called
+     * @param args The method input
+     */
+    default void preRun(Method method, Object[] args) {
+
+    }
+
+    /**
+     *  Invoked post the method call
+     * @param method Method which is being called
+     * @param args The method input
+     * @param result The execution result from the call
+     */
+    default void postRun(Method method, Object[] args, Object result) {
+
+    }
+  }
+
+  /**
+   * This class is used to profile the underlying statement originated from 
specific thrift API calls
+   */
+  public static class JdbcProfilerUtils implements 
MetastoreStatement.MetastoreStatementHook  {
+    private static final Set<String> PROFILED_APIS = new HashSet<>();
+    static final Set<String> QUERY_EXECUTION =
+        Set.of("executeQuery", "executeUpdate", "execute", "executeBatch");
+    private static volatile boolean initialized = false;
+    private static long logSlowQueriesThreshold;

Review Comment:
   This caches `logSlowQueriesThreshold` and `PROFILED_APIS` in static state 
after the first initialization and never refreshes them. That can cause 
test-order dependence (one test’s config can “stick” for later tests), and it 
prevents configuration changes from taking effect in the same JVM. Consider 
making these instance fields on `JdbcProfilerUtils` (preferred since you 
already construct a hook instance per statement), or add a test-only 
reset/reinit mechanism to avoid cross-test pollution.



##########
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java:
##########
@@ -648,6 +648,13 @@ public enum ConfVars {
         "hive.txn.acid.metrics.delta.pct.threshold", 0.01f,
         "Percentage (fractional) size of the delta files relative to the base 
directory. Deltas smaller than this threshold " +
             "count as small deltas. Default 0.01 = 1%.)"),
+    
METASTORE_JDBC_SLOW_QUERIES("metastore.jdbc.execution.logSlowQueriesThreshold", 
"metastore.jdbc.execution.logSlowQueriesThreshold",
+        3000, "Log the slow jdbc query that Metastore has been waiting for the 
result beyond the threshold(ms), " +
+        "should enable the metastore.profile.jdbc.execution first"),
+    METASTORE_PROFILE_JDBC_EXECUTION("metastore.profile.jdbc.execution", 
"metastore.profile.jdbc.execution", true,
+        "Profile the jdbc executions, will give the histogram about the jdbc 
read and write if check the metrics"),

Review Comment:
   `METASTORE_PROFILE_JDBC_EXECUTION` is introduced with a default of `true`, 
which effectively enables connection/statement proxying and metric creation by 
default. That can add overhead and change runtime behavior (URL wrapping via 
`MetastoreDriver`) even for users who didn’t opt in. If the intention is opt-in 
profiling, consider defaulting this to `false`; otherwise, please update the 
slow-query description (it currently implies profiling is something to be 
enabled first) and ensure the default-on behavior is explicitly justified.
   



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreStatement.java:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.Statement;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.logSlowExecution;
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.isSlowExecution;
+
+@SuppressWarnings("unchecked")
+public final class MetastoreStatement implements InvocationHandler {
+  private static final Logger LOG = 
LoggerFactory.getLogger(MetastoreStatement.class);
+  private static final ThreadLocal<HMSHandlerContext.CallCtx> CALL_CTX = new 
ThreadLocal<>();
+  static final String EXEC_HOOK = "metastore.jdbc.execution.hook";
+
+  private final String rawSql;
+  private final Statement delegate;
+  private final Configuration configuration;
+  private final MetastoreStatementHook hook;
+
+  private MetastoreStatement(Configuration conf, Statement statement, String 
rawSql) {
+    this.configuration = Objects.requireNonNull(conf);
+    this.rawSql = rawSql;
+    this.delegate = Objects.requireNonNull(statement);
+    String className = conf.get(EXEC_HOOK, "");
+    if (StringUtils.isEmpty(className)) {
+      hook = new JdbcProfilerUtils(conf);
+    } else {
+      try {
+        hook = JavaUtils.newInstance(JavaUtils.getClass(className.trim(), 
MetastoreStatementHook.class),
+            new Class[] { Configuration.class}, new Object[] {conf});
+      } catch (MetaException e) {
+        throw new RuntimeException(e.getMessage(), e);
+      }
+    }
+  }
+
+  public static <T extends Statement> T getProxyStatement(Configuration 
configuration, T delegate, String rawSql) {
+    MetastoreStatement handler = new MetastoreStatement(configuration, 
delegate, rawSql);
+    return (T) Proxy.newProxyInstance(JavaUtils.getClassLoader(),
+        ClassUtils.getAllInterfaces(delegate.getClass()).toArray(new 
Class[0]), handler);
+  }
+
+  private void logSummary(boolean monitor) {
+    Optional<HMSHandlerContext.CallCtx> ctxCall = 
HMSHandlerContext.getCallCtx();
+    HMSHandlerContext.CallCtx previousCall = CALL_CTX.get();
+    if (ctxCall.isPresent()) {
+      if (previousCall == null) {
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        }
+      } else if (!ctxCall.get().equals(previousCall)) {
+        // we approach the end of previous thrift call
+        long totalSpent = previousCall.totalTime().get();
+        LOG.debug("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        if (isSlowExecution(configuration, totalSpent)) {
+          LOG.warn("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        }
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        } else {
+          CALL_CTX.remove();
+        }
+      }
+    }
+  }
+
+  @Override
+  public Object invoke(Object proxy, Method method, Object[] args) throws 
Throwable {
+    Timer.Context ctx = null;
+    try {
+      boolean monitor = hook.profile(rawSql, method, args);
+      logSummary(monitor);
+      if (Metrics.getRegistry() != null && monitor) {
+        String metricName = hook.getMetricName(method, args);
+        Timer timer = Metrics.getOrCreateTimer(metricName);
+        if (timer != null) {
+          ctx = timer.time();
+        }
+      }
+      long start = System.currentTimeMillis();
+      hook.preRun(method, args);
+      Object result = method.invoke(delegate, args);
+      hook.postRun(method, args, result);
+      long timeSpent = System.currentTimeMillis() - start;
+      if (monitor) {
+        String statement = rawSql != null ? rawSql : (args != null && 
args.length > 0 ? (String) args[0] : "no sql found");
+        LOG.debug("Jdbc query: {} completed in {} ms", statement, timeSpent);
+        if (CALL_CTX.get() != null) {
+          CALL_CTX.get().totalTime().addAndGet(timeSpent);
+        }
+      }
+      logSlowExecution(timeSpent, configuration, rawSql, method, args);
+      return result;
+    } catch (InvocationTargetException | UndeclaredThrowableException e) {
+      throw e.getCause();
+    } finally {

Review Comment:
   Two issues here: (1) elapsed timing should use `System.nanoTime()` rather 
than `currentTimeMillis()` to avoid negative/incorrect durations if the system 
clock changes; (2) `throw e.getCause()` can throw `NullPointerException` if 
`getCause()` is null (possible for `UndeclaredThrowableException` depending on 
how it’s constructed). Prefer rethrowing `e.getCause()` when non-null, 
otherwise rethrow `e`.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreDriver.java:
##########
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.sql.Connection;
+import java.sql.Driver;
+import java.sql.DriverManager;
+import java.sql.DriverPropertyInfo;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Properties;
+import java.util.logging.Logger;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.math.NumberUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+import org.slf4j.LoggerFactory;
+
+import static java.sql.DriverManager.registerDriver;
+
+public class MetastoreDriver implements Driver {
+  private static final org.slf4j.Logger LOG = 
LoggerFactory.getLogger(MetastoreDriver.class);
+  private static final String URL_PREFIX = "jdbc:metastore://";
+  private static final Configuration configuration;
+  private static int majorVersion = -1;
+  private static int minorVersion = -1;
+  private static volatile Driver delegateDriver;
+  static {
+    try {
+      registerDriver(new MetastoreDriver());
+      String versionString = MetastoreVersionInfo.getVersion();
+      String[] versionNums = versionString.split("\\.");
+      if (NumberUtils.isNumber(versionNums[0])) {
+        majorVersion = Integer.parseInt(versionNums[0]);
+      }
+      if (versionNums.length >1 && NumberUtils.isNumber(versionNums[1])) {
+        minorVersion = Integer.parseInt(versionNums[1]);
+      }
+      configuration = MetastoreConf.newMetastoreConf();
+    } catch (Exception e) {
+      throw new RuntimeException("Failed to register Metastore driver", e);
+    }
+  }
+
+  private synchronized static Driver
+      findRegisteredDriver(String jdbcUrl, String driverClassName) throws 
SQLException {
+    List<Driver> candidates = new ArrayList<>();
+    for (Enumeration<Driver> drivers = DriverManager.getDrivers(); 
drivers.hasMoreElements();) {
+      Driver driver = drivers.nextElement();
+      try {
+        if (driver.acceptsURL(jdbcUrl)) {
+          candidates.add(driver);
+        }
+      } catch (Exception e) {

Review Comment:
   The empty catch block swallows errors from `acceptsURL(...)` silently, 
making driver selection problems hard to debug. At minimum, log at DEBUG with 
the driver class name and exception (or restrict the catch to `SQLException`) 
so operational diagnosis is possible without changing behavior.
   



##########
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestMetastoreConnection.java:
##########
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Timer;
+import com.zaxxer.hikari.HikariDataSource;
+
+import javax.sql.DataSource;
+import java.lang.reflect.Method;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.dbcp2.PoolingDataSource;
+import org.apache.derby.impl.jdbc.EmbedConnection;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MetastoreUnitTest.class)
+public class TestMetastoreConnection {
+  private Configuration conf;
+  private Counter slowQuery;
+
+  @Before
+  public void init() {
+    conf = MetastoreDriver.getConfiguration();
+    conf.set(MetastoreStatement.EXEC_HOOK, 
MetastoreStatementTestHook.class.getName());
+    MetastoreConf.setLongVar(conf, 
MetastoreConf.ConfVars.METASTORE_JDBC_SLOW_QUERIES, 200);
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, 
true);
+    MetastoreConf.setBoolVar(conf, 
MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_EXECUTION, true);
+    MetastoreConf.setVar(conf, 
MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_THRIFT_APIS, 
"test_metastore_statement");
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME, 
"dummyUser");
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PWD, "dummyPass");
+    conf.unset(MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.getVarname());
+
+    Metrics.initialize(conf);
+    slowQuery = Metrics.getOrCreateCounter(MetricsConstants.JDBC_SLOW_QUERIES);
+  }
+
+  @Test
+  public void testDefaultHikariCp() throws Exception {
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE, 
HikariCPDataSourceProvider.HIKARI);
+
+    DataSourceProvider dsp = 
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+    Assert.assertNotNull(dsp);
+    DataSource ds = dsp.create(conf);
+    Assert.assertTrue(ds instanceof HikariDataSource);
+    verify(ds.getConnection());
+  }
+
+  @Test
+  public void testDbCpDataSource() throws Exception {
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE, 
DbCPDataSourceProvider.DBCP);
+
+    DataSourceProvider dsp = 
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+    Assert.assertNotNull(dsp);
+    DataSource ds = dsp.create(conf);
+    Assert.assertTrue(ds instanceof PoolingDataSource);
+    verify(ds.getConnection());

Review Comment:
   The test opens JDBC connections but never closes them, and it also never 
closes the created data source (notably important for `HikariDataSource`). This 
can leak threads/connections and make the test suite flaky. Use 
try-with-resources for the `Connection`, and ensure `HikariDataSource#close()` 
is called when applicable.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreStatement.java:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.Statement;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.logSlowExecution;
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.isSlowExecution;
+
+@SuppressWarnings("unchecked")
+public final class MetastoreStatement implements InvocationHandler {
+  private static final Logger LOG = 
LoggerFactory.getLogger(MetastoreStatement.class);
+  private static final ThreadLocal<HMSHandlerContext.CallCtx> CALL_CTX = new 
ThreadLocal<>();
+  static final String EXEC_HOOK = "metastore.jdbc.execution.hook";
+
+  private final String rawSql;
+  private final Statement delegate;
+  private final Configuration configuration;
+  private final MetastoreStatementHook hook;
+
+  private MetastoreStatement(Configuration conf, Statement statement, String 
rawSql) {
+    this.configuration = Objects.requireNonNull(conf);
+    this.rawSql = rawSql;
+    this.delegate = Objects.requireNonNull(statement);
+    String className = conf.get(EXEC_HOOK, "");
+    if (StringUtils.isEmpty(className)) {
+      hook = new JdbcProfilerUtils(conf);
+    } else {
+      try {
+        hook = JavaUtils.newInstance(JavaUtils.getClass(className.trim(), 
MetastoreStatementHook.class),
+            new Class[] { Configuration.class}, new Object[] {conf});
+      } catch (MetaException e) {
+        throw new RuntimeException(e.getMessage(), e);
+      }
+    }
+  }
+
+  public static <T extends Statement> T getProxyStatement(Configuration 
configuration, T delegate, String rawSql) {
+    MetastoreStatement handler = new MetastoreStatement(configuration, 
delegate, rawSql);
+    return (T) Proxy.newProxyInstance(JavaUtils.getClassLoader(),
+        ClassUtils.getAllInterfaces(delegate.getClass()).toArray(new 
Class[0]), handler);
+  }
+
+  private void logSummary(boolean monitor) {
+    Optional<HMSHandlerContext.CallCtx> ctxCall = 
HMSHandlerContext.getCallCtx();
+    HMSHandlerContext.CallCtx previousCall = CALL_CTX.get();
+    if (ctxCall.isPresent()) {
+      if (previousCall == null) {
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        }
+      } else if (!ctxCall.get().equals(previousCall)) {
+        // we approach the end of previous thrift call
+        long totalSpent = previousCall.totalTime().get();
+        LOG.debug("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        if (isSlowExecution(configuration, totalSpent)) {
+          LOG.warn("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        }
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        } else {
+          CALL_CTX.remove();
+        }
+      }

Review Comment:
   If `HMSHandlerContext.getCallCtx()` becomes empty (e.g., context cleared at 
end of a request), `CALL_CTX` is never removed. That leaves stale per-thread 
state around and can both skew subsequent summaries and retain references 
longer than necessary in thread pools. Consider adding an `else` branch to 
remove `CALL_CTX` when `ctxCall` is not present.
   



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreStatement.java:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.Statement;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.logSlowExecution;
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.isSlowExecution;
+
+@SuppressWarnings("unchecked")
+public final class MetastoreStatement implements InvocationHandler {
+  private static final Logger LOG = 
LoggerFactory.getLogger(MetastoreStatement.class);
+  private static final ThreadLocal<HMSHandlerContext.CallCtx> CALL_CTX = new 
ThreadLocal<>();
+  static final String EXEC_HOOK = "metastore.jdbc.execution.hook";
+
+  private final String rawSql;
+  private final Statement delegate;
+  private final Configuration configuration;
+  private final MetastoreStatementHook hook;
+
  /**
   * Creates the invocation handler backing a proxied {@link Statement}.
   *
   * @param conf metastore configuration; also consulted for the {@link #EXEC_HOOK} class name
   * @param statement the real statement every call is delegated to
   * @param rawSql the sql bound to a prepared statement, or null for plain statements
   *               where the sql arrives later as a method argument
   */
  private MetastoreStatement(Configuration conf, Statement statement, String rawSql) {
    this.configuration = Objects.requireNonNull(conf);
    this.rawSql = rawSql;
    this.delegate = Objects.requireNonNull(statement);
    String className = conf.get(EXEC_HOOK, "");
    if (StringUtils.isEmpty(className)) {
      // Default hook: profiles queries originated from the configured thrift APIs.
      hook = new JdbcProfilerUtils(conf);
    } else {
      // A custom hook class was configured; it must expose a (Configuration) constructor.
      try {
        hook = JavaUtils.newInstance(JavaUtils.getClass(className.trim(), MetastoreStatementHook.class),
            new Class[] { Configuration.class}, new Object[] {conf});
      } catch (MetaException e) {
        throw new RuntimeException(e.getMessage(), e);
      }
    }
  }
+
+  public static <T extends Statement> T getProxyStatement(Configuration 
configuration, T delegate, String rawSql) {
+    MetastoreStatement handler = new MetastoreStatement(configuration, 
delegate, rawSql);
+    return (T) Proxy.newProxyInstance(JavaUtils.getClassLoader(),
+        ClassUtils.getAllInterfaces(delegate.getClass()).toArray(new 
Class[0]), handler);
+  }
+
+  private void logSummary(boolean monitor) {
+    Optional<HMSHandlerContext.CallCtx> ctxCall = 
HMSHandlerContext.getCallCtx();
+    HMSHandlerContext.CallCtx previousCall = CALL_CTX.get();
+    if (ctxCall.isPresent()) {
+      if (previousCall == null) {
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        }
+      } else if (!ctxCall.get().equals(previousCall)) {
+        // we approach the end of previous thrift call
+        long totalSpent = previousCall.totalTime().get();
+        LOG.debug("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        if (isSlowExecution(configuration, totalSpent)) {
+          LOG.warn("{} took {} ms to complete all queries", 
previousCall.methodName(), totalSpent);
+        }
+        if (monitor) {
+          CALL_CTX.set(ctxCall.get());
+        } else {
+          CALL_CTX.remove();
+        }
+      }
+    }
+  }
+
  /**
   * Intercepts every call on the proxied statement: times profiled executions via a
   * metrics {@link Timer}, runs the hook's pre/post callbacks, accumulates per-thrift-call
   * totals, and logs slow executions.
   *
   * @param proxy the proxy instance (unused; calls go to {@link #delegate})
   * @param method the statement method being invoked
   * @param args the method arguments, possibly null
   * @return whatever the delegate returns
   * @throws Throwable the delegate's original exception, unwrapped from reflection wrappers
   */
  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    Timer.Context ctx = null;
    try {
      // The hook decides whether this particular call should be profiled.
      boolean monitor = hook.profile(rawSql, method, args);
      logSummary(monitor);
      if (Metrics.getRegistry() != null && monitor) {
        String metricName = hook.getMetricName(method, args);
        Timer timer = Metrics.getOrCreateTimer(metricName);
        if (timer != null) {
          ctx = timer.time();
        }
      }
      long start = System.currentTimeMillis();
      hook.preRun(method, args);
      Object result = method.invoke(delegate, args);
      hook.postRun(method, args, result);
      long timeSpent = System.currentTimeMillis() - start;
      if (monitor) {
        // For Statement#execute-style calls rawSql is null and the sql, when present,
        // is the first method argument.
        String statement = rawSql != null ? rawSql : (args != null && args.length > 0 ? (String) args[0] : "no sql found");
        LOG.debug("Jdbc query: {} completed in {} ms", statement, timeSpent);
        if (CALL_CTX.get() != null) {
          CALL_CTX.get().totalTime().addAndGet(timeSpent);
        }
      }
      logSlowExecution(timeSpent, configuration, rawSql, method, args);
      return result;
    } catch (InvocationTargetException | UndeclaredThrowableException e) {
      // Unwrap reflection wrappers so callers see the original SQLException etc.
      throw e.getCause();
    } finally {
      if (ctx != null) {
        ctx.stop();
      }
    }
  }
+
  /**
   * Callback contract letting deployments customize how proxied statement calls
   * are selected for profiling and instrumented around execution.
   */
  public interface MetastoreStatementHook {
    /**
     * Whether should monitor the current call, this method gives a chance to profile a specific pattern of queries.
     * For example, we use {@link JdbcProfilerUtils} to profile the queries originated from a set of specific APIs.
     * @param sql The sql being executed, it might be null for {@link Statement#execute}, for this case
     *            need to obtain the sql from args, the method input.
     * @param method Method which is being called
     * @param args The method input
     * @return true for profiling this call, false otherwise
     */
    boolean profile(String sql, Method method, Object[] args);

    /**
     * Name of the timer metric a profiled call is recorded under.
     * @param method Method which is being called
     * @param args The method input
     * @return the metric name to register the execution time against
     */
    String getMetricName(Method method, Object[] args);
    /**
     * Invoked before the method call
     * @param method Method which is being called
     * @param args The method input
     */
    default void preRun(Method method, Object[] args) {

    }

    /**
     *  Invoked post the method call
     * @param method Method which is being called
     * @param args The method input
     * @param result The execution result from the call
     */
    default void postRun(Method method, Object[] args, Object result) {

    }
  }
+
+  /**
+   * This class is used to profile the underlying statement originated from 
specific thrift API calls
+   */
+  public static class JdbcProfilerUtils implements 
MetastoreStatement.MetastoreStatementHook  {
+    private static final Set<String> PROFILED_APIS = new HashSet<>();
+    static final Set<String> QUERY_EXECUTION =
+        Set.of("executeQuery", "executeUpdate", "execute", "executeBatch");
+    private static volatile boolean initialized = false;
+    private static long logSlowQueriesThreshold;
+
    /**
     * Creates the default profiler hook; the first instance constructed triggers the
     * one-time initialization of the shared static state (slow-query threshold and
     * profiled thrift API set).
     */
    public JdbcProfilerUtils(Configuration configuration) {
      initialize(Objects.requireNonNull(configuration));
    }
+
+    private static void initialize(Configuration configuration) {
+      if (!initialized) {
+        synchronized (JdbcProfilerUtils.class) {
+          if (!initialized) {
+            initialized = true;
+            logSlowQueriesThreshold = MetastoreConf.getLongVar(configuration,
+                MetastoreConf.ConfVars.METASTORE_JDBC_SLOW_QUERIES);
+            if (logSlowQueriesThreshold > 0) {
+              LOG.info("The slow query log enabled, will log the query that 
takes more than {} ms",
+                  logSlowQueriesThreshold);
+            }
+            String thriftApis = MetastoreConf.getVar(configuration,
+                MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_THRIFT_APIS);
+            for (String thriftApi : thriftApis.split(",")) {
+              String trimmedThriftApi = thriftApi.trim();
+              if (!trimmedThriftApi.isEmpty()) {
+                PROFILED_APIS.add(trimmedThriftApi);
+              }
+            }
+          }
+        }
+      }
+    }

Review Comment:
   This caches `logSlowQueriesThreshold` and `PROFILED_APIS` in static state 
after the first initialization and never refreshes them. That can cause 
test-order dependence (one test’s config can “stick” for later tests), and it 
prevents configuration changes from taking effect in the same JVM. Consider 
making these instance fields on `JdbcProfilerUtils` (preferred since you 
already construct a hook instance per statement), or add a test-only 
reset/reinit mechanism to avoid cross-test pollution.



##########
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestMetastoreConnection.java:
##########
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Timer;
+import com.zaxxer.hikari.HikariDataSource;
+
+import javax.sql.DataSource;
+import java.lang.reflect.Method;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.dbcp2.PoolingDataSource;
+import org.apache.derby.impl.jdbc.EmbedConnection;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MetastoreUnitTest.class)
+public class TestMetastoreConnection {
+  private Configuration conf;
+  private Counter slowQuery;
+
  // Common fixture: configures the statement test hook, slow-query threshold and
  // profiling switches, then initializes metrics so the per-test verification can
  // read the slow-query counter. Runs before every test.
  @Before
  public void init() {
    conf = MetastoreDriver.getConfiguration();
    // Route proxied statement calls through the test hook so tests can inject delays.
    conf.set(MetastoreStatement.EXEC_HOOK, MetastoreStatementTestHook.class.getName());
    // Queries slower than 200 ms count as "slow" for the JDBC_SLOW_QUERIES counter.
    MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.METASTORE_JDBC_SLOW_QUERIES, 200);
    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_EXECUTION, true);
    // Only calls tagged with this API name are profiled by the default profiler.
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_THRIFT_APIS, "test_metastore_statement");
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME, "dummyUser");
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PWD, "dummyPass");
    // Each test sets its own pooling type; clear any value left by a previous test.
    conf.unset(MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.getVarname());

    Metrics.initialize(conf);
    slowQuery = Metrics.getOrCreateCounter(MetricsConstants.JDBC_SLOW_QUERIES);
  }
+
+  @Test
+  public void testDefaultHikariCp() throws Exception {
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE, 
HikariCPDataSourceProvider.HIKARI);
+
+    DataSourceProvider dsp = 
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+    Assert.assertNotNull(dsp);
+    DataSource ds = dsp.create(conf);
+    Assert.assertTrue(ds instanceof HikariDataSource);
+    verify(ds.getConnection());
+  }
+
+  @Test
+  public void testDbCpDataSource() throws Exception {
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE, 
DbCPDataSourceProvider.DBCP);
+
+    DataSourceProvider dsp = 
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+    Assert.assertNotNull(dsp);
+    DataSource ds = dsp.create(conf);
+    Assert.assertTrue(ds instanceof PoolingDataSource);
+    verify(ds.getConnection());
+  }
+
+  private void verify(Connection connection) throws Exception {
+    Assert.assertTrue(connection.unwrap(MetastoreConnection.class).delegate() 
instanceof EmbedConnection);
+    long slowNum = slowQuery.getCount();
+    Timer timer = 
Metrics.getOrCreateTimer(MetastoreStatementTestHook.TEST_METRIC_NAME);
+    Assert.assertNotNull(timer);
+    long timeCount = timer.getCount();
+    try (AutoCloseable sleep = 
MetastoreStatementTestHook.testConnection("test_metastore_statement", 300)) {
+      try (Statement statement = connection.createStatement();
+           ResultSet rs = statement.executeQuery("VALUES 1")) {
+        Assert.assertTrue(rs.next());
+      }
+    }
+    Assert.assertEquals(slowNum + 1, slowQuery.getCount());
+    Assert.assertEquals(timeCount + 1, timer.getCount());
+    Assert.assertTrue(timer.getSnapshot().getMean() > 
TimeUnit.MILLISECONDS.toNanos(300));

Review Comment:
   This assertion can be timing-flaky: `Thread.sleep(300)` is not guaranteed to 
sleep at least 300ms, and checking `getMean()` can be sensitive to additional 
timer updates. Prefer asserting on `getMax()` (or checking the last recorded 
value if available) and/or allow a tolerance (e.g., >= 250ms) to reduce 
flakiness across CI environments.
   



##########
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/datasource/TestMetastoreConnection.java:
##########
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Timer;
+import com.zaxxer.hikari.HikariDataSource;
+
+import javax.sql.DataSource;
+import java.lang.reflect.Method;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.dbcp2.PoolingDataSource;
+import org.apache.derby.impl.jdbc.EmbedConnection;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(MetastoreUnitTest.class)
+public class TestMetastoreConnection {
+  private Configuration conf;
+  private Counter slowQuery;
+
+  @Before
+  public void init() {
+    conf = MetastoreDriver.getConfiguration();
+    conf.set(MetastoreStatement.EXEC_HOOK, 
MetastoreStatementTestHook.class.getName());
+    MetastoreConf.setLongVar(conf, 
MetastoreConf.ConfVars.METASTORE_JDBC_SLOW_QUERIES, 200);
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, 
true);
+    MetastoreConf.setBoolVar(conf, 
MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_EXECUTION, true);
+    MetastoreConf.setVar(conf, 
MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_THRIFT_APIS, 
"test_metastore_statement");
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME, 
"dummyUser");
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PWD, "dummyPass");
+    conf.unset(MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE.getVarname());
+
+    Metrics.initialize(conf);
+    slowQuery = Metrics.getOrCreateCounter(MetricsConstants.JDBC_SLOW_QUERIES);
+  }
+
+  @Test
+  public void testDefaultHikariCp() throws Exception {
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE, 
HikariCPDataSourceProvider.HIKARI);
+
+    DataSourceProvider dsp = 
DataSourceProviderFactory.tryGetDataSourceProviderOrNull(conf);
+    Assert.assertNotNull(dsp);
+    DataSource ds = dsp.create(conf);
+    Assert.assertTrue(ds instanceof HikariDataSource);
+    verify(ds.getConnection());

Review Comment:
   The test opens JDBC connections but never closes them, and it also never 
closes the created data source (notably important for `HikariDataSource`). This 
can leak threads/connections and make the test suite flaky. Use 
try-with-resources for the `Connection`, and ensure `HikariDataSource#close()` 
is called when applicable.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreDriver.java:
##########
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.sql.Connection;
+import java.sql.Driver;
+import java.sql.DriverManager;
+import java.sql.DriverPropertyInfo;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Properties;
+import java.util.logging.Logger;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.math.NumberUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+import org.slf4j.LoggerFactory;
+
+import static java.sql.DriverManager.registerDriver;
+
+public class MetastoreDriver implements Driver {
+  private static final org.slf4j.Logger LOG = 
LoggerFactory.getLogger(MetastoreDriver.class);
+  private static final String URL_PREFIX = "jdbc:metastore://";
+  private static final Configuration configuration;
+  private static int majorVersion = -1;
+  private static int minorVersion = -1;
+  private static volatile Driver delegateDriver;
  // One-time class initialization: registers this driver with DriverManager and
  // captures the metastore version plus a base configuration used for delegate lookup.
  static {
    try {
      // Make "jdbc:metastore://..." URLs resolvable through DriverManager.
      registerDriver(new MetastoreDriver());
      // Parse "major.minor[...]" from the build version; fields stay -1 when a
      // component is absent or non-numeric.
      String versionString = MetastoreVersionInfo.getVersion();
      String[] versionNums = versionString.split("\\.");
      if (NumberUtils.isNumber(versionNums[0])) {
        majorVersion = Integer.parseInt(versionNums[0]);
      }
      if (versionNums.length >1 && NumberUtils.isNumber(versionNums[1])) {
        minorVersion = Integer.parseInt(versionNums[1]);
      }
      configuration = MetastoreConf.newMetastoreConf();
    } catch (Exception e) {
      // Fail class loading loudly: a silently unregistered driver would surface later
      // as an obscure "no suitable driver" error.
      throw new RuntimeException("Failed to register Metastore driver", e);
    }
  }
+
+  private synchronized static Driver
+      findRegisteredDriver(String jdbcUrl, String driverClassName) throws 
SQLException {
+    List<Driver> candidates = new ArrayList<>();
+    for (Enumeration<Driver> drivers = DriverManager.getDrivers(); 
drivers.hasMoreElements();) {
+      Driver driver = drivers.nextElement();
+      try {
+        if (driver.acceptsURL(jdbcUrl)) {
+          candidates.add(driver);
+        }
+      } catch (Exception e) {
+      }
+    }
+
+    if (candidates.isEmpty()) {
+      Class<Driver> driverClz = tryLoadDriver(driverClassName, 
Thread.currentThread().getContextClassLoader(),
+          MetastoreDriver.class.getClassLoader());
+      if (driverClz != null) {
+        try {
+          Driver driver = driverClz.getDeclaredConstructor().newInstance();
+          if (!driver.acceptsURL(jdbcUrl)) {
+            throw new RuntimeException("Driver " + driverClassName + " cannot 
accept jdbcUrl");
+          }
+          candidates.add(driver);
+        } catch (Exception e) {
+          LOG.warn("Failed to create instance of driver class {}", 
driverClassName, e);
+        }
+      }
+    }
+    delegateDriver = candidates.isEmpty() ? DriverManager.getDriver(jdbcUrl) : 
candidates.getFirst();
+    return delegateDriver;
+  }
+
+  private static Class<Driver> tryLoadDriver(String driverClassName, 
ClassLoader... loaders) {
+    for (ClassLoader loader : loaders) {
+      if (loader != null) {
+        try {
+          return (Class<Driver>) loader.loadClass(driverClassName);
+        } catch (ClassNotFoundException e) {
+          LOG.debug("Driver class {} not found in class loader {}", 
driverClassName, loader);
+        }
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public Connection connect(String url, Properties info) throws SQLException {
+    if (!acceptsURL(url)) {
+      return null;
+    }
+
+    String driverAndUrl = url.substring(URL_PREFIX.length());
+    String defaultDriverClz =  driverAndUrl.split(":")[0];
+    String jdbcUrl = driverAndUrl.substring(defaultDriverClz.length() + 1);

Review Comment:
   This PR introduces non-trivial URL parsing and driver selection logic, but 
the added tests only validate statement profiling. Please add a focused unit 
test for `MetastoreDriver.connect(...)` that validates parsing (including edge 
cases like missing `:` after driver class) and ensures the returned 
`Connection` is a `MetastoreConnection` delegating to the underlying driver 
connection.
   



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/datasource/MetastoreStatement.java:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.datasource;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.sql.Statement;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.commons.lang3.ClassUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HMSHandlerContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.logSlowExecution;
+import static 
org.apache.hadoop.hive.metastore.datasource.MetastoreStatement.JdbcProfilerUtils.isSlowExecution;
+
+@SuppressWarnings("unchecked")
+public final class MetastoreStatement implements InvocationHandler {
+  private static final Logger LOG = 
LoggerFactory.getLogger(MetastoreStatement.class);
+  private static final ThreadLocal<HMSHandlerContext.CallCtx> CALL_CTX = new 
ThreadLocal<>();
+  static final String EXEC_HOOK = "metastore.jdbc.execution.hook";
+
+  private final String rawSql;
+  private final Statement delegate;
+  private final Configuration configuration;
+  private final MetastoreStatementHook hook;
+
+  private MetastoreStatement(Configuration conf, Statement statement, String 
rawSql) {
+    this.configuration = Objects.requireNonNull(conf);
+    this.rawSql = rawSql;
+    this.delegate = Objects.requireNonNull(statement);
+    String className = conf.get(EXEC_HOOK, "");
+    if (StringUtils.isEmpty(className)) {
+      hook = new JdbcProfilerUtils(conf);
+    } else {
+      try {
+        hook = JavaUtils.newInstance(JavaUtils.getClass(className.trim(), 
MetastoreStatementHook.class),
+            new Class[] { Configuration.class}, new Object[] {conf});
+      } catch (MetaException e) {
+        throw new RuntimeException(e.getMessage(), e);
+      }
+    }
+  }
+
+  public static <T extends Statement> T getProxyStatement(Configuration 
configuration, T delegate, String rawSql) {
+    MetastoreStatement handler = new MetastoreStatement(configuration, 
delegate, rawSql);
+    return (T) Proxy.newProxyInstance(JavaUtils.getClassLoader(),
+        ClassUtils.getAllInterfaces(delegate.getClass()).toArray(new 
Class[0]), handler);
+  }
+
  /**
   * Tracks the thrift call currently executing on this thread and, when a new call
   * replaces the previous one, logs the total JDBC time accumulated by the previous
   * call (at WARN level if it exceeded the slow-execution threshold).
   * <p>
   * State machine on the thread-local {@code CALL_CTX}:
   * no previous call + monitoring → remember the current call;
   * a different call than the remembered one → the previous call has finished, so
   * emit its summary, then either remember the new call (if monitoring) or clear.
   *
   * @param monitor whether the current statement execution is being profiled;
   *                only monitored executions are remembered in {@code CALL_CTX}
   */
  private void logSummary(boolean monitor) {
    Optional<HMSHandlerContext.CallCtx> ctxCall = HMSHandlerContext.getCallCtx();
    HMSHandlerContext.CallCtx previousCall = CALL_CTX.get();
    if (ctxCall.isPresent()) {
      if (previousCall == null) {
        if (monitor) {
          // First monitored statement of this thrift call: start tracking it.
          CALL_CTX.set(ctxCall.get());
        }
      } else if (!ctxCall.get().equals(previousCall)) {
        // A new thrift call has started on this thread, so the previous call is
        // complete: report the JDBC time it accumulated across all its queries.
        long totalSpent = previousCall.totalTime().get();
        LOG.debug("{} took {} ms to complete all queries", previousCall.methodName(), totalSpent);
        if (isSlowExecution(configuration, totalSpent)) {
          LOG.warn("{} took {} ms to complete all queries", previousCall.methodName(), totalSpent);
        }
        if (monitor) {
          // Begin tracking the new call.
          CALL_CTX.set(ctxCall.get());
        } else {
          // Not monitoring the new call: clear to avoid leaking via the ThreadLocal.
          CALL_CTX.remove();
        }
      }
    }
  }
+
+  @Override
+  public Object invoke(Object proxy, Method method, Object[] args) throws 
Throwable {
+    Timer.Context ctx = null;
+    try {
+      boolean monitor = hook.profile(rawSql, method, args);
+      logSummary(monitor);
+      if (Metrics.getRegistry() != null && monitor) {
+        String metricName = hook.getMetricName(method, args);
+        Timer timer = Metrics.getOrCreateTimer(metricName);
+        if (timer != null) {
+          ctx = timer.time();
+        }
+      }
+      long start = System.currentTimeMillis();
+      hook.preRun(method, args);
+      Object result = method.invoke(delegate, args);
+      hook.postRun(method, args, result);
+      long timeSpent = System.currentTimeMillis() - start;
+      if (monitor) {
+        String statement = rawSql != null ? rawSql : (args != null && 
args.length > 0 ? (String) args[0] : "no sql found");
+        LOG.debug("Jdbc query: {} completed in {} ms", statement, timeSpent);
+        if (CALL_CTX.get() != null) {
+          CALL_CTX.get().totalTime().addAndGet(timeSpent);
+        }
+      }
+      logSlowExecution(timeSpent, configuration, rawSql, method, args);
+      return result;
+    } catch (InvocationTargetException | UndeclaredThrowableException e) {
+      throw e.getCause();
+    } finally {
+      if (ctx != null) {
+        ctx.stop();
+      }
+    }
+  }
+
  /**
   * Callback contract invoked around every proxied statement method call, allowing
   * pluggable profiling/instrumentation. Implementations must expose a
   * {@code (Configuration)} constructor and are selected via {@link #EXEC_HOOK};
   * {@link JdbcProfilerUtils} is the default.
   */
  public interface MetastoreStatementHook {
    /**
     * Decides whether the current call should be monitored; this method gives a
     * chance to profile a specific pattern of queries. For example,
     * {@link JdbcProfilerUtils} profiles the queries originating from a set of
     * specific thrift APIs.
     * @param sql The sql being executed; it might be null for {@link Statement#execute},
     *            in which case the sql must be obtained from {@code args}, the method input.
     * @param method Method which is being called
     * @param args The method input
     * @return true to profile this call, false otherwise
     */
    boolean profile(String sql, Method method, Object[] args);

    /**
     * Derives the dropwizard timer name used to record this call's duration.
     * @param method Method which is being called
     * @param args The method input
     * @return the metric name under which to time this execution
     */
    String getMetricName(Method method, Object[] args);
    /**
     * Invoked before the method call
     * @param method Method which is being called
     * @param args The method input
     */
    default void preRun(Method method, Object[] args) {

    }

    /**
     * Invoked after the method call
     * @param method Method which is being called
     * @param args The method input
     * @param result The execution result from the call
     */
    default void postRun(Method method, Object[] args, Object result) {

    }
  }
+
+  /**
+   * This class is used to profile the underlying statement originated from 
specific thrift API calls
+   */
+  public static class JdbcProfilerUtils implements 
MetastoreStatement.MetastoreStatementHook  {
+    private static final Set<String> PROFILED_APIS = new HashSet<>();
+    static final Set<String> QUERY_EXECUTION =
+        Set.of("executeQuery", "executeUpdate", "execute", "executeBatch");
+    private static volatile boolean initialized = false;
+    private static long logSlowQueriesThreshold;
+
    /**
     * Creates the default profiling hook. Triggers one-time initialization of the
     * shared static state (slow-query threshold and profiled thrift API set) from
     * the given configuration on first construction in this JVM.
     * @param configuration metastore configuration, must not be null
     */
    public JdbcProfilerUtils(Configuration configuration) {
      initialize(Objects.requireNonNull(configuration));
    }
+
+    private static void initialize(Configuration configuration) {
+      if (!initialized) {
+        synchronized (JdbcProfilerUtils.class) {
+          if (!initialized) {
+            initialized = true;
+            logSlowQueriesThreshold = MetastoreConf.getLongVar(configuration,
+                MetastoreConf.ConfVars.METASTORE_JDBC_SLOW_QUERIES);
+            if (logSlowQueriesThreshold > 0) {
+              LOG.info("The slow query log enabled, will log the query that 
takes more than {} ms",
+                  logSlowQueriesThreshold);
+            }
+            String thriftApis = MetastoreConf.getVar(configuration,
+                MetastoreConf.ConfVars.METASTORE_PROFILE_JDBC_THRIFT_APIS);
+            for (String thriftApi : thriftApis.split(",")) {
+              String trimmedThriftApi = thriftApi.trim();
+              if (!trimmedThriftApi.isEmpty()) {
+                PROFILED_APIS.add(trimmedThriftApi);
+              }
+            }
+          }

Review Comment:
   This caches `logSlowQueriesThreshold` and `PROFILED_APIS` in static state 
after the first initialization and never refreshes them. That can cause 
test-order dependence (one test’s config can “stick” for later tests), and it 
prevents configuration changes from taking effect in the same JVM. Consider 
making these instance fields on `JdbcProfilerUtils` (preferred since you 
already construct a hook instance per statement), or add a test-only 
reset/reinit mechanism to avoid cross-test pollution.
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to