This is an automated email from the ASF dual-hosted git repository. stigahuang pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/impala.git
commit a766f197a7fd0297ab736ed24bd35f7c4568927e Author: Riza Suminto <riza.sumi...@cloudera.com> AuthorDate: Sat Jul 5 11:23:44 2025 -0700 IMPALA-14207: Hook log4j to Glog in AdmissionD log4j logs do not show up in the dedicated AdmissionD service. This is because GlogAppender.Install is not called anywhere. It is called in the constructors of JniCatalog.java and JniFrontend.java, but AdmissionD does not utilize either of them. AdmissionD has a call path to JniRequestPoolService.java. This patch adds an isAdmissiond parameter to the JniRequestPoolService constructor to call GlogAppender.Install and hook up log4j to Glog. Only AdmissionD sets this parameter to true when initializing JniRequestPoolService. Note that this hookup only happens if the fair_scheduler_allocation_path flag is not empty. Testing: Ran a few tests in TestAdmissionControllerWithACService and confirmed log4j logs appear in admissiond.INFO Change-Id: Ie28ec017f70c79d6ffdc56daa5aaeb9efcb8bb79 Reviewed-on: http://gerrit.cloudera.org:8080/23140 Reviewed-by: Michael Smith <michael.sm...@cloudera.com> Reviewed-by: Yida Wu <wydbaggio...@gmail.com> Tested-by: Impala Public Jenkins <impala-public-jenk...@cloudera.com> --- be/src/scheduling/admissiond-env.cc | 3 ++- be/src/scheduling/request-pool-service.cc | 11 ++++++----- be/src/scheduling/request-pool-service.h | 2 +- .../org/apache/impala/util/JniRequestPoolService.java | 18 +++++++++++++----- .../org/apache/impala/util/RequestPoolService.java | 2 +- 5 files changed, 23 insertions(+), 13 deletions(-) diff --git a/be/src/scheduling/admissiond-env.cc b/be/src/scheduling/admissiond-env.cc index 7ff883ebb..1f6d13b86 100644 --- a/be/src/scheduling/admissiond-env.cc +++ b/be/src/scheduling/admissiond-env.cc @@ -50,7 +50,8 @@ AdmissiondEnv* AdmissiondEnv::admissiond_env_ = nullptr; AdmissiondEnv::AdmissiondEnv() : pool_mem_trackers_(new PoolMemTrackerRegistry), - request_pool_service_(new RequestPoolService(DaemonEnv::GetInstance()->metrics())), + request_pool_service_( + new 
RequestPoolService(DaemonEnv::GetInstance()->metrics(), true)), rpc_mgr_(new RpcMgr(IsInternalTlsConfigured())), rpc_metrics_(DaemonEnv::GetInstance()->metrics()->GetOrCreateChildGroup("rpc")) { MetricGroup* metrics = DaemonEnv::GetInstance()->metrics(); diff --git a/be/src/scheduling/request-pool-service.cc b/be/src/scheduling/request-pool-service.cc index eeddf5d97..78a241906 100644 --- a/be/src/scheduling/request-pool-service.cc +++ b/be/src/scheduling/request-pool-service.cc @@ -86,8 +86,8 @@ static const string ERROR_USER_NOT_SPECIFIED = "User must be specified because " const string RequestPoolService::DEFAULT_POOL_NAME = "default-pool"; -RequestPoolService::RequestPoolService(MetricGroup* metrics) : - resolve_pool_ms_metric_(NULL) { +RequestPoolService::RequestPoolService(MetricGroup* metrics, bool is_admissiond) + : resolve_pool_ms_metric_(NULL) { DCHECK(metrics != NULL); resolve_pool_ms_metric_ = StatsMetric<double>::CreateAndRegister(metrics, RESOLVE_POOL_METRIC_NAME); @@ -114,7 +114,7 @@ RequestPoolService::RequestPoolService(MetricGroup* metrics) : jmethodID start_id; // JniRequestPoolService.start(), only called in this method. 
JniMethodDescriptor methods[] = { - {"<init>", "([BLjava/lang/String;Ljava/lang/String;Z)V", &ctor_}, + {"<init>", "([BLjava/lang/String;Ljava/lang/String;ZZ)V", &ctor_}, {"start", "()V", &start_id}, {"resolveRequestPool", "([B)[B", &resolve_request_pool_id_}, {"getPoolConfig", "([B)[B", &get_pool_config_id_}, @@ -142,8 +142,9 @@ RequestPoolService::RequestPoolService(MetricGroup* metrics) : ABORT_IF_EXC(jni_env); jboolean is_be_test = TestInfo::is_be_test(); - jobject jni_request_pool_service = jni_env->NewObject(jni_request_pool_service_class_, - ctor_, cfg_bytes, fair_scheduler_config_path, llama_site_path, is_be_test); + jobject jni_request_pool_service = + jni_env->NewObject(jni_request_pool_service_class_, ctor_, cfg_bytes, + fair_scheduler_config_path, llama_site_path, is_be_test, is_admissiond); ABORT_IF_EXC(jni_env); ABORT_IF_ERROR(JniUtil::LocalToGlobalRef( jni_env, jni_request_pool_service, &jni_request_pool_service_)); diff --git a/be/src/scheduling/request-pool-service.h b/be/src/scheduling/request-pool-service.h index 7156095c0..33f09b46d 100644 --- a/be/src/scheduling/request-pool-service.h +++ b/be/src/scheduling/request-pool-service.h @@ -41,7 +41,7 @@ class RequestPoolService { /// Initializes the JNI method stubs if configuration files are specified. If any /// method can't be found, or if there is any further error, the constructor will /// terminate the process. - RequestPoolService(MetricGroup* metrics); + RequestPoolService(MetricGroup* metrics, bool is_admissiond = false); /// Resolves the request to a resource pool as determined by the policy. 
Returns an /// error if the request cannot be resolved to a pool or if the user does not have diff --git a/fe/src/main/java/org/apache/impala/util/JniRequestPoolService.java b/fe/src/main/java/org/apache/impala/util/JniRequestPoolService.java index 9a1435834..1f3187297 100644 --- a/fe/src/main/java/org/apache/impala/util/JniRequestPoolService.java +++ b/fe/src/main/java/org/apache/impala/util/JniRequestPoolService.java @@ -21,20 +21,19 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.Groups; import org.apache.impala.common.ImpalaException; import org.apache.impala.common.InternalException; import org.apache.impala.common.JniUtil; import org.apache.impala.service.BackendConfig; import org.apache.impala.thrift.TBackendGflags; -import org.apache.impala.thrift.TErrorCode; -import org.apache.hadoop.security.Groups; import org.apache.impala.thrift.TGetHadoopGroupsRequest; import org.apache.impala.thrift.TGetHadoopGroupsResponse; -import org.apache.impala.thrift.TPoolConfigParams; +import org.apache.impala.thrift.TLogLevel; import org.apache.impala.thrift.TPoolConfig; +import org.apache.impala.thrift.TPoolConfigParams; import org.apache.impala.thrift.TResolveRequestPoolParams; import org.apache.impala.thrift.TResolveRequestPoolResult; - import org.apache.thrift.TException; import org.apache.thrift.TSerializer; import org.apache.thrift.protocol.TBinaryProtocol; @@ -67,12 +66,21 @@ public class JniRequestPoolService { * * @param fsAllocationPath path to the fair scheduler allocation file. * @param sitePath path to the configuration file. + * @param isBackendTest true if instantiated from backend test. + * @param isAdmissiond true if running in AdmissionD. Enable log4j to Glog hookup. 
*/ public JniRequestPoolService(byte[] thriftBackendConfig, final String fsAllocationPath, - final String sitePath, boolean isBackendTest) throws ImpalaException { + final String sitePath, boolean isBackendTest, boolean isAdmissiond) + throws ImpalaException { Preconditions.checkNotNull(fsAllocationPath); TBackendGflags cfg = new TBackendGflags(); JniUtil.deserializeThrift(protocolFactory_, cfg, thriftBackendConfig); + if (isAdmissiond) { + GlogAppender.Install(TLogLevel.values()[cfg.impala_log_lvl], + TLogLevel.values()[cfg.non_impala_java_vlog]); + LOG.info("GlogAppender installed for AdmissionD."); + LOG.info(JniUtil.getJavaVersion()); + } BackendConfig.create(cfg, false); requestPoolService_ = diff --git a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java index 28efaefad..91ee8af36 100644 --- a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java +++ b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java @@ -410,7 +410,7 @@ public class RequestPoolService { result.setOnly_coordinators(allocationConf_.get().isOnlyCoordinators(pool)); } if (LOG.isTraceEnabled()) { - LOG.debug("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}," + LOG.trace("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}," + " max_queued={}, queue_timeout_ms={}, default_query_options={}," + " max_query_mem_limit={}, min_query_mem_limit={}," + " clamp_mem_limit_query_option={}, max_query_cpu_core_per_node_limit={},"