This is an automated email from the ASF dual-hosted git repository.

yangzy pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git


The following commit(s) were added to refs/heads/main by this push:
     new 4d35fb4a4a [GLUTEN-7609][CORE] Fix the bug that logging level cannot 
be changed (#7610)
4d35fb4a4a is described below

commit 4d35fb4a4a20779a3c4cdcca1002eb042bca05f6
Author: Jiaan Geng <[email protected]>
AuthorDate: Mon Oct 21 16:42:29 2024 +0800

    [GLUTEN-7609][CORE] Fix the bug that logging level cannot be changed (#7610)
---
 .../org/apache/gluten/softaffinity/SoftAffinityManager.scala | 12 ++++++------
 .../java/org/apache/spark/softaffinity/SoftAffinity.scala    |  6 ++----
 .../gluten/extension/columnar/ColumnarRuleApplier.scala      |  5 ++---
 .../org/apache/gluten/execution/WholeStageTransformer.scala  |  5 +++--
 .../main/scala/org/apache/gluten/extension/GlutenPlan.scala  | 11 ++++-------
 5 files changed, 17 insertions(+), 22 deletions(-)

diff --git 
a/gluten-core/src/main/java/org/apache/gluten/softaffinity/SoftAffinityManager.scala
 
b/gluten-core/src/main/java/org/apache/gluten/softaffinity/SoftAffinityManager.scala
index 7cb0b2fa26..576697bdb8 100644
--- 
a/gluten-core/src/main/java/org/apache/gluten/softaffinity/SoftAffinityManager.scala
+++ 
b/gluten-core/src/main/java/org/apache/gluten/softaffinity/SoftAffinityManager.scala
@@ -53,8 +53,6 @@ abstract class AffinityManager extends LogLevelUtil with 
Logging {
 
   protected val totalRegisteredExecutors = new AtomicInteger(0)
 
-  lazy val logLevel: String = GlutenConfig.getConf.softAffinityLogLevel
-
   // rdd id -> partition id, file path, start, length
   val rddPartitionInfoMap: LoadingCache[Integer, Array[(Int, String, Long, 
Long)]] =
     CacheBuilder
@@ -113,7 +111,7 @@ abstract class AffinityManager extends LogLevelUtil with 
Logging {
         totalRegisteredExecutors.addAndGet(1)
       }
       logOnLevel(
-        logLevel,
+        GlutenConfig.getConf.softAffinityLogLevel,
         s"After adding executor ${execHostId._1} on host ${execHostId._2}, " +
           s"fixedIdForExecutors is ${fixedIdForExecutors.mkString(",")}, " +
           s"nodesExecutorsMap is ${nodesExecutorsMap.keySet.mkString(",")}, " +
@@ -147,7 +145,7 @@ abstract class AffinityManager extends LogLevelUtil with 
Logging {
         totalRegisteredExecutors.addAndGet(-1)
       }
       logOnLevel(
-        logLevel,
+        GlutenConfig.getConf.softAffinityLogLevel,
         s"After removing executor $execId, " +
           s"fixedIdForExecutors is ${fixedIdForExecutors.mkString(",")}, " +
           s"nodesExecutorsMap is ${nodesExecutorsMap.keySet.mkString(",")}, " +
@@ -192,7 +190,8 @@ abstract class AffinityManager extends LogLevelUtil with 
Logging {
                 } else {
                   (originalValues ++ value)
                 }
-                logOnLevel(logLevel, s"update host for $key: 
${values.mkString(",")}")
+                logOnLevel(GlutenConfig.getConf.softAffinityLogLevel,
+                  s"update host for $key: ${values.mkString(",")}")
                 duplicateReadingInfos.put(key, values)
               }
           }
@@ -273,7 +272,8 @@ abstract class AffinityManager extends LogLevelUtil with 
Logging {
 
     if (!hosts.isEmpty) {
       rand.shuffle(hosts)
-      logOnLevel(logLevel, s"get host for $f: ${hosts.distinct.mkString(",")}")
+      logOnLevel(GlutenConfig.getConf.softAffinityLogLevel,
+        s"get host for $f: ${hosts.distinct.mkString(",")}")
     }
     hosts.distinct.toSeq
   }
diff --git 
a/gluten-core/src/main/java/org/apache/spark/softaffinity/SoftAffinity.scala 
b/gluten-core/src/main/java/org/apache/spark/softaffinity/SoftAffinity.scala
index 0e439d245c..65a0bfd078 100644
--- a/gluten-core/src/main/java/org/apache/spark/softaffinity/SoftAffinity.scala
+++ b/gluten-core/src/main/java/org/apache/spark/softaffinity/SoftAffinity.scala
@@ -16,6 +16,7 @@
  */
 package org.apache.spark.softaffinity
 
+import org.apache.gluten.GlutenConfig
 import org.apache.gluten.logging.LogLevelUtil
 import org.apache.gluten.softaffinity.{AffinityManager, SoftAffinityManager}
 import org.apache.spark.internal.Logging
@@ -23,9 +24,6 @@ import org.apache.spark.scheduler.ExecutorCacheTaskLocation
 import org.apache.spark.sql.execution.datasources.FilePartition
 
 abstract class Affinity(val manager: AffinityManager) extends LogLevelUtil 
with Logging {
-
-  private lazy val logLevel: String = manager.logLevel
-
   protected def internalGetHostLocations(filePath: String): Array[String]
   private def shouldUseSoftAffinity(
       filePaths: Array[String],
@@ -67,7 +65,7 @@ abstract class Affinity(val manager: AffinityManager) extends 
LogLevelUtil with
     val locations = manager.askExecutors(filePath)
     if (locations.nonEmpty) {
       logOnLevel(
-        logLevel,
+        GlutenConfig.getConf.softAffinityLogLevel,
         s"SAMetrics=File $filePath - the expected executors are 
${locations.mkString("_")} ")
       locations.map { case (executor, host) => toTaskLocation(host, executor) }
     } else {
diff --git 
a/gluten-core/src/main/scala/org/apache/gluten/extension/columnar/ColumnarRuleApplier.scala
 
b/gluten-core/src/main/scala/org/apache/gluten/extension/columnar/ColumnarRuleApplier.scala
index b47b2f3388..d9ea4b25c0 100644
--- 
a/gluten-core/src/main/scala/org/apache/gluten/extension/columnar/ColumnarRuleApplier.scala
+++ 
b/gluten-core/src/main/scala/org/apache/gluten/extension/columnar/ColumnarRuleApplier.scala
@@ -57,8 +57,7 @@ object ColumnarRuleApplier {
     extends Rule[SparkPlan]
     with Logging
     with LogLevelUtil {
-    // Columnar plan change logging added since 
https://github.com/apache/incubator-gluten/pull/456.
-    private val transformPlanLogLevel = 
GlutenConfig.getConf.transformPlanLogLevel
+
     override val ruleName: String = delegate.ruleName
 
     private def message(oldPlan: SparkPlan, newPlan: SparkPlan, millisTime: 
Long): String =
@@ -71,7 +70,7 @@ object ColumnarRuleApplier {
 
     override def apply(plan: SparkPlan): SparkPlan = {
       val (out, millisTime) = 
GlutenTimeMetric.recordMillisTime(delegate.apply(plan))
-      logOnLevel(transformPlanLogLevel, message(plan, out, millisTime))
+      logOnLevel(GlutenConfig.getConf.transformPlanLogLevel, message(plan, 
out, millisTime))
       out
     }
   }
diff --git 
a/gluten-substrait/src/main/scala/org/apache/gluten/execution/WholeStageTransformer.scala
 
b/gluten-substrait/src/main/scala/org/apache/gluten/execution/WholeStageTransformer.scala
index b01dd9845f..7bd84e09d1 100644
--- 
a/gluten-substrait/src/main/scala/org/apache/gluten/execution/WholeStageTransformer.scala
+++ 
b/gluten-substrait/src/main/scala/org/apache/gluten/execution/WholeStageTransformer.scala
@@ -130,7 +130,6 @@ case class WholeStageTransformer(child: SparkPlan, 
materializeInput: Boolean = f
 
   val sparkConf: SparkConf = sparkContext.getConf
   val numaBindingInfo: GlutenNumaBindingInfo = 
GlutenConfig.getConf.numaBindingInfo
-  val substraitPlanLogLevel: String = 
GlutenConfig.getConf.substraitPlanLogLevel
 
   @transient
   private var wholeStageTransformerContext: Option[WholeStageTransformContext] 
= None
@@ -274,7 +273,9 @@ case class WholeStageTransformer(child: SparkPlan, 
materializeInput: Boolean = f
       doWholeStageTransform()
     }(
       t =>
-        logOnLevel(substraitPlanLogLevel, s"$nodeName generating the substrait 
plan took: $t ms."))
+        logOnLevel(
+          GlutenConfig.getConf.substraitPlanLogLevel,
+          s"$nodeName generating the substrait plan took: $t ms."))
     val inputRDDs = new ColumnarInputRDDsWrapper(columnarInputRDDs)
     // Check if BatchScan exists.
     val basicScanExecTransformers = findAllScanTransformers()
diff --git 
a/gluten-substrait/src/main/scala/org/apache/gluten/extension/GlutenPlan.scala 
b/gluten-substrait/src/main/scala/org/apache/gluten/extension/GlutenPlan.scala
index c658a43760..856d208ead 100644
--- 
a/gluten-substrait/src/main/scala/org/apache/gluten/extension/GlutenPlan.scala
+++ 
b/gluten-substrait/src/main/scala/org/apache/gluten/extension/GlutenPlan.scala
@@ -54,9 +54,6 @@ object ValidationResult {
 
 /** Every Gluten Operator should extend this trait. */
 trait GlutenPlan extends SparkPlan with Convention.KnownBatchType with 
LogLevelUtil {
-
-  private lazy val validationLogLevel = glutenConf.validationLogLevel
-  private lazy val printStackOnValidationFailure = 
glutenConf.printStackOnValidationFailure
   protected lazy val enableNativeValidation = glutenConf.enableNativeValidation
 
   protected def glutenConf: GlutenConfig = GlutenConfig.getConf
@@ -115,7 +112,7 @@ trait GlutenPlan extends SparkPlan with 
Convention.KnownBatchType with LogLevelU
   protected def doValidateInternal(): ValidationResult = 
ValidationResult.succeeded
 
   protected def doNativeValidation(context: SubstraitContext, node: RelNode): 
ValidationResult = {
-    if (node != null && enableNativeValidation) {
+    if (node != null && glutenConf.enableNativeValidation) {
       val planNode = PlanBuilder.makePlan(context, Lists.newArrayList(node))
       BackendsApiManager.getValidatorApiInstance
         .doNativeValidateWithFailureReason(planNode)
@@ -125,10 +122,10 @@ trait GlutenPlan extends SparkPlan with 
Convention.KnownBatchType with LogLevelU
   }
 
   private def logValidationMessage(msg: => String, e: Throwable): Unit = {
-    if (printStackOnValidationFailure) {
-      logOnLevel(validationLogLevel, msg, e)
+    if (glutenConf.printStackOnValidationFailure) {
+      logOnLevel(glutenConf.validationLogLevel, msg, e)
     } else {
-      logOnLevel(validationLogLevel, msg)
+      logOnLevel(glutenConf.validationLogLevel, msg)
     }
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to