attilapiros commented on code in PR #41746:
URL: https://github.com/apache/spark/pull/41746#discussion_r1267104959


##########
core/src/main/scala/org/apache/spark/SparkContext.scala:
##########
@@ -585,6 +588,12 @@ class SparkContext(config: SparkConf) extends Logging {
     _dagScheduler = new DAGScheduler(this)
     _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)
 
+    // After initialisation of _schedulerBackend, if EXECUTOR_ALLOW_SYNC_LOG_LEVEL is true and
+    // SPARK_LOG_LEVEL is already set, sync the new log level with all the current executors.

Review Comment:
   Nit: this comment is not really needed 
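   
   For context, a minimal sketch of the sync this code comment describes (the config entries come from this PR's diff, the `updateExecutorsLogLevel` hook follows the rename suggested further down, and the Option-returning `conf.get(SPARK_LOG_LEVEL)` is an assumption):
   ```scala
   // Sketch only: once _schedulerBackend exists, push an already-configured
   // log level out to the executors that are currently registered.
   if (conf.get(EXECUTOR_ALLOW_SYNC_LOG_LEVEL)) {
     conf.get(SPARK_LOG_LEVEL).foreach(level => _schedulerBackend.updateExecutorsLogLevel(level))
   }
   ```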



##########
core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala:
##########
@@ -473,6 +475,10 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
       }
 
       driverConf.set(EXECUTOR_ID, arguments.executorId)
+
+      // Set the current log level of driver in Executor

Review Comment:
   Nit: comment is not needed
   



##########
core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala:
##########
@@ -345,7 +358,8 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp
           sparkProperties,
           SparkEnv.get.securityManager.getIOEncryptionKey(),
           Option(delegationTokens.get()),
-          rp)
+          rp,
+          CoarseGrainedSchedulerBackend.this.logLevel)

Review Comment:
   Nit: I do not think we need "CoarseGrainedSchedulerBackend.this." here, do we?
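   
   I.e. with the outer field resolving implicitly, the call site could simply read (sketch, assuming nothing named `logLevel` shadows it in the enclosing scope):
   ```scala
             rp,
             logLevel)
   ```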



##########
core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala:
##########
@@ -318,6 +321,16 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp
         context.reply(true)
         stop()
 
+      case UpdateExecutorsLogLevel(logLevel) =>
+        // Remember the current log level of the application, so that it can be propagated to
+        // new executors on RetrieveSparkAppConfig event

Review Comment:
   Nit: comment is not needed
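   
   For reference, a sketch of how this handler could read once the comment is dropped (the `currentLogLevel` field follows the rename suggested below, and the executor-side `UpdateExecutorLogLevel` message name is an assumption based on this diff, not confirmed against the PR):
   ```scala
   case UpdateExecutorsLogLevel(logLevel) =>
     // Keep the level so RetrieveSparkAppConfig can hand it to new executors,
     // then push it to every executor that is already registered.
     currentLogLevel = Some(logLevel)
     executorDataMap.values.foreach { data =>
       data.executorEndpoint.send(UpdateExecutorLogLevel(logLevel))
     }
     context.reply(true)
   ```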



##########
core/src/main/scala/org/apache/spark/SparkContext.scala:
##########
@@ -396,7 +395,11 @@ class SparkContext(config: SparkConf) extends Logging {
     require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
       s"Supplied level $logLevel did not match one of:" +
         s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
-    Utils.setLogLevel(Level.toLevel(upperCased))
+    Utils.setLogLevelIfNeeded(upperCased)
+    // Inform all executors about the change

Review Comment:
   Nit: comment is not needed
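   
   With the nit applied, the method stays a compact validate-set-propagate sequence; a sketch of the result (the propagation call uses the rename suggested below for the backend hook, so treat it as illustrative rather than the PR's exact code):
   ```scala
   def setLogLevel(logLevel: String): Unit = {
     val upperCased = logLevel.toUpperCase(Locale.ROOT)
     require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
       s"Supplied level $logLevel did not match one of:" +
         s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
     // Set the level locally first, then let the backend fan it out.
     Utils.setLogLevelIfNeeded(upperCased)
     _schedulerBackend.updateExecutorsLogLevel(upperCased)
   }
   ```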



##########
core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala:
##########
@@ -653,6 +667,12 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp
     }
   }
 
+  override def updateLogLevel(logLevel: String): Unit = {

Review Comment:
   Nit: 
   ```suggestion
     override def updateExecutorsLogLevel(logLevel: String): Unit = {
   ```
   Also, please update this in the base class and in the SparkContext where it is used.
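   
   A sketch of what the rename would touch (the no-op default in the base trait is an assumption; local-mode backends have no executors to update):
   ```scala
   // SchedulerBackend.scala -- base class hook (existing members elided)
   private[spark] trait SchedulerBackend {
     def updateExecutorsLogLevel(logLevel: String): Unit = {}
   }
   
   // SparkContext.setLogLevel -- the call site then becomes:
   _schedulerBackend.updateExecutorsLogLevel(upperCased)
   ```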



##########
core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala:
##########
@@ -473,6 +475,10 @@ private[spark] object CoarseGrainedExecutorBackend extends Logging {
       }
 
       driverConf.set(EXECUTOR_ID, arguments.executorId)
+
+      // Set the current log level of driver in Executor
+      cfg.logLevel.foreach(l => Utils.setLogLevelIfNeeded(l))

Review Comment:
   ```suggestion
         cfg.logLevel.foreach(logLevel => Utils.setLogLevelIfNeeded(logLevel))
   ```
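   
   Going one step further, the lambda can be dropped entirely, since `setLogLevelIfNeeded` already has the right shape for `foreach`:
   ```scala
   cfg.logLevel.foreach(Utils.setLogLevelIfNeeded)
   ```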



##########
core/src/main/scala/org/apache/spark/util/Utils.scala:
##########
@@ -2436,16 +2437,35 @@ private[spark] object Utils extends Logging with SparkClassUtils {
    * configure a new log4j level
    */
   def setLogLevel(l: Level): Unit = {
-    val ctx = LogManager.getContext(false).asInstanceOf[LoggerContext]
-    val config = ctx.getConfiguration()
-    val loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME)
+    val (ctx, loggerConfig) = getLogContext
     loggerConfig.setLevel(l)
     ctx.updateLoggers()
 
     // Setting threshold to null as rootLevel will define log level for spark-shell
     Logging.sparkShellThresholdLevel = null
   }
 
+
+  def setLogLevelIfNeeded(newLogLevel: String): Unit = {
+    // Update only if new log level is not same as current log level

Review Comment:
   Nit: remove the comment (we only need comments for the complex code parts)
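   
   For reference, the body can stay self-explanatory without it; a sketch using the `getLogContext` helper this diff introduces (the comparison detail is an assumption, the PR's actual body may differ):
   ```scala
   def setLogLevelIfNeeded(newLogLevel: String): Unit = {
     val (_, loggerConfig) = getLogContext
     // Skip the log4j2 reconfiguration entirely when the level is unchanged.
     if (loggerConfig.getLevel != Level.toLevel(newLogLevel)) {
       setLogLevel(Level.toLevel(newLogLevel))
     }
   }
   ```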



##########
core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala:
##########
@@ -123,6 +123,9 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp
   // The num of current max ExecutorId used to re-register appMaster
   @volatile protected var currentExecutorIdCounter = 0
 
+  // Current log level of driver to send to executor
+  @volatile private var logLevel: Option[String] = None

Review Comment:
   Nit: rename to `currentLogLevel`
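   
   With the rename applied, the declaration would read (sketch):
   ```scala
   // Current log level of the driver, handed to executors when they register.
   @volatile private var currentLogLevel: Option[String] = None
   ```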


