This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new f78f164c5ea3 [SPARK-53496][SQL][CORE][ML][SS][K8S][YARN] Remove dangling right curly braces in log
f78f164c5ea3 is described below

commit f78f164c5ea3275767a69d16dff80d22b052afd3
Author: Kent Yao <y...@apache.org>
AuthorDate: Thu Sep 4 13:27:23 2025 -0700

    [SPARK-53496][SQL][CORE][ML][SS][K8S][YARN] Remove dangling right curly braces in log
    
    ### What changes were proposed in this pull request?
    Remove dangling right curly braces from structured log messages.
    
    ### Why are the changes needed?
    Fix the structure of the affected log messages: a stray `}` after an MDC placeholder ends up as a literal brace in the rendered output, as the sketch below illustrates.
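
    A minimal standalone sketch of the failure mode (the `log` interpolator and `MDC` below are toy stand-ins, not Spark's internal logging API): any `}` typed after `${MDC(...)}` falls outside the interpolation and is kept verbatim in the rendered message.

    ```scala
    // Toy model of an MDC-style log interpolator, only to show the dangling-brace effect.
    object DanglingBraceDemo {
      final case class MDC(key: String, value: Any)

      implicit class LogInterpolator(private val sc: StringContext) extends AnyVal {
        // Render each MDC placeholder as its value, as a structured-logging framework would.
        def log(args: Any*): String =
          sc.s(args.map { case MDC(_, v) => v; case other => other }: _*)
      }

      def main(args: Array[String]): Unit = {
        val appName = "my-app"
        // Dangling brace: the second `}` is outside the interpolation and leaks into the message.
        println(log"Waiting for application ${MDC("app_name", appName)}} to finish")
        // => Waiting for application my-app} to finish
        // Fixed: a single `}` just closes the interpolation.
        println(log"Waiting for application ${MDC("app_name", appName)} to finish")
        // => Waiting for application my-app to finish
      }
    }
    ```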
    
    ### Does this PR introduce _any_ user-facing change?
    no
    
    ### How was this patch tested?
    Passing CI
    
    ### Was this patch authored or co-authored using generative AI tooling?
    no
    
    Closes #52226 from yaooqinn/minor.
    
    Authored-by: Kent Yao <y...@apache.org>
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
---
 .../scala/org/apache/spark/ml/classification/LogisticRegression.scala   | 2 +-
 .../org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala    | 2 +-
 .../org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala  | 2 +-
 .../yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala       | 2 +-
 .../src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala     | 2 +-
 .../org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala   | 2 +-
 .../datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala        | 2 +-
 .../spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala    | 2 +-
 .../apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala  | 2 +-
 .../sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala    | 2 +-
 .../src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | 2 +-
 11 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
index d0824925780c..b602632e68c0 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
@@ -836,7 +836,7 @@ class LogisticRegression @Since("1.2.0") (
           (_initialModel.getFitIntercept == $(fitIntercept))
         if (!modelIsValid) {
           instr.logWarning(log"Initial coefficients will be ignored! Its dimensions " +
-            log"(${MDC(LogKeys.NUM_ROWS, providedCoefs.numRows)}}, " +
+            log"(${MDC(LogKeys.NUM_ROWS, providedCoefs.numRows)}, " +
             log"${MDC(LogKeys.NUM_COLUMNS, providedCoefs.numCols)}) did not 
match the " +
             log"expected size (${MDC(LogKeys.NUM_COEFFICIENTS, 
numCoefficientSets)}, " +
             log"${MDC(LogKeys.NUM_FEATURES, numFeatures)})")
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala
index f4e205ee28f4..ededa6bb9035 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala
@@ -98,7 +98,7 @@ private[k8s] class LoggingPodStatusWatcherImpl(conf: KubernetesDriverConf)
   }
 
   override def watchOrStop(sId: String): Boolean = {
-    logInfo(log"Waiting for application ${MDC(APP_NAME, conf.appName)}} with 
application ID " +
+    logInfo(log"Waiting for application ${MDC(APP_NAME, conf.appName)} with 
application ID " +
       log"${MDC(APP_ID, appId)} and submission ID ${MDC(SUBMISSION_ID, sId)} 
to finish...")
     val interval = conf.get(REPORT_INTERVAL)
     synchronized {
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
index e84a0c97724c..051b7e048b08 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
@@ -191,7 +191,7 @@ class ExecutorPodsAllocator(
     }
 
     if (timedOut.nonEmpty) {
-      logWarning(log"Executors with ids ${MDC(LogKeys.EXECUTOR_IDS, 
timedOut.mkString(","))}} " +
+      logWarning(log"Executors with ids ${MDC(LogKeys.EXECUTOR_IDS, 
timedOut.mkString(","))} " +
         log"were not detected in the Kubernetes cluster after " +
         log"${MDC(LogKeys.TIMEOUT, podCreationTimeout)} ms despite the fact 
that a previous " +
         log"allocation attempt tried to create them. The executors may have 
been deleted but the " +
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
index b4f9c7469236..09655007acb3 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
@@ -332,7 +332,7 @@ private[spark] class Client(
         appContext.setLogAggregationContext(logAggregationContext)
       } catch {
         case NonFatal(e) =>
-          logWarning(log"Ignoring ${MDC(LogKeys.CONFIG, 
ROLLED_LOG_INCLUDE_PATTERN.key)}} " +
+          logWarning(log"Ignoring ${MDC(LogKeys.CONFIG, 
ROLLED_LOG_INCLUDE_PATTERN.key)} " +
             log"because the version of YARN does not support it", e)
       }
     }
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
index 48946b5a8e6e..8b117650ab5b 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
@@ -587,7 +587,7 @@ private[yarn] class YarnAllocator(
         if (log.isInfoEnabled()) {
           val (localized, anyHost) = newLocalityRequests.partition(_.getNodes() != null)
           if (anyHost.nonEmpty) {
-            logInfo(log"Submitted ${MDC(LogKeys.COUNT, anyHost.size)}} unlocalized container " +
+            logInfo(log"Submitted ${MDC(LogKeys.COUNT, anyHost.size)} unlocalized container " +
               log"requests.")
           }
           localized.foreach { request =>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala
index f3a0da2437ae..ca6273570e59 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala
@@ -39,7 +39,7 @@ case class AnalyzeTablesCommand(
       } catch {
         case NonFatal(e) =>
           logWarning(log"Failed to analyze table ${MDC(TABLE_NAME, tbl.table)} 
in the " +
-            log"database ${MDC(DATABASE_NAME, db)} because of ${MDC(ERROR, 
e.toString)}}", e)
+            log"database ${MDC(DATABASE_NAME, db)} because of ${MDC(ERROR, 
e.toString)}", e)
       }
     }
     Seq.empty[Row]
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
index 77e21bcbbdb8..8cf2bd844bf4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
@@ -69,7 +69,7 @@ object GroupBasedRowLevelOperationScanPlanning extends Rule[LogicalPlan] with Pr
             |Pushing operators to ${MDC(LogKeys.RELATION_NAME, relation.name)}
             |Pushed filters: ${MDC(LogKeys.PUSHED_FILTERS, pushedFiltersStr)}
             |Filters evaluated on data source side: ${MDC(LogKeys.EVALUATED_FILTERS, evaluatedFilters.mkString(", "))}
-            |Filters evaluated on Spark side: ${MDC(LogKeys.POST_SCAN_FILTERS, postScanFilters.mkString(", "))}}
+            |Filters evaluated on Spark side: ${MDC(LogKeys.POST_SCAN_FILTERS, postScanFilters.mkString(", "))}
             |Output: ${MDC(LogKeys.RELATION_OUTPUT, output.mkString(", "))}
            """.stripMargin)
       // scalastyle:on line.size.limit
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala
index 68e8c1c6e104..4904e3d60dc9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala
@@ -436,7 +436,7 @@ trait V2TableWriteExec extends V2CommandExec with UnaryExecNode with AdaptiveSpa
 
     logInfo(log"Start processing data source write support: " +
       log"${MDC(LogKeys.BATCH_WRITE, batchWrite)}. The input RDD has " +
-      log"${MDC(LogKeys.COUNT, messages.length)}} partitions.")
+      log"${MDC(LogKeys.COUNT, messages.length)} partitions.")
 
     // Avoid object not serializable issue.
     val writeMetrics: Map[String, SQLMetric] = customMetrics
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala
index 8a175e633d49..ccdcc6fab4da 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala
@@ -101,7 +101,7 @@ case class ProcessingTimeExecutor(
   /** Called when a batch falls behind */
   def notifyBatchFallingBehind(realElapsedTimeMs: Long): Unit = {
     logWarning(log"Current batch is falling behind. The trigger interval is " +
-      log"${MDC(TRIGGER_INTERVAL, intervalMs)}} milliseconds, but spent " +
+      log"${MDC(TRIGGER_INTERVAL, intervalMs)} milliseconds, but spent " +
       log"${MDC(ELAPSED_TIME, realElapsedTimeMs)} milliseconds")
   }
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
index f37a26012e22..49eb72513588 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
@@ -515,7 +515,7 @@ private[sql] class HDFSBackedStateStoreProvider extends StateStoreProvider with
       logInfo(log"Trying to add version=${MDC(LogKeys.STATE_STORE_VERSION, newVersion)} to state " +
         log"cache map with current_size=${MDC(LogKeys.NUM_LOADED_ENTRIES, loadedEntries)} and " +
         log"earliest_loaded_version=" +
-        log"${MDC(LogKeys.EARLIEST_LOADED_VERSION, 
earliestLoadedVersion.get)}} " +
+        log"${MDC(LogKeys.EARLIEST_LOADED_VERSION, earliestLoadedVersion.get)} 
" +
         log"and max_versions_to_retain_in_memory=" +
         log"${MDC(LogKeys.NUM_VERSIONS_RETAIN, 
numberOfVersionsToRetainInMemory)}")
     } else {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 9ac9d4cc1e19..b0e0cc5a926c 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -349,7 +349,7 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log
     if (shouldInfer) {
       val tableName = tableMeta.identifier.unquotedString
       logInfo(log"Inferring case-sensitive schema for table ${MDC(TABLE_NAME, 
tableName)} " +
-        log"(inference mode:  ${MDC(INFERENCE_MODE, inferenceMode)})})")
+        log"(inference mode: ${MDC(INFERENCE_MODE, inferenceMode)})")
       val fileIndex = fileIndexOpt.getOrElse {
         val rootPath = new Path(tableMeta.location)
         new InMemoryFileIndex(sparkSession, Seq(rootPath), options, None)

