This is an automated email from the ASF dual-hosted git repository.

yao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 852daf595540 [SPARK-54215][SQL] Add the SessionStateHelper trait to FilePartition
852daf595540 is described below

commit 852daf595540bb024b842dd76a07ba027b1bd9ae
Author: Kent Yao <[email protected]>
AuthorDate: Thu Nov 6 16:31:42 2025 +0800

    [SPARK-54215][SQL] Add the SessionStateHelper trait to FilePartition
    
    ### What changes were proposed in this pull request?
    Add the SessionStateHelper trait to FilePartition
    
    ### Why are the changes needed?
    
    Fix IDE indexing
    
    ### Does this PR introduce _any_ user-facing change?
    No, refactoring-only changes
    
    ### How was this patch tested?
    Passing CI
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No
    
    Closes #52913 from yaooqinn/SPARK-54215.
    
    Authored-by: Kent Yao <[email protected]>
    Signed-off-by: Kent Yao <[email protected]>
---
 .../sql/execution/datasources/FilePartition.scala      | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FilePartition.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FilePartition.scala
index 68a9bbfc7ab1..c5a385e6f0f4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FilePartition.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FilePartition.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.classic.ClassicConversions._
 import org.apache.spark.sql.connector.read.InputPartition
 import org.apache.spark.sql.execution.ScanFileListing
-import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.internal.{SessionStateHelper, SQLConf}
 
 /**
  * A collection of file blocks that should be read as a single task
@@ -53,7 +53,7 @@ case class FilePartition(index: Int, files: Array[PartitionedFile])
   }
 }
 
-object FilePartition extends Logging {
+object FilePartition extends SessionStateHelper with Logging {
 
   private def getFilePartitions(
       partitionedFiles: Seq[PartitionedFile],
@@ -91,8 +91,9 @@ object FilePartition extends Logging {
       sparkSession: SparkSession,
       partitionedFiles: Seq[PartitionedFile],
       maxSplitBytes: Long): Seq[FilePartition] = {
-    val openCostBytes = sparkSession.sessionState.conf.filesOpenCostInBytes
-    val maxPartNum = sparkSession.sessionState.conf.filesMaxPartitionNum
+    val conf = getSqlConf(sparkSession)
+    val openCostBytes = conf.filesOpenCostInBytes
+    val maxPartNum = conf.filesMaxPartitionNum
     val partitions = getFilePartitions(partitionedFiles, maxSplitBytes, openCostBytes)
     if (maxPartNum.exists(partitions.size > _)) {
       val totalSizeInBytes =
@@ -116,9 +117,10 @@ object FilePartition extends Logging {
    * partitions.
    */
   def maxSplitBytes(sparkSession: SparkSession, calculateTotalBytes: => Long): Long = {
-    val defaultMaxSplitBytes = sparkSession.sessionState.conf.filesMaxPartitionBytes
-    val openCostInBytes = sparkSession.sessionState.conf.filesOpenCostInBytes
-    val minPartitionNum = sparkSession.sessionState.conf.filesMinPartitionNum
+    val conf = getSqlConf(sparkSession)
+    val defaultMaxSplitBytes = conf.filesMaxPartitionBytes
+    val openCostInBytes = conf.filesOpenCostInBytes
+    val minPartitionNum = conf.filesMinPartitionNum
       .getOrElse(sparkSession.leafNodeDefaultParallelism)
     val totalBytes = calculateTotalBytes
     val bytesPerCore = totalBytes / minPartitionNum
@@ -141,7 +143,7 @@ object FilePartition extends Logging {
    */
   def maxSplitBytes(
       sparkSession: SparkSession, selectedPartitions: Seq[PartitionDirectory]): Long = {
-    val openCostInBytes = sparkSession.sessionState.conf.filesOpenCostInBytes
+    val openCostInBytes = getSqlConf(sparkSession).filesOpenCostInBytes
     val byteNum = selectedPartitions.flatMap(_.files.map(_.getLen + openCostInBytes)).sum
     maxSplitBytes(sparkSession, byteNum)
   }
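
For readers skimming the diff: the whole change routes SQLConf access through
the SessionStateHelper mix-in instead of repeating the
sparkSession.sessionState.conf chain at every call site. Below is a minimal,
self-contained sketch of that pattern; SQLConf, SessionState, SparkSession, and
SessionStateHelper here are simplified stand-ins, not Spark's real classes.

    // Simplified stand-ins for the real org.apache.spark.sql types.
    case class SQLConf(filesOpenCostInBytes: Long, filesMaxPartitionBytes: Long)
    case class SessionState(conf: SQLConf)
    case class SparkSession(sessionState: SessionState)

    trait SessionStateHelper {
      // One place that knows how to reach the session's SQLConf, so call
      // sites no longer spell out sparkSession.sessionState.conf themselves.
      def getSqlConf(sparkSession: SparkSession): SQLConf =
        sparkSession.sessionState.conf
    }

    object FilePartitionLike extends SessionStateHelper {
      // Before: sparkSession.sessionState.conf.filesOpenCostInBytes
      // After:  getSqlConf(sparkSession).filesOpenCostInBytes
      def openCostInBytes(sparkSession: SparkSession): Long =
        getSqlConf(sparkSession).filesOpenCostInBytes
    }

    object Demo extends App {
      val spark = SparkSession(SessionState(SQLConf(
        filesOpenCostInBytes = 4L * 1024 * 1024,
        filesMaxPartitionBytes = 128L * 1024 * 1024)))
      println(FilePartitionLike.openCostInBytes(spark)) // prints 4194304
    }

The sketch mirrors what the diff does with the real
org.apache.spark.sql.internal.SessionStateHelper: a single explicit
getSqlConf(sparkSession) entry point, which presumably is also easier for IDE
tooling to resolve than the repeated long access chains, in line with the
stated "Fix IDE indexing" motivation.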


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
