beliefer commented on a change in pull request #27691: [SPARK-30841][SQL][DOC] Add version information to the configuration of SQL
URL: https://github.com/apache/spark/pull/27691#discussion_r383743063
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
##########
@@ -265,51 +276,59 @@ object SQLConf {
.internal()
.doc("When true, dynamic partition pruning will only apply when the broadcast exchange of " +
  "a broadcast hash join operation can be reused as the dynamic pruning filter.")
+ .version("3.0.0")
.booleanConf
.createWithDefault(true)
val COMPRESS_CACHED =
buildConf("spark.sql.inMemoryColumnarStorage.compressed")
.doc("When set to true Spark SQL will automatically select a compression codec for each " +
  "column based on statistics of the data.")
+ .version("1.0.1")
.booleanConf
.createWithDefault(true)
val COLUMN_BATCH_SIZE =
buildConf("spark.sql.inMemoryColumnarStorage.batchSize")
.doc("Controls the size of batches for columnar caching. Larger batch sizes can improve " +
  "memory utilization and compression, but risk OOMs when caching data.")
+ .version("1.1.1")
.intConf
.createWithDefault(10000)
val IN_MEMORY_PARTITION_PRUNING =
buildConf("spark.sql.inMemoryColumnarStorage.partitionPruning")
.internal()
.doc("When true, enable partition pruning for in-memory columnar tables.")
+ .version("1.2.0")
.booleanConf
.createWithDefault(true)
val IN_MEMORY_TABLE_SCAN_STATISTICS_ENABLED =
buildConf("spark.sql.inMemoryTableScanStatistics.enable")
.internal()
.doc("When true, enable in-memory table scan accumulators.")
+ .version("3.0.0")
.booleanConf
.createWithDefault(false)
val CACHE_VECTORIZED_READER_ENABLED =
buildConf("spark.sql.inMemoryColumnarStorage.enableVectorizedReader")
.doc("Enables vectorized reader for columnar caching.")
+ .version("2.3.1")
.booleanConf
.createWithDefault(true)
val COLUMN_VECTOR_OFFHEAP_ENABLED =
buildConf("spark.sql.columnVector.offheap.enabled")
.internal()
.doc("When true, use OffHeapColumnVector in ColumnarBatch.")
+ .version("2.3.0")
.booleanConf
.createWithDefault(false)
val PREFER_SORTMERGEJOIN = buildConf("spark.sql.join.preferSortMergeJoin")
.internal()
.doc("When true, prefer sort merge join over shuffle hash join.")
+ .version("2.0.0")
Review comment:
SPARK-13977, commit ID:
9c23c818ca0175c8f2a4a66eac261ec251d27c97#diff-32bb9518401c0948c5ea19377b5069ab
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]