beliefer commented on a change in pull request #27730: [SPARK-30841][SQL][DOC][FOLLOW-UP] Add version information to the configuration of SQL
URL: https://github.com/apache/spark/pull/27730#discussion_r385538107
##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
##########
@@ -803,47 +814,55 @@ object SQLConf {
"scanned are partition columns and the query has an aggregate operator
that satisfies " +
"distinct semantics. By default the optimization is disabled, since it
may return " +
"incorrect results when the files are empty.")
+ .version("2.1.1")
.booleanConf
.createWithDefault(false)
val COLUMN_NAME_OF_CORRUPT_RECORD =
buildConf("spark.sql.columnNameOfCorruptRecord")
.doc("The name of internal column for storing raw/un-parsed JSON and CSV
records that fail " +
"to parse.")
+ .version("1.2.0")
.stringConf
.createWithDefault("_corrupt_record")
val BROADCAST_TIMEOUT = buildConf("spark.sql.broadcastTimeout")
.doc("Timeout in seconds for the broadcast wait time in broadcast joins.")
+ .version("1.3.0")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString(s"${5 * 60}")
// This is only used for the thriftserver
val THRIFTSERVER_POOL = buildConf("spark.sql.thriftserver.scheduler.pool")
.doc("Set a Fair Scheduler pool for a JDBC client session.")
+ .version("1.1.1")
.stringConf
.createOptional
val THRIFTSERVER_INCREMENTAL_COLLECT =
buildConf("spark.sql.thriftServer.incrementalCollect")
.internal()
.doc("When true, enable incremental collection for execution in Thrift
Server.")
+ .version("2.0.3")
.booleanConf
.createWithDefault(false)
val THRIFTSERVER_UI_STATEMENT_LIMIT =
buildConf("spark.sql.thriftserver.ui.retainedStatements")
.doc("The number of SQL statements kept in the JDBC/ODBC web UI
history.")
+ .version("1.4.0")
.intConf
.createWithDefault(200)
val THRIFTSERVER_UI_SESSION_LIMIT =
buildConf("spark.sql.thriftserver.ui.retainedSessions")
.doc("The number of SQL client sessions kept in the JDBC/ODBC web UI
history.")
+ .version("1.4.0")
.intConf
.createWithDefault(200)
// This is used to set the default data source
val DEFAULT_DATA_SOURCE_NAME = buildConf("spark.sql.sources.default")
.doc("The default data source to use in input/output.")
+ .version("1.3.0")
Review comment:
   SPARK-5658, commit ID: a21090ebe1ef7a709709300712de7d928a923244#diff-41ef65b9ef5b518f77e2a03559893f4d (first released in Spark 1.3.0, matching the `.version("1.3.0")` added above).
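   For readers skimming this thread, a minimal sketch of the pattern being annotated is below. It is illustrative only: it uses `ConfigBuilder` from `org.apache.spark.internal.config` directly (the builder that `SQLConf.buildConf` wraps; note the builder is `private[spark]`, so real code must live inside the `org.apache.spark` package tree), and the object name `VersionedConfExample` is invented for this example.

   ```scala
   import org.apache.spark.internal.config.ConfigBuilder

   object VersionedConfExample {
     // Declare an entry the same way the diff above does: doc text, the
     // Spark release that introduced the key, its type, and its default.
     val DEFAULT_DATA_SOURCE_NAME = ConfigBuilder("spark.sql.sources.default")
       .doc("The default data source to use in input/output.")
       .version("1.3.0") // introduced by SPARK-5658, first shipped in Spark 1.3.0
       .stringConf
       .createWithDefault("parquet")

     def main(args: Array[String]): Unit = {
       // The version string is carried on the resulting ConfigEntry, so
       // documentation tooling can surface it as a "Since Version" column.
       println(DEFAULT_DATA_SOURCE_NAME.key)     // spark.sql.sources.default
       println(DEFAULT_DATA_SOURCE_NAME.version) // 1.3.0
     }
   }
   ```

   Keeping the version next to each declaration, rather than in a separate table, is what lets a follow-up like this PR backfill the history entry by entry from the introducing JIRA and commit.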