Github user rxin commented on a diff in the pull request:
https://github.com/apache/spark/pull/6747#discussion_r32148163
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala ---
@@ -131,44 +340,42 @@ private[sql] class SQLConf extends Serializable with CatalystConf {
    * Note that the choice of dialect does not affect things like what tables are available or
    * how query execution is performed.
    */
-  private[spark] def dialect: String = getConf(DIALECT, "sql")
+  private[spark] def dialect: String = getConf(DIALECT)
   /** When true tables cached using the in-memory columnar caching will be compressed. */
-  private[spark] def useCompression: Boolean = getConf(COMPRESS_CACHED, "true").toBoolean
+  private[spark] def useCompression: Boolean = getConf(COMPRESS_CACHED)
   /** The compression codec for writing to a Parquetfile */
-  private[spark] def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION, "gzip")
+  private[spark] def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
+
+  private[spark] def parquetCacheMetadata: Boolean = getConf(PARQUET_CACHE_METADATA)
   /** The number of rows that will be */
-  private[spark] def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE, "10000").toInt
+  private[spark] def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE)
   /** Number of partitions to use for shuffle operators. */
-  private[spark] def numShufflePartitions: Int = getConf(SHUFFLE_PARTITIONS, "200").toInt
+  private[spark] def numShufflePartitions: Int = getConf(SHUFFLE_PARTITIONS)
   /** When true predicates will be passed to the parquet record reader when possible. */
-  private[spark] def parquetFilterPushDown =
-    getConf(PARQUET_FILTER_PUSHDOWN_ENABLED, "false").toBoolean
+  private[spark] def parquetFilterPushDown: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_ENABLED)
   /** When true uses Parquet implementation based on data source API */
-  private[spark] def parquetUseDataSourceApi =
-    getConf(PARQUET_USE_DATA_SOURCE_API, "true").toBoolean
+  private[spark] def parquetUseDataSourceApi: Boolean = getConf(PARQUET_USE_DATA_SOURCE_API)
-  private[spark] def orcFilterPushDown =
-    getConf(ORC_FILTER_PUSHDOWN_ENABLED, "false").toBoolean
+  private[spark] def orcFilterPushDown: Boolean = getConf(ORC_FILTER_PUSHDOWN_ENABLED)
   /** When true uses verifyPartitionPath to prune the path which is not exists. */
-  private[spark] def verifyPartitionPath =
-    getConf(HIVE_VERIFY_PARTITIONPATH, "true").toBoolean
+  private[spark] def verifyPartitionPath = getConf(HIVE_VERIFY_PARTITIONPATH)
--- End diff --
can we add return type for all these functions?
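
For reference, a minimal sketch of the typed-entry pattern this diff moves to, with explicit return types on the accessors. The ConfEntry/ExampleConf names and the map-backed storage are illustrative stand-ins, not necessarily the exact classes introduced by this PR:

// Illustrative sketch only; names are hypothetical, not the PR's actual API.
case class ConfEntry[T](key: String, defaultValue: T, converter: String => T)

class ExampleConf {
  private val settings = new java.util.concurrent.ConcurrentHashMap[String, String]()

  def setConfString(key: String, value: String): Unit = settings.put(key, value)

  // Because the entry carries its type, getConf returns T directly and the
  // accessor no longer needs ".toBoolean"/".toInt" on a string default.
  def getConf[T](entry: ConfEntry[T]): T =
    Option(settings.get(entry.key)).map(entry.converter).getOrElse(entry.defaultValue)

  val COMPRESS_CACHED: ConfEntry[Boolean] =
    ConfEntry("spark.sql.inMemoryColumnarStorage.compressed", true, _.toBoolean)

  // Explicit return type on the accessor, as asked for in the review comment.
  def useCompression: Boolean = getConf(COMPRESS_CACHED)
}

Under that shape, annotating the remaining accessors (e.g. verifyPartitionPath: Boolean) is a small, mechanical change and keeps the class's public surface explicit.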