cloud-fan commented on a change in pull request #30815:
URL: https://github.com/apache/spark/pull/30815#discussion_r544822490
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/CacheManager.scala
##########
@@ -88,12 +88,24 @@ class CacheManager extends Logging with
AdaptiveSparkPlanHelper {
query: Dataset[_],
tableName: Option[String] = None,
storageLevel: StorageLevel = MEMORY_AND_DISK): Unit = {
- val planToCache = query.logicalPlan
+ cacheQueryWithLogicalPlan(query.sparkSession, query.logicalPlan,
tableName, storageLevel)
+ }
+
+ /**
+ * Caches the data produced by the given [[LogicalPlan]].
+ * Unlike `RDD.cache()`, the default storage level is set to
`MEMORY_AND_DISK` because
+ * recomputing the in-memory columnar representation of the underlying table
is expensive.
+ */
+ def cacheQueryWithLogicalPlan(
+ spark: SparkSession,
+ planToCache: LogicalPlan,
+ tableName: Option[String] = None,
Review comment:
does this method need default parameter values?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]