beliefer commented on a change in pull request #32513:
URL: https://github.com/apache/spark/pull/32513#discussion_r645379831
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala
##########
@@ -67,12 +67,38 @@ object HiveResult {
       command.executeCollect().map(_.getString(1))
     case other =>
       val timeFormatters = getTimeFormatters
-      val result: Seq[Seq[Any]] = other.executeCollectPublic().map(_.toSeq).toSeq
-      // We need the types so we can output struct field names
-      val types = executedPlan.output.map(_.dataType)
-      // Reformat to match hive tab delimited output.
-      result.map(_.zip(types).map(e => toHiveString(e, false, timeFormatters)))
-        .map(_.mkString("\t"))
+      val commandPhysicalPlans = other collect {
Review comment:
OK
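
For context, a minimal self-contained sketch of the tab-delimited formatting the removed lines performed. `hiveString` below is a simplified stand-in for `toHiveString` (the real method also takes the column's `DataType` and the session time formatters), and the sample row is invented for illustration:

```scala
// Sketch of Hive-style tab-delimited row rendering, not the actual
// HiveResult internals.
object TabDelimitedSketch {
  // Simplified stand-in for toHiveString: nulls render as "NULL",
  // everything else via toString.
  private def hiveString(value: Any): String =
    if (value == null) "NULL" else value.toString

  // Mirrors the removed logic: render each value, then join the columns
  // of each row with tabs to match Hive's CLI output.
  def format(result: Seq[Seq[Any]]): Seq[String] =
    result.map(_.map(hiveString).mkString("\t"))

  def main(args: Array[String]): Unit = {
    // Prints "1<TAB>a<TAB>NULL" (with a real tab character).
    format(Seq(Seq(1, "a", null))).foreach(println)
  }
}
```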
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
##########
@@ -73,12 +76,33 @@ class QueryExecution(
     sparkSession.sessionState.analyzer.executeAndCheck(logical, tracker)
   }
 
+  // SPARK-35378: Commands should be executed eagerly so that `sql("INSERT ...")` can trigger the
+  // table insertion immediately without a `.collect()`. We also need to eagerly execute non-root
+  // commands, because many commands return `GenericInternalRow` and can't be put in a query plan
+  // directly, otherwise the query engine may cast `GenericInternalRow` to `UnsafeRow` and fail.
+  lazy val commandExecuted: LogicalPlan = if (isExecutingCommand) {
+    analyzed.mapChildren(eagerlyExecuteCommands)
+  } else {
+    eagerlyExecuteCommands(analyzed)
+  }
+
+  private def eagerlyExecuteCommands(p: LogicalPlan) = p transformDown {
+    case c: Command =>
+      val qe = sparkSession.sessionState.executePlan(c, true)
+      CommandResult(
+        qe.analyzed.output,
+        qe.commandExecuted,
+        qe.executedPlan,
+        SQLExecution.withNewExecutionId(qe, name)(qe.executedPlan.executeCollect()))
+    case other => other
+  }
+
   lazy val withCachedData: LogicalPlan = sparkSession.withActive {
     assertAnalyzed()
     assertSupported()
     // clone the plan to avoid sharing the plan instance between different stages like analyzing,
     // optimizing and planning.
-    sparkSession.sharedState.cacheManager.useCachedData(analyzed.clone())
+    sparkSession.sharedState.cacheManager.useCachedData(commandExecuted.clone())
   }
 
   lazy val optimizedPlan: LogicalPlan = executePhase(QueryPlanningTracker.OPTIMIZATION) {
Review comment:
OK
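
To make the motivation in the SPARK-35378 comment concrete, a hedged usage sketch follows; the table name, schema, and local session setup are illustrative and not part of the patch:

```scala
import org.apache.spark.sql.SparkSession

// Sketch of the user-visible behavior the comment above describes:
// sql("INSERT ...") triggers the table insertion immediately, with no
// .collect() required on the returned DataFrame.
object EagerInsertSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").getOrCreate()
    spark.sql("CREATE TABLE t(id INT) USING parquet")
    // The command is executed eagerly via commandExecuted; the write
    // happens here rather than being deferred until the result is consumed.
    spark.sql("INSERT INTO t VALUES (1)")
    assert(spark.table("t").count() == 1)
    spark.stop()
  }
}
```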