cloud-fan commented on a change in pull request #25990: [SPARK-29248][SQL] Pass in number of partitions to WriteBuilder
URL: https://github.com/apache/spark/pull/25990#discussion_r346753863
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V1FallbackWriters.scala
##########
@@ -98,22 +96,21 @@ sealed trait V1FallbackWriters extends SupportsV1Write {
}
protected def newWriteBuilder(): V1WriteBuilder = {
- val writeBuilder = table.newWriteBuilder(writeOptions)
- .withInputDataSchema(plan.schema)
- .withQueryId(UUID.randomUUID().toString)
+ val writeInfo = LogicalWriteInfoImpl(
+ queryId = UUID.randomUUID().toString,
+ schema = query.schema)
+ val writeBuilder = table.newWriteBuilder(writeOptions, writeInfo)
+
writeBuilder.asV1Builder
}
}
/**
* A trait that allows Tables that use V1 Writer interfaces to append data.
*/
-trait SupportsV1Write extends SparkPlan {
- // TODO: We should be able to work on SparkPlans at this point.
- def plan: LogicalPlan
-
+trait SupportsV1Write extends SparkPlan with WriteBase {
protected def writeWithV1(relation: InsertableRelation): RDD[InternalRow] = {
- relation.insert(Dataset.ofRows(sqlContext.sparkSession, plan), overwrite = false)
+ relation.insert(sqlContext.internalCreateDataFrame(rdd, query.schema), overwrite = false)
Review comment:
This is a breaking change: previously the v1 implementation could get the
original query plan, but now it can't.
Since we don't need to create a writer factory for the v1 write path, I don't
think we need to pass the physical schema to the v1 write.
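
To make the concern concrete, here is a minimal sketch (not from this PR; the class name is hypothetical) of a v1 `InsertableRelation` that inspects the plan behind the DataFrame it is asked to insert. With the old `Dataset.ofRows(sparkSession, plan)` call it sees the original query plan; with `internalCreateDataFrame(rdd, query.schema)` it only sees an RDD-backed leaf plan:

```scala
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.sources.InsertableRelation

// Hypothetical v1 relation (illustration only) that looks at the plan
// behind the DataFrame it receives.
class PlanInspectingRelation extends InsertableRelation {
  override def insert(data: DataFrame, overwrite: Boolean): Unit = {
    // Before this change, `data` was created with Dataset.ofRows(sparkSession, plan),
    // so queryExecution.logical is the original logical plan of the query being written.
    // After the change, `data` wraps an already-computed RDD[InternalRow], so only an
    // RDD-backed leaf plan is visible and the original query structure is lost.
    println(data.queryExecution.logical.treeString)
    // ... actual write logic would go here ...
  }
}
```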