rdblue commented on a change in pull request #24980: [SPARK-28178][SQL] DataSourceV2: DataFrameWriter.insertInto
URL: https://github.com/apache/spark/pull/24980#discussion_r310240788
 
 

 ##########
 File path: sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
 ##########
 @@ -370,6 +371,45 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
       )
     }
 
 +    df.sparkSession.sessionState.sqlParser.parseMultipartIdentifier(tableName) match {
+      case CatalogObjectIdentifier(Some(catalog), ident) =>
+        insertInto(catalog, ident)
+      case AsTableIdentifier(tableIdentifier) =>
+        insertInto(tableIdentifier)
+    }
+  }
+
+  private def insertInto(catalog: CatalogPlugin, ident: Identifier): Unit = {
+    import org.apache.spark.sql.catalog.v2.CatalogV2Implicits._
+
 +    val table = DataSourceV2Relation.create(catalog.asTableCatalog.loadTable(ident))
+
+    val command = modeForDSV2 match {
+      case SaveMode.Append =>
+        AppendData.byName(table, df.logicalPlan)
 
 Review comment:
   > If we are going to create a new dataframe writer API in the future, I'd like to keep it as it is, and always do by-position in this insertInto.
   
   Sounds good to me. I'll submit a PR for the new API.
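   
   For anyone following along, here is a minimal sketch of the by-position vs. by-name distinction being discussed. The table name `t`, its schema, and the sample data are hypothetical, and the sketch assumes a local SparkSession and a Parquet-backed table:
   
       import org.apache.spark.sql.SparkSession
   
       val spark = SparkSession.builder().master("local[*]").getOrCreate()
       import spark.implicits._
   
       // Hypothetical target table with schema (a INT, b STRING).
       spark.sql("CREATE TABLE t (a INT, b STRING) USING parquet")
   
       // Source DataFrame whose columns are in the opposite order: (b, a).
       val src = Seq(("x", 1), ("y", 2)).toDF("b", "a")
   
       // By-position (insertInto): columns are matched by ordinal, so the
       // source must be reordered explicitly to line up with the table schema.
       src.select($"a", $"b").write.insertInto("t")
   
       // By-name (append via saveAsTable): columns are matched by name, so
       // the original ordering of `src` is fine.
       src.write.mode("append").saveAsTable("t")
   
   Keeping `insertInto` by-position preserves that existing asymmetry between the two write paths.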
