Github user rxin commented on a diff in the pull request:
https://github.com/apache/spark/pull/354#discussion_r11375848
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/SchemaRDDLike.scala ---
@@ -62,4 +64,41 @@ trait SchemaRDDLike {
   def registerAsTable(tableName: String): Unit = {
     sqlContext.registerRDDAsTable(baseSchemaRDD, tableName)
   }
+
+  /**
+   * <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
+   *
+   * Adds the rows from this RDD to the specified table, optionally overwriting the existing data.
+   *
+   * @group schema
+   */
+  def insertInto(tableName: String, overwrite: Boolean): Unit =
+    sqlContext.executePlan(
+      InsertIntoTable(UnresolvedRelation(None, tableName), Map.empty, logicalPlan, overwrite)).toRdd
+
+  /**
+   * <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
+   *
+   * Appends the rows from this RDD to the specified table.
+   *
+   * @group schema
+   */
+  def insertInto(tableName: String): Unit = insertInto(tableName, false)
+
+  /**
+   * <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
+   *
+   * Creates a table from the contents of this SchemaRDD. This will fail if the table already
+   * exists.
+   *
+   * Note that this currently only works with SchemaRDDs that are created from a HiveContext, as
+   * there is no notion of a persisted catalog in a standard SQL context. Instead, you can write
+   * an RDD out to a parquet file, and then register that file as a table. This "table" can then
+   * be the target of an `insertInto`.
+   *
+   * @param tableName
+   */
+  def createTableAs(tableName: String) =
--- End diff ---
And I think the next line doesn't need wrapping (it should fit within 100 chars).
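
For readers following along, here is a minimal usage sketch of the methods proposed in this diff, assuming the Spark 1.0-era SchemaRDD API. The `Person` case class, paths, and table names are hypothetical, and `sc` is an existing SparkContext; this is an illustration, not code from the PR:

```scala
import org.apache.spark.sql.hive.HiveContext

// Hypothetical data for illustration only.
case class Person(name: String, age: Int)

val hiveContext = new HiveContext(sc)
import hiveContext._  // brings in the implicit RDD-to-SchemaRDD conversion and parquetFile

val people = sc.parallelize(Seq(Person("Alice", 30), Person("Bob", 25)))

// With a HiveContext, the RDD can be persisted directly as a new table;
// per the scaladoc, createTableAs fails if the table already exists.
people.createTableAs("people")

// Later batches can be appended, or passed with overwrite = true to replace the data.
people.insertInto("people")        // append
people.insertInto("people", true)  // overwrite existing rows

// With a plain SQLContext there is no persisted catalog, so (as the scaladoc
// suggests) write the RDD to a parquet file and register that file as a table,
// which can then be the target of an insertInto.
people.saveAsParquetFile("/tmp/people.parquet")
parquetFile("/tmp/people.parquet").registerAsTable("people_parquet")
```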