rdblue commented on a change in pull request #374: Migrate spark table to 
iceberg table
URL: https://github.com/apache/incubator-iceberg/pull/374#discussion_r317711446
 
 

 ##########
 File path: spark/src/main/scala/org/apache/iceberg/spark/SparkTableUtil.scala
 ##########
 @@ -297,5 +302,81 @@ object SparkTableUtil {
       )
     }
   }
+
+  private def buildManifest(table: Table,
+      sparkDataFiles: Seq[SparkDataFile],
+      partitionSpec: PartitionSpec): ManifestFile = {
+    val outputFile = table.io
+      .newOutputFile(FileFormat.AVRO.addExtension("/tmp/" + 
UUID.randomUUID.toString))
+    val writer = ManifestWriter.write(partitionSpec, outputFile)
+    try {
+      sparkDataFiles.foreach { file =>
+        writer.add(file.toDataFile(partitionSpec))
+      }
+    } finally {
+      writer.close()
+    }
+
+    writer.toManifestFile
+  }
+
+  /**
+   * Import a Spark table into an Iceberg table.
+   *
+   * The import uses the Spark session to get table metadata. It assumes no
+   * concurrent operations are running on either the original or the target
+   * table, and is therefore not thread-safe.
+   *
+   * @param source the identifier of the table to be imported
+   * @param location the location used to store table metadata
+   *
+   * @return the imported table
+   */
+  def importSparkTable(source: TableIdentifier, location: String): Table = {
+    val sparkSession = SparkSession.builder().getOrCreate()
+    import sparkSession.sqlContext.implicits._
+
+    val dbName = source.database.getOrElse("default")
+    val tableName = source.table
+
+    if (!sparkSession.catalog.tableExists(dbName, tableName)) {
+      throw new NoSuchTableException(s"Table $dbName.$tableName does not 
exist")
+    }
+
+    val partitionSpec = SparkSchemaUtil.specForTable(sparkSession, 
s"$dbName.$tableName")
+    val conf = sparkSession.sparkContext.hadoopConfiguration
+    val tables = new HadoopTables(conf)
+    val schema = SparkSchemaUtil.schemaForTable(sparkSession, 
s"$dbName.$tableName")
+    val table = tables.create(schema, partitionSpec, ImmutableMap.of(), 
location)
+    val appender = table.newAppend()
+
+    if (partitionSpec == PartitionSpec.unpartitioned) {
+      val tableMetadata = 
sparkSession.sessionState.catalog.getTableMetadata(source)
+      val format = tableMetadata.provider.getOrElse("none")
+
+      if (format != "avro" && format != "parquet" && format != "orc") {
+        throw new UnsupportedOperationException(s"Unsupported format: $format")
+      }
+      listPartition(Map.empty[String, String], tableMetadata.location.toString,
+        format).foreach{f => 
appender.appendFile(f.toDataFile(PartitionSpec.unpartitioned))}
+      appender.commit()
+    } else {
+      val partitions = partitionDF(sparkSession, s"$dbName.$tableName")
+      partitions.flatMap { row =>
+        listPartition(row.getMap[String, String](0).toMap, row.getString(1), 
row.getString(2))
+      }.coalesce(1).mapPartitions {
 
 Review comment:
   Here's `writeManifest`:
   
   ```scala
     def writeManifest(
         conf: SerializableConfiguration,
         spec: PartitionSpec,
         basePath: String): Iterator[SparkDataFile] => Iterator[Manifest] = {
       files =>
         if (files.hasNext) {
           val ctx = TaskContext.get()
           val manifestLocation = new Path(basePath,
             
s"stage-${ctx.stageId()}-task-${ctx.taskAttemptId()}-manifest.avro").toString
           val io = new HadoopFileIO(conf.value)
           val writer = ManifestWriter.write(spec, 
io.newOutputFile(manifestLocation))
   
           try {
             files.foreach { file =>
               writer.add(file.toDataFile(spec))
             }
           } finally {
             writer.close()
           }
   
           val manifest = writer.toManifestFile
           Seq(Manifest(manifest.path, manifest.length, 
manifest.partitionSpecId)).iterator
   
         } else {
           Seq.empty.iterator
         }
     }
   ```

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to