This is an automated email from the ASF dual-hosted git repository.
szehon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git
The following commit(s) were added to refs/heads/master by this push:
new 7a44ee2f84 Spark 3.4: Allow importing empty tables (#7980)
7a44ee2f84 is described below
commit 7a44ee2f8447eb571f9d78615ddcfae3a084192f
Author: Rui Li <[email protected]>
AuthorDate: Thu Jul 13 04:11:02 2023 +0800
Spark 3.4: Allow importing empty tables (#7980)
---
.../extensions/TestMigrateTableProcedure.java | 22 ++++++++++++++++++++++
.../org/apache/iceberg/spark/SparkTableUtil.java | 12 ++++++------
2 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestMigrateTableProcedure.java b/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestMigrateTableProcedure.java
index 52ba372958..2fc1044f7d 100644
--- a/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestMigrateTableProcedure.java
+++ b/spark/v3.4/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestMigrateTableProcedure.java
@@ -195,4 +195,26 @@ public class TestMigrateTableProcedure extends SparkExtensionsTestBase {
ImmutableList.of(row(1L, "2023/05/30", java.sql.Date.valueOf("2023-05-30"))),
sql("SELECT * FROM %s ORDER BY id", tableName));
}
+
+ @Test
+ public void testMigrateEmptyPartitionedTable() throws Exception {
+ Assume.assumeTrue(catalogName.equals("spark_catalog"));
+ String location = temp.newFolder().toString();
+ sql(
+ "CREATE TABLE %s (id bigint NOT NULL, data string) USING parquet PARTITIONED BY (id) LOCATION '%s'",
+ tableName, location);
+ Object result = scalarSql("CALL %s.system.migrate('%s')", catalogName, tableName);
+ Assert.assertEquals(0L, result);
+ }
+
+ @Test
+ public void testMigrateEmptyTable() throws Exception {
+ Assume.assumeTrue(catalogName.equals("spark_catalog"));
+ String location = temp.newFolder().toString();
+ sql(
+ "CREATE TABLE %s (id bigint NOT NULL, data string) USING parquet LOCATION '%s'",
+ tableName, location);
+ Object result = scalarSql("CALL %s.system.migrate('%s')", catalogName, tableName);
+ Assert.assertEquals(0L, result);
+ }
}
diff --git a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java
index 6fe40a245c..88b752c3c6 100644
--- a/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java
+++ b/spark/v3.4/spark/src/main/java/org/apache/iceberg/spark/SparkTableUtil.java
@@ -404,12 +404,12 @@ public class SparkTableUtil {
} else {
List<SparkPartition> sourceTablePartitions =
getPartitions(spark, sourceTableIdent, partitionFilter);
- Preconditions.checkArgument(
- !sourceTablePartitions.isEmpty(),
- "Cannot find any partitions in table %s",
- sourceTableIdent);
- importSparkPartitions(
- spark, sourceTablePartitions, targetTable, spec, stagingDir, checkDuplicateFiles);
+ if (sourceTablePartitions.isEmpty()) {
+ targetTable.newAppend().commit();
+ } else {
+ importSparkPartitions(
+ spark, sourceTablePartitions, targetTable, spec, stagingDir, checkDuplicateFiles);
+ }
}
} catch (AnalysisException e) {
throw SparkExceptionUtil.toUncheckedException(