moulimukherjee commented on a change in pull request #1241:
URL: https://github.com/apache/iceberg/pull/1241#discussion_r460232691
##########
File path:
spark/src/test/java/org/apache/iceberg/spark/source/TestDataSourceOptions.java
##########
@@ -364,4 +364,28 @@ public void testDefaultMetadataSplitSize() throws
IOException {
int partitionNum = metadataDf.javaRDD().getNumPartitions();
Assert.assertEquals("Spark partitions should match", expectedSplits,
partitionNum);
}
+
+ @Test
+ public void testExtraSnapshotMetadata() throws IOException {
+ String tableLocation = temp.newFolder("iceberg-table").toString();
+ HadoopTables tables = new HadoopTables(CONF);
+ tables.create(SCHEMA, PartitionSpec.unpartitioned(), Maps.newHashMap(),
tableLocation);
+
+ List<SimpleRecord> expectedRecords = Lists.newArrayList(
+ new SimpleRecord(1, "a"),
+ new SimpleRecord(2, "b")
+ );
+ Dataset<Row> originalDf = spark.createDataFrame(expectedRecords,
SimpleRecord.class);
+ originalDf.select("id", "data").write()
+ .format("iceberg")
+ .mode("append")
+ .option("extra-metadata.extra-key", "someValue")
+ .option("extra-metadata.another-key", "anotherValue")
+ .save(tableLocation);
+
+ Table table = tables.load(tableLocation);
+
+
Assert.assertTrue(table.currentSnapshot().summary().get("extra-key").equals("someValue"));
Review comment:
This adds the plumbing to pass extra snapshot metadata through from the write options.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]