CodingCat commented on code in PR #7649:
URL: https://github.com/apache/iceberg/pull/7649#discussion_r1204943222
##########
spark/v3.3/spark/src/main/java/org/apache/iceberg/spark/source/SparkPositionDeltaWrite.java:
##########
@@ -157,6 +157,7 @@ public DeltaWriterFactory createBatchWriterFactory(PhysicalWriteInfo info) {
     @Override
     public void commit(WriterCommitMessage[] messages) {
+
Review Comment:
updated
##########
spark/v3.3/spark/src/test/java/org/apache/iceberg/spark/source/TestDataSourceOptions.java:
##########
@@ -448,4 +451,52 @@ public void testExtraSnapshotMetadataWithSQL() throws InterruptedException, IOEx
     Assert.assertTrue(threadNames.contains(null));
     Assert.assertTrue(threadNames.contains("test-extra-commit-message-writer-thread"));
   }
+
+  @Test
+  public void testExtraSnapshotMetadataWithDelete()
+      throws InterruptedException, IOException, NoSuchTableException {
+    spark.sessionState().conf().setConfString("spark.sql.shuffle.partitions", "1");
+    sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName);
+    List<SimpleRecord> expectedRecords =
+        Lists.newArrayList(
+            new SimpleRecord(1, "a"), new SimpleRecord(2, "b"), new SimpleRecord(3, "c"));
+    Dataset<Row> originalDf = spark.createDataFrame(expectedRecords, SimpleRecord.class);
+    originalDf.repartition(5, new Column("data")).select("id", "data").writeTo(tableName).append();
+    spark.sql("SELECT * from " + tableName + ".files").show();
+    System.out.println(
+        spark
+            .sql("EXPLAIN DELETE FROM " + tableName + " where id = 1")
+            .collectAsList()
+            .get(0)
+            .get(0));
+    System.out.println("finished inserting");
+    Thread writerThread =
+        new Thread(
+            () -> {
+              Map<String, String> properties = Maps.newHashMap();
+              properties.put("writer-thread", String.valueOf(Thread.currentThread().getName()));
+              CommitMetadata.withCommitProperties(
+                  properties,
+                  () -> {
+                    spark.sql("DELETE FROM " + tableName + " where id = 1");
+                    return 0;
+                  },
+                  RuntimeException.class);
+            });
+    writerThread.setName("test-extra-commit-message-delete-thread");
+    writerThread.start();
+    writerThread.join();
+    Set<String> threadNames = Sets.newHashSet();
+    spark.sql("SELECT * from " + tableName).show();
+    Table table = validationCatalog.loadTable(tableIdent);
+    for (Snapshot snapshot : table.snapshots()) {
+      threadNames.add(snapshot.summary().get("writer-thread"));
+    }
+    for (String t : threadNames) {
+      System.out.println(t);
+    }
Review Comment:
oops, forgot to clean up
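
For reference, a minimal sketch of how the tail of the test might look after that cleanup: the debug show()/println calls are dropped and replaced with an explicit assertion on the snapshot summary. The asserted thread name mirrors the one set on writerThread above; the exact assertion is an assumption about the eventual cleanup, not part of this PR.

    // Sketch of a cleaned-up tail (assumes the same setup and writerThread as above).
    writerThread.setName("test-extra-commit-message-delete-thread");
    writerThread.start();
    writerThread.join();

    Set<String> threadNames = Sets.newHashSet();
    Table table = validationCatalog.loadTable(tableIdent);
    for (Snapshot snapshot : table.snapshots()) {
      threadNames.add(snapshot.summary().get("writer-thread"));
    }
    // The DELETE commit should carry the property set via CommitMetadata.withCommitProperties.
    Assert.assertTrue(threadNames.contains("test-extra-commit-message-delete-thread"));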