rdblue commented on code in PR #6965:
URL: https://github.com/apache/iceberg/pull/6965#discussion_r1125766679


##########
spark/v3.3/spark-extensions/src/test/java/org/apache/iceberg/spark/extensions/TestDelete.java:
##########
@@ -276,69 +280,72 @@ public void testDeleteNonExistingRecords() {
     assertEquals(
         "Should have expected rows",
         ImmutableList.of(row(1, "hr"), row(2, "hardware"), row(null, "hr")),
-        sql("SELECT * FROM %s ORDER BY id ASC NULLS LAST", tableName));
+        sql("SELECT * FROM %s ORDER BY id ASC NULLS LAST", selectTarget()));
   }
 
   @Test
   public void testDeleteWithoutCondition() {
     createAndInitPartitionedTable();
 
-    sql("INSERT INTO TABLE %s VALUES (1, 'hr')", tableName);
-    sql("INSERT INTO TABLE %s VALUES (2, 'hardware')", tableName);
-    sql("INSERT INTO TABLE %s VALUES (null, 'hr')", tableName);
+    sql("INSERT INTO TABLE %s VALUES (1, 'hr')", queryTarget());
+    sql("INSERT INTO TABLE %s VALUES (2, 'hardware')", queryTarget());
+    sql("INSERT INTO TABLE %s VALUES (null, 'hr')", queryTarget());
 
-    sql("DELETE FROM %s", tableName);
+    sql("DELETE FROM %s", queryTarget());
 
     Table table = validationCatalog.loadTable(tableIdent);
     Assert.assertEquals("Should have 4 snapshots", 4, Iterables.size(table.snapshots()));
 
     // should be a delete instead of an overwrite as it is done through a metadata operation
-    Snapshot currentSnapshot = table.currentSnapshot();
+    Snapshot currentSnapshot = SnapshotUtil.latestSnapshot(table, branch);
     validateDelete(currentSnapshot, "2", "3");
 
     assertEquals(
-        "Should have expected rows", ImmutableList.of(), sql("SELECT * FROM 
%s", tableName));
+        "Should have expected rows", ImmutableList.of(), sql("SELECT * FROM 
%s", queryTarget()));
   }
 
   @Test
   public void testDeleteUsingMetadataWithComplexCondition() {
+    Assume.assumeTrue("test".equals(branch));

Review Comment:
   Looks like `deleteWhere` in `SparkTable` wasn't updated to use the branch.
   
   Updating that method makes all of these cases pass when I remove the `assumeTrue` check:
   
   ```java
     @Override
     public void deleteWhere(Filter[] filters) {
       Expression deleteExpr = SparkFilters.convert(filters);
   
       if (deleteExpr == Expressions.alwaysFalse()) {
         LOG.info("Skipping the delete operation as the condition is always 
false");
         return;
       }
   
       DeleteFiles delete =
           icebergTable
               .newDelete()
               .set("spark.app.id", 
sparkSession().sparkContext().applicationId())
               .deleteFromRowFilter(deleteExpr);
   
       if (branch != null) {
         delete.toBranch(branch);
       }
   
       delete.commit();
     }
   ```
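   
   If it helps, here is a minimal standalone sketch of what `toBranch` does for a metadata delete: the commit lands on the branch while `main` stays put. The table, the branch name (`test`), and the filter (`dep = 'hr'`) are placeholders for illustration, not code from this PR:
   
   ```java
   import org.apache.iceberg.DeleteFiles;
   import org.apache.iceberg.Snapshot;
   import org.apache.iceberg.Table;
   import org.apache.iceberg.expressions.Expressions;
   import org.apache.iceberg.util.SnapshotUtil;
   
   class BranchDeleteSketch {
     // Assumes `table` was loaded from a catalog and already has a current snapshot.
     static Snapshot deleteFromBranch(Table table) {
       // Create the branch at the current snapshot (skip this if it already exists).
       table
           .manageSnapshots()
           .createBranch("test", table.currentSnapshot().snapshotId())
           .commit();
   
       // Build the metadata delete and direct the commit at the branch.
       DeleteFiles delete = table.newDelete().deleteFromRowFilter(Expressions.equal("dep", "hr"));
       delete.toBranch("test");
       delete.commit();
   
       // `main` is untouched; the branch head now reflects the delete.
       return SnapshotUtil.latestSnapshot(table, "test");
     }
   }
   ```
   
   The `if (branch != null)` guard in the fix keeps the existing behavior unchanged when no branch is configured, so deletes against `main` are unaffected.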


