singhpk234 commented on a change in pull request #4342:
URL: https://github.com/apache/iceberg/pull/4342#discussion_r828872749



##########
File path: aws/src/main/java/org/apache/iceberg/aws/AwsProperties.java
##########
@@ -244,6 +244,15 @@
    */
   public static final String S3_WRITE_TAGS_PREFIX = "s3.write.tags.";
 
+  /**
+   * Used by {@link S3FileIO} to tag objects when deleting. To set, we can pass a catalog property.
+   * <p>
+   * For more details, see https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
+   * <p>
+   * Example in Spark: --conf spark.sql.catalog.my_catalog.s3.delete.tags.my_key=my_val

Review comment:
       We can use `s3.delete.tags.my_key=my_val` as the example to make it engine-agnostic.

   ref: @rdblue's [suggestion](https://github.com/apache/iceberg/pull/4334#discussion_r828195275)
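
   For instance, an engine-agnostic snippet could show the property being passed straight to the `FileIO` (rough sketch only; the initialization call below is just for illustration, and the key/value are the PR's example ones):

   ```java
   import org.apache.iceberg.aws.s3.S3FileIO;
   import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;

   // Sketch: the same property works from any engine, since it is just a
   // catalog/FileIO property. Key and value are illustrative.
   S3FileIO fileIO = new S3FileIO();
   fileIO.initialize(ImmutableMap.of("s3.delete.tags.my_key", "my_val"));
   ```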

##########
File path: aws/src/main/java/org/apache/iceberg/aws/s3/S3FileIO.java
##########
@@ -128,36 +137,65 @@ public void deleteFile(String path) {
    */
   @Override
   public void deleteFiles(Iterable<String> paths) throws BulkDeletionFailureException {
-    SetMultimap<String, String> bucketToObjects = Multimaps.newSetMultimap(Maps.newHashMap(), Sets::newHashSet);
-    int numberOfFailedDeletions = 0;
-    for (String path : paths) {
-      S3URI location = new S3URI(path);
-      String bucket = location.bucket();
-      String objectKey = location.key();
-      Set<String> objectsInBucket = bucketToObjects.get(bucket);
-      if (objectsInBucket.size() == awsProperties.s3FileIoDeleteBatchSize()) {
-        List<String> failedDeletionsForBatch = deleteObjectsInBucket(bucket, objectsInBucket);
-        numberOfFailedDeletions += failedDeletionsForBatch.size();
-        failedDeletionsForBatch.forEach(failedPath -> LOG.warn("Failed to delete object at path {}", failedPath));
-        bucketToObjects.removeAll(bucket);
+    if (deleteTags.isEmpty()) {
+      SetMultimap<String, String> bucketToObjects = Multimaps
+          .newSetMultimap(Maps.newHashMap(), Sets::newHashSet);
+      int numberOfFailedDeletions = 0;
+      for (String path : paths) {
+        S3URI location = new S3URI(path);
+        String bucket = location.bucket();
+        String objectKey = location.key();
+        Set<String> objectsInBucket = bucketToObjects.get(bucket);
+        if (objectsInBucket.size() == awsProperties.s3FileIoDeleteBatchSize()) {
+          List<String> failedDeletionsForBatch = deleteObjectsInBucket(bucket, objectsInBucket);
+          numberOfFailedDeletions += failedDeletionsForBatch.size();
+          failedDeletionsForBatch
+              .forEach(failedPath -> LOG.warn("Failed to delete object at path {}", failedPath));
+          bucketToObjects.removeAll(bucket);
+        }
+        bucketToObjects.get(bucket).add(objectKey);
       }
-      bucketToObjects.get(bucket).add(objectKey);
-    }
 
-    // Delete the remainder
-    for (Map.Entry<String, Collection<String>> bucketToObjectsEntry : bucketToObjects.asMap().entrySet()) {
-      final String bucket = bucketToObjectsEntry.getKey();
-      final Collection<String> objects = bucketToObjectsEntry.getValue();
-      List<String> failedDeletions = deleteObjectsInBucket(bucket, objects);
-      failedDeletions.forEach(failedPath -> LOG.warn("Failed to delete object at path {}", failedPath));
-      numberOfFailedDeletions += failedDeletions.size();
-    }
+      // Delete the remainder
+      for (Map.Entry<String, Collection<String>> bucketToObjectsEntry : bucketToObjects.asMap()
+          .entrySet()) {
+        final String bucket = bucketToObjectsEntry.getKey();
+        final Collection<String> objects = bucketToObjectsEntry.getValue();
+        List<String> failedDeletions = deleteObjectsInBucket(bucket, objects);
+        failedDeletions
+            .forEach(failedPath -> LOG.warn("Failed to delete object at path {}", failedPath));
+        numberOfFailedDeletions += failedDeletions.size();
+      }
 
-    if (numberOfFailedDeletions > 0) {
-      throw new BulkDeletionFailureException(numberOfFailedDeletions);
+      if (numberOfFailedDeletions > 0) {
+        throw new BulkDeletionFailureException(numberOfFailedDeletions);
+      }
+    } else {
+      paths.forEach(this::doSoftDelete);

Review comment:
       [question] Doing this one path at a time can be slow, since we do a GET + PUT for each object. Can we use multithreading here?
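
   Something like this could work (rough sketch using Iceberg's `Tasks` utility; the pool size, retry count, and executor wiring are all illustrative, not a concrete proposal):

   ```java
   import java.util.concurrent.ExecutorService;
   import java.util.concurrent.Executors;
   import org.apache.iceberg.util.Tasks;

   // Sketch: fan the per-object GET + PUT out over a pool instead of
   // tagging each path serially.
   ExecutorService pool = Executors.newFixedThreadPool(8);
   try {
     Tasks.foreach(paths)
         .executeWith(pool)
         .retry(3)
         .run(this::doSoftDelete);
   } finally {
     pool.shutdown();
   }
   ```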

##########
File path: aws/src/main/java/org/apache/iceberg/aws/s3/S3FileIO.java
##########
@@ -128,36 +137,65 @@ public void deleteFile(String path) {
    */
   @Override
   public void deleteFiles(Iterable<String> paths) throws BulkDeletionFailureException {
-    SetMultimap<String, String> bucketToObjects = Multimaps.newSetMultimap(Maps.newHashMap(), Sets::newHashSet);
-    int numberOfFailedDeletions = 0;
-    for (String path : paths) {
-      S3URI location = new S3URI(path);
-      String bucket = location.bucket();
-      String objectKey = location.key();
-      Set<String> objectsInBucket = bucketToObjects.get(bucket);
-      if (objectsInBucket.size() == awsProperties.s3FileIoDeleteBatchSize()) {
-        List<String> failedDeletionsForBatch = deleteObjectsInBucket(bucket, objectsInBucket);
-        numberOfFailedDeletions += failedDeletionsForBatch.size();
-        failedDeletionsForBatch.forEach(failedPath -> LOG.warn("Failed to delete object at path {}", failedPath));
-        bucketToObjects.removeAll(bucket);
+    if (deleteTags.isEmpty()) {
+      SetMultimap<String, String> bucketToObjects = Multimaps
+          .newSetMultimap(Maps.newHashMap(), Sets::newHashSet);
+      int numberOfFailedDeletions = 0;
+      for (String path : paths) {
+        S3URI location = new S3URI(path);
+        String bucket = location.bucket();
+        String objectKey = location.key();
+        Set<String> objectsInBucket = bucketToObjects.get(bucket);
+        if (objectsInBucket.size() == awsProperties.s3FileIoDeleteBatchSize()) {
+          List<String> failedDeletionsForBatch = deleteObjectsInBucket(bucket, objectsInBucket);
+          numberOfFailedDeletions += failedDeletionsForBatch.size();
+          failedDeletionsForBatch
+              .forEach(failedPath -> LOG.warn("Failed to delete object at path {}", failedPath));
+          bucketToObjects.removeAll(bucket);
+        }
+        bucketToObjects.get(bucket).add(objectKey);
       }
-      bucketToObjects.get(bucket).add(objectKey);
-    }
 
-    // Delete the remainder
-    for (Map.Entry<String, Collection<String>> bucketToObjectsEntry : bucketToObjects.asMap().entrySet()) {
-      final String bucket = bucketToObjectsEntry.getKey();
-      final Collection<String> objects = bucketToObjectsEntry.getValue();
-      List<String> failedDeletions = deleteObjectsInBucket(bucket, objects);
-      failedDeletions.forEach(failedPath -> LOG.warn("Failed to delete object at path {}", failedPath));
-      numberOfFailedDeletions += failedDeletions.size();
-    }
+      // Delete the remainder
+      for (Map.Entry<String, Collection<String>> bucketToObjectsEntry : bucketToObjects.asMap()
+          .entrySet()) {
+        final String bucket = bucketToObjectsEntry.getKey();
+        final Collection<String> objects = bucketToObjectsEntry.getValue();
+        List<String> failedDeletions = deleteObjectsInBucket(bucket, objects);
+        failedDeletions
+            .forEach(failedPath -> LOG.warn("Failed to delete object at path {}", failedPath));
+        numberOfFailedDeletions += failedDeletions.size();
+      }
 
-    if (numberOfFailedDeletions > 0) {
-      throw new BulkDeletionFailureException(numberOfFailedDeletions);
+      if (numberOfFailedDeletions > 0) {
+        throw new BulkDeletionFailureException(numberOfFailedDeletions);
+      }
+    } else {
+      paths.forEach(this::doSoftDelete);
     }
   }
 
+  private void doSoftDelete(String path) {
+    S3URI location = new S3URI(path);
+    String bucket = location.bucket();
+    String objectKey = location.key();
+    GetObjectTaggingRequest getObjectTaggingRequest = GetObjectTaggingRequest.builder()
+        .bucket(bucket)
+        .key(objectKey)
+        .build();
+    GetObjectTaggingResponse getObjectTaggingResponse = client()
+        .getObjectTagging(getObjectTaggingRequest);
+    // Get existing tags, if any, and then add the delete tags
+    Set<Tag> tags = Sets.newHashSet(getObjectTaggingResponse.tagSet());
+    tags.addAll(deleteTags);
+    PutObjectTaggingRequest putObjectTaggingRequest = PutObjectTaggingRequest.builder()

Review comment:
       [question] What if someone updates the tags between our GET and our PUT? In that case we could lose tags that were added or removed in between, since our PUT overwrites the whole tag set. Any thoughts on this?
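
   To illustrate the interleaving I'm worried about (tag names and values are made up):

   ```java
   // Hypothetical lost-update sequence between two writers on the same key:
   //   A: GetObjectTagging(key)                       -> sees {owner=x}
   //   B: PutObjectTagging(key, {owner=x, pii=true})     // B adds pii=true
   //   A: PutObjectTagging(key, {owner=x, deleted=true})
   // B's pii=true tag is silently lost: PutObjectTagging replaces the
   // entire tag set and offers no precondition / compare-and-swap.
   ```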




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


