sadanand48 commented on code in PR #4842:
URL: https://github.com/apache/ozone/pull/4842#discussion_r1221743247


##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java:
##########
@@ -18,27 +18,220 @@
 
 package org.apache.hadoop.ozone.shell.volume;
 
+import com.google.common.base.Strings;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
+import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
 
 /**
  * Executes deleteVolume call for the shell.
  */
 @Command(name = "delete",
-    description = "deletes a volume if it is empty")
+    description = "deletes a volume")
 public class DeleteVolumeHandler extends VolumeHandler {
+  @CommandLine.Option(
+      names = {"-skipTrash"},
+      description = "Delete volume without trash"
+  )
+  private boolean bSkipTrash = false;
+  @CommandLine.Option(
+      names = {"-r"},
+      description = "Delete volume recursively"
+  )
+  private boolean bRecursive = false;
+  @CommandLine.Option(
+      names = {"-id", "--om-service-id"},
+      description = "Ozone Manager Service ID"
+  )
+  private String omServiceId;
+  private ExecutorService executor;
+  private List<String> bucketIdList = new ArrayList<>();
+  private AtomicInteger cleanedBucketCounter =
+      new AtomicInteger();
+  private int totalBucketCount;
+  private OzoneVolume vol;
+  private AtomicInteger numberOfBucketsCleaned = new AtomicInteger(0);
+  private volatile Throwable exception;
+  private static final int MAX_KEY_DELETE_BATCH_SIZE = 1000;
 
   @Override
   protected void execute(OzoneClient client, OzoneAddress address)
       throws IOException {
 
     String volumeName = address.getVolumeName();
-
+    try {
+      if (bRecursive) {
+        if (!bSkipTrash) {
+          out().printf("Use -skipTrash for recursive volume delete%n");
+          return;
+        }
+        if (OmUtils.isServiceIdsDefined(getConf()) &&
+            Strings.isNullOrEmpty(omServiceId)) {
+          out().printf("OmServiceID not provided, provide using " +
+              "-id <OM_SERVICE_ID>%n");
+          return;
+        }
+        vol = client.getObjectStore().getVolume(volumeName);
+        deleteVolumeRecursive();
+      }
+    } catch (InterruptedException e) {
+      out().printf("Exception while deleting volume recursively%n");
+      return;
+    }
     client.getObjectStore().deleteVolume(volumeName);
     out().printf("Volume %s is deleted%n", volumeName);
   }
+
+  private void deleteVolumeRecursive()
+      throws InterruptedException {
+    // Get all the buckets for given volume
+    Iterator<? extends OzoneBucket> bucketIterator =
+        vol.listBuckets(null);
+
+    while (bucketIterator.hasNext()) {
+      OzoneBucket bucket = bucketIterator.next();
+      bucketIdList.add(bucket.getName());
+      totalBucketCount++;
+    }
+    doCleanBuckets();
+  }
+
+  /**
+   * Clean OBS bucket recursively.
+   *
+   * @param  bucket OzoneBucket
+   * @return boolean
+   */
+  private boolean cleanOBSBucket(OzoneBucket bucket) {
+    ArrayList<String> keys = new ArrayList<>();
+    try {
+      if (!bucket.isLink()) {
+        Iterator<? extends OzoneKey> iterator = bucket.listKeys(null);
+        while (iterator.hasNext()) {
+          keys.add(iterator.next().getName());
+          if (MAX_KEY_DELETE_BATCH_SIZE == keys.size()) {
+            bucket.deleteKeys(keys);
+            keys.clear();
+          }
+        }
+        // delete if any remaining keys left
+        if (keys.size() > 0) {
+          bucket.deleteKeys(keys);
+        }
+      }
+      vol.deleteBucket(bucket.getName());
+      numberOfBucketsCleaned.getAndIncrement();
+      return true;
+    } catch (Exception e) {
+      LOG.error("Could not clean bucket ", e);
+      return false;
+    }
+  }
+
+  /**
+   * Clean Legacy/FSO bucket recursively.
+   *
+   * @param  bucket OzoneBucket
+   * @return boolean
+   */
+  private boolean cleanFSBucket(OzoneBucket bucket) {
+    try {
+      String hostPrefix = OZONE_OFS_URI_SCHEME + "://";
+      if (!Strings.isNullOrEmpty(omServiceId)) {
+        hostPrefix += omServiceId + PATH_SEPARATOR_STR;
+      }
+      String ofsPrefix = hostPrefix + vol.getName() + PATH_SEPARATOR_STR +
+          bucket.getName();
+      final Path path = new Path(ofsPrefix);
+      OzoneConfiguration clientConf = new OzoneConfiguration(getConf());
+      clientConf.set(FS_DEFAULT_NAME_KEY, hostPrefix);
+      clientConf.setInt(FS_TRASH_INTERVAL_KEY, 0);
+      FileSystem fs = FileSystem.get(clientConf);
+      if (!fs.delete(path, true)) {
+        throw new IOException("Failed to delete bucket");
+      }
+      numberOfBucketsCleaned.getAndIncrement();
+      return true;
+    } catch (Exception e) {
+      exception = e;
+      LOG.error("Could not clean bucket ", e);
+      return false;
+    }
+  }
+
+  private class BucketCleaner implements Runnable {
+    @Override
+    public void run() {
+      int i;
+      while ((i = cleanedBucketCounter.getAndIncrement()) < totalBucketCount) {
+        try {
+          OzoneBucket bucket = vol.getBucket(bucketIdList.get(i));
+          switch (bucket.getBucketLayout()) {
+          case FILE_SYSTEM_OPTIMIZED:
+          case LEGACY:
+            if (!cleanFSBucket(bucket)) {
+              throw new RuntimeException("Failed to clean bucket");
+            }
+            break;
+          case OBJECT_STORE:
+            if (!cleanOBSBucket(bucket)) {
+              throw new RuntimeException("Failed to clean bucket");
+            }
+          default:
+            throw new RuntimeException("Invalid bucket layout");
+          }
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    }
+  }
+
+  private void doCleanBuckets() throws InterruptedException {
+    int threadPoolSize = 10;

Review Comment:
   Let's keep this configurable, with the default being 10.



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java:
##########
@@ -18,27 +18,220 @@
 
 package org.apache.hadoop.ozone.shell.volume;
 
+import com.google.common.base.Strings;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
+import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
 
 /**
  * Executes deleteVolume call for the shell.
  */
 @Command(name = "delete",
-    description = "deletes a volume if it is empty")
+    description = "deletes a volume")
 public class DeleteVolumeHandler extends VolumeHandler {
+  @CommandLine.Option(
+      names = {"-skipTrash"},
+      description = "Delete volume without trash"
+  )
+  private boolean bSkipTrash = false;
+  @CommandLine.Option(
+      names = {"-r"},
+      description = "Delete volume recursively"
+  )
+  private boolean bRecursive = false;
+  @CommandLine.Option(
+      names = {"-id", "--om-service-id"},
+      description = "Ozone Manager Service ID"
+  )
+  private String omServiceId;
+  private ExecutorService executor;
+  private List<String> bucketIdList = new ArrayList<>();
+  private AtomicInteger cleanedBucketCounter =
+      new AtomicInteger();
+  private int totalBucketCount;
+  private OzoneVolume vol;
+  private AtomicInteger numberOfBucketsCleaned = new AtomicInteger(0);
+  private volatile Throwable exception;
+  private static final int MAX_KEY_DELETE_BATCH_SIZE = 1000;
 
   @Override
   protected void execute(OzoneClient client, OzoneAddress address)
       throws IOException {
 
     String volumeName = address.getVolumeName();
-
+    try {
+      if (bRecursive) {
+        if (!bSkipTrash) {
+          out().printf("Use -skipTrash for recursive volume delete%n");
+          return;
+        }
+        if (OmUtils.isServiceIdsDefined(getConf()) &&
+            Strings.isNullOrEmpty(omServiceId)) {
+          out().printf("OmServiceID not provided, provide using " +
+              "-id <OM_SERVICE_ID>%n");
+          return;
+        }
+        vol = client.getObjectStore().getVolume(volumeName);
+        deleteVolumeRecursive();
+      }
+    } catch (InterruptedException e) {
+      out().printf("Exception while deleting volume recursively%n");
+      return;
+    }
     client.getObjectStore().deleteVolume(volumeName);
     out().printf("Volume %s is deleted%n", volumeName);
   }
+
+  private void deleteVolumeRecursive()
+      throws InterruptedException {
+    // Get all the buckets for given volume
+    Iterator<? extends OzoneBucket> bucketIterator =
+        vol.listBuckets(null);
+
+    while (bucketIterator.hasNext()) {
+      OzoneBucket bucket = bucketIterator.next();
+      bucketIdList.add(bucket.getName());
+      totalBucketCount++;
+    }
+    doCleanBuckets();
+  }
+
+  /**
+   * Clean OBS bucket recursively.
+   *
+   * @param  bucket OzoneBucket
+   * @return boolean
+   */
+  private boolean cleanOBSBucket(OzoneBucket bucket) {
+    ArrayList<String> keys = new ArrayList<>();
+    try {
+      if (!bucket.isLink()) {
+        Iterator<? extends OzoneKey> iterator = bucket.listKeys(null);
+        while (iterator.hasNext()) {
+          keys.add(iterator.next().getName());
+          if (MAX_KEY_DELETE_BATCH_SIZE == keys.size()) {
+            bucket.deleteKeys(keys);
+            keys.clear();
+          }
+        }
+        // delete if any remaining keys left
+        if (keys.size() > 0) {
+          bucket.deleteKeys(keys);
+        }
+      }
+      vol.deleteBucket(bucket.getName());
+      numberOfBucketsCleaned.getAndIncrement();
+      return true;
+    } catch (Exception e) {
+      LOG.error("Could not clean bucket ", e);
+      return false;
+    }
+  }
+
+  /**
+   * Clean Legacy/FSO bucket recursively.
+   *
+   * @param  bucket OzoneBucket
+   * @return boolean
+   */
+  private boolean cleanFSBucket(OzoneBucket bucket) {
+    try {
+      String hostPrefix = OZONE_OFS_URI_SCHEME + "://";
+      if (!Strings.isNullOrEmpty(omServiceId)) {
+        hostPrefix += omServiceId + PATH_SEPARATOR_STR;
+      }
+      String ofsPrefix = hostPrefix + vol.getName() + PATH_SEPARATOR_STR +
+          bucket.getName();
+      final Path path = new Path(ofsPrefix);
+      OzoneConfiguration clientConf = new OzoneConfiguration(getConf());
+      clientConf.set(FS_DEFAULT_NAME_KEY, hostPrefix);
+      clientConf.setInt(FS_TRASH_INTERVAL_KEY, 0);

Review Comment:
   Setting the trash interval is not required, as we are directly calling 
fs.delete() and not moveToTrash()/rename().



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java:
##########
@@ -18,27 +18,220 @@
 
 package org.apache.hadoop.ozone.shell.volume;
 
+import com.google.common.base.Strings;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
+import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
 
 /**
  * Executes deleteVolume call for the shell.
  */
 @Command(name = "delete",
-    description = "deletes a volume if it is empty")
+    description = "deletes a volume")
 public class DeleteVolumeHandler extends VolumeHandler {
+  @CommandLine.Option(
+      names = {"-skipTrash"},
+      description = "Delete volume without trash"
+  )
+  private boolean bSkipTrash = false;
+  @CommandLine.Option(
+      names = {"-r"},
+      description = "Delete volume recursively"
+  )
+  private boolean bRecursive = false;
+  @CommandLine.Option(
+      names = {"-id", "--om-service-id"},
+      description = "Ozone Manager Service ID"
+  )
+  private String omServiceId;
+  private ExecutorService executor;
+  private List<String> bucketIdList = new ArrayList<>();
+  private AtomicInteger cleanedBucketCounter =
+      new AtomicInteger();
+  private int totalBucketCount;
+  private OzoneVolume vol;
+  private AtomicInteger numberOfBucketsCleaned = new AtomicInteger(0);
+  private volatile Throwable exception;
+  private static final int MAX_KEY_DELETE_BATCH_SIZE = 1000;
 
   @Override
   protected void execute(OzoneClient client, OzoneAddress address)
       throws IOException {
 
     String volumeName = address.getVolumeName();
-
+    try {
+      if (bRecursive) {
+        if (!bSkipTrash) {
+          out().printf("Use -skipTrash for recursive volume delete%n");
+          return;
+        }
+        if (OmUtils.isServiceIdsDefined(getConf()) &&
+            Strings.isNullOrEmpty(omServiceId)) {
+          out().printf("OmServiceID not provided, provide using " +
+              "-id <OM_SERVICE_ID>%n");
+          return;
+        }
+        vol = client.getObjectStore().getVolume(volumeName);
+        deleteVolumeRecursive();
+      }
+    } catch (InterruptedException e) {
+      out().printf("Exception while deleting volume recursively%n");
+      return;
+    }
     client.getObjectStore().deleteVolume(volumeName);
     out().printf("Volume %s is deleted%n", volumeName);
   }
+
+  private void deleteVolumeRecursive()
+      throws InterruptedException {
+    // Get all the buckets for given volume
+    Iterator<? extends OzoneBucket> bucketIterator =
+        vol.listBuckets(null);
+
+    while (bucketIterator.hasNext()) {
+      OzoneBucket bucket = bucketIterator.next();
+      bucketIdList.add(bucket.getName());
+      totalBucketCount++;
+    }
+    doCleanBuckets();
+  }
+
+  /**
+   * Clean OBS bucket recursively.
+   *
+   * @param  bucket OzoneBucket
+   * @return boolean
+   */
+  private boolean cleanOBSBucket(OzoneBucket bucket) {
+    ArrayList<String> keys = new ArrayList<>();
+    try {
+      if (!bucket.isLink()) {
+        Iterator<? extends OzoneKey> iterator = bucket.listKeys(null);
+        while (iterator.hasNext()) {
+          keys.add(iterator.next().getName());
+          if (MAX_KEY_DELETE_BATCH_SIZE == keys.size()) {
+            bucket.deleteKeys(keys);
+            keys.clear();
+          }
+        }
+        // delete if any remaining keys left
+        if (keys.size() > 0) {
+          bucket.deleteKeys(keys);
+        }
+      }
+      vol.deleteBucket(bucket.getName());
+      numberOfBucketsCleaned.getAndIncrement();
+      return true;
+    } catch (Exception e) {
+      LOG.error("Could not clean bucket ", e);
+      return false;
+    }
+  }
+
+  /**
+   * Clean Legacy/FSO bucket recursively.
+   *
+   * @param  bucket OzoneBucket
+   * @return boolean
+   */
+  private boolean cleanFSBucket(OzoneBucket bucket) {
+    try {
+      String hostPrefix = OZONE_OFS_URI_SCHEME + "://";
+      if (!Strings.isNullOrEmpty(omServiceId)) {
+        hostPrefix += omServiceId + PATH_SEPARATOR_STR;
+      }
+      String ofsPrefix = hostPrefix + vol.getName() + PATH_SEPARATOR_STR +
+          bucket.getName();
+      final Path path = new Path(ofsPrefix);
+      OzoneConfiguration clientConf = new OzoneConfiguration(getConf());
+      clientConf.set(FS_DEFAULT_NAME_KEY, hostPrefix);
+      clientConf.setInt(FS_TRASH_INTERVAL_KEY, 0);
+      FileSystem fs = FileSystem.get(clientConf);
+      if (!fs.delete(path, true)) {
+        throw new IOException("Failed to delete bucket");
+      }
+      numberOfBucketsCleaned.getAndIncrement();
+      return true;
+    } catch (Exception e) {
+      exception = e;
+      LOG.error("Could not clean bucket ", e);
+      return false;
+    }
+  }
+
+  private class BucketCleaner implements Runnable {
+    @Override
+    public void run() {
+      int i;
+      while ((i = cleanedBucketCounter.getAndIncrement()) < totalBucketCount) {
+        try {
+          OzoneBucket bucket = vol.getBucket(bucketIdList.get(i));
+          switch (bucket.getBucketLayout()) {
+          case FILE_SYSTEM_OPTIMIZED:
+          case LEGACY:
+            if (!cleanFSBucket(bucket)) {
+              throw new RuntimeException("Failed to clean bucket");
+            }
+            break;
+          case OBJECT_STORE:
+            if (!cleanOBSBucket(bucket)) {
+              throw new RuntimeException("Failed to clean bucket");
+            }
+          default:
+            throw new RuntimeException("Invalid bucket layout");
+          }
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    }
+  }
+
+  private void doCleanBuckets() throws InterruptedException {
+    int threadPoolSize = 10;
+    executor = Executors.newFixedThreadPool(threadPoolSize);
+    for (int i = 0; i < threadPoolSize; i++) {
+      executor.execute(new BucketCleaner());
+    }
+
+    try {
+      // wait until all Buckets are cleaned or exception occurred.
+      while (numberOfBucketsCleaned.get() != totalBucketCount

Review Comment:
   What's stopping the user from creating more buckets during this operation? 
IIUC, this op doesn't take a global volume lock throughout the recursive 
volume delete. Say the initial total bucket count is 10; since there is a 
window where the volume/buckets are not locked, more buckets can be created 
in between, so after some time the bucket count is 15. This loop will exit 
when deleted buckets = 10, but 5 more were added, and the volume delete will 
fail in such a case.



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java:
##########
@@ -18,27 +18,220 @@
 
 package org.apache.hadoop.ozone.shell.volume;
 
+import com.google.common.base.Strings;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
+import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
 
 /**
  * Executes deleteVolume call for the shell.
  */
 @Command(name = "delete",
-    description = "deletes a volume if it is empty")
+    description = "deletes a volume")
 public class DeleteVolumeHandler extends VolumeHandler {
+  @CommandLine.Option(
+      names = {"-skipTrash"},
+      description = "Delete volume without trash"
+  )
+  private boolean bSkipTrash = false;

Review Comment:
   Just a suggestion: given that the operation is very destructive, can we 
prompt the user to type something like yes/no — just like some Linux commands 
do — and make the user aware that this could take some time? This would 
prevent accidental deletion.



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java:
##########
@@ -18,27 +18,220 @@
 
 package org.apache.hadoop.ozone.shell.volume;
 
+import com.google.common.base.Strings;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
+import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
 
 /**
  * Executes deleteVolume call for the shell.
  */
 @Command(name = "delete",
-    description = "deletes a volume if it is empty")
+    description = "deletes a volume")
 public class DeleteVolumeHandler extends VolumeHandler {
+  @CommandLine.Option(
+      names = {"-skipTrash"},
+      description = "Delete volume without trash"
+  )
+  private boolean bSkipTrash = false;
+  @CommandLine.Option(
+      names = {"-r"},
+      description = "Delete volume recursively"
+  )
+  private boolean bRecursive = false;
+  @CommandLine.Option(
+      names = {"-id", "--om-service-id"},
+      description = "Ozone Manager Service ID"
+  )
+  private String omServiceId;
+  private ExecutorService executor;
+  private List<String> bucketIdList = new ArrayList<>();
+  private AtomicInteger cleanedBucketCounter =
+      new AtomicInteger();
+  private int totalBucketCount;
+  private OzoneVolume vol;
+  private AtomicInteger numberOfBucketsCleaned = new AtomicInteger(0);
+  private volatile Throwable exception;
+  private static final int MAX_KEY_DELETE_BATCH_SIZE = 1000;
 
   @Override
   protected void execute(OzoneClient client, OzoneAddress address)
       throws IOException {
 
     String volumeName = address.getVolumeName();
-
+    try {
+      if (bRecursive) {
+        if (!bSkipTrash) {
+          out().printf("Use -skipTrash for recursive volume delete%n");
+          return;
+        }
+        if (OmUtils.isServiceIdsDefined(getConf()) &&
+            Strings.isNullOrEmpty(omServiceId)) {

Review Comment:
   Should it be OR here?
   ```
   if (OmUtils.isServiceIdsDefined(getConf()) ||
               Strings.isNullOrEmpty(omServiceId))
   ```



##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java:
##########
@@ -18,27 +18,220 @@
 
 package org.apache.hadoop.ozone.shell.volume;
 
+import com.google.common.base.Strings;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneKey;
+import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
+import picocli.CommandLine;
 import picocli.CommandLine.Command;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
 
 /**
  * Executes deleteVolume call for the shell.
  */
 @Command(name = "delete",
-    description = "deletes a volume if it is empty")
+    description = "deletes a volume")
 public class DeleteVolumeHandler extends VolumeHandler {
+  @CommandLine.Option(
+      names = {"-skipTrash"},
+      description = "Delete volume without trash"
+  )
+  private boolean bSkipTrash = false;
+  @CommandLine.Option(
+      names = {"-r"},
+      description = "Delete volume recursively"
+  )
+  private boolean bRecursive = false;
+  @CommandLine.Option(
+      names = {"-id", "--om-service-id"},
+      description = "Ozone Manager Service ID"
+  )
+  private String omServiceId;
+  private ExecutorService executor;
+  private List<String> bucketIdList = new ArrayList<>();
+  private AtomicInteger cleanedBucketCounter =
+      new AtomicInteger();
+  private int totalBucketCount;
+  private OzoneVolume vol;
+  private AtomicInteger numberOfBucketsCleaned = new AtomicInteger(0);
+  private volatile Throwable exception;
+  private static final int MAX_KEY_DELETE_BATCH_SIZE = 1000;
 
   @Override
   protected void execute(OzoneClient client, OzoneAddress address)
       throws IOException {
 
     String volumeName = address.getVolumeName();
-
+    try {
+      if (bRecursive) {
+        if (!bSkipTrash) {
+          out().printf("Use -skipTrash for recursive volume delete%n");
+          return;
+        }
+        if (OmUtils.isServiceIdsDefined(getConf()) &&
+            Strings.isNullOrEmpty(omServiceId)) {
+          out().printf("OmServiceID not provided, provide using " +
+              "-id <OM_SERVICE_ID>%n");
+          return;
+        }
+        vol = client.getObjectStore().getVolume(volumeName);
+        deleteVolumeRecursive();
+      }
+    } catch (InterruptedException e) {
+      out().printf("Exception while deleting volume recursively%n");
+      return;
+    }
     client.getObjectStore().deleteVolume(volumeName);
     out().printf("Volume %s is deleted%n", volumeName);
   }
+
+  private void deleteVolumeRecursive()
+      throws InterruptedException {
+    // Get all the buckets for given volume
+    Iterator<? extends OzoneBucket> bucketIterator =
+        vol.listBuckets(null);
+
+    while (bucketIterator.hasNext()) {
+      OzoneBucket bucket = bucketIterator.next();
+      bucketIdList.add(bucket.getName());
+      totalBucketCount++;
+    }
+    doCleanBuckets();
+  }
+
+  /**
+   * Clean OBS bucket recursively.
+   *
+   * @param  bucket OzoneBucket
+   * @return boolean
+   */
+  private boolean cleanOBSBucket(OzoneBucket bucket) {
+    ArrayList<String> keys = new ArrayList<>();
+    try {
+      if (!bucket.isLink()) {
+        Iterator<? extends OzoneKey> iterator = bucket.listKeys(null);
+        while (iterator.hasNext()) {

Review Comment:
   Same issue here: if new keys are written by another client concurrently, 
the iterator reference is stale and doesn't include the new keys, so the 
bucket delete fails. The ideal way to solve this is to take a bucket lock on 
the server, then iterate through all keys and delete them. If we are sure the 
bucket is to be deleted, taking a bucket lock is fine — it doesn't take a toll 
on other ops, since we know no further ops will be done on this bucket.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to