swamirishi commented on code in PR #5579:
URL: https://github.com/apache/ozone/pull/5579#discussion_r1425673008


##########
hadoop-hdds/common/src/main/resources/ozone-default.xml:
##########
@@ -3514,6 +3514,25 @@
     </description>
   </property>
 
+  <property>
+    <name>ozone.snapshot.directory.service.timeout</name>
+    <value>300s</value>
+    <tag>OZONE, PERFORMANCE, OM</tag>
+    <description>
+      Timeout value for SnapshotDirectoryService.

Review Comment:
   Would SnapshotDirectoryCleaningService be better?



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,520 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.IOmMetadataReader;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix;
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
+import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
+
+/**
+ * Snapshot BG Service for deleted directory deep clean and exclusive size
+ * calculation for deleted directories.
+ */
+public class SnapshotDirectoryCleaningService
+    extends AbstractKeyDeletingService {
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1;
+
+  private final AtomicBoolean suspended;
+  private final Map<String, Long> exclusiveSizeMap;
+  private final Map<String, Long> exclusiveReplicatedSizeMap;
+
+  public SnapshotDirectoryCleaningService(long interval, TimeUnit unit,
+                                          long serviceTimeout,
+                                          OzoneManager ozoneManager,
+                                          ScmBlockLocationProtocol scmClient) {
+    super(SnapshotDirectoryCleaningService.class.getSimpleName(),
+        interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout,
+        ozoneManager, scmClient);
+    this.suspended = new AtomicBoolean(false);
+    this.exclusiveSizeMap = new HashMap<>();
+    this.exclusiveReplicatedSizeMap = new HashMap<>();
+  }
+
+  private boolean shouldRun() {
+    if (getOzoneManager() == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return getOzoneManager().isLeaderReady() && !suspended.get();
+  }
+
+  /**
+   * Suspend the service.
+   */
+  @VisibleForTesting
+  public void suspend() {
+    suspended.set(true);
+  }
+
+  /**
+   * Resume the service if suspended.
+   */
+  @VisibleForTesting
+  public void resume() {
+    suspended.set(false);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask());
+    return queue;
+  }
+
+  private class SnapshotDirTask implements BackgroundTask {
+
+    @Override
+    public BackgroundTaskResult call() {
+      if (!shouldRun()) {
+        return BackgroundTaskResult.EmptyTaskResult.newResult();
+      }
+      LOG.debug("Running SnapshotDirectoryCleaningService");
+
+      getRunCount().incrementAndGet();
+      OmSnapshotManager omSnapshotManager =
+          getOzoneManager().getOmSnapshotManager();
+      Table<String, SnapshotInfo> snapshotInfoTable =
+          getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+      OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
+          getOzoneManager().getMetadataManager();
+      SnapshotChainManager snapChainManager = metadataManager
+          .getSnapshotChainManager();
+
+      try (TableIterator<String, ? extends Table.KeyValue
+          <String, SnapshotInfo>> iterator = snapshotInfoTable.iterator()) {
+
+        while (iterator.hasNext()) {
+          SnapshotInfo currSnapInfo = iterator.next().getValue();
+
+          // Expand deleted dirs only on active snapshot. Deleted Snapshots
+          // will be cleaned up by SnapshotDeletingService.
+          if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE ||
+              currSnapInfo.getDeepCleanedDeletedDir()) {
+            continue;
+          }
+
+          long volumeId = metadataManager
+              .getVolumeId(currSnapInfo.getVolumeName());
+          // Get bucketInfo for the snapshot bucket to get bucket layout.
+          String dbBucketKey = metadataManager
+              .getBucketKey(currSnapInfo.getVolumeName(),
+                  currSnapInfo.getBucketName());
+          OmBucketInfo bucketInfo = metadataManager
+              .getBucketTable().get(dbBucketKey);
+
+          if (bucketInfo == null) {
+            throw new IllegalStateException("Bucket " + "/" +
+                currSnapInfo.getVolumeName() + "/" + currSnapInfo
+                .getBucketName() + " is not found. BucketInfo should not be " +
+                "null for snapshotted bucket. The OM is in unexpected state.");
+          }
+
+          SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(
+              currSnapInfo, snapChainManager, omSnapshotManager);
+          SnapshotInfo previousToPrevSnapshot = null;
+
+          Table<String, OmKeyInfo> previousKeyTable = null;
+          Table<String, String> prevRenamedTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevOmSnapshot = null;
+
+          if (previousSnapshot != null) {
+            rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(

Review Comment:
   Are we closing this snapshot db?
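
   A sketch of what I have in mind, assuming ReferenceCounted is AutoCloseable (the try-with-resources on rcCurrOmSnapshot later in this method suggests it is), so the reference is always released even on an exception:

      ReferenceCounted<IOmMetadataReader, SnapshotCache> rcPrevOmSnapshot = null;
      try {
        if (previousSnapshot != null) {
          rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
              previousSnapshot.getVolumeName(),
              previousSnapshot.getBucketName(),
              getSnapshotPrefix(previousSnapshot.getName()), false);
          // ... read previousKeyTable / prevRenamedTable from it ...
        }
      } finally {
        if (rcPrevOmSnapshot != null) {
          rcPrevOmSnapshot.close();
        }
      }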



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,520 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.IOmMetadataReader;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix;
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
+import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
+
+/**
+ * Snapshot BG Service for deleted directory deep clean and exclusive size
+ * calculation for deleted directories.
+ */
+public class SnapshotDirectoryCleaningService
+    extends AbstractKeyDeletingService {
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1;
+
+  private final AtomicBoolean suspended;
+  private final Map<String, Long> exclusiveSizeMap;
+  private final Map<String, Long> exclusiveReplicatedSizeMap;
+
+  public SnapshotDirectoryCleaningService(long interval, TimeUnit unit,
+                                          long serviceTimeout,
+                                          OzoneManager ozoneManager,
+                                          ScmBlockLocationProtocol scmClient) {
+    super(SnapshotDirectoryCleaningService.class.getSimpleName(),
+        interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout,
+        ozoneManager, scmClient);
+    this.suspended = new AtomicBoolean(false);
+    this.exclusiveSizeMap = new HashMap<>();
+    this.exclusiveReplicatedSizeMap = new HashMap<>();
+  }
+
+  private boolean shouldRun() {
+    if (getOzoneManager() == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return getOzoneManager().isLeaderReady() && !suspended.get();
+  }
+
+  /**
+   * Suspend the service.
+   */
+  @VisibleForTesting
+  public void suspend() {
+    suspended.set(true);
+  }
+
+  /**
+   * Resume the service if suspended.
+   */
+  @VisibleForTesting
+  public void resume() {
+    suspended.set(false);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask());
+    return queue;
+  }
+
+  private class SnapshotDirTask implements BackgroundTask {
+
+    @Override
+    public BackgroundTaskResult call() {
+      if (!shouldRun()) {
+        return BackgroundTaskResult.EmptyTaskResult.newResult();
+      }
+      LOG.debug("Running SnapshotDirectoryCleaningService");
+
+      getRunCount().incrementAndGet();
+      OmSnapshotManager omSnapshotManager =
+          getOzoneManager().getOmSnapshotManager();
+      Table<String, SnapshotInfo> snapshotInfoTable =
+          getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+      OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
+          getOzoneManager().getMetadataManager();
+      SnapshotChainManager snapChainManager = metadataManager
+          .getSnapshotChainManager();
+
+      try (TableIterator<String, ? extends Table.KeyValue
+          <String, SnapshotInfo>> iterator = snapshotInfoTable.iterator()) {
+
+        while (iterator.hasNext()) {
+          SnapshotInfo currSnapInfo = iterator.next().getValue();
+
+          // Expand deleted dirs only on active snapshot. Deleted Snapshots
+          // will be cleaned up by SnapshotDeletingService.
+          if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE ||
+              currSnapInfo.getDeepCleanedDeletedDir()) {
+            continue;
+          }
+
+          long volumeId = metadataManager
+              .getVolumeId(currSnapInfo.getVolumeName());
+          // Get bucketInfo for the snapshot bucket to get bucket layout.
+          String dbBucketKey = metadataManager
+              .getBucketKey(currSnapInfo.getVolumeName(),
+                  currSnapInfo.getBucketName());
+          OmBucketInfo bucketInfo = metadataManager
+              .getBucketTable().get(dbBucketKey);
+
+          if (bucketInfo == null) {
+            throw new IllegalStateException("Bucket " + "/" +
+                currSnapInfo.getVolumeName() + "/" + currSnapInfo
+                .getBucketName() + " is not found. BucketInfo should not be " +
+                "null for snapshotted bucket. The OM is in unexpected state.");
+          }
+
+          SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(
+              currSnapInfo, snapChainManager, omSnapshotManager);
+          SnapshotInfo previousToPrevSnapshot = null;
+
+          Table<String, OmKeyInfo> previousKeyTable = null;
+          Table<String, String> prevRenamedTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevOmSnapshot = null;
+
+          if (previousSnapshot != null) {
+            rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousSnapshot.getVolumeName(),
+                previousSnapshot.getBucketName(),
+                getSnapshotPrefix(previousSnapshot.getName()), false);
+            OmSnapshot omPreviousSnapshot = (OmSnapshot)
+                rcPrevOmSnapshot.get();
+
+            previousKeyTable = omPreviousSnapshot.getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+            prevRenamedTable = omPreviousSnapshot
+                .getMetadataManager().getSnapshotRenamedTable();
+            previousToPrevSnapshot = getPreviousActiveSnapshot(
+                previousSnapshot, snapChainManager, omSnapshotManager);
+          }
+
+          Table<String, OmKeyInfo> previousToPrevKeyTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevToPrevOmSnapshot = null;
+          if (previousToPrevSnapshot != null) {
+            rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousToPrevSnapshot.getVolumeName(),
+                previousToPrevSnapshot.getBucketName(),
+                getSnapshotPrefix(previousToPrevSnapshot.getName()), false);
+            OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot)
+                rcPrevToPrevOmSnapshot.get();
+
+            previousToPrevKeyTable = omPreviousToPrevSnapshot
+                .getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+          }
+
+          String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager,
+              currSnapInfo.getVolumeName(), currSnapInfo.getBucketName());
+          try (ReferenceCounted<IOmMetadataReader, SnapshotCache>
+                   rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot(
+              currSnapInfo.getVolumeName(),
+              currSnapInfo.getBucketName(),
+              getSnapshotPrefix(currSnapInfo.getName()),
+              false)) {
+
+            OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get();
+            Table<String, OmKeyInfo> snapDeletedDirTable =
+                currOmSnapshot.getMetadataManager().getDeletedDirTable();
+
+            try (TableIterator<String, ? extends Table.KeyValue<String,
+                OmKeyInfo>> deletedDirIterator = snapDeletedDirTable
+                .iterator(dbBucketKeyForDir)) {
+
+              while (deletedDirIterator.hasNext()) {
+                Table.KeyValue<String, OmKeyInfo> deletedDirInfo =
+                    deletedDirIterator.next();
+
+                // For each deleted directory we do an in-memory DFS and
+                // do a deep clean and exclusive size calculation.
+                iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo,
+                    previousSnapshot, previousToPrevSnapshot,
+                    currOmSnapshot, previousKeyTable, prevRenamedTable,
+                    previousToPrevKeyTable, dbBucketKeyForDir);
+              }
+              updateDeepCleanSnapshotDir(currSnapInfo.getTableKey());
+              if (previousSnapshot != null) {
+                updateExclusiveSize(previousSnapshot.getTableKey());
+              }
+            }
+          }
+        }
+      } catch (IOException ex) {
+        LOG.error("Error while running directory deep clean on snapshots." +
+            " Will retry at next run.", ex);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void iterateDirectoryTree(
+      Table.KeyValue<String, OmKeyInfo> deletedDirInfo, long volumeId,
+      OmBucketInfo bucketInfo,
+      SnapshotInfo previousSnapshot,
+      SnapshotInfo previousToPrevSnapshot,
+      OmSnapshot currOmSnapshot,
+      Table<String, OmKeyInfo> previousKeyTable,
+      Table<String, String> prevRenamedTable,
+      Table<String, OmKeyInfo> previousToPrevKeyTable,
+      String dbBucketKeyForDir) throws IOException {
+
+    Table<String, OmDirectoryInfo> snapDirTable =
+        currOmSnapshot.getMetadataManager().getDirectoryTable();
+    Table<String, String> snapRenamedTable =
+        currOmSnapshot.getMetadataManager().getSnapshotRenamedTable();
+    Stack<StackNode> stackNodes =
+        new Stack<>();
+    OmDirectoryInfo omDeletedDirectoryInfo =
+        getDirectoryInfo(deletedDirInfo.getValue());
+    String dirPathDbKey = currOmSnapshot.getMetadataManager()
+        .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+            omDeletedDirectoryInfo);
+    // Stack Init
+    StackNode topLevelDir = new StackNode();
+    topLevelDir.setDirKey(dirPathDbKey);
+    topLevelDir.setDirValue(omDeletedDirectoryInfo);
+    stackNodes.add(topLevelDir);
+
+    try (
+        TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            directoryIterator = snapDirTable.iterator(dbBucketKeyForDir)) {
+
+      while (!stackNodes.isEmpty()) {
+        StackNode stackTop = stackNodes.pop();
+        String seekDirInDB;
+        // First process all the files in the current directory
+        // and then do a DFS for directory.
+        if (StringUtils.isEmpty(stackTop.getSubDirSeek())) {
+          processFilesUnderDir(previousSnapshot,
+              previousToPrevSnapshot,
+              volumeId,
+              bucketInfo,
+              stackTop.getDirValue(),
+              currOmSnapshot.getMetadataManager(),
+              snapRenamedTable,
+              previousKeyTable,
+              prevRenamedTable,
+              previousToPrevKeyTable);
+          seekDirInDB = currOmSnapshot.getMetadataManager()
+              .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+                  stackTop.getDirValue().getObjectID(), "");
+          directoryIterator.seek(seekDirInDB);
+        } else {
+          // When a leaf node is processed, we need to come back in
+          // the call stack and process the next directories.
+          seekDirInDB = stackTop.getSubDirSeek();
+          directoryIterator.seek(seekDirInDB);
+          // We need to skip to the next sub-directory because we already
+          // processed the current sub-directory in the previous run.
+          if (directoryIterator.hasNext()) {
+            directoryIterator.next();
+          } else {
+            continue;
+          }
+        }
+
+        if (directoryIterator.hasNext()) {

Review Comment:
   Can we move this loop into the else condition? In one iteration we should either delete the files in the directory or process the sub-directory, not both. Then we wouldn't need the extra iterator.next() in the else branch, which makes the logic complicated.
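
   A self-contained toy of the control flow I mean (hypothetical Node type and files/subDirs maps as stand-ins, not the PR's StackNode): each iteration either processes the files of the directory on top of the stack or descends into exactly one sub-directory, never both, so no extra next() is needed:

      import java.util.ArrayDeque;
      import java.util.Collections;
      import java.util.Deque;
      import java.util.Iterator;
      import java.util.List;
      import java.util.Map;

      final class DirDfsSketch {
        private static final class Node {
          private final String dir;
          private Iterator<String> subDirs; // null until this dir's files are processed
          Node(String dir) { this.dir = dir; }
        }

        static void deepClean(Map<String, List<String>> files,
                              Map<String, List<String>> subDirs, String root) {
          Deque<Node> stack = new ArrayDeque<>();
          stack.push(new Node(root));
          while (!stack.isEmpty()) {
            Node top = stack.peek();
            if (top.subDirs == null) {
              // First visit: handle only the files directly under this directory.
              for (String f : files.getOrDefault(top.dir,
                  Collections.<String>emptyList())) {
                System.out.println("delete " + f);
              }
              top.subDirs = subDirs.getOrDefault(top.dir,
                  Collections.<String>emptyList()).iterator();
            } else if (top.subDirs.hasNext()) {
              // Later visits: descend into exactly one unvisited sub-directory.
              stack.push(new Node(top.subDirs.next()));
            } else {
              stack.pop(); // directory fully processed
            }
          }
        }
      }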



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,520 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.IOmMetadataReader;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix;
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
+import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
+
+/**
+ * Snapshot BG Service for deleted directory deep clean and exclusive size
+ * calculation for deleted directories.
+ */
+public class SnapshotDirectoryCleaningService
+    extends AbstractKeyDeletingService {
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1;
+
+  private final AtomicBoolean suspended;
+  private final Map<String, Long> exclusiveSizeMap;
+  private final Map<String, Long> exclusiveReplicatedSizeMap;
+
+  public SnapshotDirectoryCleaningService(long interval, TimeUnit unit,
+                                          long serviceTimeout,
+                                          OzoneManager ozoneManager,
+                                          ScmBlockLocationProtocol scmClient) {
+    super(SnapshotDirectoryCleaningService.class.getSimpleName(),
+        interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout,
+        ozoneManager, scmClient);
+    this.suspended = new AtomicBoolean(false);
+    this.exclusiveSizeMap = new HashMap<>();
+    this.exclusiveReplicatedSizeMap = new HashMap<>();
+  }
+
+  private boolean shouldRun() {
+    if (getOzoneManager() == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return getOzoneManager().isLeaderReady() && !suspended.get();
+  }
+
+  /**
+   * Suspend the service.
+   */
+  @VisibleForTesting
+  public void suspend() {
+    suspended.set(true);
+  }
+
+  /**
+   * Resume the service if suspended.
+   */
+  @VisibleForTesting
+  public void resume() {
+    suspended.set(false);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask());
+    return queue;
+  }
+
+  private class SnapshotDirTask implements BackgroundTask {
+
+    @Override
+    public BackgroundTaskResult call() {
+      if (!shouldRun()) {
+        return BackgroundTaskResult.EmptyTaskResult.newResult();
+      }
+      LOG.debug("Running SnapshotDirectoryCleaningService");
+
+      getRunCount().incrementAndGet();
+      OmSnapshotManager omSnapshotManager =
+          getOzoneManager().getOmSnapshotManager();
+      Table<String, SnapshotInfo> snapshotInfoTable =
+          getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+      OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
+          getOzoneManager().getMetadataManager();
+      SnapshotChainManager snapChainManager = metadataManager
+          .getSnapshotChainManager();
+
+      try (TableIterator<String, ? extends Table.KeyValue
+          <String, SnapshotInfo>> iterator = snapshotInfoTable.iterator()) {
+
+        while (iterator.hasNext()) {
+          SnapshotInfo currSnapInfo = iterator.next().getValue();
+
+          // Expand deleted dirs only on active snapshot. Deleted Snapshots
+          // will be cleaned up by SnapshotDeletingService.
+          if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE ||
+              currSnapInfo.getDeepCleanedDeletedDir()) {
+            continue;
+          }
+
+          long volumeId = metadataManager
+              .getVolumeId(currSnapInfo.getVolumeName());
+          // Get bucketInfo for the snapshot bucket to get bucket layout.
+          String dbBucketKey = metadataManager
+              .getBucketKey(currSnapInfo.getVolumeName(),
+                  currSnapInfo.getBucketName());
+          OmBucketInfo bucketInfo = metadataManager
+              .getBucketTable().get(dbBucketKey);
+
+          if (bucketInfo == null) {
+            throw new IllegalStateException("Bucket " + "/" +
+                currSnapInfo.getVolumeName() + "/" + currSnapInfo
+                .getBucketName() + " is not found. BucketInfo should not be " +
+                "null for snapshotted bucket. The OM is in unexpected state.");
+          }
+
+          SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(
+              currSnapInfo, snapChainManager, omSnapshotManager);
+          SnapshotInfo previousToPrevSnapshot = null;
+
+          Table<String, OmKeyInfo> previousKeyTable = null;
+          Table<String, String> prevRenamedTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevOmSnapshot = null;
+
+          if (previousSnapshot != null) {
+            rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousSnapshot.getVolumeName(),
+                previousSnapshot.getBucketName(),
+                getSnapshotPrefix(previousSnapshot.getName()), false);
+            OmSnapshot omPreviousSnapshot = (OmSnapshot)
+                rcPrevOmSnapshot.get();
+
+            previousKeyTable = omPreviousSnapshot.getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+            prevRenamedTable = omPreviousSnapshot
+                .getMetadataManager().getSnapshotRenamedTable();
+            previousToPrevSnapshot = getPreviousActiveSnapshot(
+                previousSnapshot, snapChainManager, omSnapshotManager);
+          }
+
+          Table<String, OmKeyInfo> previousToPrevKeyTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevToPrevOmSnapshot = null;
+          if (previousToPrevSnapshot != null) {
+            rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousToPrevSnapshot.getVolumeName(),
+                previousToPrevSnapshot.getBucketName(),
+                getSnapshotPrefix(previousToPrevSnapshot.getName()), false);
+            OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot)
+                rcPrevToPrevOmSnapshot.get();
+
+            previousToPrevKeyTable = omPreviousToPrevSnapshot
+                .getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+          }
+
+          String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager,
+              currSnapInfo.getVolumeName(), currSnapInfo.getBucketName());
+          try (ReferenceCounted<IOmMetadataReader, SnapshotCache>
+                   rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot(
+              currSnapInfo.getVolumeName(),
+              currSnapInfo.getBucketName(),
+              getSnapshotPrefix(currSnapInfo.getName()),
+              false)) {
+
+            OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get();
+            Table<String, OmKeyInfo> snapDeletedDirTable =
+                currOmSnapshot.getMetadataManager().getDeletedDirTable();
+
+            try (TableIterator<String, ? extends Table.KeyValue<String,
+                OmKeyInfo>> deletedDirIterator = snapDeletedDirTable
+                .iterator(dbBucketKeyForDir)) {
+
+              while (deletedDirIterator.hasNext()) {
+                Table.KeyValue<String, OmKeyInfo> deletedDirInfo =
+                    deletedDirIterator.next();
+
+                // For each deleted directory we do an in-memory DFS and
+                // do a deep clean and exclusive size calculation.
+                iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo,
+                    previousSnapshot, previousToPrevSnapshot,
+                    currOmSnapshot, previousKeyTable, prevRenamedTable,
+                    previousToPrevKeyTable, dbBucketKeyForDir);
+              }
+              updateDeepCleanSnapshotDir(currSnapInfo.getTableKey());
+              if (previousSnapshot != null) {
+                updateExclusiveSize(previousSnapshot.getTableKey());
+              }
+            }
+          }
+        }
+      } catch (IOException ex) {
+        LOG.error("Error while running directory deep clean on snapshots." +
+            " Will retry at next run.", ex);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void iterateDirectoryTree(
+      Table.KeyValue<String, OmKeyInfo> deletedDirInfo, long volumeId,
+      OmBucketInfo bucketInfo,
+      SnapshotInfo previousSnapshot,
+      SnapshotInfo previousToPrevSnapshot,
+      OmSnapshot currOmSnapshot,
+      Table<String, OmKeyInfo> previousKeyTable,
+      Table<String, String> prevRenamedTable,
+      Table<String, OmKeyInfo> previousToPrevKeyTable,
+      String dbBucketKeyForDir) throws IOException {
+
+    Table<String, OmDirectoryInfo> snapDirTable =
+        currOmSnapshot.getMetadataManager().getDirectoryTable();
+    Table<String, String> snapRenamedTable =
+        currOmSnapshot.getMetadataManager().getSnapshotRenamedTable();
+    Stack<StackNode> stackNodes =
+        new Stack<>();

Review Comment:
   nit: this can be on the same line.
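
   i.e.

      Stack<StackNode> stackNodes = new Stack<>();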



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java:
##########
@@ -469,6 +469,98 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum,
     return remainNum;
   }
 
+  /**
+   * To calculate the exclusive size for the current snapshot, check
+   * the next snapshot's deletedTable: if a deleted key is
+   * referenced in the current snapshot and not referenced in the
+   * previous snapshot, then that key is exclusive to the current
+   * snapshot. Since we are only iterating through the
+   * deletedTable, we can check the previous and previous-to-previous
+   * snapshot to achieve the same.
+   * previousSnapshot - Snapshot for which the exclusive size is
+   *                    being calculated.
+   * currSnapshot - Snapshot whose deletedTable is used to calculate
+   *                the previousSnapshot's exclusive size.
+   * previousToPrevSnapshot - Snapshot which is used to check
+   *                 if key is exclusive to previousSnapshot.
+   */
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  public void calculateExclusiveSize(
+      SnapshotInfo previousSnapshot,
+      SnapshotInfo previousToPrevSnapshot,
+      OmKeyInfo keyInfo,
+      OmBucketInfo bucketInfo, long volumeId,
+      Table<String, String> snapRenamedTable,
+      Table<String, OmKeyInfo> previousKeyTable,
+      Table<String, String> prevRenamedTable,
+      Table<String, OmKeyInfo> previousToPrevKeyTable,
+      Map<String, Long> exclusiveSizeMap,
+      Map<String, Long> exclusiveReplicatedSizeMap) throws IOException {
+    String prevSnapKey = previousSnapshot.getTableKey();
+    long exclusiveReplicatedSize =
+        exclusiveReplicatedSizeMap.getOrDefault(
+            prevSnapKey, 0L) + keyInfo.getReplicatedSize();
+    long exclusiveSize = exclusiveSizeMap.getOrDefault(
+        prevSnapKey, 0L) + keyInfo.getDataSize();
+
+    // If there is no previous to previous snapshot, then
+    // the previous snapshot is the first snapshot.
+    if (previousToPrevSnapshot == null) {
+      exclusiveSizeMap.put(prevSnapKey, exclusiveSize);
+      exclusiveReplicatedSizeMap.put(prevSnapKey,
+          exclusiveReplicatedSize);
+    } else {
+      OmKeyInfo keyInfoPrevSnapshot = getPreviousSnapshotKeyName(
+          keyInfo, bucketInfo, volumeId,
+          snapRenamedTable, previousKeyTable);
+      OmKeyInfo keyInfoPrevToPrevSnapshot = getPreviousSnapshotKeyName(
+          keyInfoPrevSnapshot, bucketInfo, volumeId,
+          prevRenamedTable, previousToPrevKeyTable);
+      // If the previous to previous snapshot doesn't
+      // have the key, then it is exclusive size for the
+      // previous snapshot.
+      if (keyInfoPrevToPrevSnapshot == null) {

Review Comment:
   We are supposed to look at the version diff in case of object versioning. The exclusive size would be the size of the versions found in the previous snapshot but not in the previous-to-previous snapshot.
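
   A minimal sketch of the semantics I mean (hypothetical version -> size maps standing in for the per-version key info; not the PR's API):

      import java.util.Map;

      final class VersionDiffSketch {
        // Exclusive size of the previous snapshot = total size of the
        // versions present in prev but absent from prev-to-prev.
        static long exclusiveSize(Map<Long, Long> prevVersions,
                                  Map<Long, Long> prevToPrevVersions) {
          long exclusive = 0;
          for (Map.Entry<Long, Long> v : prevVersions.entrySet()) {
            if (!prevToPrevVersions.containsKey(v.getKey())) {
              exclusive += v.getValue();
            }
          }
          return exclusive;
        }
      }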



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,520 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.IOmMetadataReader;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix;
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
+import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
+
+/**
+ * Snapshot BG Service for deleted directory deep clean and exclusive size
+ * calculation for deleted directories.
+ */
+public class SnapshotDirectoryCleaningService
+    extends AbstractKeyDeletingService {
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1;
+
+  private final AtomicBoolean suspended;
+  private final Map<String, Long> exclusiveSizeMap;
+  private final Map<String, Long> exclusiveReplicatedSizeMap;
+
+  public SnapshotDirectoryCleaningService(long interval, TimeUnit unit,
+                                          long serviceTimeout,
+                                          OzoneManager ozoneManager,
+                                          ScmBlockLocationProtocol scmClient) {
+    super(SnapshotDirectoryCleaningService.class.getSimpleName(),
+        interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout,
+        ozoneManager, scmClient);
+    this.suspended = new AtomicBoolean(false);
+    this.exclusiveSizeMap = new HashMap<>();
+    this.exclusiveReplicatedSizeMap = new HashMap<>();
+  }
+
+  private boolean shouldRun() {
+    if (getOzoneManager() == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return getOzoneManager().isLeaderReady() && !suspended.get();
+  }
+
+  /**
+   * Suspend the service.
+   */
+  @VisibleForTesting
+  public void suspend() {
+    suspended.set(true);
+  }
+
+  /**
+   * Resume the service if suspended.
+   */
+  @VisibleForTesting
+  public void resume() {
+    suspended.set(false);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask());
+    return queue;
+  }
+
+  private class SnapshotDirTask implements BackgroundTask {
+
+    @Override
+    public BackgroundTaskResult call() {
+      if (!shouldRun()) {
+        return BackgroundTaskResult.EmptyTaskResult.newResult();
+      }
+      LOG.debug("Running SnapshotDirectoryCleaningService");
+
+      getRunCount().incrementAndGet();
+      OmSnapshotManager omSnapshotManager =
+          getOzoneManager().getOmSnapshotManager();
+      Table<String, SnapshotInfo> snapshotInfoTable =
+          getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+      OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
+          getOzoneManager().getMetadataManager();
+      SnapshotChainManager snapChainManager = metadataManager
+          .getSnapshotChainManager();
+
+      try (TableIterator<String, ? extends Table.KeyValue
+          <String, SnapshotInfo>> iterator = snapshotInfoTable.iterator()) {
+
+        while (iterator.hasNext()) {
+          SnapshotInfo currSnapInfo = iterator.next().getValue();
+
+          // Expand deleted dirs only on active snapshot. Deleted Snapshots
+          // will be cleaned up by SnapshotDeletingService.
+          if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE ||
+              currSnapInfo.getDeepCleanedDeletedDir()) {
+            continue;
+          }
+
+          long volumeId = metadataManager
+              .getVolumeId(currSnapInfo.getVolumeName());
+          // Get bucketInfo for the snapshot bucket to get bucket layout.
+          String dbBucketKey = metadataManager
+              .getBucketKey(currSnapInfo.getVolumeName(),
+                  currSnapInfo.getBucketName());
+          OmBucketInfo bucketInfo = metadataManager
+              .getBucketTable().get(dbBucketKey);
+
+          if (bucketInfo == null) {
+            throw new IllegalStateException("Bucket " + "/" +
+                currSnapInfo.getVolumeName() + "/" + currSnapInfo
+                .getBucketName() + " is not found. BucketInfo should not be " +
+                "null for snapshotted bucket. The OM is in unexpected state.");
+          }
+
+          SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(
+              currSnapInfo, snapChainManager, omSnapshotManager);
+          SnapshotInfo previousToPrevSnapshot = null;
+
+          Table<String, OmKeyInfo> previousKeyTable = null;
+          Table<String, String> prevRenamedTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevOmSnapshot = null;
+
+          if (previousSnapshot != null) {
+            rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousSnapshot.getVolumeName(),
+                previousSnapshot.getBucketName(),
+                getSnapshotPrefix(previousSnapshot.getName()), false);
+            OmSnapshot omPreviousSnapshot = (OmSnapshot)
+                rcPrevOmSnapshot.get();
+
+            previousKeyTable = omPreviousSnapshot.getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+            prevRenamedTable = omPreviousSnapshot
+                .getMetadataManager().getSnapshotRenamedTable();
+            previousToPrevSnapshot = getPreviousActiveSnapshot(
+                previousSnapshot, snapChainManager, omSnapshotManager);
+          }
+
+          Table<String, OmKeyInfo> previousToPrevKeyTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevToPrevOmSnapshot = null;
+          if (previousToPrevSnapshot != null) {
+            rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousToPrevSnapshot.getVolumeName(),
+                previousToPrevSnapshot.getBucketName(),
+                getSnapshotPrefix(previousToPrevSnapshot.getName()), false);
+            OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot)
+                rcPrevToPrevOmSnapshot.get();
+
+            previousToPrevKeyTable = omPreviousToPrevSnapshot
+                .getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+          }
+
+          String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager,
+              currSnapInfo.getVolumeName(), currSnapInfo.getBucketName());
+          try (ReferenceCounted<IOmMetadataReader, SnapshotCache>
+                   rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot(
+              currSnapInfo.getVolumeName(),
+              currSnapInfo.getBucketName(),
+              getSnapshotPrefix(currSnapInfo.getName()),
+              false)) {
+
+            OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get();
+            Table<String, OmKeyInfo> snapDeletedDirTable =
+                currOmSnapshot.getMetadataManager().getDeletedDirTable();
+
+            try (TableIterator<String, ? extends Table.KeyValue<String,
+                OmKeyInfo>> deletedDirIterator = snapDeletedDirTable
+                .iterator(dbBucketKeyForDir)) {
+
+              while (deletedDirIterator.hasNext()) {
+                Table.KeyValue<String, OmKeyInfo> deletedDirInfo =
+                    deletedDirIterator.next();
+
+                // For each deleted directory we do an in-memory DFS and
+                // do a deep clean and exclusive size calculation.
+                iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo,
+                    previousSnapshot, previousToPrevSnapshot,
+                    currOmSnapshot, previousKeyTable, prevRenamedTable,
+                    previousToPrevKeyTable, dbBucketKeyForDir);
+              }
+              updateDeepCleanSnapshotDir(currSnapInfo.getTableKey());
+              if (previousSnapshot != null) {
+                updateExclusiveSize(previousSnapshot.getTableKey());
+              }
+            }
+          }
+        }
+      } catch (IOException ex) {
+        LOG.error("Error while running directory deep clean on snapshots." +
+            " Will retry at next run.", ex);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void iterateDirectoryTree(
+      Table.KeyValue<String, OmKeyInfo> deletedDirInfo, long volumeId,
+      OmBucketInfo bucketInfo,
+      SnapshotInfo previousSnapshot,
+      SnapshotInfo previousToPrevSnapshot,
+      OmSnapshot currOmSnapshot,
+      Table<String, OmKeyInfo> previousKeyTable,
+      Table<String, String> prevRenamedTable,
+      Table<String, OmKeyInfo> previousToPrevKeyTable,
+      String dbBucketKeyForDir) throws IOException {
+
+    Table<String, OmDirectoryInfo> snapDirTable =
+        currOmSnapshot.getMetadataManager().getDirectoryTable();
+    Table<String, String> snapRenamedTable =
+        currOmSnapshot.getMetadataManager().getSnapshotRenamedTable();
+    Stack<StackNode> stackNodes =
+        new Stack<>();
+    OmDirectoryInfo omDeletedDirectoryInfo =
+        getDirectoryInfo(deletedDirInfo.getValue());
+    String dirPathDbKey = currOmSnapshot.getMetadataManager()
+        .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+            omDeletedDirectoryInfo);
+    // Stack Init
+    StackNode topLevelDir = new StackNode();
+    topLevelDir.setDirKey(dirPathDbKey);
+    topLevelDir.setDirValue(omDeletedDirectoryInfo);
+    stackNodes.add(topLevelDir);
+
+    try (
+        TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            directoryIterator = snapDirTable.iterator(dbBucketKeyForDir)) {
+
+      while (!stackNodes.isEmpty()) {
+        StackNode stackTop = stackNodes.pop();

Review Comment:
   Do we need to pop the node off the stack here? I guess we need to peek instead. I don't see a loop which pushes all the sub-directories of the stack top back onto the stack.
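
   Toy example of the peek-vs-pop invariant (hypothetical, just to illustrate): the node has to stay on the stack while its children are being visited, so we peek during the descent and pop only once the node is exhausted:

      import java.util.ArrayDeque;
      import java.util.Deque;

      final class PeekVsPop {
        public static void main(String[] args) {
          // Each frame: {nodeId, childrenVisitedSoFar}.
          Deque<int[]> stack = new ArrayDeque<>();
          stack.push(new int[] {0, 0});
          final int fanout = 2, maxDepth = 3;
          while (!stack.isEmpty()) {
            int[] top = stack.peek();          // do NOT pop yet
            if (top[1] < fanout && stack.size() < maxDepth) {
              int child = top[0] * fanout + ++top[1];
              stack.push(new int[] {child, 0});
            } else {
              System.out.println("done with node " + top[0]);
              stack.pop();                     // pop only when exhausted
            }
          }
        }
      }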



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,520 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.IOmMetadataReader;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix;
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
+import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
+
+/**
+ * Snapshot BG Service for deleted directory deep clean and exclusive size
+ * calculation for deleted directories.
+ */
+public class SnapshotDirectoryCleaningService
+    extends AbstractKeyDeletingService {
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1;
+
+  private final AtomicBoolean suspended;
+  private final Map<String, Long> exclusiveSizeMap;
+  private final Map<String, Long> exclusiveReplicatedSizeMap;
+
+  public SnapshotDirectoryCleaningService(long interval, TimeUnit unit,
+                                          long serviceTimeout,
+                                          OzoneManager ozoneManager,
+                                          ScmBlockLocationProtocol scmClient) {
+    super(SnapshotDirectoryCleaningService.class.getSimpleName(),
+        interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout,
+        ozoneManager, scmClient);
+    this.suspended = new AtomicBoolean(false);
+    this.exclusiveSizeMap = new HashMap<>();
+    this.exclusiveReplicatedSizeMap = new HashMap<>();
+  }
+
+  private boolean shouldRun() {
+    if (getOzoneManager() == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return getOzoneManager().isLeaderReady() && !suspended.get();
+  }
+
+  /**
+   * Suspend the service.
+   */
+  @VisibleForTesting
+  public void suspend() {
+    suspended.set(true);
+  }
+
+  /**
+   * Resume the service if suspended.
+   */
+  @VisibleForTesting
+  public void resume() {
+    suspended.set(false);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask());
+    return queue;
+  }
+
+  private class SnapshotDirTask implements BackgroundTask {
+
+    @Override
+    public BackgroundTaskResult call() {
+      if (!shouldRun()) {
+        return BackgroundTaskResult.EmptyTaskResult.newResult();
+      }
+      LOG.debug("Running SnapshotDirectoryCleaningService");
+
+      getRunCount().incrementAndGet();
+      OmSnapshotManager omSnapshotManager =
+          getOzoneManager().getOmSnapshotManager();
+      Table<String, SnapshotInfo> snapshotInfoTable =
+          getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+      OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
+          getOzoneManager().getMetadataManager();
+      SnapshotChainManager snapChainManager = metadataManager
+          .getSnapshotChainManager();
+
+      try (TableIterator<String, ? extends Table.KeyValue
+          <String, SnapshotInfo>> iterator = snapshotInfoTable.iterator()) {
+
+        while (iterator.hasNext()) {
+          SnapshotInfo currSnapInfo = iterator.next().getValue();
+
+          // Expand deleted dirs only on active snapshot. Deleted Snapshots
+          // will be cleaned up by SnapshotDeletingService.
+          if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE ||
+              currSnapInfo.getDeepCleanedDeletedDir()) {
+            continue;
+          }
+
+          long volumeId = metadataManager
+              .getVolumeId(currSnapInfo.getVolumeName());
+          // Get bucketInfo for the snapshot bucket to get bucket layout.
+          String dbBucketKey = metadataManager
+              .getBucketKey(currSnapInfo.getVolumeName(),
+                  currSnapInfo.getBucketName());
+          OmBucketInfo bucketInfo = metadataManager
+              .getBucketTable().get(dbBucketKey);
+
+          if (bucketInfo == null) {
+            throw new IllegalStateException("Bucket " + "/" +
+                currSnapInfo.getVolumeName() + "/" + currSnapInfo
+                .getBucketName() + " is not found. BucketInfo should not be " +
+                "null for snapshotted bucket. The OM is in unexpected state.");
+          }
+
+          SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(
+              currSnapInfo, snapChainManager, omSnapshotManager);
+          SnapshotInfo previousToPrevSnapshot = null;
+
+          Table<String, OmKeyInfo> previousKeyTable = null;
+          Table<String, String> prevRenamedTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevOmSnapshot = null;
+
+          if (previousSnapshot != null) {
+            rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousSnapshot.getVolumeName(),
+                previousSnapshot.getBucketName(),
+                getSnapshotPrefix(previousSnapshot.getName()), false);
+            OmSnapshot omPreviousSnapshot = (OmSnapshot)
+                rcPrevOmSnapshot.get();
+
+            previousKeyTable = omPreviousSnapshot.getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+            prevRenamedTable = omPreviousSnapshot
+                .getMetadataManager().getSnapshotRenamedTable();
+            previousToPrevSnapshot = getPreviousActiveSnapshot(
+                previousSnapshot, snapChainManager, omSnapshotManager);
+          }
+
+          Table<String, OmKeyInfo> previousToPrevKeyTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevToPrevOmSnapshot = null;
+          if (previousToPrevSnapshot != null) {
+            rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousToPrevSnapshot.getVolumeName(),
+                previousToPrevSnapshot.getBucketName(),
+                getSnapshotPrefix(previousToPrevSnapshot.getName()), false);
+            OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot)
+                rcPrevToPrevOmSnapshot.get();
+
+            previousToPrevKeyTable = omPreviousToPrevSnapshot
+                .getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+          }
+
+          String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager,
+              currSnapInfo.getVolumeName(), currSnapInfo.getBucketName());
+          try (ReferenceCounted<IOmMetadataReader, SnapshotCache>
+                   rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot(
+              currSnapInfo.getVolumeName(),
+              currSnapInfo.getBucketName(),
+              getSnapshotPrefix(currSnapInfo.getName()),
+              false)) {
+
+            OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get();
+            Table<String, OmKeyInfo> snapDeletedDirTable =
+                currOmSnapshot.getMetadataManager().getDeletedDirTable();
+
+            try (TableIterator<String, ? extends Table.KeyValue<String,
+                OmKeyInfo>> deletedDirIterator = snapDeletedDirTable
+                .iterator(dbBucketKeyForDir)) {
+
+              while (deletedDirIterator.hasNext()) {
+                Table.KeyValue<String, OmKeyInfo> deletedDirInfo =
+                    deletedDirIterator.next();
+
+                // For each deleted directory we do an in-memory DFS and
+                // do a deep clean and exclusive size calculation.
+                iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo,
+                    previousSnapshot, previousToPrevSnapshot,
+                    currOmSnapshot, previousKeyTable, prevRenamedTable,
+                    previousToPrevKeyTable, dbBucketKeyForDir);
+              }
+              updateDeepCleanSnapshotDir(currSnapInfo.getTableKey());
+              if (previousSnapshot != null) {
+                updateExclusiveSize(previousSnapshot.getTableKey());
+              }
+            }
+          }
+        }
+      } catch (IOException ex) {
+        LOG.error("Error while running directory deep clean on snapshots." +
+            " Will retry at next run.", ex);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void iterateDirectoryTree(
+      Table.KeyValue<String, OmKeyInfo> deletedDirInfo, long volumeId,
+      OmBucketInfo bucketInfo,
+      SnapshotInfo previousSnapshot,
+      SnapshotInfo previousToPrevSnapshot,
+      OmSnapshot currOmSnapshot,
+      Table<String, OmKeyInfo> previousKeyTable,
+      Table<String, String> prevRenamedTable,
+      Table<String, OmKeyInfo> previousToPrevKeyTable,
+      String dbBucketKeyForDir) throws IOException {
+
+    Table<String, OmDirectoryInfo> snapDirTable =
+        currOmSnapshot.getMetadataManager().getDirectoryTable();
+    Table<String, String> snapRenamedTable =
+        currOmSnapshot.getMetadataManager().getSnapshotRenamedTable();
+    Stack<StackNode> stackNodes =
+        new Stack<>();
+    OmDirectoryInfo omDeletedDirectoryInfo =
+        getDirectoryInfo(deletedDirInfo.getValue());
+    String dirPathDbKey = currOmSnapshot.getMetadataManager()
+        .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+            omDeletedDirectoryInfo);
+    // Stack Init
+    StackNode topLevelDir = new StackNode();
+    topLevelDir.setDirKey(dirPathDbKey);
+    topLevelDir.setDirValue(omDeletedDirectoryInfo);
+    stackNodes.add(topLevelDir);

Review Comment:
   nit: use push instead of add.
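
   For what it's worth, `push` and `add` are behaviorally identical on `java.util.Stack` (push delegates to `Vector.addElement`), so this is purely about stating the LIFO intent. A minimal sketch of the suggestion, with `ArrayDeque` shown only as a hypothetical further cleanup since the `Deque` javadoc recommends it over the legacy, synchronized `Stack`:

   ```java
   import java.util.ArrayDeque;
   import java.util.Deque;
   import java.util.Stack;

   final class PushVsAdd {
     static final class StackNode { }  // stand-in for the PR's StackNode

     static void demo() {
       Stack<StackNode> stackNodes = new Stack<>();
       // Same append as add(), but push() reads as a stack operation.
       stackNodes.push(new StackNode());

       // Hypothetical alternative: ArrayDeque is unsynchronized and is
       // the documented replacement for Stack.
       Deque<StackNode> deque = new ArrayDeque<>();
       deque.push(new StackNode());    // LIFO: adds at the head
       StackNode top = deque.pop();    // removes the most recent push
     }
   }
   ```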



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,520 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.IOmMetadataReader;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix;
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
+import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
+
+/**
+ * Snapshot BG Service for deleted directory deep clean and exclusive size
+ * calculation for deleted directories.
+ */
+public class SnapshotDirectoryCleaningService
+    extends AbstractKeyDeletingService {
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1;
+
+  private final AtomicBoolean suspended;
+  private final Map<String, Long> exclusiveSizeMap;
+  private final Map<String, Long> exclusiveReplicatedSizeMap;
+
+  public SnapshotDirectoryCleaningService(long interval, TimeUnit unit,
+                                          long serviceTimeout,
+                                          OzoneManager ozoneManager,
+                                          ScmBlockLocationProtocol scmClient) {
+    super(SnapshotDirectoryCleaningService.class.getSimpleName(),
+        interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout,
+        ozoneManager, scmClient);
+    this.suspended = new AtomicBoolean(false);
+    this.exclusiveSizeMap = new HashMap<>();
+    this.exclusiveReplicatedSizeMap = new HashMap<>();
+  }
+
+  private boolean shouldRun() {
+    if (getOzoneManager() == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return getOzoneManager().isLeaderReady() && !suspended.get();
+  }
+
+  /**
+   * Suspend the service.
+   */
+  @VisibleForTesting
+  public void suspend() {
+    suspended.set(true);
+  }
+
+  /**
+   * Resume the service if suspended.
+   */
+  @VisibleForTesting
+  public void resume() {
+    suspended.set(false);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask());
+    return queue;
+  }
+
+  private class SnapshotDirTask implements BackgroundTask {
+
+    @Override
+    public BackgroundTaskResult call() {
+      if (!shouldRun()) {
+        return BackgroundTaskResult.EmptyTaskResult.newResult();
+      }
+      LOG.debug("Running SnapshotDirectoryCleaningService");
+
+      getRunCount().incrementAndGet();
+      OmSnapshotManager omSnapshotManager =
+          getOzoneManager().getOmSnapshotManager();
+      Table<String, SnapshotInfo> snapshotInfoTable =
+          getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+      OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
+          getOzoneManager().getMetadataManager();
+      SnapshotChainManager snapChainManager = metadataManager
+          .getSnapshotChainManager();
+
+      try (TableIterator<String, ? extends Table.KeyValue
+          <String, SnapshotInfo>> iterator = snapshotInfoTable.iterator()) {
+
+        while (iterator.hasNext()) {
+          SnapshotInfo currSnapInfo = iterator.next().getValue();
+
+          // Expand deleted dirs only on active snapshot. Deleted Snapshots
+          // will be cleaned up by SnapshotDeletingService.
+          if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE ||
+              currSnapInfo.getDeepCleanedDeletedDir()) {
+            continue;
+          }
+
+          long volumeId = metadataManager
+              .getVolumeId(currSnapInfo.getVolumeName());
+          // Get bucketInfo for the snapshot bucket to get bucket layout.
+          String dbBucketKey = metadataManager
+              .getBucketKey(currSnapInfo.getVolumeName(),
+                  currSnapInfo.getBucketName());
+          OmBucketInfo bucketInfo = metadataManager
+              .getBucketTable().get(dbBucketKey);
+
+          if (bucketInfo == null) {
+            throw new IllegalStateException("Bucket " + "/" +
+                currSnapInfo.getVolumeName() + "/" + currSnapInfo
+                .getBucketName() + " is not found. BucketInfo should not be " +
+                "null for snapshotted bucket. The OM is in unexpected state.");
+          }
+
+          SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(
+              currSnapInfo, snapChainManager, omSnapshotManager);
+          SnapshotInfo previousToPrevSnapshot = null;
+
+          Table<String, OmKeyInfo> previousKeyTable = null;
+          Table<String, String> prevRenamedTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevOmSnapshot = null;
+
+          if (previousSnapshot != null) {
+            rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousSnapshot.getVolumeName(),
+                previousSnapshot.getBucketName(),
+                getSnapshotPrefix(previousSnapshot.getName()), false);
+            OmSnapshot omPreviousSnapshot = (OmSnapshot)
+                rcPrevOmSnapshot.get();
+
+            previousKeyTable = omPreviousSnapshot.getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+            prevRenamedTable = omPreviousSnapshot
+                .getMetadataManager().getSnapshotRenamedTable();
+            previousToPrevSnapshot = getPreviousActiveSnapshot(
+                previousSnapshot, snapChainManager, omSnapshotManager);
+          }
+
+          Table<String, OmKeyInfo> previousToPrevKeyTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevToPrevOmSnapshot = null;
+          if (previousToPrevSnapshot != null) {
+            rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousToPrevSnapshot.getVolumeName(),
+                previousToPrevSnapshot.getBucketName(),
+                getSnapshotPrefix(previousToPrevSnapshot.getName()), false);
+            OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot)
+                rcPrevToPrevOmSnapshot.get();
+
+            previousToPrevKeyTable = omPreviousToPrevSnapshot
+                .getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+          }
+
+          String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager,
+              currSnapInfo.getVolumeName(), currSnapInfo.getBucketName());
+          try (ReferenceCounted<IOmMetadataReader, SnapshotCache>
+                   rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot(
+              currSnapInfo.getVolumeName(),
+              currSnapInfo.getBucketName(),
+              getSnapshotPrefix(currSnapInfo.getName()),
+              false)) {
+
+            OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get();
+            Table<String, OmKeyInfo> snapDeletedDirTable =
+                currOmSnapshot.getMetadataManager().getDeletedDirTable();
+
+            try (TableIterator<String, ? extends Table.KeyValue<String,
+                OmKeyInfo>> deletedDirIterator = snapDeletedDirTable
+                .iterator(dbBucketKeyForDir)) {
+
+              while (deletedDirIterator.hasNext()) {
+                Table.KeyValue<String, OmKeyInfo> deletedDirInfo =
+                    deletedDirIterator.next();
+
+                // For each deleted directory we do an in-memory DFS and
+                // do a deep clean and exclusive size calculation.
+                iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo,
+                    previousSnapshot, previousToPrevSnapshot,
+                    currOmSnapshot, previousKeyTable, prevRenamedTable,
+                    previousToPrevKeyTable, dbBucketKeyForDir);
+              }
+              updateDeepCleanSnapshotDir(currSnapInfo.getTableKey());
+              if (previousSnapshot != null) {
+                updateExclusiveSize(previousSnapshot.getTableKey());
+              }
+            }
+          }
+        }
+      } catch (IOException ex) {
+        LOG.error("Error while running directory deep clean on snapshots." +
+            " Will retry at next run.", ex);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void iterateDirectoryTree(
+      Table.KeyValue<String, OmKeyInfo> deletedDirInfo, long volumeId,
+      OmBucketInfo bucketInfo,
+      SnapshotInfo previousSnapshot,
+      SnapshotInfo previousToPrevSnapshot,
+      OmSnapshot currOmSnapshot,
+      Table<String, OmKeyInfo> previousKeyTable,
+      Table<String, String> prevRenamedTable,
+      Table<String, OmKeyInfo> previousToPrevKeyTable,
+      String dbBucketKeyForDir) throws IOException {
+
+    Table<String, OmDirectoryInfo> snapDirTable =
+        currOmSnapshot.getMetadataManager().getDirectoryTable();
+    Table<String, String> snapRenamedTable =
+        currOmSnapshot.getMetadataManager().getSnapshotRenamedTable();
+    Stack<StackNode> stackNodes =
+        new Stack<>();
+    OmDirectoryInfo omDeletedDirectoryInfo =
+        getDirectoryInfo(deletedDirInfo.getValue());
+    String dirPathDbKey = currOmSnapshot.getMetadataManager()
+        .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+            omDeletedDirectoryInfo);
+    // Stack Init
+    StackNode topLevelDir = new StackNode();
+    topLevelDir.setDirKey(dirPathDbKey);
+    topLevelDir.setDirValue(omDeletedDirectoryInfo);
+    stackNodes.add(topLevelDir);
+
+    try (
+        TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            directoryIterator = snapDirTable.iterator(dbBucketKeyForDir)) {
+
+      while (!stackNodes.isEmpty()) {
+        StackNode stackTop = stackNodes.pop();
+        String seekDirInDB;
+        // First process all the files in the current directory

Review Comment:
   Why are we doing a DFS? DFS would require a lot of seeks. BFS would be better.
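
   A rough sketch of the queue-based BFS this suggests; `DirNode`, `listSubDirs()` and `processFiles()` are illustrative stand-ins for `StackNode` and the RocksDB prefix iteration, not real APIs:

   ```java
   import java.util.ArrayDeque;
   import java.util.List;
   import java.util.Queue;

   final class BfsSketch {
     interface DirNode {
       List<DirNode> listSubDirs();  // one forward scan over the child prefix
       void processFiles();          // analogue of processFilesUnderDir
     }

     static void deepClean(DirNode root) {
       Queue<DirNode> pending = new ArrayDeque<>();
       pending.add(root);
       while (!pending.isEmpty()) {
         DirNode dir = pending.poll();
         dir.processFiles();
         // Children of one parent share a key prefix, so listing them is a
         // single contiguous scan; BFS never seeks back to a parent's saved
         // position the way the stack-based resume in this patch does.
         pending.addAll(dir.listSubDirs());
       }
     }
   }
   ```

   Whether the saved seeks matter in practice depends on how the FSO path keys are laid out, so this is only the shape of the alternative.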



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,520 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.IOmMetadataReader;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix;
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
+import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
+
+/**
+ * Snapshot BG Service for deleted directory deep clean and exclusive size
+ * calculation for deleted directories.
+ */
+public class SnapshotDirectoryCleaningService
+    extends AbstractKeyDeletingService {
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1;
+
+  private final AtomicBoolean suspended;
+  private final Map<String, Long> exclusiveSizeMap;
+  private final Map<String, Long> exclusiveReplicatedSizeMap;
+
+  public SnapshotDirectoryCleaningService(long interval, TimeUnit unit,
+                                          long serviceTimeout,
+                                          OzoneManager ozoneManager,
+                                          ScmBlockLocationProtocol scmClient) {
+    super(SnapshotDirectoryCleaningService.class.getSimpleName(),
+        interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout,
+        ozoneManager, scmClient);
+    this.suspended = new AtomicBoolean(false);
+    this.exclusiveSizeMap = new HashMap<>();
+    this.exclusiveReplicatedSizeMap = new HashMap<>();
+  }
+
+  private boolean shouldRun() {
+    if (getOzoneManager() == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return getOzoneManager().isLeaderReady() && !suspended.get();
+  }
+
+  /**
+   * Suspend the service.
+   */
+  @VisibleForTesting
+  public void suspend() {
+    suspended.set(true);
+  }
+
+  /**
+   * Resume the service if suspended.
+   */
+  @VisibleForTesting
+  public void resume() {
+    suspended.set(false);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask());
+    return queue;
+  }
+
+  private class SnapshotDirTask implements BackgroundTask {
+
+    @Override
+    public BackgroundTaskResult call() {
+      if (!shouldRun()) {
+        return BackgroundTaskResult.EmptyTaskResult.newResult();
+      }
+      LOG.debug("Running SnapshotDirectoryCleaningService");
+
+      getRunCount().incrementAndGet();
+      OmSnapshotManager omSnapshotManager =
+          getOzoneManager().getOmSnapshotManager();
+      Table<String, SnapshotInfo> snapshotInfoTable =
+          getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+      OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
+          getOzoneManager().getMetadataManager();
+      SnapshotChainManager snapChainManager = metadataManager
+          .getSnapshotChainManager();
+
+      try (TableIterator<String, ? extends Table.KeyValue
+          <String, SnapshotInfo>> iterator = snapshotInfoTable.iterator()) {
+
+        while (iterator.hasNext()) {
+          SnapshotInfo currSnapInfo = iterator.next().getValue();
+
+          // Expand deleted dirs only on active snapshot. Deleted Snapshots
+          // will be cleaned up by SnapshotDeletingService.
+          if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE ||
+              currSnapInfo.getDeepCleanedDeletedDir()) {
+            continue;
+          }
+
+          long volumeId = metadataManager
+              .getVolumeId(currSnapInfo.getVolumeName());
+          // Get bucketInfo for the snapshot bucket to get bucket layout.
+          String dbBucketKey = metadataManager
+              .getBucketKey(currSnapInfo.getVolumeName(),
+                  currSnapInfo.getBucketName());
+          OmBucketInfo bucketInfo = metadataManager
+              .getBucketTable().get(dbBucketKey);
+
+          if (bucketInfo == null) {
+            throw new IllegalStateException("Bucket " + "/" +
+                currSnapInfo.getVolumeName() + "/" + currSnapInfo
+                .getBucketName() + " is not found. BucketInfo should not be " +
+                "null for snapshotted bucket. The OM is in unexpected state.");
+          }
+
+          SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(
+              currSnapInfo, snapChainManager, omSnapshotManager);
+          SnapshotInfo previousToPrevSnapshot = null;
+
+          Table<String, OmKeyInfo> previousKeyTable = null;
+          Table<String, String> prevRenamedTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevOmSnapshot = null;
+
+          if (previousSnapshot != null) {
+            rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousSnapshot.getVolumeName(),
+                previousSnapshot.getBucketName(),
+                getSnapshotPrefix(previousSnapshot.getName()), false);
+            OmSnapshot omPreviousSnapshot = (OmSnapshot)
+                rcPrevOmSnapshot.get();
+
+            previousKeyTable = omPreviousSnapshot.getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+            prevRenamedTable = omPreviousSnapshot
+                .getMetadataManager().getSnapshotRenamedTable();
+            previousToPrevSnapshot = getPreviousActiveSnapshot(
+                previousSnapshot, snapChainManager, omSnapshotManager);
+          }
+
+          Table<String, OmKeyInfo> previousToPrevKeyTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevToPrevOmSnapshot = null;
+          if (previousToPrevSnapshot != null) {
+            rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousToPrevSnapshot.getVolumeName(),
+                previousToPrevSnapshot.getBucketName(),
+                getSnapshotPrefix(previousToPrevSnapshot.getName()), false);
+            OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot)
+                rcPrevToPrevOmSnapshot.get();
+
+            previousToPrevKeyTable = omPreviousToPrevSnapshot
+                .getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+          }
+
+          String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager,
+              currSnapInfo.getVolumeName(), currSnapInfo.getBucketName());
+          try (ReferenceCounted<IOmMetadataReader, SnapshotCache>
+                   rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot(
+              currSnapInfo.getVolumeName(),
+              currSnapInfo.getBucketName(),
+              getSnapshotPrefix(currSnapInfo.getName()),
+              false)) {
+
+            OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get();
+            Table<String, OmKeyInfo> snapDeletedDirTable =
+                currOmSnapshot.getMetadataManager().getDeletedDirTable();
+
+            try (TableIterator<String, ? extends Table.KeyValue<String,
+                OmKeyInfo>> deletedDirIterator = snapDeletedDirTable
+                .iterator(dbBucketKeyForDir)) {
+
+              while (deletedDirIterator.hasNext()) {
+                Table.KeyValue<String, OmKeyInfo> deletedDirInfo =
+                    deletedDirIterator.next();
+
+                // For each deleted directory we do an in-memory DFS and
+                // do a deep clean and exclusive size calculation.
+                iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo,
+                    previousSnapshot, previousToPrevSnapshot,
+                    currOmSnapshot, previousKeyTable, prevRenamedTable,
+                    previousToPrevKeyTable, dbBucketKeyForDir);
+              }
+              updateDeepCleanSnapshotDir(currSnapInfo.getTableKey());
+              if (previousSnapshot != null) {
+                updateExclusiveSize(previousSnapshot.getTableKey());
+              }
+            }
+          }
+        }
+      } catch (IOException ex) {
+        LOG.error("Error while running directory deep clean on snapshots." +
+            " Will retry at next run.", ex);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void iterateDirectoryTree(
+      Table.KeyValue<String, OmKeyInfo> deletedDirInfo, long volumeId,
+      OmBucketInfo bucketInfo,
+      SnapshotInfo previousSnapshot,
+      SnapshotInfo previousToPrevSnapshot,
+      OmSnapshot currOmSnapshot,
+      Table<String, OmKeyInfo> previousKeyTable,
+      Table<String, String> prevRenamedTable,
+      Table<String, OmKeyInfo> previousToPrevKeyTable,
+      String dbBucketKeyForDir) throws IOException {
+
+    Table<String, OmDirectoryInfo> snapDirTable =
+        currOmSnapshot.getMetadataManager().getDirectoryTable();
+    Table<String, String> snapRenamedTable =
+        currOmSnapshot.getMetadataManager().getSnapshotRenamedTable();
+    Stack<StackNode> stackNodes =
+        new Stack<>();
+    OmDirectoryInfo omDeletedDirectoryInfo =
+        getDirectoryInfo(deletedDirInfo.getValue());
+    String dirPathDbKey = currOmSnapshot.getMetadataManager()
+        .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+            omDeletedDirectoryInfo);
+    // Stack Init
+    StackNode topLevelDir = new StackNode();
+    topLevelDir.setDirKey(dirPathDbKey);
+    topLevelDir.setDirValue(omDeletedDirectoryInfo);
+    stackNodes.add(topLevelDir);
+
+    try (
+        TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            directoryIterator = snapDirTable.iterator(dbBucketKeyForDir)) {
+
+      while (!stackNodes.isEmpty()) {
+        StackNode stackTop = stackNodes.pop();
+        String seekDirInDB;
+        // First process all the files in the current directory
+        // and then do a DFS for directory.
+        if (StringUtils.isEmpty(stackTop.getSubDirSeek())) {
+          processFilesUnderDir(previousSnapshot,
+              previousToPrevSnapshot,
+              volumeId,
+              bucketInfo,
+              stackTop.getDirValue(),
+              currOmSnapshot.getMetadataManager(),
+              snapRenamedTable,
+              previousKeyTable,
+              prevRenamedTable,
+              previousToPrevKeyTable);
+          seekDirInDB = currOmSnapshot.getMetadataManager()
+              .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+                  stackTop.getDirValue().getObjectID(), "");
+          directoryIterator.seek(seekDirInDB);
+        } else {
+          // When a leaf node is processed, we need to come back in
+          // the call stack and process the next directories.
+          seekDirInDB = stackTop.getSubDirSeek();
+          directoryIterator.seek(seekDirInDB);
+          // We need to skip to the next sub-directory because we already
+          // processed the current sub-directory in the previous run.
+          if (directoryIterator.hasNext()) {
+            directoryIterator.next();
+          } else {
+            continue;
+          }
+        }
+
+        if (directoryIterator.hasNext()) {
+          Table.KeyValue<String, OmDirectoryInfo> deletedSubDirInfo =
+              directoryIterator.next();
+          String deletedSubDirKey = deletedSubDirInfo.getKey();
+
+          String prefixCheck = currOmSnapshot.getMetadataManager()
+              .getOzoneDeletePathDirKey(seekDirInDB);
+          // Exit if it is out of the sub dir prefix scope.
+          if (!deletedSubDirKey.startsWith(prefixCheck)) {
+            // Add exit condition.
+            continue;
+          }
+          stackTop.setSubDirSeek(deletedSubDirKey);
+          stackNodes.add(stackTop);
+          StackNode nextSubDir = new StackNode();
+          nextSubDir.setDirKey(deletedSubDirInfo.getKey());
+          nextSubDir.setDirValue(deletedSubDirInfo.getValue());
+          stackNodes.add(nextSubDir);

Review Comment:
   nit: use push instead of add.
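
   Concretely, with the nit applied the tail of this block would read (same logic, clearer stack verbs):

   ```java
   // Record where to resume in the parent, re-push it, then push the child
   // so the child is processed first; the LIFO order is what makes the
   // traversal depth-first.
   stackTop.setSubDirSeek(deletedSubDirKey);
   stackNodes.push(stackTop);
   StackNode nextSubDir = new StackNode();
   nextSubDir.setDirKey(deletedSubDirInfo.getKey());
   nextSubDir.setDirValue(deletedSubDirInfo.getValue());
   stackNodes.push(nextSubDir);
   ```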



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

