hemantk-12 commented on code in PR #5579:
URL: https://github.com/apache/ozone/pull/5579#discussion_r1422000922


##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService;
+import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
+/**
+ * Test Snapshot Directory Service.
+ */
+@Timeout(300)
+public class TestSnapshotDirectoryCleaningService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSnapshotDirectoryCleaningService.class);
+
+  private static boolean omRatisEnabled = true;
+
+  private static MiniOzoneCluster cluster;
+  private static FileSystem fs;
+  private static String volumeName;
+  private static String bucketName;
+  private static OzoneClient client;
+
+  @BeforeAll
+  public static void init() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, 2500);
+    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 2500,
+        TimeUnit.MILLISECONDS);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled);

Review Comment:
   Ratis is enabled by default
   
https://github.com/apache/ozone/blob/22ef3155c3a6e2d4218769c81899bc7ba62f091c/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java#L174
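   
   For reference, a sketch of the linked constants (names as I recall them from `OMConfigKeys`; the linked line is authoritative):
   ```java
   // OMConfigKeys (see link above): the OM Ratis flag already defaults to
   // true, so the explicit conf.setBoolean(...) in this test is redundant.
   public static final String OZONE_OM_RATIS_ENABLE_KEY =
       "ozone.om.ratis.enable";
   public static final boolean OZONE_OM_RATIS_ENABLE_DEFAULT = true;
   ```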



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,274 @@
+    conf.setBoolean(OZONE_ACL_ENABLED, true);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+    client = cluster.newClient();
+
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client,
+        BucketLayout.FILE_SYSTEM_OPTIMIZED);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+        OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+
+    fs = FileSystem.get(conf);
+  }
+
+  @AfterAll
+  public static void teardown() {
+    IOUtils.closeQuietly(client);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    IOUtils.closeQuietly(fs);
+  }
+
+  @AfterEach
+  public void cleanup() {
+    try {
+      Path root = new Path("/");
+      FileStatus[] fileStatuses = fs.listStatus(root);
+      for (FileStatus fileStatus : fileStatuses) {
+        fs.delete(fileStatus.getPath(), true);
+      }
+    } catch (IOException ex) {
+      fail("Failed to cleanup files.");
+    }
+  }
+
+  @SuppressWarnings("checkstyle:LineLength")
+  @Test
+  public void testExclusiveSizeWithDirectoryDeepClean() throws Exception {
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedDirTable();
+    Table<String, OmKeyInfo> keyTable =
+        cluster.getOzoneManager().getMetadataManager()
+            .getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED);
+    Table<String, OmDirectoryInfo> dirTable =
+        cluster.getOzoneManager().getMetadataManager().getDirectoryTable();
+    Table<String, RepeatedOmKeyInfo> deletedKeyTable =
+        cluster.getOzoneManager().getMetadataManager().getDeletedTable();
+    Table<String, SnapshotInfo> snapshotInfoTable =
+        cluster.getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+    SnapshotDirectoryCleaningService snapshotDirectoryCleaningService =
+        cluster.getOzoneManager().getKeyManager().getSnapshotDirectoryService();
+
+    /*    DirTable
+    /v/b/snapDir
+    /v/b/snapDir/appRoot0-2/
+    /v/b/snapDir/appRoot0-2/parentDir0-2/
+          KeyTable

Review Comment:
   Shouldn't it be `fileTable`?
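   
   For context, `getKeyTable(BucketLayout)` dispatches on the layout, so for an FSO bucket it already hands back the file table. A rough sketch of that dispatch (my recollection of `OmMetadataManagerImpl`, not a verbatim copy):
   ```java
   public Table<String, OmKeyInfo> getKeyTable(BucketLayout bucketLayout) {
     if (bucketLayout.isFileSystemOptimized()) {
       return fileTable;  // FSO keys live in the fileTable
     }
     return keyTable;
   }
   ```
   So the ASCII diagram arguably should say `FileTable` for an FSO bucket.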



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,274 @@
+    /*    DirTable
+    /v/b/snapDir
+    /v/b/snapDir/appRoot0-2/
+    /v/b/snapDir/appRoot0-2/parentDir0-2/
+          KeyTable
+    /v/b/snapDir/testKey0 - testKey4  = 5 keys
+    /v/b/snapDir/appRoot0-2/parentDir0-2/childFile = 9 keys
+    /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4 = 15 keys
+     */
+
+    Path root = new Path("/snapDir");
+    // Create  parent dir from root.

Review Comment:
   ```suggestion 
       // Create parent dir from root.
   ```



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,274 @@
+    Path root = new Path("/snapDir");
+    // Create  parent dir from root.
+    fs.mkdirs(root);
+
+    // Added 5 sub files inside root dir

Review Comment:
   ```suggestion
       // Add 5 files inside root dir
   ```



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,518 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.IOmMetadataReader;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OmSnapshot;
+import org.apache.hadoop.ozone.om.OmSnapshotManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.SnapshotChainManager;
+import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
+import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix;
+import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo;
+import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso;
+
+/**
+ * Snapshot BG Service for deleted directory deep clean and exclusive size
+ * calculation for deleted directories.
+ */
+public class SnapshotDirectoryCleaningService
+    extends AbstractKeyDeletingService {
+  // Use only a single thread for DirDeletion. Multiple threads would read
+  // or write to same tables and can send deletion requests for same key
+  // multiple times.
+  private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1;
+
+  private final AtomicBoolean suspended;
+  private final Map<String, Long> exclusiveSizeMap;
+  private final Map<String, Long> exclusiveReplicatedSizeMap;
+
+  public SnapshotDirectoryCleaningService(long interval, TimeUnit unit,
+                                          long serviceTimeout,
+                                          OzoneManager ozoneManager,
+                                          ScmBlockLocationProtocol scmClient) {
+    super(SnapshotDirectoryCleaningService.class.getSimpleName(),
+        interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout,
+        ozoneManager, scmClient);
+    this.suspended = new AtomicBoolean(false);
+    this.exclusiveSizeMap = new HashMap<>();
+    this.exclusiveReplicatedSizeMap = new HashMap<>();
+  }
+
+  private boolean shouldRun() {
+    if (getOzoneManager() == null) {
+      // OzoneManager can be null for testing
+      return true;
+    }
+    return getOzoneManager().isLeaderReady() && !suspended.get();
+  }
+
+  /**
+   * Suspend the service.
+   */
+  @VisibleForTesting
+  public void suspend() {
+    suspended.set(true);
+  }
+
+  /**
+   * Resume the service if suspended.
+   */
+  @VisibleForTesting
+  public void resume() {
+    suspended.set(false);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask());
+    return queue;
+  }
+
+  private class SnapshotDirTask implements BackgroundTask {
+
+    @Override
+    public BackgroundTaskResult call() {
+      if (!shouldRun()) {
+        return BackgroundTaskResult.EmptyTaskResult.newResult();
+      }
+      LOG.debug("Running SnapshotDirectoryCleaningService");
+
+      getRunCount().incrementAndGet();
+      OmSnapshotManager omSnapshotManager =
+          getOzoneManager().getOmSnapshotManager();
+      Table<String, SnapshotInfo> snapshotInfoTable =
+          getOzoneManager().getMetadataManager().getSnapshotInfoTable();
+      OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl)
+          getOzoneManager().getMetadataManager();
+      SnapshotChainManager snapChainManager = metadataManager
+          .getSnapshotChainManager();
+
+      try (TableIterator<String, ? extends Table.KeyValue
+          <String, SnapshotInfo>> iterator = snapshotInfoTable.iterator()) {
+
+        while (iterator.hasNext()) {
+          SnapshotInfo currSnapInfo = iterator.next().getValue();
+
+          // Expand deleted dirs only on active snapshot. Deleted Snapshots
+          // will be cleaned up by SnapshotDeletingService.
+          if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE ||
+              currSnapInfo.getDeepCleanedDeletedDir()) {
+            continue;
+          }
+
+          long volumeId = metadataManager
+              .getVolumeId(currSnapInfo.getVolumeName());
+          // Get bucketInfo for the snapshot bucket to get bucket layout.
+          String dbBucketKey = metadataManager
+              .getBucketKey(currSnapInfo.getVolumeName(),
+                  currSnapInfo.getBucketName());
+          OmBucketInfo bucketInfo = metadataManager
+              .getBucketTable().get(dbBucketKey);
+
+          if (bucketInfo == null) {
+            throw new IllegalStateException("Bucket " + "/" +
+                currSnapInfo.getVolumeName() + "/" + currSnapInfo
+                .getBucketName() + " is not found. BucketInfo should not be " +
+                "null for snapshotted bucket. The OM is in unexpected state.");
+          }
+
+          SnapshotInfo previousSnapshot = getPreviousActiveSnapshot(
+              currSnapInfo, snapChainManager, omSnapshotManager);
+          SnapshotInfo previousToPrevSnapshot = null;
+
+          Table<String, OmKeyInfo> previousKeyTable = null;
+          Table<String, String> prevRenamedTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevOmSnapshot = null;
+
+          if (previousSnapshot != null) {
+            rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousSnapshot.getVolumeName(),
+                previousSnapshot.getBucketName(),
+                getSnapshotPrefix(previousSnapshot.getName()), false);
+            OmSnapshot omPreviousSnapshot = (OmSnapshot)
+                rcPrevOmSnapshot.get();
+
+            previousKeyTable = omPreviousSnapshot.getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+            prevRenamedTable = omPreviousSnapshot
+                .getMetadataManager().getSnapshotRenamedTable();
+            previousToPrevSnapshot = getPreviousActiveSnapshot(
+                previousSnapshot, snapChainManager, omSnapshotManager);
+          }
+
+          Table<String, OmKeyInfo> previousToPrevKeyTable = null;
+          ReferenceCounted<IOmMetadataReader, SnapshotCache>
+              rcPrevToPrevOmSnapshot = null;
+          if (previousToPrevSnapshot != null) {
+            rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot(
+                previousToPrevSnapshot.getVolumeName(),
+                previousToPrevSnapshot.getBucketName(),
+                getSnapshotPrefix(previousToPrevSnapshot.getName()), false);
+            OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot)
+                rcPrevToPrevOmSnapshot.get();
+
+            previousToPrevKeyTable = omPreviousToPrevSnapshot
+                .getMetadataManager()
+                .getKeyTable(bucketInfo.getBucketLayout());
+          }
+
+          String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager,
+              currSnapInfo.getVolumeName(), currSnapInfo.getBucketName());
+          try (ReferenceCounted<IOmMetadataReader, SnapshotCache>
+                   rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot(
+              currSnapInfo.getVolumeName(),
+              currSnapInfo.getBucketName(),
+              getSnapshotPrefix(currSnapInfo.getName()),
+              false)) {
+
+            OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get();
+            Table<String, OmKeyInfo> snapDeletedDirTable =
+                currOmSnapshot.getMetadataManager().getDeletedDirTable();
+
+            try (TableIterator<String, ? extends Table.KeyValue<String,
+                OmKeyInfo>> deletedDirIterator = snapDeletedDirTable
+                .iterator(dbBucketKeyForDir)) {
+
+              while (deletedDirIterator.hasNext()) {
+                Table.KeyValue<String, OmKeyInfo> deletedDirInfo =
+                    deletedDirIterator.next();
+
+                // For each deleted directory we do an in-memory DFS and
+                // do a deep clean and exclusive size calculation.
+                iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo,
+                    previousSnapshot, previousToPrevSnapshot,
+                    currOmSnapshot, previousKeyTable, prevRenamedTable,
+                    previousToPrevKeyTable, dbBucketKeyForDir);
+              }
+              updateDeepCleanSnapshotDir(currSnapInfo.getTableKey());
+              if (previousSnapshot != null) {
+                updateExclusiveSize(previousSnapshot.getTableKey());
+              }
+            }
+          }
+        }
+      } catch (IOException ex) {
+        LOG.error("Error while running directory deep clean on snapshots." +
+            " Will retry at next run.", ex);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private void iterateDirectoryTree(
+      Table.KeyValue<String, OmKeyInfo> deletedDirInfo, long volumeId,
+      OmBucketInfo bucketInfo,
+      SnapshotInfo previousSnapshot,
+      SnapshotInfo previousToPrevSnapshot,
+      OmSnapshot currOmSnapshot,
+      Table<String, OmKeyInfo> previousKeyTable,
+      Table<String, String> prevRenamedTable,
+      Table<String, OmKeyInfo> previousToPrevKeyTable,
+      String dbBucketKeyForDir) throws IOException {
+
+    Table<String, OmDirectoryInfo> snapDirTable =
+        currOmSnapshot.getMetadataManager().getDirectoryTable();
+    Table<String, String> snapRenamedTable =
+        currOmSnapshot.getMetadataManager().getSnapshotRenamedTable();
+    Stack<StackNode> stackNodes =
+        new Stack<>();
+    OmDirectoryInfo omDeletedDirectoryInfo =
+        getDirectoryInfo(deletedDirInfo.getValue());
+    String dirPathDbKey = currOmSnapshot.getMetadataManager()
+        .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+            omDeletedDirectoryInfo);
+    // Stack Init
+    StackNode topLevelDir = new StackNode();
+    topLevelDir.setDirKey(dirPathDbKey);
+    topLevelDir.setDirValue(omDeletedDirectoryInfo);
+    stackNodes.add(topLevelDir);
+
+    try (
+        TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+            directoryIterator = snapDirTable.iterator(dbBucketKeyForDir)) {
+
+      while (!stackNodes.isEmpty()) {
+        StackNode stackTop = stackNodes.pop();
+        String seekDirInDB;
+        // First process all the files in the current directory
+        // and then do a DFS for directory.
+        if (StringUtils.isEmpty(stackTop.getSubDirSeek())) {
+          processFilesUnderDir(previousSnapshot,
+              previousToPrevSnapshot,
+              volumeId,
+              bucketInfo,
+              stackTop.getDirValue(),
+              currOmSnapshot.getMetadataManager(),
+              snapRenamedTable,
+              previousKeyTable,
+              prevRenamedTable,
+              previousToPrevKeyTable);
+          seekDirInDB = currOmSnapshot.getMetadataManager()
+              .getOzonePathKey(volumeId, bucketInfo.getObjectID(),
+                  stackTop.getDirValue().getObjectID(), "");
+          directoryIterator.seek(seekDirInDB);
+        } else {
+          // When a leaf node is processed, we need come back in

Review Comment:
   ```suggestion
             // When a leaf node is processed, we need to come back in
   ```



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,518 @@
+        } else {
+          // When a leaf node is processed, we need come back in
+          // the call stack and process the next directories.
+          seekDirInDB = stackTop.getSubDirSeek();
+          directoryIterator.seek(seekDirInDB);
+          if (directoryIterator.hasNext()) {

Review Comment:
   Can you please add a comment here explaining that we skip to the next entry because it was added in lines 339-342?
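   
   For example, something along these lines (wording is just a sketch):
   ```java
   // The seek key is the sub-directory we already pushed onto the stack
   // when subDirSeek was recorded (lines 339-342), so step past it and
   // continue from the next sibling entry.
   ```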



##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java:
##########
@@ -0,0 +1,274 @@
+    Path root = new Path("/snapDir");
+    // Create  parent dir from root.
+    fs.mkdirs(root);
+
+    // Added 5 sub files inside root dir
+    // Creates /v/b/snapDir/testKey0 - testKey4
+    for (int i = 0; i < 5; i++) {
+      Path path = new Path(root, "testKey" + i);
+      try (FSDataOutputStream stream = fs.create(path)) {
+        stream.write(1);
+      }
+    }
+
+    // Creates /v/b/snapDir/appRoot0-2/parentDir0-2/childFile
+    for (int i = 0; i < 3; i++) {
+      for (int j = 0; j < 3; j++) {
+        Path appRoot = new Path(root, "appRoot" + j);
+        Path parent = new Path(appRoot, "parentDir" + i);
+        Path child = new Path(parent, "childFile");
+        try (FSDataOutputStream stream = fs.create(child)) {
+          stream.write(1);
+        }
+      }
+    }
+
+    assertTableRowCount(keyTable, 14);
+    assertTableRowCount(dirTable, 13);
+    // Create snapshot
+    client.getObjectStore().createSnapshot(volumeName, bucketName, "snap1");
+
+    // Creates /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4
+    for (int i = 0; i < 3; i++) {
+      Path appRoot = new Path(root, "appRoot0");
+      Path parent = new Path(appRoot, "parentDir" + i);
+      for (int j = 0; j < 5; j++) {
+        Path child = new Path(parent, "childFile" + j);
+        try (FSDataOutputStream stream = fs.create(child)) {
+          stream.write(1);
+        }
+      }
+    }
+
+    for (int i = 5; i < 10; i++) {
+      Path path = new Path(root, "testKey" + i);
+      try (FSDataOutputStream stream = fs.create(path)) {
+        stream.write(1);
+      }
+    }
+
+    assertTableRowCount(deletedDirTable, 0);
+    assertTableRowCount(keyTable, 34);
+    assertTableRowCount(dirTable, 13);
+    Path appRoot0 = new Path(root, "appRoot0");
+    // Only parentDir0-2/childFile under appRoot0 is exclusive for snap1
+    fs.delete(appRoot0, true);
+    assertTableRowCount(deletedDirTable, 1);
+    client.getObjectStore().createSnapshot(volumeName, bucketName, "snap2");
+
+    // Delete testKey0-9
+    for (int i = 0; i < 10; i++) {
+      Path testKey = new Path(root, "testKey" + i);
+      fs.delete(testKey, false);
+    }
+
+    fs.delete(root, true);
+    assertTableRowCount(deletedKeyTable, 10);
+    client.getObjectStore().createSnapshot(volumeName, bucketName, "snap3");
+    long prevRunCount = snapshotDirectoryCleaningService.getRunCount().get();
+    GenericTestUtils.waitFor(() -> snapshotDirectoryCleaningService.getRunCount().get()
+        > prevRunCount + 1, 100, 10000);
+
+    Thread.sleep(2000);
+    Map<String, Long> expectedSize = new HashMap<String, Long>() {{
+      // /v/b/snapDir/appRoot0/parentDir0-2/childFile contribute
+      // exclusive size, /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4
+      // are deep cleaned and hence don't contribute to size.
+        put("snap1", 3L);
+      // Only testKey5-9 contribute to the exclusive size
+        put("snap2", 5L);
+        put("snap3", 0L);
+      }};
+    try (TableIterator<String, ? extends Table.KeyValue<String, SnapshotInfo>>
+        iterator = snapshotInfoTable.iterator()) {
+      while (iterator.hasNext()) {
+        Table.KeyValue<String, SnapshotInfo> snapshotEntry = iterator.next();
+        String snapshotName = snapshotEntry.getValue().getName();
+        assertEquals(expectedSize.get(snapshotName), snapshotEntry.getValue().
+            getExclusiveSize());
+        // Since for the test we are using RATIS/THREE
+        assertEquals(expectedSize.get(snapshotName) * 3,
+            snapshotEntry.getValue().getExclusiveReplicatedSize());
+
+      }
+    }
+  }
+
+  private void assertTableRowCount(Table<String, ?> table, int count)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> assertTableRowCount(count, table), 1000,
+        120000); // 2 minutes
+  }
+
+  private boolean assertTableRowCount(int expectedCount,
+                                      Table<String, ?> table) {
+    long count = 0L;
+    try {
+      count = cluster.getOzoneManager().getMetadataManager()
+          .countRowsInTable(table);
+      LOG.info("{} actual row count={}, expectedCount={}", table.getName(),

Review Comment:
   This log statement is OK for debugging but doesn't need to be added.
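   
   If it stays for debugging, it could at least be demoted to debug level, e.g. (argument list assumed from the surrounding method):
   ```java
   LOG.debug("{} actual row count={}, expectedCount={}", table.getName(),
       count, expectedCount);
   ```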



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

