This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
     new 13ccecaea1a HDDS-13436. Snapshot Diff Error when DirectoryDeletingService deep cleans snapshot (#8800)
13ccecaea1a is described below

commit 13ccecaea1a166010175e68a4d3c9a641764d141
Author: Swaminathan Balachandran <swamirishi...@gmail.com>
AuthorDate: Sun Jul 20 23:07:15 2025 -0400

    HDDS-13436. Snapshot Diff Error when DirectoryDeletingService deep cleans snapshot (#8800)
---
 .../TestSnapshotDirectoryCleaningService.java      | 73 +++++++++++++++++++++-
 .../ozone/om/snapshot/SnapshotDiffManager.java     | 25 +++++---
 2 files changed, 89 insertions(+), 9 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java
index 1e7981dbec7..a34e11e6226 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java
@@ -21,10 +21,12 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED;
+import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE;
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -35,9 +37,12 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.TestDataUtil;
@@ -52,6 +57,8 @@
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
 import org.apache.hadoop.ozone.om.service.DirectoryDeletingService;
+import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone;
+import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse;
 import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ozone.test.tag.Flaky;
 import org.junit.jupiter.api.AfterAll;
@@ -74,6 +81,7 @@ public class TestSnapshotDirectoryCleaningService {
   private static String volumeName;
   private static String bucketName;
   private static OzoneClient client;
+  private static AtomicLong counter = new AtomicLong(0L);
 
   @BeforeAll
   public static void init() throws Exception {
@@ -258,11 +266,74 @@ public void testExclusiveSizeWithDirectoryDeepClean() throws Exception {
         // Since for the test we are using RATIS/THREE
         assertEquals(expectedSize.get(snapshotName) * 3,
             snapshotInfo.getExclusiveReplicatedSize() +
                snapshotInfo.getExclusiveReplicatedSizeDeltaFromDirDeepCleaning());
-
       }
     }
   }
 
+  private SnapshotDiffReportOzone getSnapDiffReport(String volume,
+                                                    String bucket,
+                                                    String fromSnapshot,
+                                                    String toSnapshot)
+      throws InterruptedException, IOException {
+    SnapshotDiffResponse response;
+    do {
+      response = client.getObjectStore().snapshotDiff(volume, bucket, fromSnapshot,
+          toSnapshot, null, 0, true, true);
+      Thread.sleep(response.getWaitTimeInMs());
+    } while (response.getJobStatus() != DONE);
+    assertEquals(DONE, response.getJobStatus());
+    return response.getSnapshotDiffReport();
+  }
+
+  /**
+   * Testing Scenario:
+   * 1) Create dir1/dir2/dir3/dir4
+   * 2) Suspend KeyDeletingService & DirectoryDeletingService
+   * 3) Delete dir1/dir2
+   * 4) Create snapshot snap1
+   * 5) Create dir1/dir3/dir6
+   * 6) Create snapshot snap2
+   * 7) Resume KeyDeletingService & DirectoryDeletingService
+   * 8) Wait for snap1 to get Deep cleaned completely.
+   * 9) Create dir1/dir4/dir5
+   * 9) Create snapshot snap3
+   * 10) Perform SnapshotDiff b/w snap2 & snap3
+   * @throws Exception
+   */
+  @Test
+  public void testSnapshotDiffBeforeAndAfterDeepCleaning() throws Exception {
+    String volume = "vol-" + counter.incrementAndGet();
+    String bucket = "buc-" + counter.incrementAndGet();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket volBucket = TestDataUtil.createVolumeAndBucket(client, volume, bucket,
+        BucketLayout.FILE_SYSTEM_OPTIMIZED);
+    volBucket.createDirectory("dir1/dir2/dir3/dir4");
+    cluster.getOzoneManager().getKeyManager().getDirDeletingService().suspend();
+    cluster.getOzoneManager().getKeyManager().getDeletingService().suspend();
+    volBucket.deleteDirectory("dir1/dir2", true);
+    client.getObjectStore().createSnapshot(volume, bucket, "snap1");
+    volBucket.createDirectory("dir1/dir3/dir6");
+    client.getObjectStore().createSnapshot(volume, bucket, "snap2");
+    volBucket.createDirectory("dir1/dir4/dir5");
+    cluster.getOzoneManager().getKeyManager().getDirDeletingService().resume();
+    cluster.getOzoneManager().getKeyManager().getDeletingService().resume();
+    GenericTestUtils.waitFor(() -> {
+      try {
+        SnapshotInfo snapshotInfo = cluster.getOzoneManager().getSnapshotInfo(volume, bucket, "snap1");
+        return snapshotInfo.isDeepCleaned() && snapshotInfo.isDeepCleanedDeletedDir();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }, 1000, 10000);
+    client.getObjectStore().createSnapshot(volume, bucket, "snap3");
+    SnapshotDiffReportOzone diff = getSnapDiffReport(volume, bucket, "snap2", "snap3");
+    assertEquals(2, diff.getDiffList().size());
+    assertEquals(Arrays.asList(
+        new DiffReportEntry(SnapshotDiffReport.DiffType.CREATE, StringUtils.string2Bytes("dir1/dir4")),
+        new DiffReportEntry(SnapshotDiffReport.DiffType.CREATE, StringUtils.string2Bytes("dir1/dir4/dir5"))),
+        diff.getDiffList());
+  }
+
   private void assertTableRowCount(Table<String, ?> table, int count)
       throws TimeoutException, InterruptedException {
     GenericTestUtils.waitFor(() -> assertTableRowCount(count, table), 1000,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index 51638ac9c72..21c2b5979a7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -970,7 +970,7 @@ void generateSnapshotDiffReport(final String jobKey,
           oldParentIdPathMap.get().putAll(new FSODirectoryPathResolver(
               tablePrefix, bucketId,
               fromSnapshot.getMetadataManager().getDirectoryTable())
-              .getAbsolutePathForObjectIDs(oldParentIds));
+              .getAbsolutePathForObjectIDs(oldParentIds, true));
           newParentIdPathMap.get().putAll(new FSODirectoryPathResolver(
               tablePrefix, bucketId,
               toSnapshot.getMetadataManager().getDirectoryTable())
@@ -1378,10 +1378,12 @@ long generateDiffReport(
         }
       } else if (newKeyName == null) { // Key Deleted.
         String key = resolveBucketRelativePath(isFSOBucket,
-            oldParentIdPathMap, oldKeyName, false);
-        DiffReportEntry entry =
-            SnapshotDiffReportOzone.getDiffReportEntry(DELETE, key);
-        deleteDiffs.add(codecRegistry.asRawData(entry));
+            oldParentIdPathMap, oldKeyName, true);
+        if (key != null) {
+          DiffReportEntry entry =
+              SnapshotDiffReportOzone.getDiffReportEntry(DELETE, key);
+          deleteDiffs.add(codecRegistry.asRawData(entry));
+        }
       } else if (isDirectoryObject &&
           Arrays.equals(oldKeyName, newKeyName)) {
         String key = resolveBucketRelativePath(isFSOBucket,
@@ -1395,10 +1397,18 @@ long generateDiffReport(
         String keyPrefix = getTablePrefix(tablePrefix,
             (isDirectoryObject ? fsDirTable : fsTable).getName());
         String oldKey = resolveBucketRelativePath(isFSOBucket,
-            oldParentIdPathMap, oldKeyName, false);
+            oldParentIdPathMap, oldKeyName, true);
         String newKey = resolveBucketRelativePath(isFSOBucket,
             newParentIdPathMap, newKeyName, true);
-        if (newKey == null) {
+        if (oldKey == null && newKey == null) {
+          // When both are unresolved then it means both keys are deleted. So no change for these objects.
+          continue;
+        } else if (oldKey == null) {
+          // This should never happen where oldKey path is unresolved and new snapshot is resolved.
+          throw new IllegalStateException(String.format("Old and new key resolved paths both are not null when " +
+              "oldKey is null for oldKey : %s newKey: %s", codecRegistry.asObject(oldKeyName, String.class),
+              codecRegistry.asObject(newKeyName, String.class)));
+        } else if (newKey == null) {
          deleteDiffs.add(codecRegistry.asRawData(SnapshotDiffReportOzone
              .getDiffReportEntry(DELETE, oldKey)));
        } else {
@@ -1422,7 +1432,6 @@ long generateDiffReport(
           }
         }
       }
-      counter++;
     }
   }

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@ozone.apache.org
For additional commands, e-mail: commits-h...@ozone.apache.org