This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch ozone-2.1
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 9d4ec3174973df876a2656f63cf42ed57b7d8cc2
Author: Wei-Chiu Chuang <[email protected]>
AuthorDate: Wed Nov 19 12:56:59 2025 -0800

    Revert "HDDS-13228. Take snapshot cache lock during the last iteration of 
tarball transfer. (#8678)"
    
    This reverts commit c706c7ac0d7f8614d9fffd6d390219a0479b0f2a.
    
     Conflicts:
            hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
            hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
            hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
---
 .../hadoop/ozone/om/helpers/SnapshotInfo.java      |   4 +-
 .../TestOMDbCheckpointServletInodeBasedXfer.java   | 217 +++------------------
 .../om/OMDBCheckpointServletInodeBasedXfer.java    |  35 ----
 .../apache/hadoop/ozone/om/OmSnapshotManager.java  |  10 -
 .../hadoop/ozone/om/codec/OMDBDefinition.java      |  10 -
 .../hadoop/ozone/om/snapshot/SnapshotCache.java    |   1 -
 6 files changed, 27 insertions(+), 250 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
index cbc3709ea1e..a8fdc8848bc 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
@@ -20,7 +20,6 @@
 import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf;
 import static org.apache.hadoop.hdds.HddsUtils.toProtobuf;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -58,6 +57,7 @@ public final class SnapshotInfo implements Auditable, CopyObject<SnapshotInfo> {
       SnapshotInfo::getProtobuf,
       SnapshotInfo.class);
 
+  private static final String SEPARATOR = "-";
   private static final long INVALID_TIMESTAMP = -1;
   private static final UUID INITIAL_SNAPSHOT_ID = UUID.randomUUID();
 
@@ -565,7 +565,7 @@ public Map<String, String> toAuditMap() {
   public static String getCheckpointDirName(UUID snapshotId) {
     Objects.requireNonNull(snapshotId,
         "SnapshotId is needed to create checkpoint directory");
-    return OM_SNAPSHOT_SEPARATOR + snapshotId;
+    return SEPARATOR + snapshotId;
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
index 099c281db97..89f167e143a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.om;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
@@ -30,10 +29,8 @@
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.doCallRealMethod;
@@ -58,7 +55,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
@@ -73,7 +69,6 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
@@ -83,22 +78,13 @@
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneSnapshot;
 import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
-import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.DBOptions;
-import org.rocksdb.RocksDB;
 
 /**
  * Class used for testing the OM DB Checkpoint provider servlet using inode based transfer logic.
@@ -121,9 +107,6 @@ public class TestOMDbCheckpointServletInodeBasedXfer {
   @BeforeEach
   void init() throws Exception {
     conf = new OzoneConfiguration();
-    // ensure cache entries are not evicted thereby snapshot db's are not closed
-    conf.setTimeDuration(OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL,
-        100, TimeUnit.MINUTES);
   }
 
   @AfterEach
@@ -209,46 +192,35 @@ public void write(int b) throws IOException {
     doCallRealMethod().when(omDbCheckpointServletMock).getSstBackupDir();
   }
 
-  @ParameterizedTest
-  @ValueSource(booleans = {true, false})
-  public void testTarballBatching(boolean includeSnapshots) throws Exception {
+  @Test
+  void testContentsOfTarballWithSnapshot() throws Exception {
+    setupCluster();
+    setupMocks();
+    when(requestMock.getParameter(OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA)).thenReturn("true");
     String volumeName = "vol" + RandomStringUtils.secure().nextNumeric(5);
     String bucketName = "buck" + RandomStringUtils.secure().nextNumeric(5);
+    // Create a "spy" dbstore keep track of the checkpoint.
+    writeData(volumeName, bucketName, true);
+    DBStore dbStore = om.getMetadataManager().getStore();
+    DBStore spyDbStore = spy(dbStore);
     AtomicReference<DBCheckpoint> realCheckpoint = new AtomicReference<>();
-    setupClusterAndMocks(volumeName, bucketName, realCheckpoint, includeSnapshots);
-    long maxFileSizeLimit = 4096;
-    om.getConfiguration().setLong(OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY, maxFileSizeLimit);
-    // Get the tarball.
-    omDbCheckpointServletMock.doGet(requestMock, responseMock);
-    String testDirName = folder.resolve("testDir").toString();
-    String newDbDirName = testDirName + OM_KEY_PREFIX + OM_DB_NAME;
-    File newDbDir = new File(newDbDirName);
-    assertTrue(newDbDir.mkdirs());
-    FileUtil.unTar(tempFile, newDbDir);
-    long totalSize;
-    try (Stream<Path> list = Files.list(newDbDir.toPath())) {
-      totalSize = list.mapToLong(path -> path.toFile().length()).sum();
-    }
-    boolean obtainedFilesUnderMaxLimit = totalSize < maxFileSizeLimit;
-    if (!includeSnapshots) {
-      // If includeSnapshotData flag is set to false , it always sends all data
-      // in one batch and doesn't respect the max size config. This is how Recon
-      // uses it today.
-      assertFalse(obtainedFilesUnderMaxLimit);
-    } else {
-      assertTrue(obtainedFilesUnderMaxLimit);
-    }
-  }
+    when(spyDbStore.getCheckpoint(true)).thenAnswer(b -> {
+      DBCheckpoint checkpoint = spy(dbStore.getCheckpoint(true));
+      // Don't delete the checkpoint, because we need to compare it
+      // with the snapshot data.
+      doNothing().when(checkpoint).cleanupCheckpoint();
+      realCheckpoint.set(checkpoint);
+      return checkpoint;
+    });
+    // Init the mock with the spyDbstore
+    doCallRealMethod().when(omDbCheckpointServletMock).initialize(any(), any(),
+        eq(false), any(), any(), eq(false));
+    omDbCheckpointServletMock.initialize(spyDbStore, om.getMetrics().getDBCheckpointMetrics(),
+        false,
+        om.getOmAdminUsernames(), om.getOmAdminGroups(), false);
 
-  @ParameterizedTest
-  @ValueSource(booleans =  {true, false})
-  public void testContentsOfTarballWithSnapshot(boolean includeSnapshot) throws Exception {
-    String volumeName = "vol" + RandomStringUtils.secure().nextNumeric(5);
-    String bucketName = "buck" + RandomStringUtils.secure().nextNumeric(5);
-    AtomicReference<DBCheckpoint> realCheckpoint = new AtomicReference<>();
-    setupClusterAndMocks(volumeName, bucketName, realCheckpoint, includeSnapshot);
-    DBStore dbStore = om.getMetadataManager().getStore();
     // Get the tarball.
+    when(responseMock.getOutputStream()).thenReturn(servletOutputStream);
     omDbCheckpointServletMock.doGet(requestMock, responseMock);
     String testDirName = folder.resolve("testDir").toString();
     String newDbDirName = testDirName + OM_KEY_PREFIX + OM_DB_NAME;
@@ -283,8 +255,6 @@ public void testContentsOfTarballWithSnapshot(boolean includeSnapshot) throws Ex
         numSnapshots++;
       }
     }
-    populateInodesOfFilesInDirectory(dbStore, Paths.get(dbStore.getRocksDBCheckpointDiffer().getSSTBackupDir()),
-        inodesFromOmDataDir, hardLinkMapFromOmData);
     Path hardlinkFilePath =
         newDbDir.toPath().resolve(OmSnapshotManager.OM_HARDLINK_FILE);
     Map<String, List<String>> hardlinkMapFromTarball = readFileToMap(hardlinkFilePath.toString());
@@ -330,143 +300,6 @@ public void testContentsOfTarballWithSnapshot(boolean includeSnapshot) throws Ex
     assertFalse(hardlinkFilePath.toFile().exists());
   }
 
-  /**
-   * Verifies that a manually added entry to the snapshot's delete table
-   * is persisted and can be retrieved from snapshot db loaded from OM DB checkpoint.
-   */
-  @Test
-  public void testSnapshotDBConsistency() throws Exception {
-    String volumeName = "vol" + RandomStringUtils.secure().nextNumeric(5);
-    String bucketName = "buck" + RandomStringUtils.secure().nextNumeric(5);
-    AtomicReference<DBCheckpoint> realCheckpoint = new AtomicReference<>();
-    setupClusterAndMocks(volumeName, bucketName, realCheckpoint, true);
-    List<OzoneSnapshot> snapshots = new ArrayList<>();
-    client.getObjectStore().listSnapshot(volumeName, bucketName, "", null)
-        .forEachRemaining(snapshots::add);
-    OzoneSnapshot snapshotToModify = snapshots.get(0);
-    String dummyKey = "dummyKey";
-    writeDummyKeyToDeleteTableOfSnapshotDB(snapshotToModify, bucketName, volumeName, dummyKey);
-    // Get the tarball.
-    omDbCheckpointServletMock.doGet(requestMock, responseMock);
-    String testDirName = folder.resolve("testDir").toString();
-    String newDbDirName = testDirName + OM_KEY_PREFIX + OM_DB_NAME;
-    File newDbDir = new File(newDbDirName);
-    assertTrue(newDbDir.mkdirs());
-    FileUtil.unTar(tempFile, newDbDir);
-    Set<Path> allPathsInTarball = getAllPathsInTarball(newDbDir);
-    // create hardlinks now
-    OmSnapshotUtils.createHardLinks(newDbDir.toPath(), false);
-    for (Path old : allPathsInTarball) {
-      assertTrue(old.toFile().delete());
-    }
-    Path snapshotDbDir = Paths.get(newDbDir.toPath().toString(), OM_SNAPSHOT_CHECKPOINT_DIR,
-        OM_DB_NAME + "-" + snapshotToModify.getSnapshotId());
-    deleteWalFiles(snapshotDbDir);
-    assertTrue(Files.exists(snapshotDbDir));
-    String value = getValueFromSnapshotDeleteTable(dummyKey, snapshotDbDir.toString());
-    assertNotNull(value);
-  }
-
-  private static void deleteWalFiles(Path snapshotDbDir) throws IOException {
-    try (Stream<Path> filesInTarball = Files.list(snapshotDbDir)) {
-      List<Path> files = filesInTarball.filter(p -> p.toString().contains(".log"))
-          .collect(Collectors.toList());
-      for (Path p : files) {
-        Files.delete(p);
-      }
-    }
-  }
-
-  private static Set<Path> getAllPathsInTarball(File newDbDir) throws IOException {
-    Set<Path> allPathsInTarball = new HashSet<>();
-    try (Stream<Path> filesInTarball = Files.list(newDbDir.toPath())) {
-      List<Path> files = filesInTarball.collect(Collectors.toList());
-      for (Path p : files) {
-        File file = p.toFile();
-        if (file.getName().equals(OmSnapshotManager.OM_HARDLINK_FILE)) {
-          continue;
-        }
-        allPathsInTarball.add(p);
-      }
-    }
-    return allPathsInTarball;
-  }
-
-  private void writeDummyKeyToDeleteTableOfSnapshotDB(OzoneSnapshot snapshotToModify, String bucketName,
-      String volumeName, String keyName)
-      throws IOException {
-    try (UncheckedAutoCloseableSupplier<OmSnapshot> supplier = om.getOmSnapshotManager()
-        .getSnapshot(snapshotToModify.getSnapshotId())) {
-      OmSnapshot omSnapshot = supplier.get();
-      OmKeyInfo dummyOmKeyInfo =
-          new OmKeyInfo.Builder().setBucketName(bucketName).setVolumeName(volumeName).setKeyName(keyName)
-              .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
-      RepeatedOmKeyInfo dummyRepeatedKeyInfo =
-          new RepeatedOmKeyInfo.Builder().setOmKeyInfos(Collections.singletonList(dummyOmKeyInfo)).build();
-      omSnapshot.getMetadataManager().getDeletedTable().put(dummyOmKeyInfo.getKeyName(), dummyRepeatedKeyInfo);
-    }
-  }
-
-  private void setupClusterAndMocks(String volumeName, String bucketName,
-      AtomicReference<DBCheckpoint> realCheckpoint, boolean includeSnapshots) throws Exception {
-    setupCluster();
-    setupMocks();
-    om.getKeyManager().getSnapshotSstFilteringService().pause();
-    when(requestMock.getParameter(OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA))
-        .thenReturn(String.valueOf(includeSnapshots));
-    // Create a "spy" dbstore keep track of the checkpoint.
-    writeData(volumeName, bucketName, true);
-    DBStore dbStore = om.getMetadataManager().getStore();
-    DBStore spyDbStore = spy(dbStore);
-    when(spyDbStore.getCheckpoint(true)).thenAnswer(b -> {
-      DBCheckpoint checkpoint = spy(dbStore.getCheckpoint(true));
-      // Don't delete the checkpoint, because we need to compare it
-      // with the snapshot data.
-      doNothing().when(checkpoint).cleanupCheckpoint();
-      realCheckpoint.set(checkpoint);
-      return checkpoint;
-    });
-    // Init the mock with the spyDbstore
-    doCallRealMethod().when(omDbCheckpointServletMock).initialize(any(), any(),
-        eq(false), any(), any(), eq(false));
-    omDbCheckpointServletMock.initialize(spyDbStore, om.getMetrics().getDBCheckpointMetrics(),
-        false,
-        om.getOmAdminUsernames(), om.getOmAdminGroups(), false);
-    when(responseMock.getOutputStream()).thenReturn(servletOutputStream);
-  }
-
-  String getValueFromSnapshotDeleteTable(String key, String snapshotDB) {
-    String result = null;
-    List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
-    int count = 1;
-    int deletedTableCFIndex = 0;
-    cfDescriptors.add(new ColumnFamilyDescriptor("default".getBytes(StandardCharsets.UTF_8)));
-    for (String cfName : OMDBDefinition.getAllColumnFamilies()) {
-      if (cfName.equals(OMDBDefinition.DELETED_TABLE)) {
-        deletedTableCFIndex = count;
-      }
-      cfDescriptors.add(new ColumnFamilyDescriptor(cfName.getBytes(StandardCharsets.UTF_8)));
-      count++;
-    }
-    // For holding handles
-    List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
-    try (DBOptions options = new DBOptions().setCreateIfMissing(false).setCreateMissingColumnFamilies(true);
-        RocksDB db = RocksDB.openReadOnly(options, snapshotDB, cfDescriptors, cfHandles)) {
-
-      ColumnFamilyHandle deletedTableCF = cfHandles.get(deletedTableCFIndex); // 0 is default
-      byte[] value = db.get(deletedTableCF, key.getBytes(StandardCharsets.UTF_8));
-      if (value != null) {
-        result = new String(value, StandardCharsets.UTF_8);
-      }
-    } catch (Exception e) {
-      fail("Exception while reading from snapshot DB " + e.getMessage());
-    } finally {
-      for (ColumnFamilyHandle handle : cfHandles) {
-        handle.close();
-      }
-    }
-    return result;
-  }
 
   public static Map<String, List<String>> readFileToMap(String filePath) throws IOException {
     Map<String, List<String>> dataMap = new HashMap<>();
@@ -474,7 +307,7 @@ public static Map<String, List<String>> readFileToMap(String filePath) throws IO
       String line;
       while ((line = reader.readLine()) != null) {
         String trimmedLine = line.trim();
-        if (!trimmedLine.contains("\t")) {
+        if (trimmedLine.isEmpty() || !trimmedLine.contains("\t")) {
           continue;
         }
         int tabIndex = trimmedLine.indexOf("\t");
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
index 4e4191aaa35..dee84d4b401 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
@@ -26,7 +26,6 @@
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY;
-import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK;
 import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.includeSnapshotData;
 import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.logEstimatedTarballSize;
 import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX;
@@ -49,7 +48,6 @@
 import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Stream;
 import javax.servlet.ServletException;
@@ -255,9 +253,6 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina
               hardLinkFileMap, getCompactionLogDir());
           writeDBToArchive(sstFilesToExclude, tmpSstBackupDir, maxTotalSstSize, archiveOutputStream, tmpdir,
               hardLinkFileMap, getSstBackupDir());
-          // This is done to ensure all data to be copied correctly is flushed in the snapshot DB
-          transferSnapshotData(sstFilesToExclude, tmpdir, snapshotPaths, maxTotalSstSize,
-              archiveOutputStream, hardLinkFileMap);
         }
         writeHardlinkFile(getConf(), hardLinkFileMap, archiveOutputStream);
         includeRatisSnapshotCompleteFlag(archiveOutputStream);
@@ -271,36 +266,6 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina
     }
   }
 
-  /**
-   * Transfers the snapshot data from the specified snapshot directories into the archive output stream,
-   * handling deduplication and managing resource locking.
-   *
-   * @param sstFilesToExclude   Set of SST file identifiers to exclude from the archive.
-   * @param tmpdir              Temporary directory for intermediate processing.
-   * @param snapshotPaths       Set of paths to snapshot directories to be processed.
-   * @param maxTotalSstSize     AtomicLong to track the cumulative size of SST files included.
-   * @param archiveOutputStream Archive output stream to write the snapshot data.
-   * @param hardLinkFileMap     Map of hardlink file paths to their unique identifiers for deduplication.
-   * @throws IOException if an I/O error occurs during processing.
-   */
-  private void transferSnapshotData(Set<String> sstFilesToExclude, Path tmpdir, Set<Path> snapshotPaths,
-      AtomicLong maxTotalSstSize, ArchiveOutputStream<TarArchiveEntry> archiveOutputStream,
-      Map<String, String> hardLinkFileMap) throws IOException {
-    OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
-    OMMetadataManager omMetadataManager = om.getMetadataManager();
-    for (Path snapshotDir : snapshotPaths) {
-      String snapshotId = OmSnapshotManager.extractSnapshotIDFromCheckpointDirName(snapshotDir.toString());
-      omMetadataManager.getLock().acquireReadLock(SNAPSHOT_DB_LOCK, snapshotId);
-      try {
-        // invalidate closes the snapshot DB
-        om.getOmSnapshotManager().invalidateCacheEntry(UUID.fromString(snapshotId));
-        writeDBToArchive(sstFilesToExclude, snapshotDir, maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap);
-      } finally {
-        omMetadataManager.getLock().releaseReadLock(SNAPSHOT_DB_LOCK, snapshotId);
-      }
-    }
-  }
-
   private boolean writeDBToArchive(Set<String> sstFilesToExclude, Path dir,
       AtomicLong maxTotalSstSize, ArchiveOutputStream<TarArchiveEntry> archiveOutputStream,
       Path tmpdir, Map<String, String> hardLinkFileMap) throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 7b9beb80cf6..71c5f29511e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -26,7 +26,6 @@
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIFF_DB_NAME;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL;
@@ -822,15 +821,6 @@ public static String getSnapshotPath(OzoneConfiguration conf,
         OM_DB_NAME + checkpointDirName;
   }
 
-  public static String extractSnapshotIDFromCheckpointDirName(String snapshotPath) {
-    // Find "om.db-" in the path and return whatever comes after
-    int index = snapshotPath.lastIndexOf(OM_DB_NAME);
-    if (index == -1 || index + OM_DB_NAME.length() + OM_SNAPSHOT_SEPARATOR.length() >= snapshotPath.length()) {
-      throw new IllegalArgumentException("Invalid snapshot path " + snapshotPath);
-    }
-    return snapshotPath.substring(index + OM_DB_NAME.length() + OM_SNAPSHOT_SEPARATOR.length());
-  }
-
   public static boolean isSnapshotKey(String[] keyParts) {
     return (keyParts.length > 1) &&
         (keyParts[0].compareTo(OM_SNAPSHOT_INDICATOR) == 0);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index 9894e8f5d6b..6d053e1e5e0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -17,8 +17,6 @@
 
 package org.apache.hadoop.ozone.om.codec;
 
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
@@ -360,13 +358,5 @@ public String getName() {
   public String getLocationConfigKey() {
     return OMConfigKeys.OZONE_OM_DB_DIRS;
   }
-
-  public static List<String> getAllColumnFamilies() {
-    List<String> columnFamilies = new ArrayList<>();
-    COLUMN_FAMILIES.values().forEach(cf -> {
-      columnFamilies.add(cf.getName());
-    });
-    return columnFamilies;
-  }
 }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
index 6867f819b9c..81c9dc46554 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
@@ -151,7 +151,6 @@ public void invalidate(UUID key) {
         LOG.debug("SnapshotId: '{}' does not exist in snapshot cache.", k);
       } else {
         try {
-          v.get().getMetadataManager().getStore().flushDB();
           v.get().close();
         } catch (IOException e) {
           throw new IllegalStateException("Failed to close snapshotId: " + 
key, e);

