jojochuang commented on code in PR #8477:
URL: https://github.com/apache/ozone/pull/8477#discussion_r2144126864


##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBCheckpointUtils.java:
##########
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.codec;
+
+import static org.apache.commons.io.filefilter.TrueFileFilter.TRUE;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA;
+import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Set;
+import javax.servlet.http.HttpServletRequest;
+import org.apache.commons.io.IOCase;
+import org.apache.commons.io.file.Counters;
+import org.apache.commons.io.file.CountingPathVisitor;
+import org.apache.commons.io.file.PathFilter;
+import org.apache.commons.io.filefilter.SuffixFileFilter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class for handling operations related to OM DB Checkpoints.
+ * This includes extracting metadata directory paths, handling snapshot data,
+ * and logging estimated sizes of checkpoint tarball streams.
+ */
+public final class OMDBCheckpointUtils {

Review Comment:
   nice to have: test class for OMDBCheckpointUtils
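
   A minimal sketch of what such a test could look like (assumptions: `includeSnapshotData` parses the `OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA` request parameter as a boolean, and the test lives in the same package; adjust to the actual semantics):
   ```java
   import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA;
   import static org.junit.jupiter.api.Assertions.assertFalse;
   import static org.junit.jupiter.api.Assertions.assertTrue;
   import static org.mockito.Mockito.mock;
   import static org.mockito.Mockito.when;

   import javax.servlet.http.HttpServletRequest;
   import org.junit.jupiter.api.Test;

   class TestOMDBCheckpointUtils {

     @Test
     void includeSnapshotDataReadsRequestParameter() {
       HttpServletRequest request = mock(HttpServletRequest.class);

       // assumption: "true" in the request parameter enables snapshot data
       when(request.getParameter(OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA)).thenReturn("true");
       assertTrue(OMDBCheckpointUtils.includeSnapshotData(request));

       // assumption: a missing parameter means snapshot data is not included
       when(request.getParameter(OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA)).thenReturn(null);
       assertFalse(OMDBCheckpointUtils.includeSnapshotData(request));
     }
   }
   ```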



##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/Archiver.java:
##########
@@ -115,6 +119,33 @@ public static long includeFile(File file, String entryName,
     return bytes;
   }
 
+  /**
+   * Creates a hardlink for the given file in a temporary directory, adds it
+   * as an entry in the archive, and includes its contents in the archive output.
+   * The temporary hardlink is deleted after processing.
+   */
+  public static void linkAndIncludeFile(File file, String entryName,

Review Comment:
   add a test for this method.
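
   A possible round-trip test sketch (the trailing parameters of `linkAndIncludeFile` are cut off in the diff, so the archive-stream and temp-dir arguments below are assumptions; adjust to the real signature):
   ```java
   import static org.apache.hadoop.hdds.utils.Archiver.linkAndIncludeFile;
   import static org.apache.hadoop.hdds.utils.Archiver.tar;
   import static org.junit.jupiter.api.Assertions.assertEquals;

   import java.io.ByteArrayInputStream;
   import java.io.ByteArrayOutputStream;
   import java.nio.charset.StandardCharsets;
   import java.nio.file.Files;
   import java.nio.file.Path;
   import java.util.stream.Stream;
   import org.apache.commons.compress.archivers.ArchiveOutputStream;
   import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
   import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
   import org.junit.jupiter.api.Test;
   import org.junit.jupiter.api.io.TempDir;

   class TestArchiver {

     @TempDir
     Path tempDir;

     @Test
     void linkAndIncludeFileWritesEntryAndCleansUpLink() throws Exception {
       Path src = tempDir.resolve("test.sst");
       Files.write(src, "hello".getBytes(StandardCharsets.UTF_8));
       Path tmpLinkDir = Files.createDirectory(tempDir.resolve("links"));

       ByteArrayOutputStream bytes = new ByteArrayOutputStream();
       try (ArchiveOutputStream<TarArchiveEntry> archive = tar(bytes)) {
         // hypothetical trailing parameters: archive stream and temp hardlink dir
         linkAndIncludeFile(src.toFile(), "db/test.sst", archive, tmpLinkDir);
       }

       // the archive should contain exactly the requested entry with the file's size
       try (TarArchiveInputStream in =
           new TarArchiveInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
         TarArchiveEntry entry = in.getNextEntry();
         assertEquals("db/test.sst", entry.getName());
         assertEquals(5, entry.getSize());
       }

       // the temporary hardlink should have been deleted after processing
       try (Stream<Path> remaining = Files.list(tmpLinkDir)) {
         assertEquals(0, remaining.count());
       }
     }
   }
   ```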



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java:
##########
@@ -0,0 +1,359 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import static org.apache.hadoop.hdds.utils.Archiver.includeFile;
+import static org.apache.hadoop.hdds.utils.Archiver.linkAndIncludeFile;
+import static org.apache.hadoop.hdds.utils.Archiver.tar;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY;
+import static org.apache.hadoop.ozone.om.codec.OMDBCheckpointUtils.includeSnapshotData;
+import static org.apache.hadoop.ozone.om.codec.OMDBCheckpointUtils.logEstimatedTarballSize;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_SUFFIX;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Stream;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.recon.ReconConfig;
+import org.apache.hadoop.hdds.utils.DBCheckpointServlet;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Specialized OMDBCheckpointServlet implementation that transfers Ozone Manager
+ * database checkpoints using inode-based deduplication.
+ * <p>
+ * This servlet constructs checkpoint archives by examining file inodes,
+ * ensuring that files with the same inode (i.e., hardlinks or duplicates)
+ * are only transferred once. It maintains mappings from inode IDs to file
+ * paths, manages hardlink information, and enforces snapshot and SST file
+ * size constraints as needed.
+ * <p>
+ * This approach optimizes checkpoint streaming by reducing redundant data
+ * transfer, especially in environments where RocksDB and snapshotting result
+ * in multiple hardlinks to the same physical data.
+ */
+public class OMDBCheckpointServletInodeBasedXfer extends DBCheckpointServlet {
+
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(OMDBCheckpointServletInodeBasedXfer.class);
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  public void init() throws ServletException {
+    OzoneManager om = (OzoneManager) getServletContext()
+        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+
+    if (om == null) {
+      LOG.error("Unable to initialize OMDBCheckpointServlet. OM is null");
+      return;
+    }
+
+    OzoneConfiguration conf = getConf();
+    // Only Ozone Admins and Recon are allowed
+    Collection<String> allowedUsers =
+        new LinkedHashSet<>(om.getOmAdminUsernames());
+    Collection<String> allowedGroups = om.getOmAdminGroups();
+    ReconConfig reconConfig = conf.getObject(ReconConfig.class);
+    String reconPrincipal = reconConfig.getKerberosPrincipal();
+    if (!reconPrincipal.isEmpty()) {
+      UserGroupInformation ugi =
+          UserGroupInformation.createRemoteUser(reconPrincipal);
+      allowedUsers.add(ugi.getShortUserName());
+    }
+
+    initialize(om.getMetadataManager().getStore(),
+        om.getMetrics().getDBCheckpointMetrics(),
+        om.getAclsEnabled(),
+        allowedUsers,
+        allowedGroups,
+        om.isSpnegoEnabled());
+  }
+
+  @Override
+  protected void processMetadataSnapshotRequest(HttpServletRequest request, HttpServletResponse response,
+      boolean isFormData, DBCheckpoint checkpoint, boolean flush) {
+    List<String> excludedSstList = new ArrayList<>();
+    String[] sstParam = isFormData ?
+        parseFormDataParameters(request) : request.getParameterValues(
+        OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST);
+    Set<String> receivedSstFiles = fetchSstFilesReceived(sstParam);
+    Path tmpdir = null;
+    try (BootstrapStateHandler.Lock lock = getBootstrapStateLock().lock()) {
+      tmpdir = Files.createTempDirectory(getBootstrapTempData().toPath(),

Review Comment:
   Abort if tmpdir is null; the subsequent code expects it to be a valid reference.
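
   For example (a sketch; note that `Files.createTempDirectory` itself throws IOException rather than returning null, so the realistic gap is `getBootstrapTempData()` being unavailable; it is assumed here to return a `File`):
   ```java
   File bootstrapTempData = getBootstrapTempData();
   if (bootstrapTempData == null) {
     LOG.error("Bootstrap temp data dir is not available; aborting checkpoint transfer.");
     response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
     return;
   }
   tmpdir = Files.createTempDirectory(bootstrapTempData.toPath(), "bootstrap-data-");
   ```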



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java:
##########
@@ -0,0 +1,359 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import static org.apache.hadoop.hdds.utils.Archiver.includeFile;
+import static org.apache.hadoop.hdds.utils.Archiver.linkAndIncludeFile;
+import static org.apache.hadoop.hdds.utils.Archiver.tar;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY;
+import static org.apache.hadoop.ozone.om.codec.OMDBCheckpointUtils.includeSnapshotData;
+import static org.apache.hadoop.ozone.om.codec.OMDBCheckpointUtils.logEstimatedTarballSize;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_SUFFIX;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Stream;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.recon.ReconConfig;
+import org.apache.hadoop.hdds.utils.DBCheckpointServlet;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Specialized OMDBCheckpointServlet implementation that transfers Ozone Manager
+ * database checkpoints using inode-based deduplication.
+ * <p>
+ * This servlet constructs checkpoint archives by examining file inodes,
+ * ensuring that files with the same inode (i.e., hardlinks or duplicates)
+ * are only transferred once. It maintains mappings from inode IDs to file
+ * paths, manages hardlink information, and enforces snapshot and SST file
+ * size constraints as needed.
+ * <p>
+ * This approach optimizes checkpoint streaming by reducing redundant data
+ * transfer, especially in environments where RocksDB and snapshotting result
+ * in multiple hardlinks to the same physical data.
+ */
+public class OMDBCheckpointServletInodeBasedXfer extends DBCheckpointServlet {
+
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(OMDBCheckpointServletInodeBasedXfer.class);
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  public void init() throws ServletException {
+    OzoneManager om = (OzoneManager) getServletContext()
+        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+
+    if (om == null) {
+      LOG.error("Unable to initialize OMDBCheckpointServlet. OM is null");
+      return;
+    }
+
+    OzoneConfiguration conf = getConf();
+    // Only Ozone Admins and Recon are allowed
+    Collection<String> allowedUsers =
+        new LinkedHashSet<>(om.getOmAdminUsernames());
+    Collection<String> allowedGroups = om.getOmAdminGroups();
+    ReconConfig reconConfig = conf.getObject(ReconConfig.class);
+    String reconPrincipal = reconConfig.getKerberosPrincipal();
+    if (!reconPrincipal.isEmpty()) {
+      UserGroupInformation ugi =
+          UserGroupInformation.createRemoteUser(reconPrincipal);
+      allowedUsers.add(ugi.getShortUserName());
+    }
+
+    initialize(om.getMetadataManager().getStore(),
+        om.getMetrics().getDBCheckpointMetrics(),
+        om.getAclsEnabled(),
+        allowedUsers,
+        allowedGroups,
+        om.isSpnegoEnabled());
+  }
+
+  @Override
+  protected void processMetadataSnapshotRequest(HttpServletRequest request, HttpServletResponse response,
+      boolean isFormData, DBCheckpoint checkpoint, boolean flush) {
+    List<String> excludedSstList = new ArrayList<>();
+    String[] sstParam = isFormData ?
+        parseFormDataParameters(request) : request.getParameterValues(
+        OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST);
+    Set<String> receivedSstFiles = fetchSstFilesReceived(sstParam);
+    Path tmpdir = null;
+    try (BootstrapStateHandler.Lock lock = getBootstrapStateLock().lock()) {
+      tmpdir = Files.createTempDirectory(getBootstrapTempData().toPath(),
+          "bootstrap-data-");
+      String tarName = "om.data-" + System.currentTimeMillis() + ".tar";
+      response.setContentType("application/x-tar");
+      response.setHeader("Content-Disposition", "attachment; filename=\"" + tarName + "\"");
+      Instant start = Instant.now();
+      writeDbDataToStream(request, response.getOutputStream(), receivedSstFiles, tmpdir);
+      Instant end = Instant.now();
+      long duration = Duration.between(start, end).toMillis();
+      LOG.info("Time taken to write the checkpoint to response output " +
+          "stream: {} milliseconds", duration);
+      logSstFileList(excludedSstList,
+          "Excluded {} SST files from the latest checkpoint{}: {}", 5);
+    } catch (Exception e) {
+      LOG.error(
+          "Unable to process metadata snapshot request. ", e);
+      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+    } finally {
+      try {
+        if (tmpdir != null) {
+          FileUtils.deleteDirectory(tmpdir.toFile());
+        }
+      } catch (IOException e) {
+        LOG.error("unable to delete: " + tmpdir);
+      }
+    }
+    super.processMetadataSnapshotRequest(request, response, isFormData, checkpoint, flush);
+  }
+
+  Path getSstBackupDir() {
+    RocksDBCheckpointDiffer differ = getDbStore().getRocksDBCheckpointDiffer();
+    return new File(differ.getSSTBackupDir()).toPath();
+  }
+
+  Path getCompactionLogDir() {
+    RocksDBCheckpointDiffer differ = getDbStore().getRocksDBCheckpointDiffer();
+    return new File(differ.getCompactionLogDir()).toPath();
+  }
+
+  public void writeDbDataToStream(HttpServletRequest request, OutputStream destination,
+      Set<String> sstFilesToExclude, Path tmpdir) throws IOException {
+    DBCheckpoint checkpoint = null;
+    OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+    OMMetadataManager omMetadataManager = om.getMetadataManager();
+    boolean includeSnapshotData = includeSnapshotData(request);
+    AtomicLong maxTotalSstSize = new AtomicLong(getConf().getLong(OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY,
+        OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT));
+
+    Set<Path> snapshotPaths = Collections.emptySet();
+
+    if (!includeSnapshotData) {
+      maxTotalSstSize.set(Long.MAX_VALUE);
+    } else {
+      snapshotPaths = getSnapshotDirs(omMetadataManager);
+    }
+
+    if (sstFilesToExclude.isEmpty()) {
+      logEstimatedTarballSize(getDbStore().getDbLocation().toPath(), snapshotPaths);
+    }
+
+    boolean shouldContinue = true;
+
+    Map<String, String> hardLinkFileMap = new HashMap<>();
+    try (ArchiveOutputStream<TarArchiveEntry> archiveOutputStream = tar(destination)) {
+      if (includeSnapshotData) {
+        // Process each snapshot db path and write it to archive
+        for (Path snapshotDbPath : snapshotPaths) {
+          if (!shouldContinue) {
+            break;
+          }
+          shouldContinue = writeDBToArchive(sstFilesToExclude, snapshotDbPath,
+              maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap);
+        }
+
+
+        if (shouldContinue) {
+          shouldContinue = writeDBToArchive(sstFilesToExclude, getSstBackupDir(),
+              maxTotalSstSize, archiveOutputStream,  tmpdir, hardLinkFileMap);
+        }
+
+        if (shouldContinue) {
+          shouldContinue = writeDBToArchive(sstFilesToExclude, getCompactionLogDir(),
+              maxTotalSstSize, archiveOutputStream,  tmpdir, hardLinkFileMap);
+        }
+      }
+
+      if (shouldContinue) {
+        // we finished transferring files from snapshot DB's by now and
+        // this is the last step where we transfer the active om.db contents
+        checkpoint = getCheckpoint(tmpdir, true);
+        // unlimited files as we want the Active DB contents to be transferred in a single batch
+        maxTotalSstSize.set(Long.MAX_VALUE);
+        Path checkpointDir = checkpoint.getCheckpointLocation();
+        writeDBToArchive(sstFilesToExclude, checkpointDir,
+            maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap);
+        Path tmpCompactionLogDir = tmpdir.resolve(getCompactionLogDir().getFileName());
+        Path tmpSstBackupDir = tmpdir.resolve(getSstBackupDir().getFileName());
+        writeDBToArchive(sstFilesToExclude, tmpCompactionLogDir, maxTotalSstSize, archiveOutputStream, tmpdir,
+            hardLinkFileMap, getCompactionLogDir());
+        writeDBToArchive(sstFilesToExclude, tmpSstBackupDir, maxTotalSstSize, archiveOutputStream, tmpdir,
+            hardLinkFileMap, getSstBackupDir());
+        writeHardlinkFile(getConf(), hardLinkFileMap, archiveOutputStream);
+      }
+
+    } catch (IOException ioe) {
+      LOG.error("got exception writing to archive " + ioe);
+      throw ioe;
+    } finally {
+      cleanupCheckpoint(checkpoint);
+    }
+  }
+
+  private boolean writeDBToArchive(Set<String> sstFilesToExclude, Path dir,
+      AtomicLong maxTotalSstSize, ArchiveOutputStream<TarArchiveEntry> archiveOutputStream,
+      Path tmpdir, Map<String, String> hardLinkFileMap) throws IOException {
+    return writeDBToArchive(sstFilesToExclude, dir, maxTotalSstSize,
+        archiveOutputStream, tmpdir, hardLinkFileMap, null);
+  }
+
+  private static void cleanupCheckpoint(DBCheckpoint checkpoint) {
+    if (checkpoint != null) {
+      try {
+        checkpoint.cleanupCheckpoint();
+      } catch (IOException e) {
+        LOG.error("Error trying to clean checkpoint at {} .",
+            checkpoint.getCheckpointLocation().toString());
+      }
+    }
+  }
+
+  /**
+   * Writes a hardlink mapping file to the archive, which maps file IDs to their
+   * relative paths. This method generates the mapping file based on the provided
+   * hardlink metadata and adds it to the archive output stream.
+   *
+   * @param conf                Ozone configuration for the OM instance.
+   * @param hardlinkFileMap     A map where the key is the absolute file path
+   *                            and the value is its corresponding file ID.
+   * @param archiveOutputStream The archive output stream to which the hardlink
+   *                            file should be written.
+   * @throws IOException If an I/O error occurs while creating or writing the
+   *                     hardlink file.
+   */
+  private static void writeHardlinkFile(OzoneConfiguration conf, Map<String, String> hardlinkFileMap,
+      ArchiveOutputStream<TarArchiveEntry> archiveOutputStream) throws IOException {
+    Path data = Files.createTempFile(DATA_PREFIX, DATA_SUFFIX);
+    Path metaDirPath = OMStorage.getOmDbDir(conf).toPath();
+    StringBuilder sb = new StringBuilder();
+
+    for (Map.Entry<String, String> entry : hardlinkFileMap.entrySet()) {
+      Path p = Paths.get(entry.getKey());
+      String fileId = entry.getValue();
+      Path relativePath = metaDirPath.relativize(p);
+      sb.append(fileId).append('\t').append(relativePath).append('\n');
+    }
+    Files.write(data, sb.toString().getBytes(StandardCharsets.UTF_8), StandardOpenOption.TRUNCATE_EXISTING);
+    includeFile(data.toFile(), OmSnapshotManager.OM_HARDLINK_FILE, archiveOutputStream);
+  }
+
+  /**
+   * Gets the configuration from the OzoneManager context.
+   *
+   * @return OzoneConfiguration instance
+   */
+  private OzoneConfiguration getConf() {
+    return ((OzoneManager) getServletContext()
+        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE))
+        .getConfiguration();
+  }
+
+  /**
+   * Collects paths to all snapshot databases.
+   *
+   * @param omMetadataManager OMMetadataManager instance
+   * @return Set of paths to snapshot databases
+   * @throws IOException if an I/O error occurs
+   */
+  Set<Path> getSnapshotDirs(OMMetadataManager omMetadataManager) throws IOException {
+    Set<Path> snapshotPaths = new HashSet<>();
+    SnapshotChainManager snapshotChainManager = new SnapshotChainManager(omMetadataManager);
+    for (SnapshotChainInfo snapInfo : snapshotChainManager.getGlobalSnapshotChain().values()) {
+      String snapshotDir =
+          OmSnapshotManager.getSnapshotPath(getConf(), SnapshotInfo.getCheckpointDirName(snapInfo.getSnapshotId()));
+      Path path = Paths.get(snapshotDir);
+      snapshotPaths.add(path);
+    }
+    return snapshotPaths;
+  }
+
+  /**
+   * Writes database files to the archive, handling deduplication based on inode IDs.
+   *
+   * @param sstFilesToExclude Set of SST file IDs to exclude from the archive
+   * @param dbDir Directory containing database files to archive
+   * @param maxTotalSstSize Maximum total size of SST files to include
+   * @param archiveOutputStream Archive output stream
+   * @param tmpDir Temporary directory for processing
+   * @return true if processing should continue, false if size limit reached
+   * @throws IOException if an I/O error occurs
+   */
+  boolean writeDBToArchive(Set<String> sstFilesToExclude, Path dbDir, AtomicLong maxTotalSstSize,

Review Comment:
   make it a private method



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java:
##########
@@ -0,0 +1,359 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import static org.apache.hadoop.hdds.utils.Archiver.includeFile;
+import static org.apache.hadoop.hdds.utils.Archiver.linkAndIncludeFile;
+import static org.apache.hadoop.hdds.utils.Archiver.tar;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY;
+import static org.apache.hadoop.ozone.om.codec.OMDBCheckpointUtils.includeSnapshotData;
+import static org.apache.hadoop.ozone.om.codec.OMDBCheckpointUtils.logEstimatedTarballSize;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_SUFFIX;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Stream;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.recon.ReconConfig;
+import org.apache.hadoop.hdds.utils.DBCheckpointServlet;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Specialized OMDBCheckpointServlet implementation that transfers Ozone Manager
+ * database checkpoints using inode-based deduplication.
+ * <p>
+ * This servlet constructs checkpoint archives by examining file inodes,
+ * ensuring that files with the same inode (i.e., hardlinks or duplicates)
+ * are only transferred once. It maintains mappings from inode IDs to file
+ * paths, manages hardlink information, and enforces snapshot and SST file
+ * size constraints as needed.
+ * <p>
+ * This approach optimizes checkpoint streaming by reducing redundant data
+ * transfer, especially in environments where RocksDB and snapshotting result
+ * in multiple hardlinks to the same physical data.
+ */
+public class OMDBCheckpointServletInodeBasedXfer extends DBCheckpointServlet {
+
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(OMDBCheckpointServletInodeBasedXfer.class);
+  private static final long serialVersionUID = 1L;
+
+  @Override
+  public void init() throws ServletException {
+    OzoneManager om = (OzoneManager) getServletContext()
+        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+
+    if (om == null) {
+      LOG.error("Unable to initialize OMDBCheckpointServlet. OM is null");
+      return;
+    }
+
+    OzoneConfiguration conf = getConf();
+    // Only Ozone Admins and Recon are allowed
+    Collection<String> allowedUsers =
+        new LinkedHashSet<>(om.getOmAdminUsernames());
+    Collection<String> allowedGroups = om.getOmAdminGroups();
+    ReconConfig reconConfig = conf.getObject(ReconConfig.class);
+    String reconPrincipal = reconConfig.getKerberosPrincipal();
+    if (!reconPrincipal.isEmpty()) {
+      UserGroupInformation ugi =
+          UserGroupInformation.createRemoteUser(reconPrincipal);
+      allowedUsers.add(ugi.getShortUserName());
+    }
+
+    initialize(om.getMetadataManager().getStore(),
+        om.getMetrics().getDBCheckpointMetrics(),
+        om.getAclsEnabled(),
+        allowedUsers,
+        allowedGroups,
+        om.isSpnegoEnabled());
+  }
+
+  @Override
+  protected void processMetadataSnapshotRequest(HttpServletRequest request, HttpServletResponse response,
+      boolean isFormData, DBCheckpoint checkpoint, boolean flush) {
+    List<String> excludedSstList = new ArrayList<>();
+    String[] sstParam = isFormData ?
+        parseFormDataParameters(request) : request.getParameterValues(
+        OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST);
+    Set<String> receivedSstFiles = fetchSstFilesReceived(sstParam);
+    Path tmpdir = null;
+    try (BootstrapStateHandler.Lock lock = getBootstrapStateLock().lock()) {
+      tmpdir = Files.createTempDirectory(getBootstrapTempData().toPath(),
+          "bootstrap-data-");
+      String tarName = "om.data-" + System.currentTimeMillis() + ".tar";
+      response.setContentType("application/x-tar");
+      response.setHeader("Content-Disposition", "attachment; filename=\"" + tarName + "\"");
+      Instant start = Instant.now();
+      writeDbDataToStream(request, response.getOutputStream(), receivedSstFiles, tmpdir);
+      Instant end = Instant.now();
+      long duration = Duration.between(start, end).toMillis();
+      LOG.info("Time taken to write the checkpoint to response output " +
+          "stream: {} milliseconds", duration);
+      logSstFileList(excludedSstList,
+          "Excluded {} SST files from the latest checkpoint{}: {}", 5);
+    } catch (Exception e) {
+      LOG.error(
+          "Unable to process metadata snapshot request. ", e);
+      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+    } finally {
+      try {
+        if (tmpdir != null) {
+          FileUtils.deleteDirectory(tmpdir.toFile());
+        }
+      } catch (IOException e) {
+        LOG.error("unable to delete: " + tmpdir);

Review Comment:
   suggest logging the exception as well, so the cause is not lost:
   ```suggestion
           LOG.error("unable to delete: " + tmpdir, e);
   ```
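
   Alternatively, the placeholder form avoids the string concatenation and still logs the stack trace, since SLF4J treats a trailing Throwable argument as the exception to log:
   ```java
           LOG.error("unable to delete: {}", tmpdir, e);
   ```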



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@ozone.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

