nandakumar131 commented on code in PR #3885: URL: https://github.com/apache/ozone/pull/3885#discussion_r1022481146
########## hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java: ########## @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport.DiffType; +import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffReport.DiffReportEntry; + +import org.apache.ozone.rocksdb.util.ManagedSstFileReader; +import org.apache.ozone.rocksdb.util.RdbUtil; +import org.rocksdb.RocksDBException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Stream; + +/** + * Class to generate snapshot diff. 
+ */ +public class SnapshotDiffManager { + + public SnapshotDiffReport getSnapshotDiffReport(final String volume, + final String bucket, + final OmSnapshot fromSnapshot, + final OmSnapshot toSnapshot) + throws IOException, RocksDBException { + + // TODO: Once RocksDBCheckpointDiffer exposes method to get list + // of delta SST files, plug it in here. + + Set<String> fromSnapshotFiles = RdbUtil.getKeyTableSSTFiles(fromSnapshot + .getMetadataManager().getStore().getDbLocation().getPath()); + Set<String> toSnapshotFiles = RdbUtil.getKeyTableSSTFiles(toSnapshot + .getMetadataManager().getStore().getDbLocation().getPath()); + + final Set<String> deltaFiles = new HashSet<>(); + deltaFiles.addAll(fromSnapshotFiles); + deltaFiles.addAll(toSnapshotFiles); + + // TODO: Filter out the files. + + final Stream<String> keysToCheck = new ManagedSstFileReader(deltaFiles) + .getKeyStream(); + + final BucketLayout bucketLayout = getBucketLayout(volume, bucket, + fromSnapshot.getMetadataManager()); + + final Table<String, OmKeyInfo> fsKeyTable = fromSnapshot + .getMetadataManager().getKeyTable(bucketLayout); + final Table<String, OmKeyInfo> tsKeyTable = toSnapshot + .getMetadataManager().getKeyTable(bucketLayout); + + /* + * The reason for having ObjectID to KeyName mapping instead of OmKeyInfo + * is to reduce the memory footprint. + */ + final Map<Long, String> oldObjIdToKeyMap = new HashMap<>(); + // Long --> const. length + // String --> var. length "/dir1/dir2/dir3/dir4/dir5/key1" + final Map<Long, String> newObjIdToKeyMap = new HashMap<>(); + Review Comment: Thanks @GeorgeJahad for taking a look at the change. The HashMaps only store the keys that are affected between snapshots (created, deleted, renamed). For create, we only add it to newObjIdToKeyMap. For delete, we only add it to oldObjIdToKeyMap. For rename, we add it to both newObjIdToKeyMap and oldObjIdToKeyMap. We don't store the whole key metadata here in these HashMaps; we only store the object ID and key name. 
Object ID is a long value, which is 64 bits in size. We will reach 100 bytes for an entry only if all the key paths are of length ~99 characters. So we will use 1 gig of memory per HashMap only if we have 10 million renames with an average key size of ~99 characters. In this case, our SnapshotDiff report will also be around 2 gig. A single RPC call cannot handle 2 gig of data, so SnapshotDiff for a large delta will fail. We don't have paging logic built into SnapshotDiff yet. We need support for partial SnapshotDiff generation and paging logic at the client side to support large SnapshotDiff. For now, we can add a check while we generate the SnapshotDiff and throw an error if the delta between the snapshots is large. I will add the check as part of HDDS-7466. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
