swamirishi commented on code in PR #7563:
URL: https://github.com/apache/ozone/pull/7563#discussion_r1881208706
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java:
##########
@@ -73,44 +71,38 @@ public static String constructBucketKey(String keyName) {
}
public static void filterRelevantSstFiles(Set<String> inputFiles,
- Map<String, String> tableToPrefixMap) throws IOException {
- for (Iterator<String> fileIterator =
- inputFiles.iterator(); fileIterator.hasNext();) {
- String filepath = fileIterator.next();
- if (!RocksDiffUtils.doesSstFileContainKeyRange(filepath,
- tableToPrefixMap)) {
- fileIterator.remove();
- }
- }
+                                            Map<String, String> tableToPrefixMap,
+ ManagedRocksDB... dbs) {
+    filterRelevantSstFiles(inputFiles, tableToPrefixMap, Collections.emptyMap(), dbs);
}
- public static boolean doesSstFileContainKeyRange(String filepath,
- Map<String, String> tableToPrefixMap) throws IOException {
-
- try (
- ManagedOptions options = new ManagedOptions();
-        ManagedSstFileReader sstFileReader = new ManagedSstFileReader(options)) {
- sstFileReader.open(filepath);
- TableProperties properties = sstFileReader.getTableProperties();
- String tableName = new String(properties.getColumnFamilyName(), UTF_8);
- if (tableToPrefixMap.containsKey(tableName)) {
- String prefix = tableToPrefixMap.get(tableName);
-
- try (
- ManagedReadOptions readOptions = new ManagedReadOptions();
-          ManagedSstFileReaderIterator iterator = ManagedSstFileReaderIterator.managed(
- sstFileReader.newIterator(readOptions))) {
- iterator.get().seek(prefix.getBytes(UTF_8));
- String seekResultKey = new String(iterator.get().key(), UTF_8);
- return seekResultKey.startsWith(prefix);
+ /**
+ * Filter sst files based on prefixes.
+ */
+ public static void filterRelevantSstFiles(Set<String> inputFiles,
+                                            Map<String, String> tableToPrefixMap,
+                                            Map<String, CompactionNode> preExistingCompactionNodes,
+ ManagedRocksDB... dbs) {
+ Map<String, LiveFileMetaData> liveFileMetaDataMap = new HashMap<>();
+ int dbIdx = 0;
+ for (Iterator<String> fileIterator =
+ inputFiles.iterator(); fileIterator.hasNext();) {
+ String filename = FilenameUtils.getBaseName(fileIterator.next());
+      while (!preExistingCompactionNodes.containsKey(filename) && !liveFileMetaDataMap.containsKey(filename)
+ && dbIdx < dbs.length) {
+ while (dbIdx < dbs.length) {
+ liveFileMetaDataMap.putAll(dbs[dbIdx].getLiveMetadataForSSTFiles());
+ dbIdx += 1;
}
}
- return false;
- } catch (RocksDBException e) {
- LOG.error("Failed to read SST File ", e);
- throw new IOException(e);
+ CompactionNode compactionNode = preExistingCompactionNodes.get(filename);
+ if (compactionNode == null) {
+      compactionNode = new CompactionNode(new CompactionFileInfo.Builder(filename)
+ .setValues(liveFileMetaDataMap.get(filename)).build());
+ }
+    if (RocksDBCheckpointDiffer.shouldSkipNode(compactionNode, tableToPrefixMap)) {
Review Comment:
will do it
##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java:
##########
@@ -1155,21 +1157,16 @@ Set<String> getDeltaFiles(OmSnapshot fromSnapshot,
LOG.warn("RocksDBCheckpointDiffer is not available, falling back to" +
" slow path");
}
-
- Set<String> fromSnapshotFiles =
- RdbUtil.getSSTFilesForComparison(
- ((RDBStore)fromSnapshot.getMetadataManager().getStore())
- .getDb().getManagedRocksDb(),
- tablesToLookUp);
- Set<String> toSnapshotFiles =
- RdbUtil.getSSTFilesForComparison(
- ((RDBStore)toSnapshot.getMetadataManager().getStore()).getDb()
- .getManagedRocksDb(),
- tablesToLookUp);
+    ManagedRocksDB fromDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore())
+ .getDb().getManagedRocksDb();
+    ManagedRocksDB toDB = ((RDBStore)fromSnapshot.getMetadataManager().getStore())
Review Comment:
done
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]