hemantk-12 commented on code in PR #7563:
URL: https://github.com/apache/ozone/pull/7563#discussion_r1881223906
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDiffUtils.java:
##########
@@ -73,44 +71,38 @@ public static String constructBucketKey(String keyName) {
   }
   public static void filterRelevantSstFiles(Set<String> inputFiles,
-                                            Map<String, String> tableToPrefixMap) throws IOException {
-    for (Iterator<String> fileIterator =
-        inputFiles.iterator(); fileIterator.hasNext();) {
-      String filepath = fileIterator.next();
-      if (!RocksDiffUtils.doesSstFileContainKeyRange(filepath,
-          tableToPrefixMap)) {
-        fileIterator.remove();
-      }
-    }
+                                            Map<String, String> tableToPrefixMap,
+                                            ManagedRocksDB... dbs) {
+    filterRelevantSstFiles(inputFiles, tableToPrefixMap, Collections.emptyMap(), dbs);
   }
-  public static boolean doesSstFileContainKeyRange(String filepath,
-      Map<String, String> tableToPrefixMap) throws IOException {
-
-    try (
-        ManagedOptions options = new ManagedOptions();
-        ManagedSstFileReader sstFileReader = new ManagedSstFileReader(options)) {
-      sstFileReader.open(filepath);
-      TableProperties properties = sstFileReader.getTableProperties();
-      String tableName = new String(properties.getColumnFamilyName(), UTF_8);
-      if (tableToPrefixMap.containsKey(tableName)) {
-        String prefix = tableToPrefixMap.get(tableName);
-
-        try (
-            ManagedReadOptions readOptions = new ManagedReadOptions();
-            ManagedSstFileReaderIterator iterator = ManagedSstFileReaderIterator.managed(
-                sstFileReader.newIterator(readOptions))) {
-          iterator.get().seek(prefix.getBytes(UTF_8));
-          String seekResultKey = new String(iterator.get().key(), UTF_8);
-          return seekResultKey.startsWith(prefix);
+  /**
+   * Filter sst files based on prefixes.
+   */
+  public static void filterRelevantSstFiles(Set<String> inputFiles,
+                                            Map<String, String> tableToPrefixMap,
+                                            Map<String, CompactionNode> preExistingCompactionNodes,
+                                            ManagedRocksDB... dbs) {
+    Map<String, LiveFileMetaData> liveFileMetaDataMap = new HashMap<>();
+    int dbIdx = 0;
+    for (Iterator<String> fileIterator =
+        inputFiles.iterator(); fileIterator.hasNext();) {
+      String filename = FilenameUtils.getBaseName(fileIterator.next());
+      while (!preExistingCompactionNodes.containsKey(filename) && !liveFileMetaDataMap.containsKey(filename)
Review Comment:
   1. I don't think we need the nested while loop, because the second while
   loop will always populate liveFileMetaDataMap from both DBs anyway.
   2. Can the population be moved outside the for loop? If there is no
   compaction in the current snapshot, you will otherwise keep adding files to
   liveFileMetaDataMap on every iteration.
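
   To make (2) concrete, a rough sketch of hoisting the population out of the
   for loop. The helper below is hypothetical (not in the PR); it assumes
   `ManagedRocksDB#get` exposes the underlying `RocksDB` and relies on the
   RocksJava `getLiveFilesMetaData()` / `fileName()` APIs:

   ```java
   /**
    * Hypothetical helper: collect LiveFileMetaData for every SST file
    * across all the given DBs in one pass, keyed by base file name so it
    * matches the FilenameUtils.getBaseName(...) lookups in the for loop.
    */
   private static Map<String, LiveFileMetaData> collectLiveFileMetaData(
       ManagedRocksDB... dbs) {
     Map<String, LiveFileMetaData> liveFileMetaDataMap = new HashMap<>();
     for (ManagedRocksDB db : dbs) {
       for (LiveFileMetaData lfm : db.get().getLiveFilesMetaData()) {
         liveFileMetaDataMap.put(FilenameUtils.getBaseName(lfm.fileName()), lfm);
       }
     }
     return liveFileMetaDataMap;
   }
   ```

   filterRelevantSstFiles could then call this once before the for loop, and
   the per-file check reduces to `preExistingCompactionNodes.containsKey(filename)
   || liveFileMetaDataMap.containsKey(filename)`, with no nested while loop and
   no dbIdx bookkeeping.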