hemantk-12 commented on code in PR #8016:
URL: https://github.com/apache/ozone/pull/8016#discussion_r1987573964
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/PopulateCompactionTable.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ozone.compaction.log;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import com.google.common.base.Preconditions;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.TableProperties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility methods to populate compactionLogTable from a compaction-log file.
+ */
+public final class PopulateCompactionTable {

Review Comment:
   nit: maybe `CompactionDagHelper`.

##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/CompactionLogDagPrinter.java:
##########
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.om;
+
+import static org.apache.hadoop.ozone.OzoneConsts.COMPACTION_LOG_TABLE;
+
+import com.google.common.graph.GraphBuilder;
+import com.google.common.graph.MutableGraph;
+import com.google.protobuf.InvalidProtocolBufferException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator;
+import org.apache.hadoop.ozone.debug.RocksDBUtils;
+import org.apache.ozone.compaction.log.CompactionFileInfo;
+import org.apache.ozone.compaction.log.CompactionLogEntry;
+import org.apache.ozone.compaction.log.PopulateCompactionTable;
+import org.apache.ozone.graph.PrintableGraph;
+import org.apache.ozone.rocksdiff.CompactionNode;
+import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+import picocli.CommandLine;
+
+/**
+ * Handler to generate image for current compaction DAG in the OM leader node.
+ * ozone debug om print-log-dag.
+ */
+@CommandLine.Command(
+    name = "print-log-dag",
+    aliases = "pld",
+    description = "Create an image of the current compaction log DAG.")
+public class CompactionLogDagPrinter implements Callable<Void> {
+
+  @CommandLine.Option(names = {"-f", "--file-location"},
+      required = true,
+      description = "Path to location at which image will be downloaded. " +
+          "Should include the image file name with \".png\" extension.")
+  private String imageLocation;
+
+  @CommandLine.Option(names = {"--db"},
+      required = true,
+      scope = CommandLine.ScopeType.INHERIT,
+      description = "Path to OM RocksDB")
+  private String dbPath;
+
+  @CommandLine.Option(names = {"--compaction-log"},
+      required = true,
+      scope = CommandLine.ScopeType.INHERIT,
+      description = "Path to compaction-log directory.")
+  private String compactionLogDir;
+
+  // TODO: Change graphType to enum.
+  @CommandLine.Option(names = {"-t", "--graph-type"},
+      description = "Type of node name to use in the graph image. (optional)\n Accepted values are: \n" +
+          " FILE_NAME (default) : to use file name as node name in DAG,\n" +
+          " KEY_SIZE: to show the no. of keys in the file along with file name in the DAG node name,\n" +
+          " CUMULATIVE_SIZE: to show the cumulative size along with file name in the DAG node name.",
+      defaultValue = "FILE_NAME")
+  private String graphType;
+
+  @Override
+  public Void call() throws Exception {
+    try {
+      System.out.println("tej enter try ");

Review Comment:
   I believe these print statements were for local testing only. Please remove them.
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/compaction/log/PopulateCompactionTable.java:
##########
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ozone.compaction.log;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import com.google.common.base.Preconditions;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileReader;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDBException;
+import org.rocksdb.TableProperties;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility methods to populate compactionLogTable from a compaction-log file.
+ */
+public final class PopulateCompactionTable {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(PopulateCompactionTable.class);
+
+  /**
+   * Used during DAG construction.
+   */
+  private long reconstructionSnapshotCreationTime;
+  private String reconstructionCompactionReason;
+  private final String compactionLogDir;
+  private ManagedRocksDB activeRocksDB;
+  private ColumnFamilyHandle compactionLogTableCFHandle;
+
+  public PopulateCompactionTable(String compactLogDir, ManagedRocksDB db, ColumnFamilyHandle cf) {
+    compactionLogDir = compactLogDir;
+    activeRocksDB = db;
+    compactionLogTableCFHandle = cf;
+    reconstructionSnapshotCreationTime = 0L;
+    reconstructionCompactionReason = null;
+  }
+
+  public void setActiveRocksDB(ManagedRocksDB activeRocksDB) {
+    this.activeRocksDB = activeRocksDB;
+  }
+
+  public void setCompactionLogTableCFHandle(ColumnFamilyHandle compactionLogTableCFHandle) {
+    this.compactionLogTableCFHandle = compactionLogTableCFHandle;
+  }
+
+  public void addEntriesFromLogFilesToDagAndCompactionLogTable() {

Review Comment:
   This was for backward compatibility, to read compaction logs from the compaction-backup-dir.
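   To make that concrete for readers of the thread, here is a minimal sketch of the legacy read path being described. The class, method, and ".log" suffix are assumptions for illustration, not the actual Ozone implementation (which, per the method name, also feeds the parsed entries into the DAG and the compactionLogTable):

```java
import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/** Illustration only: read legacy compaction-log files from the backup dir. */
public final class CompactionLogBackupReader {

  private CompactionLogBackupReader() {
  }

  /**
   * Returns every line of every compaction-log file under the backup
   * directory, oldest file first (assumes timestamped file names, so
   * lexical order matches creation order).
   */
  public static List<String> readLegacyEntries(String compactionLogDir) throws IOException {
    Path dir = Paths.get(compactionLogDir);
    List<String> entries = new ArrayList<>();
    try (Stream<Path> files = Files.list(dir)) {
      List<Path> logFiles = files
          .filter(p -> p.getFileName().toString().endsWith(".log")) // assumed suffix
          .sorted()
          .collect(Collectors.toList());
      for (Path logFile : logFiles) {
        entries.addAll(Files.readAllLines(logFile, UTF_8));
      }
    }
    return entries;
  }
}
```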
##########
hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java:
##########
@@ -322,16 +268,12 @@ public void close() {
   private final MutableGraph<CompactionNode> backwardCompactionDAG =
       GraphBuilder.directed().build();
-  public static final Integer DEBUG_DAG_BUILD_UP = 2;
-  public static final Integer DEBUG_DAG_TRAVERSAL = 3;
-  public static final Integer DEBUG_DAG_LIVE_NODES = 4;
-  public static final Integer DEBUG_READ_ALL_DB_KEYS = 5;
   private static final HashSet<Integer> DEBUG_LEVEL = new HashSet<>();
   static {
-    addDebugLevel(DEBUG_DAG_BUILD_UP);
-    addDebugLevel(DEBUG_DAG_TRAVERSAL);
-    addDebugLevel(DEBUG_DAG_LIVE_NODES);
+    addDebugLevel(RocksDBConsts.DEBUG_DAG_BUILD_UP);

Review Comment:
   nit: statically import these constants to reduce the number of changed lines.
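   For the record, this is the shape of the change being asked for: only the import block changes and the call sites stay exactly as they were. The package of `RocksDBConsts` and the `addDebugLevel` signature are assumptions here, and the class is trimmed to the members shown in the hunk above:

```java
// Sketch only: RocksDBCheckpointDiffer reduced to the relevant members.
package org.apache.ozone.rocksdiff;

import static org.apache.ozone.rocksdiff.RocksDBConsts.DEBUG_DAG_BUILD_UP;
import static org.apache.ozone.rocksdiff.RocksDBConsts.DEBUG_DAG_LIVE_NODES;
import static org.apache.ozone.rocksdiff.RocksDBConsts.DEBUG_DAG_TRAVERSAL;

import java.util.HashSet;

public class RocksDBCheckpointDiffer {

  private static final HashSet<Integer> DEBUG_LEVEL = new HashSet<>();

  static {
    // With the static imports above, these lines read exactly as they did
    // before the constants moved, so they stay out of the diff entirely.
    addDebugLevel(DEBUG_DAG_BUILD_UP);
    addDebugLevel(DEBUG_DAG_TRAVERSAL);
    addDebugLevel(DEBUG_DAG_LIVE_NODES);
  }

  private static void addDebugLevel(Integer level) {
    DEBUG_LEVEL.add(level);
  }
}
```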
##########
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/CompactionLogDagPrinter.java:
##########
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.debug;
-
-import java.io.IOException;
-import org.apache.hadoop.hdds.cli.DebugSubcommand;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.shell.Handler;
-import org.apache.hadoop.ozone.shell.OzoneAddress;
-import org.kohsuke.MetaInfServices;
-import picocli.CommandLine;
-
-/**
- * Handler to generate image for current compaction DAG in the OM leader node.
- * ozone sh snapshot print-log-dag.
- */
-@CommandLine.Command(
-    name = "print-log-dag",
-    aliases = "pld",
-    description = "Create an image of the current compaction log DAG in OM.")
-@MetaInfServices(DebugSubcommand.class)
-public class CompactionLogDagPrinter extends Handler
-    implements DebugSubcommand {
-
-  @CommandLine.Option(names = {"-f", "--file-name-prefix"},
-      description = "Prefix to be use in image file name. (optional)")
-  private String fileNamePrefix;
-
-  // TODO: Change graphType to enum.
-  @CommandLine.Option(names = {"-t", "--graph-type"},
-      description = "Type of node name to use in the graph image. " +
-          "(optional)\n Accepted values are: \n" +
-          " file_name (default) : to use file name as node name in DAG,\n" +
-          " key_size: to show the no. of keys in the file along with file " +
-          "name in the DAG node name,\n" +
-          " cumulative_size: to show the cumulative size along with file " +
-          "name in the DAG node name.",
-      defaultValue = "file_name")
-  private String graphType;
-
-  @Override
-  protected void execute(OzoneClient client, OzoneAddress address)
-      throws IOException {
-    String message = client.getObjectStore()
-        .printCompactionLogDag(fileNamePrefix, graphType);

Review Comment:
   Please deprecate this API.
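   On the deprecation ask, the usual first step would look roughly like this on the client-side method being bypassed. The signature is taken from the deleted call site above; the class name and body below are placeholders, not the real ObjectStore:

```java
import java.io.IOException;

/** Placeholder class standing in for the real client-side API owner. */
public class CompactionDagClientApi {

  /**
   * @deprecated the DAG image is now generated offline via
   *             {@code ozone debug om print-log-dag}; this server round-trip
   *             is slated for removal.
   */
  @Deprecated
  public String printCompactionLogDag(String fileNamePrefix, String graphType)
      throws IOException {
    // The real method would keep its existing delegation to the OM until it is
    // removed; returning a hint string keeps this sketch self-contained.
    return "Deprecated: use 'ozone debug om print-log-dag' instead ("
        + fileNamePrefix + ", " + graphType + ")";
  }
}
```
   The matching OM-side handler would presumably need the same treatment whenever the RPC itself is retired.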
