This is an automated email from the ASF dual-hosted git repository.

ashishkr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new c68308da9e HDDS-12599. Create an ozone debug CLI command to list all 
the containers based on final state (#8282)
c68308da9e is described below

commit c68308da9e70fc149810d453111f337ad2b12994
Author: sreejasahithi <[email protected]>
AuthorDate: Mon Apr 28 20:02:12 2025 +0530

    HDDS-12599. Create an ozone debug CLI command to list all the containers 
based on final state (#8282)
---
 .../parser/ContainerDatanodeDatabase.java          | 160 ++++++++++++++-------
 .../parser/ContainerLogFileParser.java             |  16 +--
 .../hadoop/ozone/containerlog/parser/DBConsts.java |  41 +++++-
 .../LogParser.java}                                |  15 +-
 .../container/ContainerLogController.java          |  25 +++-
 .../{ => logs}/container/ContainerLogParser.java   |  69 +++++----
 .../ozone/debug/logs/container/ListContainers.java |  86 +++++++++++
 .../debug/{ => logs}/container/package-info.java   |   5 +-
 .../debug/{container => logs}/package-info.java    |   4 +-
 .../resources/container-log-db-queries.properties  |  24 ----
 10 files changed, 303 insertions(+), 142 deletions(-)

diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/ContainerDatanodeDatabase.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/ContainerDatanodeDatabase.java
index 19f7402518..9dcf3fb718 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/ContainerDatanodeDatabase.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/ContainerDatanodeDatabase.java
@@ -17,8 +17,9 @@
 
 package org.apache.hadoop.ozone.containerlog.parser;
 
-import java.io.FileNotFoundException;
-import java.io.InputStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.nio.charset.StandardCharsets;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -26,11 +27,6 @@
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.stream.Collectors;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.sqlite.SQLiteConfig;
 
 /**
@@ -38,37 +34,20 @@
  * Provides methods for table creation, log data insertion, and index setup.
  */
 public class ContainerDatanodeDatabase {
-
-  private static Map<String, String> queries;
-
-  static {
-    loadProperties();
-  }
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ContainerDatanodeDatabase.class);
-
-  private static void loadProperties() {
-    Properties props = new Properties();
-    try (InputStream inputStream = 
ContainerDatanodeDatabase.class.getClassLoader()
-        .getResourceAsStream(DBConsts.PROPS_FILE)) {
-
-      if (inputStream != null) {
-        props.load(inputStream);
-        queries = props.entrySet().stream()
-            .collect(Collectors.toMap(
-                e -> e.getKey().toString(),
-                e -> e.getValue().toString()
-            ));
-      } else {
-        throw new FileNotFoundException("Property file '" + 
DBConsts.PROPS_FILE + "' not found.");
-      }
-    } catch (Exception e) {
-      LOG.error(e.getMessage());
+  
+  private static String databasePath;
+  
+  public static void setDatabasePath(String dbPath) {
+    if (databasePath == null) {
+      databasePath = dbPath;
     }
   }
 
   private static Connection getConnection() throws Exception {
+    if (databasePath == null) {
+      throw new IllegalStateException("Database path not set");
+    }
+    
     Class.forName(DBConsts.DRIVER);
 
     SQLiteConfig config = new SQLiteConfig();
@@ -79,11 +58,11 @@ private static Connection getConnection() throws Exception {
     config.setSynchronous(SQLiteConfig.SynchronousMode.OFF);
     config.setTempStore(SQLiteConfig.TempStore.MEMORY);
 
-    return DriverManager.getConnection(DBConsts.CONNECTION_PREFIX + 
DBConsts.DATABASE_NAME, config.toProperties());
+    return DriverManager.getConnection(DBConsts.CONNECTION_PREFIX + 
databasePath, config.toProperties());
   }
 
   public void createDatanodeContainerLogTable() throws SQLException {
-    String createTableSQL = queries.get("CREATE_DATANODE_CONTAINER_LOG_TABLE");
+    String createTableSQL = DBConsts.CREATE_DATANODE_CONTAINER_LOG_TABLE;
     try (Connection connection = getConnection();
          Statement dropStmt = connection.createStatement();
          Statement createStmt = connection.createStatement()) {
@@ -91,26 +70,26 @@ public void createDatanodeContainerLogTable() throws 
SQLException {
       createStmt.execute(createTableSQL);
       createDatanodeContainerIndex(createStmt);
     } catch (SQLException e) {
-      LOG.error("Error while creating the table: {}", e.getMessage());
+      System.err.println("Error while creating the table: " + e.getMessage());
       throw e;
     } catch (Exception e) {
-      LOG.error(e.getMessage());
+      System.err.println("Unexpected error: " + e.getMessage());
       throw new RuntimeException(e);
     }
   }
 
   private void createContainerLogTable() throws SQLException {
-    String createTableSQL = queries.get("CREATE_CONTAINER_LOG_TABLE");
+    String createTableSQL = DBConsts.CREATE_CONTAINER_LOG_TABLE;
     try (Connection connection = getConnection();
          Statement dropStmt = connection.createStatement();
          Statement createStmt = connection.createStatement()) {
       dropTable(DBConsts.CONTAINER_LOG_TABLE_NAME, dropStmt);
       createStmt.execute(createTableSQL);
     } catch (SQLException e) {
-      LOG.error("Error while creating the table: {}", e.getMessage());
+      System.err.println("Error while creating the table: " + e.getMessage());
       throw e;
     } catch (Exception e) {
-      LOG.error(e.getMessage());
+      System.err.println("Unexpected error: " + e.getMessage());
       throw new RuntimeException(e);
     }
   }
@@ -123,7 +102,7 @@ private void createContainerLogTable() throws SQLException {
   
   public synchronized void 
insertContainerDatanodeData(List<DatanodeContainerInfo> transitionList) throws 
SQLException {
 
-    String insertSQL = queries.get("INSERT_DATANODE_CONTAINER_LOG");
+    String insertSQL = DBConsts.INSERT_DATANODE_CONTAINER_LOG;
 
     long containerId = 0;
     String datanodeId = null;
@@ -159,16 +138,16 @@ public synchronized void 
insertContainerDatanodeData(List<DatanodeContainerInfo>
         preparedStatement.executeBatch();
       }
     } catch (SQLException e) {
-      LOG.error("Failed to insert container log for container {} on datanode 
{}", containerId, datanodeId, e);
+      System.err.println("Failed to insert container log for container " + 
containerId + " on datanode " + datanodeId);
       throw e;
     } catch (Exception e) {
-      LOG.error(e.getMessage());
+      System.err.println("Unexpected error: " + e.getMessage());
       throw new RuntimeException(e);
     }
   }
 
   private void createDatanodeContainerIndex(Statement stmt) throws 
SQLException {
-    String createIndexSQL = queries.get("CREATE_DATANODE_CONTAINER_INDEX");
+    String createIndexSQL = DBConsts.CREATE_DATANODE_CONTAINER_INDEX;
     stmt.execute(createIndexSQL);
   }
 
@@ -179,8 +158,8 @@ private void createDatanodeContainerIndex(Statement stmt) 
throws SQLException {
 
   public void insertLatestContainerLogData() throws SQLException {
     createContainerLogTable();
-    String selectSQL = queries.get("SELECT_LATEST_CONTAINER_LOG");
-    String insertSQL = queries.get("INSERT_CONTAINER_LOG");
+    String selectSQL = DBConsts.SELECT_LATEST_CONTAINER_LOG;
+    String insertSQL = DBConsts.INSERT_CONTAINER_LOG;
 
     try (Connection connection = getConnection();
          PreparedStatement selectStmt = connection.prepareStatement(selectSQL);
@@ -208,8 +187,8 @@ public void insertLatestContainerLogData() throws 
SQLException {
             count = 0;
           }
         } catch (SQLException e) {
-          LOG.error("Failed to insert container log entry for container {} on 
datanode {} ",
-              containerId, datanodeId, e);
+          System.err.println("Failed to insert container log entry for 
container " + containerId + " on datanode "
+              + datanodeId);
           throw e;
         }
       }
@@ -218,18 +197,95 @@ public void insertLatestContainerLogData() throws 
SQLException {
         insertStmt.executeBatch();
       }
     } catch (SQLException e) {
-      LOG.error("Failed to insert container log entry: {}", e.getMessage());
+      System.err.println("Failed to insert container log entry: " + 
e.getMessage());
       throw e;
     } catch (Exception e) {
-      LOG.error(e.getMessage());
+      System.err.println("Unexpected error: " + e.getMessage());
       throw new RuntimeException(e);
     }
   }
 
   private void dropTable(String tableName, Statement stmt) throws SQLException 
{
-    String dropTableSQL = queries.get("DROP_TABLE").replace("{table_name}", 
tableName);
+    String dropTableSQL = DBConsts.DROP_TABLE.replace("{table_name}", 
tableName);
     stmt.executeUpdate(dropTableSQL);
   }
 
+  private void createContainerLogIndex(Statement stmt) throws SQLException {
+    String createIndexSQL = DBConsts.CREATE_INDEX_LATEST_STATE;
+    stmt.execute(createIndexSQL);
+  }
+
+  /**
+   * Lists containers filtered by the specified state and writes their details 
to stdout 
+   * unless redirected to a file explicitly.
+   * The output includes timestamp, datanode ID, container ID, BCSID, error 
message, and index value,
+   * written in a human-readable table format to a file or console.
+   * Behavior based on the {@code limit} parameter:
+   * If {@code limit} is provided, only up to the specified number of rows are 
printed.
+   * If the number of matching containers exceeds the {@code limit},
+   * a note is printed indicating more containers exist.
+   *
+   * @param state the container state to filter by (e.g., "OPEN", "CLOSED", 
"QUASI_CLOSED")
+   * @param limit the maximum number of rows to display; use {@link 
Integer#MAX_VALUE} to fetch all rows
+   */
+
+  public void listContainersByState(String state, Integer limit) throws 
SQLException {
+    int count = 0;
+
+    boolean limitProvided = limit != Integer.MAX_VALUE;
+
+    String baseQuery = DBConsts.SELECT_LATEST_CONTAINER_LOGS_BY_STATE;
+    String finalQuery = limitProvided ? baseQuery + " LIMIT ?" : baseQuery;
+
+    try (Connection connection = getConnection();
+         Statement stmt = connection.createStatement()) {
+
+      createContainerLogIndex(stmt);
+
+      try (PreparedStatement pstmt = connection.prepareStatement(finalQuery)) {
+        pstmt.setString(1, state);
+        if (limitProvided) {
+          pstmt.setInt(2, limit + 1);
+        }
+
+        try (ResultSet rs = pstmt.executeQuery();
+             PrintWriter writer = new PrintWriter(new 
OutputStreamWriter(System.out,
+                 StandardCharsets.UTF_8), true)) {
+
+          writer.printf("%-25s | %-35s | %-15s | %-15s | %-40s | %-12s%n",
+              "Timestamp", "Datanode ID", "Container ID", "BCSID", "Message", 
"Index Value");
+          
writer.println("-------------------------------------------------------------------------------------"
 +
+              
"---------------------------------------------------------------------------------------");
+
+          while (rs.next()) {
+            if (limitProvided && count >= limit) {
+              writer.println("Note: There might be more containers. Use -all 
option to list all entries");
+              break;
+            }
+            String timestamp = rs.getString("timestamp");
+            String datanodeId = rs.getString("datanode_id");
+            long containerId = rs.getLong("container_id");
+            long latestBcsid = rs.getLong("latest_bcsid");
+            String errorMessage = rs.getString("error_message");
+            int indexValue = rs.getInt("index_value");
+            count++;
+
+            writer.printf("%-25s | %-35s | %-15d | %-15d | %-40s | %-12d%n",
+                timestamp, datanodeId, containerId, latestBcsid, errorMessage, 
indexValue);
+          }
+          
+          if (count == 0) {
+            writer.printf("No containers found for state: %s%n", state);
+          } else {
+            writer.printf("Number of containers listed: %d%n", count);
+          }
+        }
+      }
+    } catch (SQLException e) {
+      throw new SQLException("Error while retrieving containers with state " + 
state);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
 }
 
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/ContainerLogFileParser.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/ContainerLogFileParser.java
index bafce798e5..c41beb328f 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/ContainerLogFileParser.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/ContainerLogFileParser.java
@@ -62,7 +62,7 @@ public class ContainerLogFileParser {
    */
 
   public void processLogEntries(String logDirectoryPath, 
ContainerDatanodeDatabase dbstore, int threadCount)
-      throws SQLException {
+      throws SQLException, IOException, InterruptedException {
     try (Stream<Path> paths = Files.walk(Paths.get(logDirectoryPath))) {
 
       List<Path> files = 
paths.filter(Files::isRegularFile).collect(Collectors.toList());
@@ -116,10 +116,6 @@ public void processLogEntries(String logDirectoryPath, 
ContainerDatanodeDatabase
         throw new SQLException("Log file processing failed.");
       }
 
-    } catch (IOException | InterruptedException e) {
-      e.printStackTrace();
-    } catch (NumberFormatException e) {
-      System.err.println("Invalid datanode ID");
     }
   }
 
@@ -133,7 +129,7 @@ public void processLogEntries(String logDirectoryPath, 
ContainerDatanodeDatabase
    */
   
   private void processFile(String logFilePath, ContainerDatanodeDatabase 
dbstore, String datanodeId) 
-      throws SQLException {
+      throws SQLException, IOException {
     List<DatanodeContainerInfo> batchList = new ArrayList<>(MAX_OBJ_IN_LIST + 
100);
 
     try (BufferedReader reader = 
Files.newBufferedReader(Paths.get(logFilePath), StandardCharsets.UTF_8)) {
@@ -199,11 +195,7 @@ private void processFile(String logFilePath, 
ContainerDatanodeDatabase dbstore,
               batchList.clear();
             }
           } catch (SQLException e) {
-            throw new SQLException(e.getMessage());
-          } catch (Exception e) {
-            System.err.println(
-                "Error processing the batch for container: " + id + " at 
datanode: " + datanodeId);
-            e.printStackTrace();
+            throw e;
           }
         } else {
           System.err.println("Log line does not have all required fields: " + 
line);
@@ -214,8 +206,6 @@ private void processFile(String logFilePath, 
ContainerDatanodeDatabase dbstore,
         batchList.clear();
       }
 
-    } catch (IOException e) {
-      e.printStackTrace();
     }
   }
 }
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/DBConsts.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/DBConsts.java
index d4b3c5a320..5cef4a335d 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/DBConsts.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/containerlog/parser/DBConsts.java
@@ -21,16 +21,49 @@
  * Constants used for ContainerDatanodeDatabase.
  */
 public final class DBConsts {
-
+  
+  public static final String DEFAULT_DB_FILENAME = "container_datanode.db";
   public static final String DRIVER = "org.sqlite.JDBC";
   public static final String CONNECTION_PREFIX = "jdbc:sqlite:";
-  public static final String DATABASE_NAME = "container_datanode.db";
-  public static final String PROPS_FILE = 
"container-log-db-queries.properties";
   public static final int CACHE_SIZE = 1000000;
   public static final int BATCH_SIZE = 2500;
   public static final String DATANODE_CONTAINER_LOG_TABLE_NAME = 
"DatanodeContainerLogTable";
   public static final String CONTAINER_LOG_TABLE_NAME = "ContainerLogTable";
-
+  public static final String CREATE_DATANODE_CONTAINER_LOG_TABLE = 
+      "CREATE TABLE IF NOT EXISTS DatanodeContainerLogTable (datanode_id TEXT 
NOT NULL, " +
+          "container_id INTEGER NOT NULL, timestamp TEXT NOT NULL, 
container_state TEXT, bcsid INTEGER, " +
+          "error_message TEXT, log_level TEXT NOT NULL," +
+          " index_value INTEGER);";
+  public static final String CREATE_CONTAINER_LOG_TABLE = 
+      "CREATE TABLE IF NOT EXISTS ContainerLogTable (datanode_id TEXT NOT 
NULL, container_id INTEGER NOT NULL," +
+          " latest_state TEXT, latest_bcsid INTEGER, PRIMARY KEY (datanode_id, 
container_id));";
+  public static final String CREATE_DATANODE_CONTAINER_INDEX = 
+      "CREATE INDEX IF NOT EXISTS idx_datanode_container ON 
DatanodeContainerLogTable (datanode_id," +
+          " container_id, timestamp);";
+  public static final String INSERT_DATANODE_CONTAINER_LOG = 
+      "INSERT INTO DatanodeContainerLogTable (datanode_id, container_id, 
timestamp, container_state, bcsid," +
+          " error_message, log_level, index_value) VALUES (?, ?, ?, ?, ?, ?, 
?, ?);";
+  public static final String INSERT_CONTAINER_LOG = 
+      "INSERT OR REPLACE INTO ContainerLogTable (datanode_id, container_id, 
latest_state," +
+          " latest_bcsid) VALUES (?, ?, ?, ?);";
+  public static final String SELECT_LATEST_CONTAINER_LOG = 
+      "SELECT a.datanode_id, a.container_id, a.container_state, a.bcsid, 
a.timestamp FROM DatanodeContainerLogTable" +
+          " AS a JOIN  (SELECT datanode_id, container_id, MAX(timestamp) as 
timestamp FROM DatanodeContainerLogTable" +
+          " GROUP BY datanode_id, container_id) as b ON a.datanode_id = 
b.datanode_id AND " +
+          "a.container_id = b.container_id AND a.timestamp=b.timestamp;";
+  public static final String DROP_TABLE = "DROP TABLE IF EXISTS {table_name};";
+  public static final String CREATE_INDEX_LATEST_STATE = 
+      "CREATE INDEX IF NOT EXISTS idx_container_log_state ON 
ContainerLogTable(latest_state);";
+  public static final String SELECT_LATEST_CONTAINER_LOGS_BY_STATE = 
+      "SELECT cl.datanode_id, cl.container_id, cl.latest_state, 
cl.latest_bcsid, dcl.error_message, dcl.index_value," +
+          " dcl.timestamp FROM ContainerLogTable cl LEFT JOIN 
DatanodeContainerLogTable dcl ON" +
+          " cl.datanode_id = dcl.datanode_id AND cl.container_id = 
dcl.container_id AND cl.latest_bcsid = dcl.bcsid " +
+          "AND cl.latest_state = dcl.container_state WHERE cl.latest_state = ? 
" +
+          "AND dcl.timestamp = (SELECT MAX(timestamp) FROM 
DatanodeContainerLogTable sub_dcl " +
+          "WHERE sub_dcl.datanode_id = cl.datanode_id AND" +
+          " sub_dcl.container_id = cl.container_id AND sub_dcl.bcsid = 
cl.latest_bcsid" +
+          " AND sub_dcl.container_state = cl.latest_state)";
+  
   private DBConsts() {
     //Never constructed
   }
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogController.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/LogParser.java
similarity index 67%
copy from 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogController.java
copy to 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/LogParser.java
index 1043d89147..20847dde18 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogController.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/LogParser.java
@@ -15,24 +15,25 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.debug.container;
+package org.apache.hadoop.ozone.debug.logs;
 
 import org.apache.hadoop.hdds.cli.DebugSubcommand;
+import org.apache.hadoop.ozone.debug.logs.container.ContainerLogController;
 import org.kohsuke.MetaInfServices;
 import picocli.CommandLine;
 
 /**
- * A controller for managing container log operations like parsing and listing 
containers.
+ * Entry point for Ozone debug log parsing and analysis commands.
  */
 
 @CommandLine.Command(
-    name = "container",
+    name = "log",
     subcommands = {
-        ContainerLogParser.class
+        ContainerLogController.class
     },
-    description = "Parse, Store, Retrieve"
+    description = "This serves as a common entry point for all commands that 
parse and analyze logs," +
+        "regardless of their source or type and require logs to be extracted 
first."
 )
 @MetaInfServices(DebugSubcommand.class)
-public class ContainerLogController implements DebugSubcommand  {
-
+public class LogParser implements DebugSubcommand {
 }
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogController.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerLogController.java
similarity index 58%
rename from 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogController.java
rename to 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerLogController.java
index 1043d89147..4ba220c9d4 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogController.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerLogController.java
@@ -15,10 +15,8 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.debug.container;
+package org.apache.hadoop.ozone.debug.logs.container;
 
-import org.apache.hadoop.hdds.cli.DebugSubcommand;
-import org.kohsuke.MetaInfServices;
 import picocli.CommandLine;
 
 /**
@@ -28,11 +26,24 @@
 @CommandLine.Command(
     name = "container",
     subcommands = {
-        ContainerLogParser.class
+        ContainerLogParser.class,
+        ListContainers.class
     },
-    description = "Parse, Store, Retrieve"
+    description = "Tool to parse and store container logs from datanodes into 
a temporary SQLite database." +
+            " Supports querying state transitions of container replicas using 
various subcommands."
 )
-@MetaInfServices(DebugSubcommand.class)
-public class ContainerLogController implements DebugSubcommand  {
 
+public class ContainerLogController {
+  @CommandLine.Option(names = {"--db"},
+      scope = CommandLine.ScopeType.INHERIT,
+      description = "Path to the SQLite database file where the parsed 
information from logs is stored.")
+  private String dbPath;
+
+  public String getDbPath() {
+    return dbPath;
+  }
+  
+  public void setDbPath(String dbPath) {
+    this.dbPath = dbPath;
+  }
 }
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogParser.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerLogParser.java
similarity index 51%
rename from 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogParser.java
rename to 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerLogParser.java
index c9ef86d5dd..df1288543b 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerLogParser.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ContainerLogParser.java
@@ -15,15 +15,15 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.ozone.debug.container;
+package org.apache.hadoop.ozone.debug.logs.container;
 
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import java.sql.SQLException;
 import java.util.concurrent.Callable;
 import org.apache.hadoop.ozone.containerlog.parser.ContainerDatanodeDatabase;
 import org.apache.hadoop.ozone.containerlog.parser.ContainerLogFileParser;
+import org.apache.hadoop.ozone.containerlog.parser.DBConsts;
 import picocli.CommandLine;
 
 /**
@@ -31,18 +31,20 @@
  */
 
 @CommandLine.Command(
-    name = "container_log_parse",
-    description = "parse the container logs"
+    name = "parse",
+    description = "Parse container logs and extract key details such as 
datanode ID, container ID, state, " +
+            "BCSID, timestamp, log level, index value, and messages (if any)."
 )
 public class ContainerLogParser implements Callable<Void> {
   private static final int DEFAULT_THREAD_COUNT = 10;
   
-  @CommandLine.Option(names = {"--parse"},
-      description = "path to the dir which contains log files")
+  @CommandLine.Option(names = {"--path"},
+      description = "Path to the folder which contains container log files to 
be parsed.",
+      required = true)
   private String path;
 
   @CommandLine.Option(names = {"--thread-count"},
-      description = "Thread count for concurrent processing.",
+      description = "Thread count for concurrent log file processing.",
       defaultValue = "10")
   private int threadCount;
 
@@ -57,36 +59,41 @@ public Void call() throws Exception {
       threadCount = DEFAULT_THREAD_COUNT;
     }
     
-    if (path != null) {
-      Path logPath = Paths.get(path);
-      if (!Files.exists(logPath) || !Files.isDirectory(logPath)) {
-        System.err.println("Invalid path provided: " + path);
-        return null;
-      }
-
-      ContainerDatanodeDatabase cdd = new ContainerDatanodeDatabase();
-      ContainerLogFileParser parser = new ContainerLogFileParser();
-
-      try {
+    Path logPath = Paths.get(path);
+    if (!Files.exists(logPath) || !Files.isDirectory(logPath)) {
+      System.err.println("Invalid path provided: " + path);
+      return null;
+    }
+    
+    Path providedDbPath;
+    if (parent.getDbPath() == null) {
+      providedDbPath = Paths.get(System.getProperty("user.dir"), 
DBConsts.DEFAULT_DB_FILENAME);
 
-        cdd.createDatanodeContainerLogTable();
+      System.out.println("No database path provided. Creating new database at: 
" + providedDbPath);
+    } else {
+      providedDbPath = Paths.get(parent.getDbPath());
+      Path parentDir = providedDbPath.getParent();
 
-        parser.processLogEntries(path, cdd, threadCount);
+      if (parentDir != null && !Files.exists(parentDir)) {
+        System.err.println("The parent directory of the provided database path 
does not exist: " + parentDir);
+        return null;
+      }
+    }
+    
+    ContainerDatanodeDatabase.setDatabasePath(providedDbPath.toString());
+    ContainerDatanodeDatabase cdd = new ContainerDatanodeDatabase();
+    ContainerLogFileParser parser = new ContainerLogFileParser();
 
-        cdd.insertLatestContainerLogData();
-        System.out.println("Successfully parsed the log files and updated the 
respective tables");
+    try {
+      cdd.createDatanodeContainerLogTable();
 
-      } catch (SQLException e) {
-        System.err.println("Error occurred while processing logs or inserting 
data into the database: "
-            + e.getMessage());
-      } catch (Exception e) {
-        System.err.println("An unexpected error occurred: " + e.getMessage());
-      }
+      parser.processLogEntries(path, cdd, threadCount);
 
-    } else {
-      System.out.println("path to logs folder not provided");
+      cdd.insertLatestContainerLogData();
+      System.out.println("Successfully parsed the log files and updated the 
respective tables");
+    } catch (Exception e) {
+      throw e;
     }
-
     return null;
   }
 
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ListContainers.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ListContainers.java
new file mode 100644
index 0000000000..7bfa66a742
--- /dev/null
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/ListContainers.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.debug.logs.container;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.containerlog.parser.ContainerDatanodeDatabase;
+import org.apache.hadoop.ozone.containerlog.parser.DBConsts;
+import org.apache.hadoop.ozone.shell.ListOptions;
+import picocli.CommandLine;
+
+
+/**
+ * List containers based on the parameter given.
+ */
+
[email protected](
+    name = "list",
+    description = "Finds containers from the database based on the option 
provided."
+)
+public class ListContainers implements Callable<Void> {
+  
+  @CommandLine.Option(names = {"--state"},
+      description = "Life cycle state of the container.",
+      required = true)
+  private HddsProtos.LifeCycleState state;
+
+  @CommandLine.Mixin
+  private ListOptions listOptions;
+
+  @CommandLine.ParentCommand
+  private ContainerLogController parent;
+
+  @Override
+  public Void call() throws Exception {
+    Path providedDbPath;
+    if (parent.getDbPath() == null) {
+      providedDbPath = Paths.get(System.getProperty("user.dir"), 
DBConsts.DEFAULT_DB_FILENAME);
+
+      if (Files.exists(providedDbPath) && Files.isRegularFile(providedDbPath)) 
{
+        System.out.println("Using default database file found in current 
directory: " + providedDbPath);
+      } else {
+        System.err.println("No database path provided and default file '" + 
DBConsts.DEFAULT_DB_FILENAME + "' not " +
+            "found in current directory. Please provide a valid database 
path");
+        return null;
+      }
+    } else {
+      providedDbPath = Paths.get(parent.getDbPath());
+      Path parentDir = providedDbPath.getParent();
+
+      if (parentDir != null && !Files.exists(parentDir)) {
+        System.err.println("The parent directory of the provided database path 
does not exist: " + parentDir);
+        return null;
+      }
+    }
+
+    ContainerDatanodeDatabase.setDatabasePath(providedDbPath.toString());
+    
+    ContainerDatanodeDatabase cdd = new ContainerDatanodeDatabase();
+    try {
+      cdd.listContainersByState(state.name(), listOptions.getLimit());
+    } catch (Exception e) {
+      throw e;
+    }
+    
+    return null;
+  }
+}
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/package-info.java
similarity index 86%
copy from 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
copy to 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/package-info.java
index 9354dfe28b..a4a7260a41 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/container/package-info.java
@@ -16,7 +16,8 @@
  */
 
 /**
- * Provides functionality for managing container log operations, including 
parsing, processing, and storing log data.
+ * Provides functionality for managing container log operations, including 
parsing, processing, 
+ * storing extracted data into the database, and analysis.
  */
 
-package org.apache.hadoop.ozone.debug.container;
+package org.apache.hadoop.ozone.debug.logs.container;
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/package-info.java
similarity index 82%
rename from 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
rename to 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/package-info.java
index 9354dfe28b..2609b76dfa 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/logs/package-info.java
@@ -16,7 +16,7 @@
  */
 
 /**
- * Provides functionality for managing container log operations, including 
parsing, processing, and storing log data.
+ * Provides commands for parsing and analyzing extracted logs.
  */
 
-package org.apache.hadoop.ozone.debug.container;
+package org.apache.hadoop.ozone.debug.logs;
diff --git 
a/hadoop-ozone/tools/src/main/resources/container-log-db-queries.properties 
b/hadoop-ozone/tools/src/main/resources/container-log-db-queries.properties
deleted file mode 100644
index 01a4055567..0000000000
--- a/hadoop-ozone/tools/src/main/resources/container-log-db-queries.properties
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-CREATE_DATANODE_CONTAINER_LOG_TABLE=CREATE TABLE IF NOT EXISTS 
DatanodeContainerLogTable (datanode_id TEXT NOT NULL, container_id INTEGER NOT 
NULL, timestamp TEXT NOT NULL, container_state TEXT, bcsid INTEGER, 
error_message TEXT, log_level TEXT NOT NULL, index_value INTEGER);
-CREATE_CONTAINER_LOG_TABLE=CREATE TABLE IF NOT EXISTS ContainerLogTable 
(datanode_id TEXT NOT NULL, container_id INTEGER NOT NULL, latest_state TEXT, 
latest_bcsid INTEGER, PRIMARY KEY (datanode_id, container_id));
-CREATE_DATANODE_CONTAINER_INDEX=CREATE INDEX IF NOT EXISTS 
idx_datanode_container ON DatanodeContainerLogTable (datanode_id, container_id, 
timestamp);
-INSERT_DATANODE_CONTAINER_LOG=INSERT INTO DatanodeContainerLogTable 
(datanode_id, container_id, timestamp, container_state, bcsid, error_message, 
log_level, index_value) VALUES (?, ?, ?, ?, ?, ?, ?, ?);
-INSERT_CONTAINER_LOG=INSERT OR REPLACE INTO ContainerLogTable (datanode_id, 
container_id, latest_state, latest_bcsid) VALUES (?, ?, ?, ?);
-SELECT_LATEST_CONTAINER_LOG=SELECT a.datanode_id, a.container_id, 
a.container_state, a.bcsid, a.timestamp FROM DatanodeContainerLogTable AS a 
JOIN  (SELECT datanode_id, container_id, MAX(timestamp) as timestamp FROM 
DatanodeContainerLogTable GROUP BY datanode_id, container_id) as b ON 
a.datanode_id = b.datanode_id AND a.container_id = b.container_id AND 
a.timestamp=b.timestamp;
-DROP_TABLE=DROP TABLE IF EXISTS {table_name};


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to