ashishkumar50 commented on code in PR #4880:
URL: https://github.com/apache/ozone/pull/4880#discussion_r1235086937


##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java:
##########
@@ -102,4 +102,22 @@ public void setContainerSizeCountTaskInterval(Duration 
interval) {
     this.containerSizeCountTaskInterval = interval.toMillis();
   }
 
+  @Config(key = "scmtablecounttask.interval",
+      type = ConfigType.TIME,
+      defaultValue = "60s",
+      tags = { ConfigTag.RECON, ConfigTag.OZONE },
+      description = "The time interval to wait between each runs of " +
+          "SCM table count task."
+  )
+  private long scmTableCountTaskInterval =
+      Duration.ofMinutes(1).toMillis();
+
+  public Duration getScmTableCountTaskInterval() {
+    return Duration.ofMillis(scmTableCountTaskInterval);
+  }
+
+  public void setScmTableCountTaskInterval(Duration interval) {

Review Comment:
   Remove unused method `setScmTableCountTaskInterval`



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ScmTableCountTask.java:
##########
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.tasks;
+
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.recon.scm.ReconScmTask;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
+import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
+import org.hadoop.ozone.recon.schema.tables.daos.ScmTableCountDao;
+import org.hadoop.ozone.recon.schema.tables.pojos.ScmTableCount;
+import org.jooq.DSLContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import java.util.HashMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Iterator;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static 
org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.DELETED_BLOCKS;
+import static 
org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.SCM_TABLE_COUNT_TABLE_NAME;
+
+
+/**
+ * Any background task that tracks SCM's table counts.
+ */
+public class ScmTableCountTask extends ReconScmTask {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ScmTableCountTask.class);
+
+  private final DBStore scmDBStore;
+  private final ScmTableCountDao scmTableCountDao;
+  private final DSLContext dslContext;
+  private final long interval;
+  private ReadWriteLock lock = new ReentrantReadWriteLock(true);
+
+  public ScmTableCountTask(ReconStorageContainerManagerFacade reconSCM,
+                           ReconTaskStatusDao reconTaskStatusDao,
+                           ReconTaskConfig reconTaskConfig,
+                           ScmTableCountDao scmTableCountDao,
+                           UtilizationSchemaDefinition schemaDefinition) {
+    super(reconTaskStatusDao);
+    this.scmDBStore = reconSCM.getScmDBStore();
+    this.scmTableCountDao = scmTableCountDao;
+    this.dslContext = schemaDefinition.getDSLContext();
+    this.interval = reconTaskConfig.getScmTableCountTaskInterval().toMillis();
+  }
+
+  @Override
+  protected synchronized void run() {
+    try {
+      while (canRun()) {
+        wait(interval);
+        long startTime, endTime, duration, durationMilliseconds;
+        try {
+          int execute =
+              dslContext.truncate(SCM_TABLE_COUNT_TABLE_NAME).execute();
+          LOG.info("Deleted {} records from {}", execute,
+              SCM_TABLE_COUNT_TABLE_NAME);
+        } catch (Exception e) {
+          LOG.error("An error occurred while truncating the table {}: {}",
+              SCM_TABLE_COUNT_TABLE_NAME, e.getMessage(), e);
+          return;
+        }
+        startTime = System.nanoTime();
+        processTableCount();
+        endTime = System.nanoTime();
+        duration = endTime - startTime;
+        durationMilliseconds = duration / 1_000_000;
+        LOG.info("Elapsed Time in milliseconds for processTableCount() " +
+            "execution: {}", durationMilliseconds);
+      }
+    } catch (Throwable t) {
+      LOG.error("Error while running ScmTableCountTask: {}", t);
+      if (t instanceof InterruptedException) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  /**
+   * Processes the table count by iterating over SCM tables and retrieving the
+   * counts of objects in each table. The counts are then stored in the Recon
+   * database.
+   *
+   * @throws IOException if an I/O error occurs during table count processing.
+   */
+  public void processTableCount() throws IOException {
+    // Acquire write lock
+    lock.writeLock().lock();
+    try {
+      // Initialize the object count map
+      HashMap<String, Long> objectCountMap = initializeCountMap();
+
+      // Iterate over SCM tables
+      for (String tableName : getTaskTables()) {
+        Table table = scmDBStore.getTable(tableName);
+
+        try (TableIterator keyIter = table.iterator()) {
+          // Retrieve the count of objects in the table
+          long count = getCount(keyIter);
+          objectCountMap.put(getRowKeyFromTable(tableName), count);
+        } catch (IOException ioEx) {
+          LOG.error("Unable to populate SCM Table Count in Recon DB.", ioEx);
+        }
+      }
+
+      // Write the counts to the Recon database
+      writeCountsToDB(objectCountMap);
+
+      objectCountMap.clear();
+      LOG.info("Completed writing SCM table counts to DB.");
+    } finally {
+      // Release write lock
+      lock.writeLock().unlock();
+    }
+  }
+
+
+  /**
+   * Writes the object counts from the object count map to the Recon database.
+   *
+   * @param objectCountMap a map containing table names as keys and their 
object
+   *                       counts as values.
+   */
+  private void writeCountsToDB(Map<String, Long> objectCountMap) {
+    List<ScmTableCount> insertToDb = new ArrayList<>();
+
+    // Iterate over the object count map
+    for (Map.Entry<String, Long> entry : objectCountMap.entrySet()) {
+      // Create a new ScmTableCount object
+      ScmTableCount scmTableCountRecord = new ScmTableCount();
+      scmTableCountRecord.setTableName(entry.getKey());
+      scmTableCountRecord.setCount(entry.getValue());
+
+      // Add the ScmTableCount object to the list
+      insertToDb.add(scmTableCountRecord);
+    }
+    // Insert the list of ScmTableCount objects into the Recon database
+    scmTableCountDao.insert(insertToDb);
+  }
+
+  /**
+   * Returns the count of items in the iterator.
+   *
+   * @param iterator the iterator to count the items from.
+   * @return the count of items in the iterator.
+   */
+  private long getCount(Iterator iterator) {

Review Comment:
   Remove the unused `getCount` method



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ScmTableCountTask.java:
##########
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.tasks;
+
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.recon.scm.ReconScmTask;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
+import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
+import org.hadoop.ozone.recon.schema.tables.daos.ScmTableCountDao;
+import org.hadoop.ozone.recon.schema.tables.pojos.ScmTableCount;
+import org.jooq.DSLContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import java.util.HashMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Iterator;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static 
org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.DELETED_BLOCKS;
+import static 
org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.SCM_TABLE_COUNT_TABLE_NAME;
+
+
+/**
+ * Any background task that tracks SCM's table counts.
+ */
+public class ScmTableCountTask extends ReconScmTask {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ScmTableCountTask.class);
+
+  private final DBStore scmDBStore;
+  private final ScmTableCountDao scmTableCountDao;
+  private final DSLContext dslContext;
+  private final long interval;
+  private ReadWriteLock lock = new ReentrantReadWriteLock(true);
+
+  public ScmTableCountTask(ReconStorageContainerManagerFacade reconSCM,
+                           ReconTaskStatusDao reconTaskStatusDao,
+                           ReconTaskConfig reconTaskConfig,
+                           ScmTableCountDao scmTableCountDao,
+                           UtilizationSchemaDefinition schemaDefinition) {
+    super(reconTaskStatusDao);
+    this.scmDBStore = reconSCM.getScmDBStore();
+    this.scmTableCountDao = scmTableCountDao;
+    this.dslContext = schemaDefinition.getDSLContext();
+    this.interval = reconTaskConfig.getScmTableCountTaskInterval().toMillis();
+  }
+
+  @Override
+  protected synchronized void run() {
+    try {
+      while (canRun()) {
+        wait(interval);
+        long startTime, endTime, duration, durationMilliseconds;
+        try {
+          int execute =
+              dslContext.truncate(SCM_TABLE_COUNT_TABLE_NAME).execute();
+          LOG.info("Deleted {} records from {}", execute,
+              SCM_TABLE_COUNT_TABLE_NAME);
+        } catch (Exception e) {
+          LOG.error("An error occurred while truncating the table {}: {}",
+              SCM_TABLE_COUNT_TABLE_NAME, e.getMessage(), e);
+          return;
+        }
+        startTime = System.nanoTime();
+        processTableCount();
+        endTime = System.nanoTime();
+        duration = endTime - startTime;
+        durationMilliseconds = duration / 1_000_000;
+        LOG.info("Elapsed Time in milliseconds for processTableCount() " +
+            "execution: {}", durationMilliseconds);
+      }
+    } catch (Throwable t) {
+      LOG.error("Error while running ScmTableCountTask: {}", t);
+      if (t instanceof InterruptedException) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  /**
+   * Processes the table count by iterating over SCM tables and retrieving the
+   * counts of objects in each table. The counts are then stored in the Recon
+   * database.
+   *
+   * @throws IOException if an I/O error occurs during table count processing.
+   */
+  public void processTableCount() throws IOException {
+    // Acquire write lock
+    lock.writeLock().lock();
+    try {
+      // Initialize the object count map
+      HashMap<String, Long> objectCountMap = initializeCountMap();
+
+      // Iterate over SCM tables
+      for (String tableName : getTaskTables()) {
+        Table table = scmDBStore.getTable(tableName);
+
+        try (TableIterator keyIter = table.iterator()) {
+          // Retrieve the count of objects in the table
+          long count = getCount(keyIter);
+          objectCountMap.put(getRowKeyFromTable(tableName), count);
+        } catch (IOException ioEx) {
+          LOG.error("Unable to populate SCM Table Count in Recon DB.", ioEx);
+        }
+      }
+
+      // Write the counts to the Recon database
+      writeCountsToDB(objectCountMap);
+
+      objectCountMap.clear();
+      LOG.info("Completed writing SCM table counts to DB.");
+    } finally {
+      // Release write lock
+      lock.writeLock().unlock();
+    }
+  }
+
+
+  /**
+   * Writes the object counts from the object count map to the Recon database.
+   *
+   * @param objectCountMap a map containing table names as keys and their 
object
+   *                       counts as values.
+   */
+  private void writeCountsToDB(Map<String, Long> objectCountMap) {
+    List<ScmTableCount> insertToDb = new ArrayList<>();
+
+    // Iterate over the object count map
+    for (Map.Entry<String, Long> entry : objectCountMap.entrySet()) {
+      // Create a new ScmTableCount object
+      ScmTableCount scmTableCountRecord = new ScmTableCount();
+      scmTableCountRecord.setTableName(entry.getKey());
+      scmTableCountRecord.setCount(entry.getValue());
+
+      // Add the ScmTableCount object to the list
+      insertToDb.add(scmTableCountRecord);
+    }
+    // Insert the list of ScmTableCount objects into the Recon database
+    scmTableCountDao.insert(insertToDb);
+  }
+
+  /**
+   * Returns the count of items in the iterator.
+   *
+   * @param iterator the iterator to count the items from.
+   * @return the count of items in the iterator.
+   */
+  private long getCount(Iterator iterator) {
+    long count = 0L;
+    while (iterator.hasNext()) {
+      count++;
+      iterator.next();
+    }
+    return count;
+  }
+
+  private HashMap<String, Long> initializeCountMap() throws IOException {
+    Collection<String> tables = getTaskTables();
+    HashMap<String, Long> objectCountMap = new HashMap<>(tables.size());

Review Comment:
   ```suggestion
       Map<String, Long> objectCountMap = new HashMap<>(tables.size());
   ```



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ScmTableCountTask.java:
##########
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.tasks;
+
+import org.apache.hadoop.hdds.utils.db.DBStore;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.recon.scm.ReconScmTask;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
+import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
+import org.hadoop.ozone.recon.schema.tables.daos.ScmTableCountDao;
+import org.hadoop.ozone.recon.schema.tables.pojos.ScmTableCount;
+import org.jooq.DSLContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+import java.util.HashMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Iterator;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static 
org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.DELETED_BLOCKS;
+import static 
org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.SCM_TABLE_COUNT_TABLE_NAME;
+
+
+/**
+ * Any background task that tracks SCM's table counts.
+ */
+public class ScmTableCountTask extends ReconScmTask {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ScmTableCountTask.class);
+
+  private final DBStore scmDBStore;
+  private final ScmTableCountDao scmTableCountDao;
+  private final DSLContext dslContext;
+  private final long interval;
+  private ReadWriteLock lock = new ReentrantReadWriteLock(true);
+
+  public ScmTableCountTask(ReconStorageContainerManagerFacade reconSCM,
+                           ReconTaskStatusDao reconTaskStatusDao,
+                           ReconTaskConfig reconTaskConfig,
+                           ScmTableCountDao scmTableCountDao,
+                           UtilizationSchemaDefinition schemaDefinition) {
+    super(reconTaskStatusDao);
+    this.scmDBStore = reconSCM.getScmDBStore();
+    this.scmTableCountDao = scmTableCountDao;
+    this.dslContext = schemaDefinition.getDSLContext();
+    this.interval = reconTaskConfig.getScmTableCountTaskInterval().toMillis();
+  }
+
+  @Override
+  protected synchronized void run() {
+    try {
+      while (canRun()) {
+        wait(interval);
+        long startTime, endTime, duration, durationMilliseconds;
+        try {
+          int execute =
+              dslContext.truncate(SCM_TABLE_COUNT_TABLE_NAME).execute();
+          LOG.info("Deleted {} records from {}", execute,
+              SCM_TABLE_COUNT_TABLE_NAME);
+        } catch (Exception e) {
+          LOG.error("An error occurred while truncating the table {}: {}",
+              SCM_TABLE_COUNT_TABLE_NAME, e.getMessage(), e);
+          return;
+        }
+        startTime = System.nanoTime();
+        processTableCount();
+        endTime = System.nanoTime();
+        duration = endTime - startTime;
+        durationMilliseconds = duration / 1_000_000;
+        LOG.info("Elapsed Time in milliseconds for processTableCount() " +
+            "execution: {}", durationMilliseconds);
+      }
+    } catch (Throwable t) {
+      LOG.error("Error while running ScmTableCountTask: {}", t);
+      if (t instanceof InterruptedException) {
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  /**
+   * Processes the table count by iterating over SCM tables and retrieving the
+   * counts of objects in each table. The counts are then stored in the Recon
+   * database.
+   *
+   * @throws IOException if an I/O error occurs during table count processing.
+   */
+  public void processTableCount() throws IOException {
+    // Acquire write lock
+    lock.writeLock().lock();
+    try {
+      // Initialize the object count map
+      HashMap<String, Long> objectCountMap = initializeCountMap();
+
+      // Iterate over SCM tables
+      for (String tableName : getTaskTables()) {
+        Table table = scmDBStore.getTable(tableName);
+
+        try (TableIterator keyIter = table.iterator()) {
+          // Retrieve the count of objects in the table
+          long count = getCount(keyIter);

Review Comment:
   ```suggestion
             long count = Iterators.size(keyIter);
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to