This is an automated email from the ASF dual-hosted git repository.
sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new c1efa330d3 HDDS-8627. Recon - API for Count of deletePending
directories (#5037)
c1efa330d3 is described below
commit c1efa330d338d586cbe3a10886f9ce936b5b77df
Author: Arafat2198 <[email protected]>
AuthorDate: Fri Feb 9 07:56:44 2024 +0530
HDDS-8627. Recon - API for Count of deletePending directories (#5037)
---
.../ozone/recon/api/OMDBInsightEndpoint.java | 31 ++
.../recon/tasks/DeletedKeysInsightHandler.java | 147 +++++++
.../hadoop/ozone/recon/tasks/OmTableHandler.java | 131 ++++++
.../ozone/recon/tasks/OmTableInsightTask.java | 321 ++++-----------
.../ozone/recon/tasks/OpenKeysInsightHandler.java | 163 ++++++++
.../ozone/recon/OMMetadataManagerTestUtils.java | 18 +-
.../hadoop/ozone/recon/api/TestEndpoints.java | 15 +-
.../ozone/recon/tasks/TestOmTableInsightTask.java | 448 ++++++++++++++++-----
8 files changed, 925 insertions(+), 349 deletions(-)
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
index 84f55749a6..baa9c522be 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -58,6 +58,7 @@ import static
org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
+import static
org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT;
import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY;
@@ -652,6 +653,36 @@ public class OMDBInsightEndpoint {
return Response.ok(deletedDirInsightInfo).build();
}
+ /**
+ * Retrieves the summary of deleted directories.
+ *
+ * This method calculates and returns a summary of deleted directories.
+ * @return The HTTP response body includes a map with the following entries:
+ * - "totalDeletedDirectories": the total number of deleted directories
+ *
+ * Example response:
+ * {
+ *   "totalDeletedDirectories": 8
+ * }
+ */
+ @GET
+ @Path("/deletePending/dirs/summary")
+ public Response getDeletedDirectorySummary() {
+ Map<String, Long> dirSummary = new HashMap<>();
+ // Create a keys summary for deleted directories
+ createSummaryForDeletedDirectories(dirSummary);
+ return Response.ok(dirSummary).build();
+ }
+
+ private void createSummaryForDeletedDirectories(
+ Map<String, Long> dirSummary) {
+ // Fetch the necessary metrics for deleted directories.
+ Long deletedDirCount = getValueFromId(globalStatsDao.findById(
+ OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE)));
+ // Calculate the total number of deleted directories
+ dirSummary.put("totalDeletedDirectories", deletedDirCount);
+ }
+
private void updateReplicatedAndUnReplicatedTotal(
KeyInsightInfoResponse deletedKeyAndDirInsightInfo,
RepeatedOmKeyInfo repeatedOmKeyInfo) {
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java
new file mode 100644
index 0000000000..5a6d7a256e
--- /dev/null
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.tasks;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.commons.lang3.tuple.Triple;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+/**
+ * Manages records in the Deleted Table, updating counts and sizes of
+ * pending Key Deletions in the backend.
+ */
+public class DeletedKeysInsightHandler implements OmTableHandler {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(DeletedKeysInsightHandler.class);
+
+ /**
+ * Invoked by the process method to add information on those keys that have
+ * been backlogged in the backend for deletion.
+ */
+ @Override
+ public void handlePutEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap) {
+
+ String countKey = getTableCountKeyFromTable(tableName);
+ String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
+ String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
+
+ if (event.getValue() != null) {
+ RepeatedOmKeyInfo repeatedOmKeyInfo =
+ (RepeatedOmKeyInfo) event.getValue();
+ objectCountMap.computeIfPresent(countKey,
+ (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size());
+ Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
+ unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
+ (k, size) -> size + result.getLeft());
+ replicatedSizeMap.computeIfPresent(replicatedSizeKey,
+ (k, size) -> size + result.getRight());
+ } else {
+ LOG.warn("Put event does not have the Key Info for {}.",
+ event.getKey());
+ }
+
+ }
+
+ /**
+ * Invoked by the process method to remove information on those keys that
have
+ * been successfully deleted from the backend.
+ */
+ @Override
+ public void handleDeleteEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap) {
+
+ String countKey = getTableCountKeyFromTable(tableName);
+ String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
+ String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
+
+ if (event.getValue() != null) {
+ RepeatedOmKeyInfo repeatedOmKeyInfo =
+ (RepeatedOmKeyInfo) event.getValue();
+ objectCountMap.computeIfPresent(countKey, (k, count) ->
+ count > 0 ? count - repeatedOmKeyInfo.getOmKeyInfoList().size() :
0L);
+ Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
+ unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
+ (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L);
+ replicatedSizeMap.computeIfPresent(replicatedSizeKey,
+ (k, size) -> size > result.getRight() ? size - result.getRight() :
+ 0L);
+ } else {
+ LOG.warn("Delete event does not have the Key Info for {}.",
+ event.getKey());
+ }
+ }
+
+ /**
+ * Invoked by the process method to update the statistics on the keys
+ * pending to be deleted.
+ */
+ @Override
+ public void handleUpdateEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap) {
+ // The size of deleted keys cannot change hence no-op.
+ return;
+ }
+
+ /**
+ * Invoked by the reprocess method to calculate the records count of the
+ * deleted table and the sizes of replicated and unreplicated keys that are
+ * pending deletion in Ozone.
+ */
+ @Override
+ public Triple<Long, Long, Long> getTableSizeAndCount(
+ TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator)
+ throws IOException {
+ long count = 0;
+ long unReplicatedSize = 0;
+ long replicatedSize = 0;
+
+ if (iterator != null) {
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, ?> kv = iterator.next();
+ if (kv != null && kv.getValue() != null) {
+ RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv
+ .getValue();
+ Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
+ unReplicatedSize += result.getRight();
+ replicatedSize += result.getLeft();
+ count += repeatedOmKeyInfo.getOmKeyInfoList().size();
+ }
+ }
+ }
+ return Triple.of(count, unReplicatedSize, replicatedSize);
+ }
+}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java
new file mode 100644
index 0000000000..5ae23b68a7
--- /dev/null
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.tasks;
+
+import org.apache.commons.lang3.tuple.Triple;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+/**
+ * Interface for handling PUT, DELETE and UPDATE events for size-related
+ * tables for OM Insights.
+ */
+public interface OmTableHandler {
+
+ /**
+ * Handles a PUT event for size-related tables by updating both the data
+ * sizes and their corresponding record counts in the tables.
+ *
+ * @param event The PUT event to be processed.
+ * @param tableName Table name associated with the event.
+ * @param objectCountMap A map storing object counts.
+ * @param unReplicatedSizeMap A map storing unReplicated size counts.
+ * @param replicatedSizeMap A map storing replicated size counts.
+ */
+ void handlePutEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap);
+
+
+ /**
+ * Handles a DELETE event for size-related tables by updating both the data
+ * sizes and their corresponding record counts in the tables.
+ *
+ * @param event The DELETE event to be processed.
+ * @param tableName Table name associated with the event.
+ * @param objectCountMap A map storing object counts.
+ * @param unReplicatedSizeMap A map storing unReplicated size counts.
+ * @param replicatedSizeMap A map storing replicated size counts.
+ */
+ void handleDeleteEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap);
+
+
+ /**
+ * Handles an UPDATE event for size-related tables by updating both the data
+ * sizes and their corresponding record counts in the tables.
+ *
+ * @param event The UPDATE event to be processed.
+ * @param tableName Table name associated with the event.
+ * @param objectCountMap A map storing object counts.
+ * @param unReplicatedSizeMap A map storing unReplicated size counts.
+ * @param replicatedSizeMap A map storing replicated size counts.
+ */
+ void handleUpdateEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap);
+
+
+ /**
+ * Returns a triple with the total count of records (left), total
unreplicated
+ * size (middle), and total replicated size (right) in the given iterator.
+ * Increments count for each record and adds the dataSize if a record's value
+ * is an instance of OmKeyInfo or RepeatedOmKeyInfo.
+ * If the iterator is null, returns (0, 0, 0).
+ *
+ * @param iterator The iterator over the table to be iterated.
+ * @return A Triple with three Long values representing the count,
+ * unReplicated size and replicated size.
+ * @throws IOException If an I/O error occurs during the iterator traversal.
+ */
+ Triple<Long, Long, Long> getTableSizeAndCount(
+ TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator)
+ throws IOException;
+
+
+ /**
+ * Returns the count key for the given table.
+ *
+ * @param tableName The name of the table.
+ * @return The count key for the table.
+ */
+ default String getTableCountKeyFromTable(String tableName) {
+ return tableName + "Count";
+ }
+
+ /**
+ * Returns the replicated size key for the given table.
+ *
+ * @param tableName The name of the table.
+ * @return The replicated size key for the table.
+ */
+ default String getReplicatedSizeKeyFromTable(String tableName) {
+ return tableName + "ReplicatedDataSize";
+ }
+
+ /**
+ * Returns the unreplicated size key for the given table.
+ *
+ * @param tableName The name of the table.
+ * @return The unreplicated size key for the table.
+ */
+ default String getUnReplicatedSizeKeyFromTable(String tableName) {
+ return tableName + "UnReplicatedDataSize";
+ }
+}
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
index c814d9d9e3..3e84f311c9 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java
@@ -26,8 +26,6 @@ import org.apache.commons.lang3.tuple.Triple;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.OMMetadataManager;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats;
@@ -37,22 +35,20 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.sql.Timestamp;
-import java.util.ArrayList;
-import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
-import java.util.List;
import java.util.Map;
-
-
+import java.util.Collection;
import java.util.Map.Entry;
+import java.util.ArrayList;
+import java.util.List;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static org.jooq.impl.DSL.currentTimestamp;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
import static org.jooq.impl.DSL.select;
import static org.jooq.impl.DSL.using;
+import static org.jooq.impl.DSL.currentTimestamp;
/**
* Class to iterate over the OM DB and store the total counts of volumes,
@@ -65,14 +61,21 @@ public class OmTableInsightTask implements ReconOmTask {
private GlobalStatsDao globalStatsDao;
private Configuration sqlConfiguration;
private ReconOMMetadataManager reconOMMetadataManager;
+ private Map<String, OmTableHandler> tableHandlers;
@Inject
public OmTableInsightTask(GlobalStatsDao globalStatsDao,
- Configuration sqlConfiguration,
- ReconOMMetadataManager reconOMMetadataManager) {
+ Configuration sqlConfiguration,
+ ReconOMMetadataManager reconOMMetadataManager) {
this.globalStatsDao = globalStatsDao;
this.sqlConfiguration = sqlConfiguration;
this.reconOMMetadataManager = reconOMMetadataManager;
+
+ // Initialize table handlers
+ tableHandlers = new HashMap<>();
+ tableHandlers.put(OPEN_KEY_TABLE, new OpenKeysInsightHandler());
+ tableHandlers.put(OPEN_FILE_TABLE, new OpenKeysInsightHandler());
+ tableHandlers.put(DELETED_TABLE, new DeletedKeysInsightHandler());
}
/**
@@ -90,8 +93,8 @@ public class OmTableInsightTask implements ReconOmTask {
@Override
public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
HashMap<String, Long> objectCountMap = initializeCountMap();
- HashMap<String, Long> unReplicatedSizeCountMap = initializeSizeMap(false);
- HashMap<String, Long> replicatedSizeCountMap = initializeSizeMap(true);
+ HashMap<String, Long> unReplicatedSizeMap = initializeSizeMap(false);
+ HashMap<String, Long> replicatedSizeMap = initializeSizeMap(true);
for (String tableName : getTaskTables()) {
Table table = omMetadataManager.getTable(tableName);
@@ -100,16 +103,16 @@ public class OmTableInsightTask implements ReconOmTask {
return new ImmutablePair<>(getTaskName(), false);
}
- try (
- TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator
- = table.iterator()) {
- if (getTablesToCalculateSize().contains(tableName)) {
- Triple<Long, Long, Long> details = getTableSizeAndCount(iterator);
+ try (TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator
+ = table.iterator()) {
+ if (tableHandlers.containsKey(tableName)) {
+ Triple<Long, Long, Long> details =
+ tableHandlers.get(tableName).getTableSizeAndCount(iterator);
objectCountMap.put(getTableCountKeyFromTable(tableName),
details.getLeft());
- unReplicatedSizeCountMap.put(
+ unReplicatedSizeMap.put(
getUnReplicatedSizeKeyFromTable(tableName), details.getMiddle());
- replicatedSizeCountMap.put(getReplicatedSizeKeyFromTable(tableName),
+ replicatedSizeMap.put(getReplicatedSizeKeyFromTable(tableName),
details.getRight());
} else {
long count = Iterators.size(iterator);
@@ -124,72 +127,17 @@ public class OmTableInsightTask implements ReconOmTask {
if (!objectCountMap.isEmpty()) {
writeDataToDB(objectCountMap);
}
- if (!unReplicatedSizeCountMap.isEmpty()) {
- writeDataToDB(unReplicatedSizeCountMap);
+ if (!unReplicatedSizeMap.isEmpty()) {
+ writeDataToDB(unReplicatedSizeMap);
}
- if (!replicatedSizeCountMap.isEmpty()) {
- writeDataToDB(replicatedSizeCountMap);
+ if (!replicatedSizeMap.isEmpty()) {
+ writeDataToDB(replicatedSizeMap);
}
LOG.info("Completed a 'reprocess' run of OmTableInsightTask.");
return new ImmutablePair<>(getTaskName(), true);
}
- /**
- * Returns a triple with the total count of records (left), total
unreplicated
- * size (middle), and total replicated size (right) in the given iterator.
- * Increments count for each record and adds the dataSize if a record's value
- * is an instance of OmKeyInfo. If the iterator is null, returns (0, 0, 0).
- *
- * @param iterator The iterator over the table to be iterated.
- * @return A Triple with three Long values representing the count,
- * unreplicated size and replicated size.
- * @throws IOException If an I/O error occurs during the iterator traversal.
- */
- private Triple<Long, Long, Long> getTableSizeAndCount(
- TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator)
- throws IOException {
- long count = 0;
- long unReplicatedSize = 0;
- long replicatedSize = 0;
-
- if (iterator != null) {
- while (iterator.hasNext()) {
- Table.KeyValue<String, ?> kv = iterator.next();
- if (kv != null && kv.getValue() != null) {
- if (kv.getValue() instanceof OmKeyInfo) {
- OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue();
- unReplicatedSize += omKeyInfo.getDataSize();
- replicatedSize += omKeyInfo.getReplicatedSize();
- count++;
- }
- if (kv.getValue() instanceof RepeatedOmKeyInfo) {
- RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv
- .getValue();
- Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
- unReplicatedSize += result.getRight();
- replicatedSize += result.getLeft();
- // Since we can have multiple deleted keys of same name
- count += repeatedOmKeyInfo.getOmKeyInfoList().size();
- }
- }
- }
- }
-
- return Triple.of(count, unReplicatedSize, replicatedSize);
- }
-
- /**
- * Returns a collection of table names that require data size calculation.
- */
- public Collection<String> getTablesToCalculateSize() {
- List<String> taskTables = new ArrayList<>();
- taskTables.add(OPEN_KEY_TABLE);
- taskTables.add(OPEN_FILE_TABLE);
- taskTables.add(DELETED_TABLE);
- return taskTables;
- }
-
@Override
public String getTaskName() {
return "OmTableInsightTask";
@@ -211,10 +159,9 @@ public class OmTableInsightTask implements ReconOmTask {
Iterator<OMDBUpdateEvent> eventIterator = events.getIterator();
// Initialize maps to store count and size information
HashMap<String, Long> objectCountMap = initializeCountMap();
- HashMap<String, Long> unreplicatedSizeCountMap = initializeSizeMap(false);
- HashMap<String, Long> replicatedSizeCountMap = initializeSizeMap(true);
+ HashMap<String, Long> unReplicatedSizeMap = initializeSizeMap(false);
+ HashMap<String, Long> replicatedSizeMap = initializeSizeMap(true);
final Collection<String> taskTables = getTaskTables();
- final Collection<String> sizeRelatedTables = getTablesToCalculateSize();
// Process each update event
while (eventIterator.hasNext()) {
@@ -223,22 +170,21 @@ public class OmTableInsightTask implements ReconOmTask {
if (!taskTables.contains(tableName)) {
continue;
}
-
try {
switch (omdbUpdateEvent.getAction()) {
case PUT:
- handlePutEvent(omdbUpdateEvent, tableName, sizeRelatedTables,
- objectCountMap, unreplicatedSizeCountMap,
replicatedSizeCountMap);
+ handlePutEvent(omdbUpdateEvent, tableName, objectCountMap,
+ unReplicatedSizeMap, replicatedSizeMap);
break;
case DELETE:
- handleDeleteEvent(omdbUpdateEvent, tableName, sizeRelatedTables,
- objectCountMap, unreplicatedSizeCountMap,
replicatedSizeCountMap);
+ handleDeleteEvent(omdbUpdateEvent, tableName, objectCountMap,
+ unReplicatedSizeMap, replicatedSizeMap);
break;
case UPDATE:
- handleUpdateEvent(omdbUpdateEvent, tableName, sizeRelatedTables,
- objectCountMap, unreplicatedSizeCountMap,
replicatedSizeCountMap);
+ handleUpdateEvent(omdbUpdateEvent, tableName, objectCountMap,
+ unReplicatedSizeMap, replicatedSizeMap);
break;
default:
@@ -256,11 +202,11 @@ public class OmTableInsightTask implements ReconOmTask {
if (!objectCountMap.isEmpty()) {
writeDataToDB(objectCountMap);
}
- if (!unreplicatedSizeCountMap.isEmpty()) {
- writeDataToDB(unreplicatedSizeCountMap);
+ if (!unReplicatedSizeMap.isEmpty()) {
+ writeDataToDB(unReplicatedSizeMap);
}
- if (!replicatedSizeCountMap.isEmpty()) {
- writeDataToDB(replicatedSizeCountMap);
+ if (!replicatedSizeMap.isEmpty()) {
+ writeDataToDB(replicatedSizeMap);
}
LOG.info("Completed a 'process' run of OmTableInsightTask.");
return new ImmutablePair<>(getTaskName(), true);
@@ -268,65 +214,34 @@ public class OmTableInsightTask implements ReconOmTask {
private void handlePutEvent(OMDBUpdateEvent<String, Object> event,
String tableName,
- Collection<String> sizeRelatedTables,
HashMap<String, Long> objectCountMap,
- HashMap<String, Long> unreplicatedSizeCountMap,
- HashMap<String, Long> replicatedSizeCountMap) {
-
- if (sizeRelatedTables.contains(tableName)) {
- handleSizeRelatedTablePutEvent(event, tableName, objectCountMap,
- unreplicatedSizeCountMap, replicatedSizeCountMap);
- } else {
- String countKey = getTableCountKeyFromTable(tableName);
- objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L);
- }
- }
-
- private void handleSizeRelatedTablePutEvent(
- OMDBUpdateEvent<String, Object> event,
- String tableName,
- HashMap<String, Long> objectCountMap,
- HashMap<String, Long> unreplicatedSizeCountMap,
- HashMap<String, Long> replicatedSizeCountMap) {
-
- String countKey = getTableCountKeyFromTable(tableName);
- String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
- String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
-
- if (event.getValue() instanceof OmKeyInfo) {
- // Handle PUT for OpenKeyTable & OpenFileTable
- OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue();
- objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L);
- unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
- (k, size) -> size + omKeyInfo.getDataSize());
- replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
- (k, size) -> size + omKeyInfo.getReplicatedSize());
- } else if (event.getValue() instanceof RepeatedOmKeyInfo) {
- // Handle PUT for DeletedTable
- RepeatedOmKeyInfo repeatedOmKeyInfo =
- (RepeatedOmKeyInfo) event.getValue();
- objectCountMap.computeIfPresent(countKey,
- (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size());
- Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
- unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
- (k, size) -> size + result.getLeft());
- replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
- (k, size) -> size + result.getRight());
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap)
+ throws IOException {
+ OmTableHandler tableHandler = tableHandlers.get(tableName);
+ if (event.getValue() != null) {
+ if (tableHandler != null) {
+ tableHandler.handlePutEvent(event, tableName, objectCountMap,
+ unReplicatedSizeMap, replicatedSizeMap);
+ } else {
+ String countKey = getTableCountKeyFromTable(tableName);
+ objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L);
+ }
}
}
private void handleDeleteEvent(OMDBUpdateEvent<String, Object> event,
String tableName,
- Collection<String> sizeRelatedTables,
HashMap<String, Long> objectCountMap,
- HashMap<String, Long>
unreplicatedSizeCountMap,
- HashMap<String, Long> replicatedSizeCountMap)
{
-
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap)
+ throws IOException {
+ OmTableHandler tableHandler = tableHandlers.get(tableName);
if (event.getValue() != null) {
- if (sizeRelatedTables.contains(tableName)) {
- handleSizeRelatedTableDeleteEvent(event, tableName, objectCountMap,
- unreplicatedSizeCountMap, replicatedSizeCountMap);
+ if (tableHandler != null) {
+ tableHandler.handleDeleteEvent(event, tableName, objectCountMap,
+ unReplicatedSizeMap, replicatedSizeMap);
} else {
String countKey = getTableCountKeyFromTable(tableName);
objectCountMap.computeIfPresent(countKey,
@@ -335,109 +250,28 @@ public class OmTableInsightTask implements ReconOmTask {
}
}
- private void handleSizeRelatedTableDeleteEvent(
- OMDBUpdateEvent<String, Object> event,
- String tableName,
- HashMap<String, Long> objectCountMap,
- HashMap<String, Long> unreplicatedSizeCountMap,
- HashMap<String, Long> replicatedSizeCountMap) {
-
- String countKey = getTableCountKeyFromTable(tableName);
- String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
- String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
-
- if (event.getValue() instanceof OmKeyInfo) {
- // Handle DELETE for OpenKeyTable & OpenFileTable
- OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue();
- objectCountMap.computeIfPresent(countKey,
- (k, count) -> count > 0 ? count - 1L : 0L);
- unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
- (k, size) -> size > omKeyInfo.getDataSize() ?
- size - omKeyInfo.getDataSize() : 0L);
- replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
- (k, size) -> size > omKeyInfo.getReplicatedSize() ?
- size - omKeyInfo.getReplicatedSize() : 0L);
- } else if (event.getValue() instanceof RepeatedOmKeyInfo) {
- // Handle DELETE for DeletedTable
- RepeatedOmKeyInfo repeatedOmKeyInfo =
- (RepeatedOmKeyInfo) event.getValue();
- objectCountMap.computeIfPresent(countKey, (k, count) -> count > 0 ?
- count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L);
- Pair<Long, Long> result = repeatedOmKeyInfo.getTotalSize();
- unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
- (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L);
- replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
- (k, size) -> size > result.getRight() ? size - result.getRight() :
- 0L);
- }
- }
private void handleUpdateEvent(OMDBUpdateEvent<String, Object> event,
String tableName,
- Collection<String> sizeRelatedTables,
HashMap<String, Long> objectCountMap,
- HashMap<String, Long>
unreplicatedSizeCountMap,
- HashMap<String, Long> replicatedSizeCountMap)
{
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap) {
+ OmTableHandler tableHandler = tableHandlers.get(tableName);
if (event.getValue() != null) {
- if (sizeRelatedTables.contains(tableName)) {
+ if (tableHandler != null) {
// Handle update for only size related tables
- handleSizeRelatedTableUpdateEvent(event, tableName, objectCountMap,
- unreplicatedSizeCountMap, replicatedSizeCountMap);
+ tableHandler.handleUpdateEvent(event, tableName, objectCountMap,
+ unReplicatedSizeMap, replicatedSizeMap);
}
}
}
-
- private void handleSizeRelatedTableUpdateEvent(
- OMDBUpdateEvent<String, Object> event,
- String tableName,
- HashMap<String, Long> objectCountMap,
- HashMap<String, Long> unreplicatedSizeCountMap,
- HashMap<String, Long> replicatedSizeCountMap) {
-
- if (event.getOldValue() == null) {
- LOG.warn("Update event does not have the old Key Info for {}.",
- event.getKey());
- return;
- }
- String countKey = getTableCountKeyFromTable(tableName);
- String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
- String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
-
- // In Update event the count for the open table will not change. So we
don't
- // need to update the count. Except for RepeatedOmKeyInfo, for which the
- // size of omKeyInfoList can change
- if (event.getValue() instanceof OmKeyInfo) {
- // Handle UPDATE for OpenKeyTable & OpenFileTable
- OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue();
- OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue();
- unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
- (k, size) -> size - oldKeyInfo.getDataSize() +
- newKeyInfo.getDataSize());
- replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
- (k, size) -> size - oldKeyInfo.getReplicatedSize() +
- newKeyInfo.getReplicatedSize());
- } else if (event.getValue() instanceof RepeatedOmKeyInfo) {
- // Handle UPDATE for DeletedTable
- RepeatedOmKeyInfo oldRepeatedOmKeyInfo =
- (RepeatedOmKeyInfo) event.getOldValue();
- RepeatedOmKeyInfo newRepeatedOmKeyInfo =
- (RepeatedOmKeyInfo) event.getValue();
- objectCountMap.computeIfPresent(countKey,
- (k, count) -> count > 0 ?
- count - oldRepeatedOmKeyInfo.getOmKeyInfoList().size() +
- newRepeatedOmKeyInfo.getOmKeyInfoList().size() : 0L);
- Pair<Long, Long> oldSize = oldRepeatedOmKeyInfo.getTotalSize();
- Pair<Long, Long> newSize = newRepeatedOmKeyInfo.getTotalSize();
- unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey,
- (k, size) -> size - oldSize.getLeft() + newSize.getLeft());
- replicatedSizeCountMap.computeIfPresent(replicatedSizeKey,
- (k, size) -> size - oldSize.getRight() + newSize.getRight());
- }
- }
-
-
+ /**
+ * Write the updated count and size information to the database.
+ *
+ * @param dataMap Map containing the updated count and size information.
+ */
private void writeDataToDB(Map<String, Long> dataMap) {
List<GlobalStats> insertGlobalStats = new ArrayList<>();
List<GlobalStats> updateGlobalStats = new ArrayList<>();
@@ -461,6 +295,11 @@ public class OmTableInsightTask implements ReconOmTask {
globalStatsDao.update(updateGlobalStats);
}
+ /**
+ * Initializes and returns a count map with the counts for the tables.
+ *
+ * @return The count map containing the counts for each table.
+ */
private HashMap<String, Long> initializeCountMap() {
Collection<String> tables = getTaskTables();
HashMap<String, Long> objectCountMap = new HashMap<>(tables.size());
@@ -478,11 +317,13 @@ public class OmTableInsightTask implements ReconOmTask {
* @return The size map containing the size counts for each table.
*/
private HashMap<String, Long> initializeSizeMap(boolean replicated) {
- Collection<String> tables = getTablesToCalculateSize();
- HashMap<String, Long> sizeCountMap = new HashMap<>(tables.size());
- for (String tableName : tables) {
- String key = replicated ? getReplicatedSizeKeyFromTable(tableName) :
- getUnReplicatedSizeKeyFromTable(tableName);
+ HashMap<String, Long> sizeCountMap = new HashMap<>();
+ for (Map.Entry<String, OmTableHandler> entry : tableHandlers.entrySet()) {
+ String tableName = entry.getKey();
+ OmTableHandler tableHandler = entry.getValue();
+ String key =
+ replicated ? tableHandler.getReplicatedSizeKeyFromTable(tableName) :
+ tableHandler.getUnReplicatedSizeKeyFromTable(tableName);
sizeCountMap.put(key, getValueForKey(key));
}
return sizeCountMap;
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java
new file mode 100644
index 0000000000..7a27d29d8f
--- /dev/null
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.tasks;
+
+import org.apache.commons.lang3.tuple.Triple;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+/**
+ * Manages records in the OpenKey Table, updating counts and sizes of
+ * open keys in the backend.
+ */
+public class OpenKeysInsightHandler implements OmTableHandler {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OpenKeysInsightHandler.class);
+
+ /**
+ * Invoked by the process method to add information on those keys that have
+ * been open in the backend.
+ */
+ @Override
+ public void handlePutEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap) {
+
+ String countKey = getTableCountKeyFromTable(tableName);
+ String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
+ String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
+
+ if (event.getValue() != null) {
+ OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue();
+ objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L);
+ unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
+ (k, size) -> size + omKeyInfo.getDataSize());
+ replicatedSizeMap.computeIfPresent(replicatedSizeKey,
+ (k, size) -> size + omKeyInfo.getReplicatedSize());
+ } else {
+ LOG.warn("Put event does not have the Key Info for {}.",
+ event.getKey());
+ }
+ }
+
+ /**
+ * Invoked by the process method to delete information on those keys that are
+ * no longer closed in the backend.
+ */
+ @Override
+ public void handleDeleteEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap) {
+
+ String countKey = getTableCountKeyFromTable(tableName);
+ String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
+ String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
+
+ if (event.getValue() != null) {
+ OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue();
+ objectCountMap.computeIfPresent(countKey,
+ (k, count) -> count > 0 ? count - 1L : 0L);
+ unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
+ (k, size) -> size > omKeyInfo.getDataSize() ?
+ size - omKeyInfo.getDataSize() : 0L);
+ replicatedSizeMap.computeIfPresent(replicatedSizeKey,
+ (k, size) -> size > omKeyInfo.getReplicatedSize() ?
+ size - omKeyInfo.getReplicatedSize() : 0L);
+ } else {
+ LOG.warn("Delete event does not have the Key Info for {}.",
+ event.getKey());
+ }
+ }
+
+ /**
+ * Invoked by the process method to update information on those open keys
that
+ * have been updated in the backend.
+ */
+ @Override
+ public void handleUpdateEvent(OMDBUpdateEvent<String, Object> event,
+ String tableName,
+ HashMap<String, Long> objectCountMap,
+ HashMap<String, Long> unReplicatedSizeMap,
+ HashMap<String, Long> replicatedSizeMap) {
+
+ if (event.getValue() != null) {
+ if (event.getOldValue() == null) {
+ LOG.warn("Update event does not have the old Key Info for {}.",
+ event.getKey());
+ return;
+ }
+ String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName);
+ String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName);
+
+ // In Update event the count for the open table will not change. So we
+ // don't need to update the count.
+ OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue();
+ OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue();
+ unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey,
+ (k, size) -> size - oldKeyInfo.getDataSize() +
+ newKeyInfo.getDataSize());
+ replicatedSizeMap.computeIfPresent(replicatedSizeKey,
+ (k, size) -> size - oldKeyInfo.getReplicatedSize() +
+ newKeyInfo.getReplicatedSize());
+ } else {
+ LOG.warn("Update event does not have the Key Info for {}.",
+ event.getKey());
+ }
+ }
+
+ /**
+ * This method is called by the reprocess method. It calculates the record
+ * counts for both the open key table and the open file table. Additionally,
+ * it computes the sizes of both replicated and unreplicated keys
+ * that are currently open in the backend.
+ */
+ @Override
+ public Triple<Long, Long, Long> getTableSizeAndCount(
+ TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator)
+ throws IOException {
+ long count = 0;
+ long unReplicatedSize = 0;
+ long replicatedSize = 0;
+
+ if (iterator != null) {
+ while (iterator.hasNext()) {
+ Table.KeyValue<String, ?> kv = iterator.next();
+ if (kv != null && kv.getValue() != null) {
+ OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue();
+ unReplicatedSize += omKeyInfo.getDataSize();
+ replicatedSize += omKeyInfo.getReplicatedSize();
+ count++;
+ }
+ }
+ }
+ return Triple.of(count, unReplicatedSize, replicatedSize);
+ }
+
+}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
index 42d69e030f..b1aecc9a4f 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java
@@ -397,23 +397,31 @@ public final class OMMetadataManagerTestUtils {
.build());
}
+ @SuppressWarnings("parameternumber")
public static void writeDeletedDirToOm(OMMetadataManager omMetadataManager,
String bucketName,
String volumeName,
String dirName,
long parentObjectId,
long bucketObjectId,
- long volumeObjectId)
+ long volumeObjectId,
+ long objectId)
throws IOException {
- // DB key in DeletedDirectoryTable => "volumeID/bucketID/parentId/dirName"
- String omKey = omMetadataManager.getOzonePathKey(volumeObjectId,
- bucketObjectId, parentObjectId, dirName);
+ // DB key in DeletedDirectoryTable =>
+ // "volumeID/bucketID/parentId/dirName/dirObjectId"
+
+ String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeObjectId,
+ bucketObjectId, parentObjectId, dirName);
+ String ozoneDeleteKey = omMetadataManager.getOzoneDeletePathKey(
+ objectId, ozoneDbKey);
+
- omMetadataManager.getDeletedDirTable().put(omKey,
+ omMetadataManager.getDeletedDirTable().put(ozoneDeleteKey,
new OmKeyInfo.Builder()
.setBucketName(bucketName)
.setVolumeName(volumeName)
.setKeyName(dirName)
+ .setObjectID(objectId)
.setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
.build());
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 05d9927d6c..42aabef0cf 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -288,8 +288,9 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
utilizationSchemaDefinition);
fileSizeCountTask =
new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition);
- omTableInsightTask = new OmTableInsightTask(
- globalStatsDao, sqlConfiguration, reconOMMetadataManager);
+ omTableInsightTask =
+ new OmTableInsightTask(globalStatsDao, sqlConfiguration,
+ reconOMMetadataManager);
containerHealthSchemaManager =
reconTestInjector.getInstance(ContainerHealthSchemaManager.class);
clusterStateEndpoint =
@@ -515,11 +516,11 @@ public class TestEndpoints extends AbstractReconSqlDBTest
{
// Populate the deletedDirectories table in OM DB
writeDeletedDirToOm(reconOMMetadataManager, "Bucket1", "Volume1", "dir1",
- 3L, 2L, 1L);
+ 3L, 2L, 1L, 23L);
writeDeletedDirToOm(reconOMMetadataManager, "Bucket2", "Volume2", "dir2",
- 6L, 5L, 4L);
+ 6L, 5L, 4L, 22L);
writeDeletedDirToOm(reconOMMetadataManager, "Bucket3", "Volume3", "dir3",
- 9L, 8L, 7L);
+ 9L, 8L, 7L, 21L);
// Truncate global stats table before running each test
dslContext.truncate(GLOBAL_STATS);
@@ -594,7 +595,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
(DatanodesResponse) response1.getEntity();
DatanodeMetadata datanodeMetadata1 =
datanodesResponse1.getDatanodes().stream().filter(datanodeMetadata ->
- datanodeMetadata.getHostname().equals("host1.datanode"))
+ datanodeMetadata.getHostname().equals("host1.datanode"))
.findFirst().orElse(null);
return (datanodeMetadata1 != null &&
datanodeMetadata1.getContainers() == 1 &&
@@ -699,7 +700,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
byte[] fileBytes = FileUtils.readFileToByteArray(
new File(classLoader.getResource(PROMETHEUS_TEST_RESPONSE_FILE)
.getFile())
- );
+ );
verify(outputStreamMock).write(fileBytes, 0, fileBytes.length);
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
index df014f4276..56d8fe2131 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java
@@ -21,20 +21,28 @@ package org.apache.hadoop.ozone.recon.tasks;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.api.types.NSSummary;
import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
-import
org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder;
-
+import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl;
import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
import org.jooq.DSLContext;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
import org.junit.jupiter.api.io.TempDir;
import java.io.IOException;
@@ -44,18 +52,20 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE;
+import static
org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE;
-import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
-import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm;
-import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm;
-import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm;
import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
+import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
+import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedDirToOm;
+import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm;
+import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm;
+import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm;
import static
org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE;
import static
org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT;
import static
org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE;
@@ -66,29 +76,83 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
- * Unit test for Object Count Task.
+ * This test class is designed for the OM Table Insight Task. It conducts tests
+ * for tables that require both Size and Count, as well as for those that only
+ * require Count.
*/
public class TestOmTableInsightTask extends AbstractReconSqlDBTest {
@TempDir
private Path temporaryFolder;
- private GlobalStatsDao globalStatsDao;
- private OmTableInsightTask omTableInsightTask;
- private DSLContext dslContext;
+ private static GlobalStatsDao globalStatsDao;
+ private static OmTableInsightTask omTableInsightTask;
+ private static DSLContext dslContext;
private boolean isSetupDone = false;
- private ReconOMMetadataManager reconOMMetadataManager;
+ private static ReconOMMetadataManager reconOMMetadataManager;
+ private static NSSummaryTaskWithFSO nSSummaryTaskWithFso;
+ private static OzoneConfiguration ozoneConfiguration;
+ private static ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager;
+
+ // Object names in FSO-enabled format
+ private static final String VOL = "volume1";
+ private static final String BUCKET_ONE = "bucket1";
+ private static final String BUCKET_TWO = "bucket2";
+ private static final String KEY_ONE = "file1";
+ private static final String KEY_TWO = "file2";
+ private static final String KEY_THREE = "dir1/dir2/file3";
+ private static final String FILE_ONE = "file1";
+ private static final String FILE_TWO = "file2";
+ private static final String FILE_THREE = "file3";
+ private static final String DIR_ONE = "dir1";
+ private static final String DIR_TWO = "dir2";
+ private static final String DIR_THREE = "dir3";
+
+
+ private static final long VOL_OBJECT_ID = 0L;
+ private static final long BUCKET_ONE_OBJECT_ID = 1L;
+ private static final long BUCKET_TWO_OBJECT_ID = 2L;
+ private static final long KEY_ONE_OBJECT_ID = 3L;
+ private static final long DIR_ONE_OBJECT_ID = 14L;
+ private static final long KEY_TWO_OBJECT_ID = 5L;
+ private static final long DIR_TWO_OBJECT_ID = 17L;
+ private static final long KEY_THREE_OBJECT_ID = 8L;
+ private static final long DIR_THREE_OBJECT_ID = 10L;
+
+ private static final long KEY_ONE_SIZE = 500L;
+ private static final long KEY_TWO_SIZE = 1025L;
+ private static final long KEY_THREE_SIZE = 2000L;
+
+ // mock client's path requests
+ private static final String TEST_USER = "TestUser";
+
+ @Mock
+ private Table<Long, NSSummary> nsSummaryTable;
public TestOmTableInsightTask() {
super();
}
private void initializeInjector() throws IOException {
+ ozoneConfiguration = new OzoneConfiguration();
reconOMMetadataManager = getTestReconOmMetadataManager(
initializeNewOmMetadataManager(Files.createDirectory(
temporaryFolder.resolve("JunitOmDBDir")).toFile()),
Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile());
globalStatsDao = getDao(GlobalStatsDao.class);
+
+ ReconTestInjector reconTestInjector =
+ new ReconTestInjector.Builder(temporaryFolder.toFile())
+ .withReconSqlDb()
+ .withReconOm(reconOMMetadataManager)
+ .withContainerDB()
+ .build();
+ reconNamespaceSummaryManager = reconTestInjector.getInstance(
+ ReconNamespaceSummaryManagerImpl.class);
+
omTableInsightTask = new OmTableInsightTask(
globalStatsDao, getConfiguration(), reconOMMetadataManager);
+ nSSummaryTaskWithFso = new NSSummaryTaskWithFSO(
+ reconNamespaceSummaryManager, reconOMMetadataManager,
+ ozoneConfiguration);
dslContext = getDslContext();
}
@@ -99,10 +163,182 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
initializeInjector();
isSetupDone = true;
}
+ MockitoAnnotations.openMocks(this);
// Truncate table before running each test
dslContext.truncate(GLOBAL_STATS);
}
+ /**
+ * Populate OM-DB with the following structure.
+ * volume1
+ * | \
+ * bucket1 bucket2
+ * / \ \
+ * dir1 dir2 dir3
+ * / \ \
+ * file1 file2 file3
+ *
+ * @throws IOException
+ */
+ private void populateOMDB() throws IOException {
+
+ // Create 2 Buckets bucket1 and bucket2
+ OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder()
+ .setVolumeName(VOL)
+ .setBucketName(BUCKET_ONE)
+ .setObjectID(BUCKET_ONE_OBJECT_ID)
+ .build();
+ String bucketKey = reconOMMetadataManager.getBucketKey(
+ bucketInfo1.getVolumeName(), bucketInfo1.getBucketName());
+ reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo1);
+ OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder()
+ .setVolumeName(VOL)
+ .setBucketName(BUCKET_TWO)
+ .setObjectID(BUCKET_TWO_OBJECT_ID)
+ .build();
+ bucketKey = reconOMMetadataManager.getBucketKey(
+ bucketInfo2.getVolumeName(), bucketInfo2.getBucketName());
+ reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo2);
+
+ // Create a single volume named volume1
+ String volumeKey = reconOMMetadataManager.getVolumeKey(VOL);
+ OmVolumeArgs args =
+ OmVolumeArgs.newBuilder()
+ .setObjectID(VOL_OBJECT_ID)
+ .setVolume(VOL)
+ .setAdminName(TEST_USER)
+ .setOwnerName(TEST_USER)
+ .build();
+ reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
+
+ // Generate keys for the File Table
+ writeKeyToOm(reconOMMetadataManager,
+ KEY_ONE,
+ BUCKET_ONE,
+ VOL,
+ FILE_ONE,
+ KEY_ONE_OBJECT_ID,
+ DIR_ONE_OBJECT_ID,
+ BUCKET_ONE_OBJECT_ID,
+ VOL_OBJECT_ID,
+ KEY_ONE_SIZE,
+ BucketLayout.FILE_SYSTEM_OPTIMIZED);
+ writeKeyToOm(reconOMMetadataManager,
+ KEY_TWO,
+ BUCKET_ONE,
+ VOL,
+ FILE_TWO,
+ KEY_TWO_OBJECT_ID,
+ DIR_ONE_OBJECT_ID,
+ BUCKET_ONE_OBJECT_ID,
+ VOL_OBJECT_ID,
+ KEY_TWO_SIZE,
+ BucketLayout.FILE_SYSTEM_OPTIMIZED);
+ writeKeyToOm(reconOMMetadataManager,
+ KEY_THREE,
+ BUCKET_ONE,
+ VOL,
+ FILE_THREE,
+ KEY_THREE_OBJECT_ID,
+ DIR_TWO_OBJECT_ID,
+ BUCKET_ONE_OBJECT_ID,
+ VOL_OBJECT_ID,
+ KEY_THREE_SIZE,
+ BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+ // Generate Deleted Directories in OM
+ writeDeletedDirToOm(reconOMMetadataManager,
+ BUCKET_ONE,
+ VOL,
+ DIR_ONE,
+ BUCKET_ONE_OBJECT_ID,
+ BUCKET_ONE_OBJECT_ID,
+ VOL_OBJECT_ID,
+ DIR_ONE_OBJECT_ID);
+ writeDeletedDirToOm(reconOMMetadataManager,
+ BUCKET_ONE,
+ VOL,
+ DIR_TWO,
+ BUCKET_ONE_OBJECT_ID,
+ BUCKET_ONE_OBJECT_ID,
+ VOL_OBJECT_ID,
+ DIR_TWO_OBJECT_ID);
+ writeDeletedDirToOm(reconOMMetadataManager,
+ BUCKET_TWO,
+ VOL,
+ DIR_THREE,
+ BUCKET_TWO_OBJECT_ID,
+ BUCKET_TWO_OBJECT_ID,
+ VOL_OBJECT_ID,
+ DIR_THREE_OBJECT_ID);
+ }
+
+ @Test
+ public void testReprocessForDeletedDirectory() throws Exception {
+ // Create keys and deleted directories
+ populateOMDB();
+
+ // Generate NamespaceSummary for the OM DB
+ nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
+
+ Pair<String, Boolean> result =
+ omTableInsightTask.reprocess(reconOMMetadataManager);
+ assertTrue(result.getRight());
+ assertEquals(3, getCountForTable(DELETED_DIR_TABLE));
+ }
+
+ @Test
+ public void testProcessForDeletedDirectoryTable() throws IOException {
+ // Prepare mock data size
+ Long expectedSize1 = 1000L;
+ Long expectedSize2 = 2000L;
+ NSSummary nsSummary1 = new NSSummary();
+ NSSummary nsSummary2 = new NSSummary();
+ nsSummary1.setSizeOfFiles(expectedSize1);
+ nsSummary2.setSizeOfFiles(expectedSize2);
+ when(nsSummaryTable.get(1L)).thenReturn(nsSummary1);
+ when(nsSummaryTable.get(2L)).thenReturn(nsSummary1);
+ when(nsSummaryTable.get(3L)).thenReturn(nsSummary2);
+ when(nsSummaryTable.get(4L)).thenReturn(nsSummary2);
+ when(nsSummaryTable.get(5L)).thenReturn(nsSummary2);
+
+ /* DB key in DeletedDirectoryTable =>
+ "/volumeId/bucketId/parentId/dirName/dirObjectId" */
+ List<String> paths = Arrays.asList(
+ "/18/28/22/dir1/1",
+ "/18/26/23/dir1/2",
+ "/18/20/24/dir1/3",
+ "/18/21/25/dir1/4",
+ "/18/27/26/dir1/5"
+ );
+
+ // Testing PUT events
+ // Create 5 OMDBUpdateEvent instances for 5 different deletedDirectory
paths
+ ArrayList<OMDBUpdateEvent> putEvents = new ArrayList<>();
+ for (long i = 0L; i < 5L; i++) {
+ putEvents.add(getOMUpdateEvent(paths.get((int) i),
+ getOmKeyInfo("vol1", "bucket1", DIR_ONE, (i + 1), false),
+ DELETED_DIR_TABLE, PUT, null));
+ }
+ OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents);
+ omTableInsightTask.process(putEventBatch);
+ assertEquals(5, getCountForTable(DELETED_DIR_TABLE));
+
+
+ // Testing DELETE events
+ // Create 2 OMDBUpdateEvent instances for 2 different deletedDirectory
paths
+ ArrayList<OMDBUpdateEvent> deleteEvents = new ArrayList<>();
+ deleteEvents.add(getOMUpdateEvent(paths.get(0),
+ getOmKeyInfo("vol1", "bucket1", DIR_ONE, 1L, false), DELETED_DIR_TABLE,
+ DELETE, null));
+ deleteEvents.add(getOMUpdateEvent(paths.get(2),
+ getOmKeyInfo("vol1", "bucket1", DIR_ONE, 3L, false), DELETED_DIR_TABLE,
+ DELETE, null));
+ OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents);
+ omTableInsightTask.process(deleteEventBatch);
+ assertEquals(3, getCountForTable(DELETED_DIR_TABLE));
+ }
+
@Test
public void testReprocessForCount() throws Exception {
OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
@@ -110,27 +346,32 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
// Mock 5 rows in each table and test the count
for (String tableName : omTableInsightTask.getTaskTables()) {
TypedTable<String, Object> table = mock(TypedTable.class);
- TypedTable.TypedTableIterator mockIter = mock(TypedTable
- .TypedTableIterator.class);
+ TypedTable.TypedTableIterator mockIter =
+ mock(TypedTable.TypedTableIterator.class);
when(table.iterator()).thenReturn(mockIter);
when(omMetadataManager.getTable(tableName)).thenReturn(table);
- when(mockIter.hasNext())
- .thenReturn(true)
- .thenReturn(true)
- .thenReturn(true)
- .thenReturn(true)
- .thenReturn(true)
- .thenReturn(false);
+ when(mockIter.hasNext()).thenReturn(true, true, true, true, true, false);
+
TypedTable.TypedKeyValue mockKeyValue =
mock(TypedTable.TypedKeyValue.class);
- when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class));
+
+ if (tableName.equals(DELETED_TABLE)) {
+ RepeatedOmKeyInfo keyInfo = mock(RepeatedOmKeyInfo.class);
+ when(keyInfo.getTotalSize()).thenReturn(ImmutablePair.of(100L, 100L));
+ when(keyInfo.getOmKeyInfoList()).thenReturn(
+ Arrays.asList(mock(OmKeyInfo.class)));
+ when(mockKeyValue.getValue()).thenReturn(keyInfo);
+ } else {
+ when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class));
+ }
+
when(mockIter.next()).thenReturn(mockKeyValue);
}
Pair<String, Boolean> result =
omTableInsightTask.reprocess(omMetadataManager);
- assertTrue(result.getRight());
+ assertTrue(result.getRight());
assertEquals(5L, getCountForTable(KEY_TABLE));
assertEquals(5L, getCountForTable(VOLUME_TABLE));
assertEquals(5L, getCountForTable(BUCKET_TABLE));
@@ -138,7 +379,6 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
assertEquals(5L, getCountForTable(DELETED_TABLE));
}
-
@Test
public void testReprocessForOpenKeyTable() throws Exception {
// Populate the OpenKeys table in OM DB
@@ -203,44 +443,73 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
@Test
public void testProcessForCount() {
- ArrayList<OMDBUpdateEvent> events = new ArrayList<>();
- // Create 5 put, 1 delete and 1 update event for each table
+ List<OMDBUpdateEvent> initialEvents = new ArrayList<>();
+
+ // Creating events for each table except the deleted table
for (String tableName : omTableInsightTask.getTaskTables()) {
+ if (tableName.equals(DELETED_TABLE)) {
+ continue; // Skipping deleted table as it has a separate test
+ }
+
+ // Adding 5 PUT events per table
for (int i = 0; i < 5; i++) {
- events.add(getOMUpdateEvent("item" + i, null, tableName, PUT, null));
+ initialEvents.add(
+ getOMUpdateEvent("item" + i, mock(OmKeyInfo.class), tableName, PUT,
+ null));
}
- // for delete event, if value is set to null, the counter will not be
- // decremented. This is because the value will be null if item does not
- // exist in the database and there is no need to delete.
- events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName,
- DELETE, null));
- events.add(getOMUpdateEvent("item1", null, tableName, UPDATE, null));
+
+ // Adding 1 DELETE event where value is null, indicating non-existence
+ // in the database.
+ initialEvents.add(
+ getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, DELETE,
+ null));
+ // Adding 1 UPDATE event. This should not affect the count.
+ initialEvents.add(
+ getOMUpdateEvent("item1", mock(OmKeyInfo.class), tableName, UPDATE,
+ mock(OmKeyInfo.class)));
}
- OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events);
- omTableInsightTask.process(omUpdateEventBatch);
- // Verify 4 items in each table. (5 puts - 1 delete + 0 update)
- assertEquals(4L, getCountForTable(KEY_TABLE));
- assertEquals(4L, getCountForTable(VOLUME_TABLE));
- assertEquals(4L, getCountForTable(BUCKET_TABLE));
- assertEquals(4L, getCountForTable(FILE_TABLE));
+ // Processing the initial batch of events
+ OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents);
+ omTableInsightTask.process(initialBatch);
- // add a new key and simulate delete on non-existing item (value: null)
- ArrayList<OMDBUpdateEvent> newEvents = new ArrayList<>();
+ // Verifying the count in each table
for (String tableName : omTableInsightTask.getTaskTables()) {
- newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT, null));
- // This delete event should be a noop since value is null
- newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE, null));
+ if (tableName.equals(DELETED_TABLE)) {
+ continue;
+ }
+ assertEquals(4L, getCountForTable(
+ tableName)); // 4 items expected after processing (5 puts - 1 delete)
}
- omUpdateEventBatch = new OMUpdateEventBatch(newEvents);
- omTableInsightTask.process(omUpdateEventBatch);
+ List<OMDBUpdateEvent> additionalEvents = new ArrayList<>();
+ // Simulating new PUT and DELETE events
+ for (String tableName : omTableInsightTask.getTaskTables()) {
+ if (tableName.equals(DELETED_TABLE)) {
+ continue;
+ }
+ // Adding 1 new PUT event
+ additionalEvents.add(
+ getOMUpdateEvent("item6", mock(OmKeyInfo.class), tableName, PUT,
+ null));
+ // Attempting to delete a non-existing item (value: null)
+ additionalEvents.add(
+ getOMUpdateEvent("item0", null, tableName, DELETE, null));
+ }
- // Verify 5 items in each table. (1 new put + 0 delete)
- assertEquals(5L, getCountForTable(KEY_TABLE));
- assertEquals(5L, getCountForTable(VOLUME_TABLE));
- assertEquals(5L, getCountForTable(BUCKET_TABLE));
- assertEquals(5L, getCountForTable(FILE_TABLE));
+ // Processing the additional events
+ OMUpdateEventBatch additionalBatch =
+ new OMUpdateEventBatch(additionalEvents);
+ omTableInsightTask.process(additionalBatch);
+ // Verifying the final count in each table
+ for (String tableName : omTableInsightTask.getTaskTables()) {
+ if (tableName.equals(DELETED_TABLE)) {
+ continue;
+ }
+ // 5 items expected after processing the additional events.
+ assertEquals(5L, getCountForTable(
+ tableName));
+ }
}
@Test
@@ -251,35 +520,38 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
when(omKeyInfo.getDataSize()).thenReturn(sizeToBeReturned);
when(omKeyInfo.getReplicatedSize()).thenReturn(sizeToBeReturned * 3);
- // Test PUT events
+ // Test PUT events.
+ // Add 5 PUT events for OpenKeyTable and OpenFileTable.
ArrayList<OMDBUpdateEvent> putEvents = new ArrayList<>();
- for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
- for (int i = 0; i < 5; i++) {
- putEvents.add(
- getOMUpdateEvent("item" + i, omKeyInfo, tableName, PUT, null));
- }
+ for (int i = 0; i < 10; i++) {
+ String table = (i < 5) ? OPEN_KEY_TABLE : OPEN_FILE_TABLE;
+ putEvents.add(getOMUpdateEvent("item" + i, omKeyInfo, table, PUT, null));
}
+
OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents);
omTableInsightTask.process(putEventBatch);
- // After 5 PUTs, size should be 5 * 1000 = 5000 for each size-related table
- for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+ // After 5 PUTs, size should be 5 * 1000 = 5000
+ for (String tableName : new ArrayList<>(
+ Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
assertEquals(5000L, getUnReplicatedSizeForTable(tableName));
assertEquals(15000L, getReplicatedSizeForTable(tableName));
}
// Test DELETE events
ArrayList<OMDBUpdateEvent> deleteEvents = new ArrayList<>();
- for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
- // Delete "item0"
- deleteEvents.add(
- getOMUpdateEvent("item0", omKeyInfo, tableName, DELETE, null));
- }
+ // Delete "item0" from OpenKeyTable and OpenFileTable.
+ deleteEvents.add(
+ getOMUpdateEvent("item0", omKeyInfo, OPEN_KEY_TABLE, DELETE, null));
+ deleteEvents.add(
+ getOMUpdateEvent("item0", omKeyInfo, OPEN_FILE_TABLE, DELETE, null));
+
OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents);
omTableInsightTask.process(deleteEventBatch);
// After deleting "item0", size should be 4 * 1000 = 4000
- for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+ for (String tableName : new ArrayList<>(
+ Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
assertEquals(4000L, getUnReplicatedSizeForTable(tableName));
assertEquals(12000L, getReplicatedSizeForTable(tableName));
}
@@ -287,7 +559,8 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
// Test UPDATE events
ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
Long newSizeToBeReturned = 2000L;
- for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+ for (String tableName : new ArrayList<>(
+ Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
// Update "item1" with a new size
OmKeyInfo newKeyInfo = mock(OmKeyInfo.class);
when(newKeyInfo.getDataSize()).thenReturn(newSizeToBeReturned);
@@ -295,12 +568,14 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
updateEvents.add(
getOMUpdateEvent("item1", newKeyInfo, tableName, UPDATE, omKeyInfo));
}
+
OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
omTableInsightTask.process(updateEventBatch);
// After updating "item1", size should be 4000 - 1000 + 2000 = 5000
// presentValue - oldValue + newValue = updatedValue
- for (String tableName : omTableInsightTask.getTablesToCalculateSize()) {
+ for (String tableName : new ArrayList<>(
+ Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) {
assertEquals(5000L, getUnReplicatedSizeForTable(tableName));
assertEquals(15000L, getReplicatedSizeForTable(tableName));
}
@@ -313,9 +588,10 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
new ImmutablePair<>(1000L, 3000L);
ArrayList<OmKeyInfo> omKeyInfoList = new ArrayList<>();
// Add 5 OmKeyInfo objects to the list
- for (int i = 0; i < 5; i++) {
+ for (long i = 0; i < 5; i++) {
OmKeyInfo omKeyInfo =
- getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", true);
+ getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", i + 1,
+ true);
// Set properties of OmKeyInfo object if needed
omKeyInfoList.add(omKeyInfo);
}
@@ -353,38 +629,14 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
// After deleting "item0", size should be 4 * 1000 = 4000
assertEquals(4000L, getUnReplicatedSizeForTable(DELETED_TABLE));
assertEquals(12000L, getReplicatedSizeForTable(DELETED_TABLE));
-
-
- // Test UPDATE events
- ArrayList<OMDBUpdateEvent> updateEvents = new ArrayList<>();
- // Update "item1" with new sizes
- ImmutablePair<Long, Long> newSizesToBeReturned =
- new ImmutablePair<>(500L, 1500L);
- RepeatedOmKeyInfo newRepeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class);
- when(newRepeatedOmKeyInfo.getTotalSize()).thenReturn(newSizesToBeReturned);
- when(newRepeatedOmKeyInfo.getOmKeyInfoList()).thenReturn(
- omKeyInfoList.subList(1, 5));
- OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents);
- // For item1, newSize=500 and totalCount of deleted keys should be 4
- updateEvents.add(
- getOMUpdateEvent("item1", newRepeatedOmKeyInfo, DELETED_TABLE, UPDATE,
- repeatedOmKeyInfo));
- omTableInsightTask.process(updateEventBatch);
- // Since one key has been deleted, total deleted keys should be 19
- assertEquals(19L, getCountForTable(DELETED_TABLE));
- // After updating "item1", size should be 4000 - 1000 + 500 = 3500
- // presentValue - oldValue + newValue = updatedValue
- assertEquals(3500L, getUnReplicatedSizeForTable(DELETED_TABLE));
- assertEquals(10500L, getReplicatedSizeForTable(DELETED_TABLE));
}
-
private OMDBUpdateEvent getOMUpdateEvent(
String name, Object value,
String table,
OMDBUpdateEvent.OMDBUpdateAction action,
Object oldValue) {
- return new OMUpdateEventBuilder()
+ return new OMDBUpdateEvent.OMUpdateEventBuilder()
.setAction(action)
.setKey(name)
.setValue(value)
@@ -409,7 +661,8 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
}
private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
- String keyName, boolean isFile) {
+ String keyName, Long objectID,
+ boolean isFile) {
return new OmKeyInfo.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -418,6 +671,7 @@ public class TestOmTableInsightTask extends
AbstractReconSqlDBTest {
.setReplicationConfig(StandaloneReplicationConfig
.getInstance(HddsProtos.ReplicationFactor.ONE))
.setDataSize(100L)
+ .setObjectID(objectID)
.build();
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]