devmadhuu commented on code in PR #5159:
URL: https://github.com/apache/ozone/pull/5159#discussion_r1299681384
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -70,8 +90,11 @@ public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED);
- cluster =
- MiniOzoneCluster.newBuilder(conf)
+ conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 60000);
+ conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 60000);
Review Comment:
Keep this slightly lower, maybe around 10.
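For illustration (the value here is only a ballpark, not a tuned number):
```java
// Illustrative only; 10 is a ballpark suggestion, not a verified value.
conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 10);
```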
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -70,8 +90,11 @@ public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED);
- cluster =
- MiniOzoneCluster.newBuilder(conf)
+ conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 60000);
+ conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 60000);
+ conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 60000,
Review Comment:
This is also very high. Keep it slightly lower, maybe around 100.
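Something like the sketch below; the TimeUnit is an assumption on my side, since the hunk above is truncated before the unit argument:
```java
// Illustrative only; 100 is a ballpark value and MILLISECONDS is assumed here.
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
    TimeUnit.MILLISECONDS);
```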
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
Review Comment:
I'm not sure what this message means or what doubleBuffer refers to. Please check; it does not seem relevant to this test case.
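One possible rewording, as a sketch, so the failure points at this test's row-count check instead:
```java
} catch (IOException ex) {
  // Hypothetical message; the point is to reference the actual operation.
  fail("countRowsInTable failed for OM table: " + ex);
}
```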
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the Recon
+ * metadata. This method waits for a specific period of time for the row count
+ * of the specified table to match the expected count.
+ */
+ private void assertReconTableRowCount(Table<String, ?> table, int count)
Review Comment:
These assert methods for the OM and Recon tables can be refactored and consolidated into a single method by passing the relevant arguments.
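A rough sketch of what a single shared helper could look like (method and parameter names below are mine, not from the patch). It works for both sides because the Recon path also exposes an OMMetadataManager via getOMMetadataManagerInstance():
```java
private void assertTableRowCount(OMMetadataManager metadataManager,
    Table<String, ?> table, int expectedCount)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(() -> {
    try {
      long count = metadataManager.countRowsInTable(table);
      LOG.info("{} actual row count={}, expectedCount={}",
          table.getName(), count, expectedCount);
      return count == expectedCount;
    } catch (IOException ex) {
      // Keep polling; waitFor will time out if the count never matches.
      LOG.error("countRowsInTable failed", ex);
      return false;
    }
  }, 1000, 120000); // 2 minutes
}
```
Callers would then pass `cluster.getOzoneManager().getMetadataManager()` for the OM tables and the Recon OM metadata manager instance for the Recon tables.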
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the Recon
+ * metadata. This method waits for a specific period of time for the row count
+ * of the specified table to match the expected count.
+ */
+ private void assertReconTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertReconTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertReconTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getReconServer().getOzoneManagerServiceProvider()
+ .getOMMetadataManagerInstance().countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
Review Comment:
Please check whether this message is relevant here.
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the Recon
+ * metadata. This method waits for a specific period of time for the row count
+ * of the specified table to match the expected count.
+ */
+ private void assertReconTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertReconTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertReconTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getReconServer().getOzoneManagerServiceProvider()
+ .getOMMetadataManagerInstance().countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
Review Comment:
It seems this returned boolean is not used in any assertion.
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
Review Comment:
Shouldn't we have an assert statement here?
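For example, one option (sketch only; the signature is widened to `throws Exception` here to cover the IOException from countRowsInTable):
```java
private void assertOmTableRowCount(Table<String, ?> table, int count)
    throws Exception {
  // Poll until the count matches (or waitFor times out), then assert the
  // final state explicitly.
  GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table),
      1000, 120000); // 2 minutes
  Assert.assertEquals(count, cluster.getOzoneManager().getMetadataManager()
      .countRowsInTable(table));
}
```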
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
Review Comment:
This returned boolean is not used anywhere in an assertion.
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the Recon
+ * metadata. This method waits for a specific period of time for the row count
+ * of the specified table to match the expected count.
+ */
+ private void assertReconTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertReconTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertReconTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getReconServer().getOzoneManagerServiceProvider()
+ .getOMMetadataManagerInstance().countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
Review Comment:
Shouldn't we have an assert statement here?
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -162,6 +199,104 @@ public void testNamespaceSummaryAPI() throws Exception {
Assert.assertEquals(12, rootBasicEntity.getCountStats().getNumTotalKey());
}
+
+ /**
+ * Test case for verifying directory deletion and namespace summary updates.
+ * Three cases are tested:
+ * CASE-1: Creating a directory structure with files and verifying OM and
+ * Recon tables.
+ * CASE-2: Deleting files from the directory and verifying updated NS summary.
+ * CASE-3: Deleting the entire directory and confirming the NS summary.
+ */
+ @Test
+ public void testNamespaceSummaryUpdatesForDirectoryDeletion()
+ throws Exception {
+
+ // CASE-1
+ // Create a directory structure with 10 files in dir1
+ addKeysToDirectory(1, 10, "/dir1");
+
+ // Fetch the file table and directory table from Ozone Manager.
+ OMMetadataManager ozoneMetadataManagerInstance =
+ cluster.getOzoneManager().getMetadataManager();
+ Table<String, OmKeyInfo> omFileTable =
+ ozoneMetadataManagerInstance.getKeyTable(getFSOBucketLayout());
+ Table<String, OmDirectoryInfo> omDirTable =
+ ozoneMetadataManagerInstance.getDirectoryTable();
+
+ // Verify the entries in the Ozone Manager tables.
+ assertOmTableRowCount(omFileTable, 10);
+ assertOmTableRowCount(omDirTable, 1);
+
+ // Sync data from Ozone Manager to Recon.
+ OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl)
+ cluster.getReconServer().getOzoneManagerServiceProvider();
+ impl.syncDataFromOM();
+
+ // Retrieve tables from Recon's OM-DB.
+ ReconOMMetadataManager reconOmMetadataManagerInstance =
+ (ReconOMMetadataManager) cluster.getReconServer()
+ .getOzoneManagerServiceProvider().getOMMetadataManagerInstance();
+ Table<String, OmKeyInfo> reconFileTable =
+ reconOmMetadataManagerInstance.getKeyTable(getFSOBucketLayout());
+ Table<String, OmDirectoryInfo> reconDirTable =
+ reconOmMetadataManagerInstance.getDirectoryTable();
+ Table<String, OmKeyInfo> reconDeletedDirTable =
+ reconOmMetadataManagerInstance.getDeletedDirTable();
+
+ // Verify the entries in the Recon tables after sync.
+ assertReconTableRowCount(reconFileTable, 10);
+ assertReconTableRowCount(reconDirTable, 1);
+ assertReconTableRowCount(reconDeletedDirTable, 0);
+
+ // Retrieve the object ID of dir1 from directory table.
+ Long directoryObjectId = null;
+ try (
+ TableIterator<?, ? extends Table.KeyValue<?, OmDirectoryInfo>> iterator
+ = reconDirTable.iterator()) {
+ if (iterator.hasNext()) {
+ directoryObjectId = iterator.next().getValue().getObjectID();
Review Comment:
Aren't other test cases also adding directories in the cluster? How can we be sure this will retrieve the object ID of "dir1"?
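If another directory could be present, one option is to look dir1 up by name instead of taking the first entry, e.g. (sketch only; assumes OmDirectoryInfo#getName returns "dir1" for this entry):
```java
Long directoryObjectId = null;
try (TableIterator<?, ? extends Table.KeyValue<?, OmDirectoryInfo>> iterator
         = reconDirTable.iterator()) {
  while (iterator.hasNext()) {
    OmDirectoryInfo dirInfo = iterator.next().getValue();
    // Match on the directory name rather than relying on iteration order.
    if ("dir1".equals(dirInfo.getName())) {
      directoryObjectId = dirInfo.getObjectID();
      break;
    }
  }
}
```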
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the Recon
+ * metadata. This method waits for a specific period of time for the row count
+ * of the specified table to match the expected count.
+ */
+ private void assertReconTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertReconTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertReconTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getReconServer().getOzoneManagerServiceProvider()
+ .getOMMetadataManagerInstance().countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
+ }
+
+ /**
+ * Get the BucketLayout with the FileSystem Optimized configuration.
+ */
+ private static BucketLayout getFSOBucketLayout() {
Review Comment:
We have added test cases for FSO buckets; can we add one for an OBS bucket as well?
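For reference, a rough sketch of setting up an OBS (OBJECT_STORE) bucket for such a test; the bucket name and exact builder calls are illustrative and should be checked against the client API on this branch:
```java
// Hypothetical setup for an OBS-layout bucket.
BucketArgs obsArgs = BucketArgs.newBuilder()
    .setBucketLayout(BucketLayout.OBJECT_STORE)
    .build();
volume.createBucket("obs-bucket", obsArgs);
```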
##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java:
##########
@@ -171,4 +306,90 @@ private void addKeys(int start, int end, String dirPrefix)
throws Exception {
writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i);
}
}
+
+ /**
+ * This method is designed to create a series of keys with incremental indices
+ * and associate them with a given directory. The keys are added to a
+ * specified volume and bucket.
+ */
+ private void addKeysToDirectory(int startIndex, int endIndex, String dirName)
+ throws Exception {
+ store.createVolume("vol");
+ OzoneVolume volume = store.getVolume("vol");
+ volume.createBucket("bucket");
+ for (int i = startIndex; i <= endIndex; i++) {
+ writeTestData("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * This method is designed to delete a series of keys with incremental indices
+ * and associate them with a given directory. The keys are deleted from a
+ * specified volume and bucket.
+ */
+ private void deleteKeysFromDirectory(int startIndex, int endIndex,
+ String dirName)
+ throws Exception {
+ for (int i = startIndex; i <= endIndex; i++) {
+ deleteKey("vol", "bucket", dirName + "/key" + i);
+ }
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the OM metadata.
+ * This method waits for a specific period of time for the row count of the
+ * specified table to match the expected count.
+ */
+ private void assertOmTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertOmTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertOmTableRowCount(int expectedCount,
+ Table<String, ?> table) {
+ long count = 0L;
+ try {
+ count = cluster.getOzoneManager().getMetadataManager()
+ .countRowsInTable(table);
+ LOG.info("{} actual row count={}, expectedCount={}", table.getName(),
+ count, expectedCount);
+ } catch (IOException ex) {
+ fail("testDoubleBuffer failed with: " + ex);
+ }
+ return count == expectedCount;
+ }
+
+ /**
+ * Helper method to assert the row count of a given table in the Recon
+ * metadata. This method waits for a specific period of time for the row count
+ * of the specified table to match the expected count.
+ */
+ private void assertReconTableRowCount(Table<String, ?> table, int count)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(() -> assertReconTableRowCount(count, table), 1000,
+ 120000); // 2 minutes
+ }
+
+ private boolean assertReconTableRowCount(int expectedCount,
Review Comment:
These assert methods for the OM and Recon tables can be refactored and consolidated into a single method by passing the relevant arguments.