This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new f9a240da19 HDDS-5463. [FSO] Recon Container API does not work
correctly with FSO. (#4182)
f9a240da19 is described below
commit f9a240da19cc265347fd27ba3fabb596c66d75ab
Author: Arafat2198 <[email protected]>
AuthorDate: Fri Mar 31 13:09:59 2023 +0530
HDDS-5463. [FSO] Recon Container API does not work correctly with FSO.
(#4182)
---
.../hadoop/ozone/recon/api/ContainerEndpoint.java | 29 ++-
.../ozone/recon/api/handlers/BucketHandler.java | 6 +
.../ozone/recon/tasks/ContainerKeyMapperTask.java | 38 ++--
.../ozone/recon/api/TestContainerEndpoint.java | 253 ++++++++++++++++++++-
.../recon/tasks/TestContainerKeyMapperTask.java | 221 +++++++++++++++++-
5 files changed, 504 insertions(+), 43 deletions(-)
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index 8059bb4c4d..de492f1ae9 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -62,8 +62,11 @@ import
org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
import
org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates;
import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static
org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_BATCH_NUMBER;
import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
@@ -89,13 +92,21 @@ public class ContainerEndpoint {
private final ReconContainerManager containerManager;
private final ContainerHealthSchemaManager containerHealthSchemaManager;
+ private final ReconNamespaceSummaryManager reconNamespaceSummaryManager;
+ private final OzoneStorageContainerManager reconSCM;
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ContainerEndpoint.class);
+ private BucketLayout layout = BucketLayout.DEFAULT;
@Inject
public ContainerEndpoint(OzoneStorageContainerManager reconSCM,
- ContainerHealthSchemaManager containerHealthSchemaManager) {
+ ContainerHealthSchemaManager containerHealthSchemaManager,
+ ReconNamespaceSummaryManager reconNamespaceSummaryManager) {
this.containerManager =
(ReconContainerManager) reconSCM.getContainerManager();
this.containerHealthSchemaManager = containerHealthSchemaManager;
+ this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
+ this.reconSCM = reconSCM;
}
/**
@@ -165,16 +176,22 @@ public class ContainerEndpoint {
Map<ContainerKeyPrefix, Integer> containerKeyPrefixMap =
reconContainerMetadataManager.getKeyPrefixesForContainer(containerID,
prevKeyPrefix);
-
// Get set of Container-Key mappings for given containerId.
for (ContainerKeyPrefix containerKeyPrefix : containerKeyPrefixMap
.keySet()) {
- // Directly calling get() on the Key table instead of iterating since
- // only full keys are supported now. When we change to using a prefix
- // of the key, this needs to change to prefix seek.
- OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout())
+      // Directly calling getSkipCache() on the Key/File table
+      // instead of iterating since only full keys are supported now. We will
+      // try to get the OmKeyInfo object by searching the KEY_TABLE with
+      // the key prefix. If it's not found, we will then search the
+      // FILE_TABLE.
+      OmKeyInfo omKeyInfo =
+          omMetadataManager.getKeyTable(BucketLayout.LEGACY)
          .getSkipCache(containerKeyPrefix.getKeyPrefix());
+ if (omKeyInfo == null) {
+ omKeyInfo =
+ omMetadataManager.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED)
+ .getSkipCache(containerKeyPrefix.getKeyPrefix());
+ }
+
if (null != omKeyInfo) {
// Filter keys by version.
List<OmKeyLocationInfoGroup> matchedKeys = omKeyInfo
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
index 7377441556..5121d91787 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java
@@ -175,6 +175,12 @@ public abstract class BucketHandler {
.equals(BucketLayout.LEGACY)) {
return new LegacyBucketHandler(reconNamespaceSummaryManager,
omMetadataManager, reconSCM, bucketInfo);
+ } else if (bucketInfo.getBucketLayout()
+ .equals(BucketLayout.OBJECT_STORE)) {
+ // TODO: HDDS-7810 Write a handler for object store bucket
+ // We can use LegacyBucketHandler for OBS bucket for now.
+ return new LegacyBucketHandler(reconNamespaceSummaryManager,
+ omMetadataManager, reconSCM, bucketInfo);
} else {
LOG.error("Unsupported bucket layout: " +
bucketInfo.getBucketLayout());
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
index 01fcdaf765..83e2f3afe5 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java
@@ -18,14 +18,15 @@
package org.apache.hadoop.ozone.recon.tasks;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -90,18 +91,24 @@ public class ContainerKeyMapperTask implements ReconOmTask {
reconContainerMetadataManager
.reinitWithNewContainerDataFromOm(new HashMap<>());
- Table<String, OmKeyInfo> omKeyInfoTable =
- omMetadataManager.getKeyTable(getBucketLayout());
- try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- keyIter = omKeyInfoTable.iterator()) {
- while (keyIter.hasNext()) {
- Table.KeyValue<String, OmKeyInfo> kv = keyIter.next();
- OmKeyInfo omKeyInfo = kv.getValue();
- handlePutOMKeyEvent(kv.getKey(), omKeyInfo, containerKeyMap,
- containerKeyCountMap, deletedKeyCountList);
- omKeyCount++;
+ // loop over both key table and file table
+ for (BucketLayout layout : Arrays.asList(BucketLayout.LEGACY,
+ BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
+ Table<String, OmKeyInfo> omKeyInfoTable =
+ omMetadataManager.getKeyTable(layout);
+ try (
+ TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+ keyIter = omKeyInfoTable.iterator()) {
+ while (keyIter.hasNext()) {
+ Table.KeyValue<String, OmKeyInfo> kv = keyIter.next();
+ OmKeyInfo omKeyInfo = kv.getValue();
+ handlePutOMKeyEvent(kv.getKey(), omKeyInfo, containerKeyMap,
+ containerKeyCountMap, deletedKeyCountList);
+ omKeyCount++;
+ }
}
}
+
LOG.info("Completed 'reprocess' of ContainerKeyMapperTask.");
Instant end = Instant.now();
long duration = Duration.between(start, end).toMillis();
@@ -127,7 +134,10 @@ public class ContainerKeyMapperTask implements ReconOmTask
{
}
public Collection<String> getTaskTables() {
- return Collections.singletonList(KEY_TABLE);
+ List<String> taskTables = new ArrayList<>();
+ taskTables.add(KEY_TABLE);
+ taskTables.add(FILE_TABLE);
+ return taskTables;
}
@Override
@@ -373,8 +383,4 @@ public class ContainerKeyMapperTask implements ReconOmTask {
}
}
- private BucketLayout getBucketLayout() {
- return BucketLayout.DEFAULT;
- }
-
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index c383c92589..2aa84e59ce 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -24,6 +24,7 @@ import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestRe
import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm;
import static org.junit.Assert.assertNotNull;
+import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@@ -60,7 +61,10 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.recon.ReconTestInjector;
@@ -112,6 +116,30 @@ public class TestContainerEndpoint {
private Pipeline pipeline;
private PipelineID pipelineID;
private long keyCount = 5L;
+ private static final String FSO_KEY_NAME1 = "dir1/file7";
+ private static final String FSO_KEY_NAME2 = "dir1/dir2/file8";
+ private static final String FSO_KEY_NAME3 = "dir1/dir2/file9";
+ private static final String FSO_KEY_NAME4 = "dir1/dir2/dir3/file10";
+ private static final String BUCKET_NAME = "fsoBucket";
+ private static final String VOLUME_NAME = "sampleVol2";
+ private static final String FILE_NAME1 = "file7";
+ private static final String FILE_NAME2 = "file8";
+ private static final String FILE_NAME3 = "file9";
+ private static final String FILE_NAME4 = "file10";
+ private static final long FILE_ONE_OBJECT_ID = 13L;
+ private static final long FILE_TWO_OBJECT_ID = 14L;
+ private static final long FILE_THREE_OBJECT_ID = 15L;
+ private static final long FILE_FOUR_OBJECT_ID = 16L;
+ private static final long PARENT_OBJECT_ID = 2L; // dir1 objectID
+ private static final long PARENT_OBJECT_ID2 = 3L; // dir2 objectID
+ private static final long PARENT_OBJECT_ID3 = 4L; // dir3 objectID
+ private static final long BUCKET_OBJECT_ID = 1L; // fsoBucket objectID
+ private static final long VOL_OBJECT_ID = 0L; // sampleVol2 objectID
+ private static final long CONTAINER_ID_1 = 20L;
+ private static final long CONTAINER_ID_2 = 21L;
+ private static final long CONTAINER_ID_3 = 22L;
+ private static final long LOCAL_ID = 0L;
+ private static final long KEY_ONE_SIZE = 500L; // 500 bytes
private UUID uuid1;
private UUID uuid2;
@@ -227,17 +255,156 @@ public class TestContainerEndpoint {
//Generate Recon container DB data.
OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class);
- Table tableMock = mock(Table.class);
- when(tableMock.getName()).thenReturn("KeyTable");
- when(omMetadataManagerMock.getKeyTable(getBucketLayout()))
- .thenReturn(tableMock);
- ContainerKeyMapperTask containerKeyMapperTask =
+ Table keyTableMock = mock(Table.class);
+ Table fileTableMock = mock(Table.class);
+
+ when(keyTableMock.getName()).thenReturn("KeyTable");
+ when(fileTableMock.getName()).thenReturn("FileTable");
+
+ when(omMetadataManagerMock.getKeyTable(BucketLayout.LEGACY))
+ .thenReturn(keyTableMock);
+
+ when(omMetadataManagerMock.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED))
+ .thenReturn(fileTableMock);
+
+ reprocessContainerKeyMapper();
+ }
+
+ private void reprocessContainerKeyMapper() {
+ ContainerKeyMapperTask containerKeyMapperTask =
new ContainerKeyMapperTask(reconContainerMetadataManager);
containerKeyMapperTask.reprocess(reconOMMetadataManager);
}
+ private void setUpFSOData() throws IOException {
+
+ // Create another new volume and add it to the volume table
+ String volumeKey = reconOMMetadataManager.getVolumeKey(VOLUME_NAME);
+ OmVolumeArgs args = OmVolumeArgs.newBuilder()
+ .setVolume(VOLUME_NAME)
+ .setAdminName("TestUser")
+ .setOwnerName("TestUser")
+ .setObjectID(0L)
+ .build();
+ reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
+
+ // Create another new bucket and add it to the bucket table
+ OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+ .setVolumeName(VOLUME_NAME)
+ .setBucketName(BUCKET_NAME)
+ .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED)
+ .setObjectID(1L)
+ .build();
+ String bucketKey =
+ reconOMMetadataManager.getBucketKey(bucketInfo.getVolumeName(),
+ bucketInfo.getBucketName());
+ reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
+
+ // Create a new directory and add it to the directory table
+ OmDirectoryInfo dirInfo1 = OmDirectoryInfo.newBuilder()
+ .setName("dir1")
+ .setParentObjectID(1L)
+ .setUpdateID(1L)
+ .setObjectID(2L)
+ .build();
+ OmDirectoryInfo dirInfo2 = OmDirectoryInfo.newBuilder()
+ .setName("dir2")
+ .setParentObjectID(1L)
+ .setUpdateID(1L)
+ .setObjectID(3L)
+ .build();
+ String dirKey1 = reconOMMetadataManager.getOzonePathKey(0, 1, 1L, "dir1");
+ String dirKey2 = reconOMMetadataManager.getOzonePathKey(0, 1, 2L, "dir2");
+ reconOMMetadataManager.getDirectoryTable().put(dirKey1, dirInfo1);
+ reconOMMetadataManager.getDirectoryTable().put(dirKey2, dirInfo2);
+
+ OmKeyLocationInfoGroup locationInfoGroup =
+ getLocationInfoGroup1();
+
+ // add the multi-block key to Recon's OM
+ writeKeyToOm(reconOMMetadataManager,
+ FSO_KEY_NAME1,
+ BUCKET_NAME,
+ VOLUME_NAME,
+ FILE_NAME1,
+ FILE_ONE_OBJECT_ID,
+ PARENT_OBJECT_ID,
+ BUCKET_OBJECT_ID,
+ VOL_OBJECT_ID,
+ Collections.singletonList(locationInfoGroup),
+ BucketLayout.FILE_SYSTEM_OPTIMIZED,
+ KEY_ONE_SIZE);
+
+ // add the multi-block key to Recon's OM
+ writeKeyToOm(reconOMMetadataManager,
+ FSO_KEY_NAME2,
+ BUCKET_NAME,
+ VOLUME_NAME,
+ FILE_NAME2,
+ FILE_TWO_OBJECT_ID,
+ PARENT_OBJECT_ID2,
+ BUCKET_OBJECT_ID,
+ VOL_OBJECT_ID,
+ Collections.singletonList(locationInfoGroup),
+ BucketLayout.FILE_SYSTEM_OPTIMIZED,
+ KEY_ONE_SIZE);
+
+ // add the multi-block key to Recon's OM
+ writeKeyToOm(reconOMMetadataManager,
+ FSO_KEY_NAME3,
+ BUCKET_NAME,
+ VOLUME_NAME,
+ FILE_NAME3,
+ FILE_THREE_OBJECT_ID,
+ PARENT_OBJECT_ID2,
+ BUCKET_OBJECT_ID,
+ VOL_OBJECT_ID,
+ Collections.singletonList(locationInfoGroup),
+ BucketLayout.FILE_SYSTEM_OPTIMIZED,
+ KEY_ONE_SIZE);
+
+ // add the multi-block key to Recon's OM
+ writeKeyToOm(reconOMMetadataManager,
+ FSO_KEY_NAME4,
+ BUCKET_NAME,
+ VOLUME_NAME,
+ FILE_NAME4,
+ FILE_FOUR_OBJECT_ID,
+ PARENT_OBJECT_ID3,
+ BUCKET_OBJECT_ID,
+ VOL_OBJECT_ID,
+ Collections.singletonList(locationInfoGroup),
+ BucketLayout.FILE_SYSTEM_OPTIMIZED,
+ KEY_ONE_SIZE);
+ }
+
+ private OmKeyLocationInfoGroup getLocationInfoGroup1() {
+ List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
+ BlockID block1 = new BlockID(CONTAINER_ID_1, LOCAL_ID);
+ BlockID block2 = new BlockID(CONTAINER_ID_2, LOCAL_ID);
+ BlockID block3 = new BlockID(CONTAINER_ID_3, LOCAL_ID);
+
+ OmKeyLocationInfo location1 = new OmKeyLocationInfo.Builder()
+ .setBlockID(block1)
+ .setLength(1000L)
+ .build();
+ OmKeyLocationInfo location2 = new OmKeyLocationInfo.Builder()
+ .setBlockID(block2)
+ .setLength(2000L)
+ .build();
+ OmKeyLocationInfo location3 = new OmKeyLocationInfo.Builder()
+ .setBlockID(block3)
+ .setLength(3000L)
+ .build();
+ locationInfoList.add(location1);
+ locationInfoList.add(location2);
+ locationInfoList.add(location3);
+
+ return new OmKeyLocationInfoGroup(0L, locationInfoList);
+ }
+
@Test
- public void testGetKeysForContainer() {
+ public void testGetKeysForContainer() throws IOException {
Response response = containerEndpoint.getKeysForContainer(1L, -1, "");
KeysResponse data = (KeysResponse) response.getEntity();
@@ -278,10 +445,41 @@ public class TestContainerEndpoint {
keyMetadataList = data.getKeys();
assertEquals(1, keyMetadataList.size());
assertEquals(3, data.getTotalCount());
+
+ // Now to check if the ContainerEndpoint also reads the File table
+ // Set up test data for FSO keys
+ setUpFSOData();
+ // Reprocess the container key mapper to ensure the latest mapping is used
+ reprocessContainerKeyMapper();
+ response = containerEndpoint.getKeysForContainer(20L, -1, "");
+
+ // Ensure that the expected number of keys is returned
+ data = (KeysResponse) response.getEntity();
+ keyMetadataList = data.getKeys();
+
+ assertEquals(4, data.getTotalCount());
+ assertEquals(4, keyMetadataList.size());
+
+ // Retrieve the first key from the list and verify its metadata
+ iterator = keyMetadataList.iterator();
+ keyMetadata = iterator.next();
+ assertEquals(FSO_KEY_NAME1, keyMetadata.getKey());
+ assertEquals(1, keyMetadata.getVersions().size());
+ assertEquals(1, keyMetadata.getBlockIds().size());
+ blockIds = keyMetadata.getBlockIds();
+ assertEquals(0, blockIds.get(0L).get(0).getLocalID());
+
+ keyMetadata = iterator.next();
+ assertEquals(FSO_KEY_NAME2, keyMetadata.getKey());
+ assertEquals(1, keyMetadata.getVersions().size());
+ assertEquals(1, keyMetadata.getBlockIds().size());
+ blockIds = keyMetadata.getBlockIds();
+ assertEquals(0, blockIds.get(0L).get(0).getLocalID());
+
}
@Test
- public void testGetKeysForContainerWithPrevKey() {
+ public void testGetKeysForContainerWithPrevKey() throws IOException {
// test if prev-key param works as expected
Response response = containerEndpoint.getKeysForContainer(
1L, -1, "/sampleVol/bucketOne/key_one");
@@ -297,10 +495,12 @@ public class TestContainerEndpoint {
Iterator<KeyMetadata> iterator = keyMetadataList.iterator();
KeyMetadata keyMetadata = iterator.next();
+ // assert that the returned key metadata is correct
assertEquals("key_two", keyMetadata.getKey());
assertEquals(2, keyMetadata.getVersions().size());
assertEquals(2, keyMetadata.getBlockIds().size());
+ // test for an empty prev-key parameter
response = containerEndpoint.getKeysForContainer(
1L, -1, StringUtils.EMPTY);
data = (KeysResponse) response.getEntity();
@@ -320,12 +520,45 @@ public class TestContainerEndpoint {
assertEquals(3, data.getTotalCount());
assertEquals(0, keyMetadataList.size());
+ // test for a container ID that does not exist
response = containerEndpoint.getKeysForContainer(
5L, -1, "");
data = (KeysResponse) response.getEntity();
keyMetadataList = data.getKeys();
assertEquals(0, keyMetadataList.size());
assertEquals(0, data.getTotalCount());
+
+ // Now to check if the ContainerEndpoint also reads the File table
+ // Set up test data for FSO keys
+ setUpFSOData();
+ // Reprocess the container key mapper to ensure the latest mapping is used
+ reprocessContainerKeyMapper();
+ response = containerEndpoint.getKeysForContainer(20L, -1, "/0/1/2/file7");
+
+ // Ensure that the expected number of keys is returned
+ data = (KeysResponse) response.getEntity();
+ keyMetadataList = data.getKeys();
+
+ assertEquals(4, data.getTotalCount());
+ assertEquals(3, keyMetadataList.size());
+
+ // Retrieve the first key from the list and verify its metadata
+ iterator = keyMetadataList.iterator();
+ keyMetadata = iterator.next();
+ assertEquals(FSO_KEY_NAME2, keyMetadata.getKey());
+ assertEquals(1, keyMetadata.getVersions().size());
+ assertEquals(1, keyMetadata.getBlockIds().size());
+ Map<Long, List<KeyMetadata.ContainerBlockMetadata>> blockIds =
+ keyMetadata.getBlockIds();
+ assertEquals(0, blockIds.get(0L).get(0).getLocalID());
+
+ keyMetadata = iterator.next();
+ assertEquals(FSO_KEY_NAME3, keyMetadata.getKey());
+ assertEquals(1, keyMetadata.getVersions().size());
+ assertEquals(1, keyMetadata.getBlockIds().size());
+ blockIds = keyMetadata.getBlockIds();
+ assertEquals(0, blockIds.get(0L).get(0).getLocalID());
+
}
@Test
@@ -578,7 +811,7 @@ public class TestContainerEndpoint {
@Test
public void testUnhealthyContainersFilteredResponse()
throws IOException, TimeoutException {
- String missing = UnHealthyContainerStates.MISSING.toString();
+ String missing = UnHealthyContainerStates.MISSING.toString();
Response response = containerEndpoint
.getUnhealthyContainers(missing, 1000, 1);
@@ -714,7 +947,7 @@ public class TestContainerEndpoint {
}
private void createUnhealthyRecords(int missing, int overRep, int underRep,
- int misRep) {
+ int misRep) {
int cid = 0;
for (int i = 0; i < missing; i++) {
createUnhealthyRecord(++cid, UnHealthyContainerStates.MISSING.toString(),
@@ -738,7 +971,7 @@ public class TestContainerEndpoint {
}
private void createUnhealthyRecord(int id, String state, int expected,
- int actual, int delta, String reason) {
+ int actual, int delta, String reason) {
long cID = Integer.toUnsignedLong(id);
UnhealthyContainers missing = new UnhealthyContainers();
missing.setContainerId(cID);
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
index b9594551a0..be2617a1f1 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.recon.tasks;
+import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider;
import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo;
import static
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
@@ -28,6 +29,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
+import java.util.Iterator;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -65,6 +67,17 @@ public class TestContainerKeyMapperTask {
private ReconOMMetadataManager reconOMMetadataManager;
private OzoneManagerServiceProviderImpl ozoneManagerServiceProvider;
+ private static final String FSO_KEY_NAME = "dir1/file7";
+ private static final String BUCKET_NAME = "bucket1";
+ private static final String VOLUME_NAME = "vol";
+ private static final String FILE_NAME = "file7";
+ private static final String INSERTED_KEY = "keyToBeInserted";
+ private static final String DELETED_KEY = "keyToBeDeleted";
+  private static final long KEY_ONE_OBJECT_ID = 3L; // file7 objectID
+ private static final long BUCKET_ONE_OBJECT_ID = 1L;
+ private static final long VOL_OBJECT_ID = 0L;
+ private static final long KEY_ONE_SIZE = 500L; // 500 bytes
+
@Before
public void setUp() throws Exception {
omMetadataManager = initializeNewOmMetadataManager(
@@ -85,7 +98,7 @@ public class TestContainerKeyMapperTask {
}
@Test
- public void testReprocessOMDB() throws Exception {
+ public void testKeyTableReprocess() throws Exception {
Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
reconContainerMetadataManager.getKeyPrefixesForContainer(1);
@@ -113,9 +126,9 @@ public class TestContainerKeyMapperTask {
OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
writeDataToOm(reconOMMetadataManager,
- "key_one",
- "bucketOne",
- "sampleVol",
+ FILE_NAME,
+ BUCKET_NAME,
+ VOLUME_NAME,
Collections.singletonList(omKeyLocationInfoGroup));
ContainerKeyMapperTask containerKeyMapperTask =
@@ -125,8 +138,8 @@ public class TestContainerKeyMapperTask {
keyPrefixesForContainer =
reconContainerMetadataManager.getKeyPrefixesForContainer(1);
assertEquals(1, keyPrefixesForContainer.size());
- String omKey = omMetadataManager.getOzoneKey("sampleVol",
- "bucketOne", "key_one");
+ String omKey = omMetadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME,
+ FILE_NAME);
ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(1,
omKey, 0);
assertEquals(1,
@@ -150,7 +163,81 @@ public class TestContainerKeyMapperTask {
}
@Test
- public void testProcessOMEvents() throws IOException {
+ public void testFileTableReprocess() throws Exception {
+ // Make sure the key prefixes are empty for container 1
+ Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
+ reconContainerMetadataManager.getKeyPrefixesForContainer(1L);
+ assertTrue(keyPrefixesForContainer.isEmpty());
+
+ // Make sure the key prefixes are empty for container 2
+ keyPrefixesForContainer =
+ reconContainerMetadataManager.getKeyPrefixesForContainer(2L);
+ assertTrue(keyPrefixesForContainer.isEmpty());
+
+ // Create a random pipeline and a list of OmKeyLocationInfo objects
+ Pipeline pipeline = getRandomPipeline();
+ List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+ BlockID blockID1 = new BlockID(1L, 1L);
+ OmKeyLocationInfo omKeyLocationInfo1 =
+ getOmKeyLocationInfo(blockID1, pipeline);
+ BlockID blockID2 = new BlockID(2L, 1L);
+ OmKeyLocationInfo omKeyLocationInfo2 =
+ getOmKeyLocationInfo(blockID2, pipeline);
+ omKeyLocationInfoList.add(omKeyLocationInfo1);
+ omKeyLocationInfoList.add(omKeyLocationInfo2);
+ OmKeyLocationInfoGroup omKeyLocationInfoGroup =
+ new OmKeyLocationInfoGroup(0L, omKeyLocationInfoList);
+
+ // Write the key to OM
+ writeKeyToOm(reconOMMetadataManager,
+ FSO_KEY_NAME,
+ BUCKET_NAME,
+ VOLUME_NAME,
+ FILE_NAME,
+ KEY_ONE_OBJECT_ID,
+ BUCKET_ONE_OBJECT_ID,
+ BUCKET_ONE_OBJECT_ID,
+ VOL_OBJECT_ID,
+ Collections.singletonList(omKeyLocationInfoGroup),
+ BucketLayout.FILE_SYSTEM_OPTIMIZED,
+ KEY_ONE_SIZE);
+
+ // Reprocess container key mappings
+ ContainerKeyMapperTask containerKeyMapperTask =
+ new ContainerKeyMapperTask(reconContainerMetadataManager);
+ containerKeyMapperTask.reprocess(reconOMMetadataManager);
+
+ // Check the key prefixes for container 1
+ keyPrefixesForContainer =
+ reconContainerMetadataManager.getKeyPrefixesForContainer(1L);
+ String omKey =
+ omMetadataManager.getOzonePathKey(VOL_OBJECT_ID, BUCKET_ONE_OBJECT_ID,
+ BUCKET_ONE_OBJECT_ID, FILE_NAME);
+ ContainerKeyPrefix containerKeyPrefix =
+ new ContainerKeyPrefix(1L, omKey, 0L);
+ assertEquals(1L, keyPrefixesForContainer.size());
+ assertEquals(1L,
+ keyPrefixesForContainer.get(containerKeyPrefix).intValue());
+
+ // Check the key prefixes for container 2
+ keyPrefixesForContainer =
+ reconContainerMetadataManager.getKeyPrefixesForContainer(2L);
+ containerKeyPrefix = new ContainerKeyPrefix(2L, omKey, 0L);
+ assertEquals(1L, keyPrefixesForContainer.size());
+ assertEquals(1L,
+ keyPrefixesForContainer.get(containerKeyPrefix).intValue());
+
+ // Check that the container key counts are updated
+    assertEquals(1L,
+        reconContainerMetadataManager.getKeyCountForContainer(1L));
+    assertEquals(1L,
+        reconContainerMetadataManager.getKeyCountForContainer(2L));
+    assertEquals(0L,
+        reconContainerMetadataManager.getKeyCountForContainer(3L));
+
+ // Check that the container count is updated
+ assertEquals(2L, reconContainerMetadataManager.getCountForContainers());
+ }
+
+ @Test
+ public void testKeyTableProcess() throws IOException {
Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
reconContainerMetadataManager.getKeyPrefixesForContainer(1);
assertTrue(keyPrefixesForContainer.isEmpty());
@@ -176,9 +263,9 @@ public class TestContainerKeyMapperTask {
OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
- String bucket = "bucketOne";
- String volume = "sampleVol";
- String key = "key_one";
+ String bucket = BUCKET_NAME;
+ String volume = VOLUME_NAME;
+ String key = FILE_NAME;
String omKey = omMetadataManager.getOzoneKey(volume, bucket, key);
OmKeyInfo omKeyInfo = buildOmKeyInfo(volume, bucket, key,
omKeyLocationInfoGroup);
@@ -205,7 +292,7 @@ public class TestContainerKeyMapperTask {
omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0,
omKeyLocationInfoList);
- String key2 = "key_two";
+ String key2 = DELETED_KEY;
writeDataToOm(reconOMMetadataManager, key2, bucket, volume, Collections
.singletonList(omKeyLocationInfoGroup));
@@ -266,6 +353,118 @@ public class TestContainerKeyMapperTask {
assertEquals(3, reconContainerMetadataManager.getCountForContainers());
}
+ @Test
+ public void testFileTableProcess() throws Exception {
+ // Verify that keyPrefixesForContainer is empty for container 1 and 2
+ Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
+ reconContainerMetadataManager.getKeyPrefixesForContainer(1);
+ assertTrue(keyPrefixesForContainer.isEmpty());
+
+ keyPrefixesForContainer = reconContainerMetadataManager
+ .getKeyPrefixesForContainer(2);
+ assertTrue(keyPrefixesForContainer.isEmpty());
+
+ // Create a random pipeline and a list of OmKeyLocationInfo objects
+ Pipeline pipeline = getRandomPipeline();
+ List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+ BlockID blockID1 = new BlockID(1L, 1L);
+ OmKeyLocationInfo omKeyLocationInfo1 =
+ getOmKeyLocationInfo(blockID1, pipeline);
+ BlockID blockID2 = new BlockID(2L, 1L);
+ OmKeyLocationInfo omKeyLocationInfo2 =
+ getOmKeyLocationInfo(blockID2, pipeline);
+ omKeyLocationInfoList.add(omKeyLocationInfo1);
+ omKeyLocationInfoList.add(omKeyLocationInfo2);
+ OmKeyLocationInfoGroup omKeyLocationInfoGroup =
+ new OmKeyLocationInfoGroup(0L, omKeyLocationInfoList);
+
+ // Reprocess container key mappings
+ ContainerKeyMapperTask containerKeyMapperTask =
+ new ContainerKeyMapperTask(reconContainerMetadataManager);
+
+ String bucket = BUCKET_NAME;
+ String volume = VOLUME_NAME;
+ String key = INSERTED_KEY;
+ String omKey = omMetadataManager.getOzoneKey(volume, bucket, key);
+ OmKeyInfo omKeyInfo = buildOmKeyInfo(volume, bucket, key,
+ omKeyLocationInfoGroup);
+
+ OMDBUpdateEvent keyEvent1 = new OMDBUpdateEvent.
+ OMUpdateEventBuilder<String, OmKeyInfo>()
+ .setKey(omKey)
+ .setValue(omKeyInfo)
+ .setTable(
+ omMetadataManager.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED)
+ .getName())
+ .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
+ .build();
+
+ String key2 = DELETED_KEY;
+
+ omKey = omMetadataManager.getOzoneKey(volume, bucket, key2);
+ OMDBUpdateEvent keyEvent2 = new OMDBUpdateEvent.
+ OMUpdateEventBuilder<String, OmKeyInfo>()
+ .setKey(omKey)
+ .setValue(omKeyInfo)
+ .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
+ .setTable(
+ omMetadataManager.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED)
+ .getName())
+ .build();
+
+ OMUpdateEventBatch omUpdateEventBatch =
+ new OMUpdateEventBatch(new ArrayList<OMDBUpdateEvent>() {
+ {
+ add(keyEvent1);
+ add(keyEvent2);
+ }
+ });
+
+ // Process PUT event for both the keys
+ containerKeyMapperTask.process(omUpdateEventBatch);
+
+ keyPrefixesForContainer = reconContainerMetadataManager
+ .getKeyPrefixesForContainer(1);
+ assertEquals(2, keyPrefixesForContainer.size());
+ Iterator<ContainerKeyPrefix> iterator =
+ keyPrefixesForContainer.keySet().iterator();
+ ContainerKeyPrefix firstKeyPrefix = iterator.next();
+ ContainerKeyPrefix secondKeyPrefix = iterator.next();
+
+ assertEquals("/" + VOLUME_NAME + "/" + BUCKET_NAME + "/" + DELETED_KEY,
+ firstKeyPrefix.getKeyPrefix());
+ assertEquals("/" + VOLUME_NAME + "/" + BUCKET_NAME + "/" + INSERTED_KEY,
+ secondKeyPrefix.getKeyPrefix());
+
+ omKey = omMetadataManager.getOzoneKey(volume, bucket, key2);
+ OMDBUpdateEvent keyEvent3 = new OMDBUpdateEvent.
+ OMUpdateEventBuilder<String, OmKeyInfo>()
+ .setKey(omKey)
+ .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE)
+ .setTable(
+ omMetadataManager.getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED)
+ .getName())
+ .build();
+ OMUpdateEventBatch omUpdateEventBatch2 =
+ new OMUpdateEventBatch(new ArrayList<OMDBUpdateEvent>() {
+ {
+ add(keyEvent3);
+ }
+ });
+
+ // Process DELETE event for key2
+ containerKeyMapperTask.process(omUpdateEventBatch2);
+
+ keyPrefixesForContainer = reconContainerMetadataManager
+ .getKeyPrefixesForContainer(1);
+ // The second key is deleted
+ assertEquals(1, keyPrefixesForContainer.size());
+ iterator = keyPrefixesForContainer.keySet().iterator();
+ firstKeyPrefix = iterator.next();
+ assertEquals("/" + VOLUME_NAME + "/" + BUCKET_NAME + "/" + INSERTED_KEY,
+ firstKeyPrefix.getKeyPrefix());
+ }
+
private OmKeyInfo buildOmKeyInfo(String volume,
String bucket,
String key,
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]