adoroszlai commented on a change in pull request #1523:
URL: https://github.com/apache/ozone/pull/1523#discussion_r533181203
##########
File path:
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java
##########
@@ -397,4 +410,118 @@ public void testSkip() throws Exception {
Assert.assertEquals(inputData[chunkSize + 50 + i], readData[i]);
}
}
+
+ @Test
+ public void readAfterReplication() throws Exception {
+ testReadAfterReplication(false);
+ }
+
+ @Test
+ public void unbuffer() throws Exception {
+ testReadAfterReplication(true);
+ }
+
+ private void testReadAfterReplication(boolean doUnbuffer) throws Exception {
+ Assume.assumeTrue(cluster.getHddsDatanodes().size() > 3);
+
+ int dataLength = 2 * chunkSize;
+ String keyName = getKeyName();
+ OzoneOutputStream key = TestHelper.createKey(keyName,
+ ReplicationType.RATIS, dataLength, objectStore, volumeName,
bucketName);
+
+ byte[] data = writeRandomBytes(key, dataLength);
+
+ OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(keyName)
+ .setType(HddsProtos.ReplicationType.RATIS)
+ .setFactor(HddsProtos.ReplicationFactor.THREE)
+ .build();
+ OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
+
+ OmKeyLocationInfoGroup locations = keyInfo.getLatestVersionLocations();
+ Assert.assertNotNull(locations);
+ List<OmKeyLocationInfo> locationInfoList = locations.getLocationList();
+ Assert.assertEquals(1, locationInfoList.size());
+ OmKeyLocationInfo loc = locationInfoList.get(0);
+ long containerID = loc.getContainerID();
+ Assert.assertEquals(3, countReplicas(containerID, cluster));
+
+ TestHelper.waitForContainerClose(cluster, containerID);
+
+ List<DatanodeDetails> pipelineNodes = loc.getPipeline().getNodes();
+
+ // read chunk data
+ try (KeyInputStream keyInputStream = (KeyInputStream) objectStore
+ .getVolume(volumeName).getBucket(bucketName)
+ .readKey(keyName).getInputStream()) {
+
+ int b = keyInputStream.read();
+ Assert.assertNotEquals(-1, b);
+
+ if (doUnbuffer) {
+ keyInputStream.unbuffer();
+ }
+
+ // stop one node, wait for container to be replicated to another one
+ cluster.shutdownHddsDatanode(pipelineNodes.get(0));
+ waitForNodeToBecomeDead(pipelineNodes.get(0));
+ waitForReplicaCount(containerID, 2, cluster);
+ waitForReplicaCount(containerID, 3, cluster);
Review comment:
We need to waitForReplicaCount=2 first, to ensure one of the original
replicas has actually been lost, before waiting for the count to return
to 3 — otherwise the second wait could pass immediately without any
re-replication having occurred.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]