[
https://issues.apache.org/jira/browse/HDFS-15762?focusedWorklogId=532638&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-532638
]
ASF GitHub Bot logged work on HDFS-15762:
-----------------------------------------
Author: ASF GitHub Bot
Created on: 07/Jan/21 17:12
Start Date: 07/Jan/21 17:12
Worklog Time Spent: 10m
Work Description: amahussein commented on a change in pull request #2598:
URL: https://github.com/apache/hadoop/pull/2598#discussion_r553452665
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
##########
@@ -251,62 +252,48 @@ public void testMultipleNNPortOverwriteDownStream()
throws Exception {
clientConf.set(HADOOP_RPC_PROTECTION, "privacy");
FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf);
doTest(fsPrivacy, PATH1);
- for (int i = 0; i < 2; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferClient saslClient = dn.getSaslClient();
- String qop = null;
- // It may take some time for the qop to populate
- // to all DNs, check in a loop.
- for (int trial = 0; trial < 10; trial++) {
- qop = saslClient.getTargetQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ long count = dataNodes.stream()
+ .map(dn -> dn.getSaslClient().getTargetQOP())
+ .filter(Objects::nonNull).filter(qop -> qop.equals("auth")).count();
+ // For each data pipeline, targetQOPs of sasl clients in the first two
+ // datanodes become equal to auth.
+ // Note that it is not necessarily the case for all datanodes,
+ // since a datanode may be always at the last position in pipelines.
+ assertTrue("At least two qops should be auth", count >= 2);
Review comment:
The original code expected that the qop might take some time to
populate.
```java
// It may take some time for the qop to populate
// to all DNs, check in a loop.
```
The new changes got rid of the retrying. Did you consider wrapping the lambda
count expression inside `GenericTestUtils.waitFor()`?
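For illustration, a minimal sketch of what that could look like (the polling
interval and timeout below are placeholder values, not taken from the PR):
```java
// Sketch only: poll until at least two datanodes report targetQOP == "auth",
// preserving the original test's tolerance for slow propagation.
GenericTestUtils.waitFor(() -> {
  long auths = dataNodes.stream()
      .map(dn -> dn.getSaslClient().getTargetQOP())
      .filter(Objects::nonNull)
      .filter(qop -> qop.equals("auth"))
      .count();
  return auths >= 2;
}, 100, 10000);
```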
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
##########
@@ -251,62 +252,48 @@ public void testMultipleNNPortOverwriteDownStream()
throws Exception {
clientConf.set(HADOOP_RPC_PROTECTION, "privacy");
FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf);
doTest(fsPrivacy, PATH1);
- for (int i = 0; i < 2; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferClient saslClient = dn.getSaslClient();
- String qop = null;
- // It may take some time for the qop to populate
- // to all DNs, check in a loop.
- for (int trial = 0; trial < 10; trial++) {
- qop = saslClient.getTargetQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ long count = dataNodes.stream()
+ .map(dn -> dn.getSaslClient().getTargetQOP())
+ .filter(Objects::nonNull).filter(qop -> qop.equals("auth")).count();
+ // For each data pipeline, targetQOPs of sasl clients in the first two
+ // datanodes become equal to auth.
+ // Note that it is not necessarily the case for all datanodes,
+ // since a datanode may be always at the last position in pipelines.
+ assertTrue("At least two qops should be auth", count >= 2);
clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
+ // Reset targetQOPs to null to clear the last state.
+ resetTargetQOP(dataNodes);
doTest(fsIntegrity, PATH2);
- for (int i = 0; i < 2; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferClient saslClient = dn.getSaslClient();
- String qop = null;
- for (int trial = 0; trial < 10; trial++) {
- qop = saslClient.getTargetQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ count = dataNodes.stream().map(dn -> dn.getSaslClient().getTargetQOP())
Review comment:
Same comment about `GenericTestUtils.waitFor()`
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
##########
@@ -251,62 +252,48 @@ public void testMultipleNNPortOverwriteDownStream()
throws Exception {
clientConf.set(HADOOP_RPC_PROTECTION, "privacy");
FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf);
doTest(fsPrivacy, PATH1);
- for (int i = 0; i < 2; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferClient saslClient = dn.getSaslClient();
- String qop = null;
- // It may take some time for the qop to populate
- // to all DNs, check in a loop.
- for (int trial = 0; trial < 10; trial++) {
- qop = saslClient.getTargetQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ long count = dataNodes.stream()
+ .map(dn -> dn.getSaslClient().getTargetQOP())
+ .filter(Objects::nonNull).filter(qop -> qop.equals("auth")).count();
+ // For each data pipeline, targetQOPs of sasl clients in the first two
+ // datanodes become equal to auth.
+ // Note that it is not necessarily the case for all datanodes,
+ // since a datanode may be always at the last position in pipelines.
+ assertTrue("At least two qops should be auth", count >= 2);
clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
+ // Reset targetQOPs to null to clear the last state.
+ resetTargetQOP(dataNodes);
doTest(fsIntegrity, PATH2);
- for (int i = 0; i < 2; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferClient saslClient = dn.getSaslClient();
- String qop = null;
- for (int trial = 0; trial < 10; trial++) {
- qop = saslClient.getTargetQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ count = dataNodes.stream().map(dn -> dn.getSaslClient().getTargetQOP())
+ .filter(Objects::nonNull).filter(qop -> qop.equals("auth")).count();
+ assertTrue("At least two qops should be auth", count >= 2);
clientConf.set(HADOOP_RPC_PROTECTION, "authentication");
FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf);
+ // Reset negotiatedQOPs to null to clear the last state.
+ resetNegotiatedQOP(dataNodes);
doTest(fsAuth, PATH3);
- for (int i = 0; i < 3; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferServer saslServer = dn.getSaslServer();
- String qop = null;
- for (int trial = 0; trial < 10; trial++) {
- qop = saslServer.getNegotiatedQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ count = dataNodes.stream()
+ .map(dn -> dn.getSaslServer().getNegotiatedQOP())
+ .filter(Objects::nonNull).filter(qop -> qop.equals("auth")).count();
+ assertEquals("All qops should be auth", 3, count);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
+ private static void resetTargetQOP(List<DataNode> dns) {
Review comment:
Why is a reset step added to the test? Was the unit test wrong in not
resetting the value before?
I see that the original code only checks for not-null to consider the value
updated. Is there a better way to be more specific for each step?
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
##########
@@ -251,62 +252,48 @@ public void testMultipleNNPortOverwriteDownStream()
throws Exception {
clientConf.set(HADOOP_RPC_PROTECTION, "privacy");
FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf);
doTest(fsPrivacy, PATH1);
- for (int i = 0; i < 2; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferClient saslClient = dn.getSaslClient();
- String qop = null;
- // It may take some time for the qop to populate
- // to all DNs, check in a loop.
- for (int trial = 0; trial < 10; trial++) {
- qop = saslClient.getTargetQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ long count = dataNodes.stream()
+ .map(dn -> dn.getSaslClient().getTargetQOP())
+ .filter(Objects::nonNull).filter(qop -> qop.equals("auth")).count();
+ // For each data pipeline, targetQOPs of sasl clients in the first two
+ // datanodes become equal to auth.
+ // Note that it is not necessarily the case for all datanodes,
+ // since a datanode may be always at the last position in pipelines.
+ assertTrue("At least two qops should be auth", count >= 2);
clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
+ // Reset targetQOPs to null to clear the last state.
+ resetTargetQOP(dataNodes);
doTest(fsIntegrity, PATH2);
- for (int i = 0; i < 2; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferClient saslClient = dn.getSaslClient();
- String qop = null;
- for (int trial = 0; trial < 10; trial++) {
- qop = saslClient.getTargetQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ count = dataNodes.stream().map(dn -> dn.getSaslClient().getTargetQOP())
+ .filter(Objects::nonNull).filter(qop -> qop.equals("auth")).count();
+ assertTrue("At least two qops should be auth", count >= 2);
clientConf.set(HADOOP_RPC_PROTECTION, "authentication");
FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf);
+ // Reset negotiatedQOPs to null to clear the last state.
+ resetNegotiatedQOP(dataNodes);
doTest(fsAuth, PATH3);
- for (int i = 0; i < 3; i++) {
- DataNode dn = dataNodes.get(i);
- SaslDataTransferServer saslServer = dn.getSaslServer();
- String qop = null;
- for (int trial = 0; trial < 10; trial++) {
- qop = saslServer.getNegotiatedQOP();
- if (qop != null) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals("auth", qop);
- }
+ count = dataNodes.stream()
Review comment:
Same comment about `GenericTestUtils.waitFor()`
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
##########
@@ -348,6 +348,11 @@ public String getNegotiatedQOP() {
return negotiatedQOP;
}
+ @VisibleForTesting
+ public void setNegotiatedQOP(String negotiatedQOP) {
Review comment:
Same comment as for `setTargetQOP()` about adding a public setter.
##########
File path:
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
##########
@@ -405,6 +405,11 @@ public String getTargetQOP() {
return targetQOP;
}
+ @VisibleForTesting
+ public void setTargetQOP(String targetQOP) {
Review comment:
I am not sure this is the best way to fix the test case. Making a field's
setter public only to enable a change in a unit test breaks the abstraction
and encapsulation of the object.
Is there another way to achieve this without a public setter?
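For illustration only, one hypothetical alternative (not from the PR; the
method name is invented here): expose a narrowly scoped, test-only reset
instead of a general-purpose setter, so callers cannot write arbitrary values.
```java
// Hypothetical sketch: a reset-only hook for tests instead of a full setter.
@VisibleForTesting
public void clearTargetQOPForTesting() {
  this.targetQOP = null;
}
```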
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
Issue Time Tracking
-------------------
Worklog Id: (was: 532638)
Time Spent: 1h 20m (was: 1h 10m)
> TestMultipleNNPortQOP#testMultipleNNPortOverwriteDownStream fails
> intermittently
> --------------------------------------------------------------------------------
>
> Key: HDFS-15762
> URL: https://issues.apache.org/jira/browse/HDFS-15762
> Project: Hadoop HDFS
> Issue Type: Sub-task
> Reporter: Toshihiko Uchida
> Assignee: Toshihiko Uchida
> Priority: Minor
> Labels: flaky-test, pull-request-available
> Attachments: PR2585#1-TestMultipleNNPortQOP-output.txt
>
> Time Spent: 1h 20m
> Remaining Estimate: 0h
>
> This unit test failed in https://github.com/apache/hadoop/pull/2585 due to an
> AssertionError.
> {code}
> java.lang.AssertionError: expected:<auth> but was:<null>
> at org.junit.Assert.fail(Assert.java:88)
> at org.junit.Assert.failNotEquals(Assert.java:834)
> at org.junit.Assert.assertEquals(Assert.java:118)
> at org.junit.Assert.assertEquals(Assert.java:144)
> at
> org.apache.hadoop.hdfs.TestMultipleNNPortQOP.testMultipleNNPortOverwriteDownStream(TestMultipleNNPortQOP.java:267)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at
> sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498)
> at
> org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
> at
> org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
> at
> org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
> at
> org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
> at
> org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
> at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:325)
> at
> org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:78)
> at
> org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
> at org.junit.runners.ParentRunner$3.run(ParentRunner.java:290)
> at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:71)
> at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:288)
> at org.junit.runners.ParentRunner.access$000(ParentRunner.java:58)
> at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:268)
> at
> org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
> at
> org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
> at org.junit.runners.ParentRunner.run(ParentRunner.java:363)
> at
> org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:365)
> at
> org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:273)
> at
> org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:238)
> at
> org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:159)
> at
> org.apache.maven.surefire.booter.ForkedBooter.invokeProviderInSameClassLoader(ForkedBooter.java:384)
> at
> org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:345)
> at
> org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:126)
> at
> org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:418)
> {code}
> The failure occurred at the following assertion.
> {code}
> doTest(fsPrivacy, PATH1);
> for (int i = 0; i < 2; i++) {
> DataNode dn = dataNodes.get(i);
> SaslDataTransferClient saslClient = dn.getSaslClient();
> String qop = null;
> // It may take some time for the qop to populate
> // to all DNs, check in a loop.
> for (int trial = 0; trial < 10; trial++) {
> qop = saslClient.getTargetQOP();
> if (qop != null) {
> break;
> }
> Thread.sleep(100);
> }
> assertEquals("auth", qop);
> }
> {code}
--
This message was sent by Atlassian Jira
(v8.3.4#803005)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]