[
https://issues.apache.org/jira/browse/HDFS-14013?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17289660#comment-17289660
]
Hadoop QA commented on HDFS-14013:
----------------------------------
| (x) *{color:red}-1 overall{color}* |
\\
\\
|| Vote || Subsystem || Runtime || Logfile || Comment ||
| {color:blue}0{color} | {color:blue} reexec {color} | {color:blue} 1m 53s{color} | {color:blue}{color} | {color:blue} Docker mode activated. {color} |
|| || || || {color:brown} Prechecks {color} || ||
| {color:green}+1{color} | {color:green} dupname {color} | {color:green} 0m 0s{color} | {color:green}{color} | {color:green} No case conflicting files found. {color} |
| {color:green}+1{color} | {color:green} @author {color} | {color:green} 0m 0s{color} | {color:green}{color} | {color:green} The patch does not contain any @author tags. {color} |
| {color:green}+1{color} | {color:green} {color} | {color:green} 0m 0s{color} | {color:green}test4tests{color} | {color:green} The patch appears to include 1 new or modified test files. {color} |
|| || || || {color:brown} trunk Compile Tests {color} || ||
| {color:blue}0{color} | {color:blue} mvndep {color} | {color:blue} 1m 47s{color} | {color:blue}{color} | {color:blue} Maven dependency ordering for branch {color} |
| {color:green}+1{color} | {color:green} mvninstall {color} | {color:green} 22m 41s{color} | {color:green}{color} | {color:green} trunk passed {color} |
| {color:green}+1{color} | {color:green} compile {color} | {color:green} 22m 47s{color} | {color:green}{color} | {color:green} trunk passed with JDK Ubuntu-11.0.10+9-Ubuntu-0ubuntu1.20.04 {color} |
| {color:green}+1{color} | {color:green} compile {color} | {color:green} 19m 21s{color} | {color:green}{color} | {color:green} trunk passed with JDK Private Build-1.8.0_282-8u282-b08-0ubuntu1~20.04-b08 {color} |
| {color:green}+1{color} | {color:green} checkstyle {color} | {color:green} 3m 54s{color} | {color:green}{color} | {color:green} trunk passed {color} |
| {color:green}+1{color} | {color:green} mvnsite {color} | {color:green} 2m 59s{color} | {color:green}{color} | {color:green} trunk passed {color} |
| {color:green}+1{color} | {color:green} shadedclient {color} | {color:green} 22m 30s{color} | {color:green}{color} | {color:green} branch has no errors when building and testing our client artifacts. {color} |
| {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 1m 58s{color} | {color:green}{color} | {color:green} trunk passed with JDK Ubuntu-11.0.10+9-Ubuntu-0ubuntu1.20.04 {color} |
| {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 3m 8s{color} | {color:green}{color} | {color:green} trunk passed with JDK Private Build-1.8.0_282-8u282-b08-0ubuntu1~20.04-b08 {color} |
| {color:blue}0{color} | {color:blue} spotbugs {color} | {color:blue} 3m 23s{color} | {color:blue}{color} | {color:blue} Used deprecated FindBugs config; considering switching to SpotBugs. {color} |
| {color:green}+1{color} | {color:green} findbugs {color} | {color:green} 5m 38s{color} | {color:green}{color} | {color:green} trunk passed {color} |
|| || || || {color:brown} Patch Compile Tests {color} || ||
| {color:blue}0{color} | {color:blue} mvndep {color} | {color:blue} 0m 21s{color} | {color:blue}{color} | {color:blue} Maven dependency ordering for patch {color} |
| {color:green}+1{color} | {color:green} mvninstall {color} | {color:green} 2m 8s{color} | {color:green}{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} compile {color} | {color:green} 21m 53s{color} | {color:green}{color} | {color:green} the patch passed with JDK Ubuntu-11.0.10+9-Ubuntu-0ubuntu1.20.04 {color} |
| {color:green}+1{color} | {color:green} javac {color} | {color:green} 21m 53s{color} | {color:green}{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} compile {color} | {color:green} 19m 18s{color} | {color:green}{color} | {color:green} the patch passed with JDK Private Build-1.8.0_282-8u282-b08-0ubuntu1~20.04-b08 {color} |
| {color:green}+1{color} | {color:green} javac {color} | {color:green} 19m 18s{color} | {color:green}{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} checkstyle {color} | {color:green} 3m 59s{color} | {color:green}{color} | {color:green} root: The patch generated 0 new + 38 unchanged - 1 fixed = 38 total (was 39) {color} |
| {color:green}+1{color} | {color:green} mvnsite {color} | {color:green} 2m 58s{color} | {color:green}{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} whitespace {color} | {color:green} 0m 0s{color} | {color:green}{color} | {color:green} The patch has no whitespace issues. {color} |
| {color:green}+1{color} | {color:green} shadedclient {color} | {color:green} 15m 21s{color} | {color:green}{color} | {color:green} patch has no errors when building and testing our client artifacts. {color} |
| {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 2m 18s{color} | {color:green}{color} | {color:green} the patch passed with JDK Ubuntu-11.0.10+9-Ubuntu-0ubuntu1.20.04 {color} |
| {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 3m 8s{color} | {color:green}{color} | {color:green} the patch passed with JDK Private Build-1.8.0_282-8u282-b08-0ubuntu1~20.04-b08 {color} |
| {color:green}+1{color} | {color:green} findbugs {color} | {color:green} 6m 10s{color} | {color:green}{color} | {color:green} the patch passed {color} |
|| || || || {color:brown} Other Tests {color} || ||
| {color:red}-1{color} | {color:red} unit {color} | {color:red} 20m 34s{color} | {color:red}https://ci-hadoop.apache.org/job/PreCommit-HDFS-Build/495/artifact/out/patch-unit-hadoop-common-project_hadoop-common.txt{color} | {color:red} hadoop-common in the patch failed. {color} |
| {color:red}-1{color} | {color:red} unit {color} | {color:red}273m 47s{color} | {color:red}https://ci-hadoop.apache.org/job/PreCommit-HDFS-Build/495/artifact/out/patch-unit-hadoop-hdfs-project_hadoop-hdfs.txt{color} | {color:red} hadoop-hdfs in the patch failed. {color} |
| {color:green}+1{color} | {color:green} asflicense {color} | {color:green} 1m 4s{color} | {color:green}{color} | {color:green} The patch does not generate ASF License warnings. {color} |
| {color:black}{color} | {color:black} {color} | {color:black}478m 23s{color} | {color:black}{color} | {color:black}{color} |
\\
\\
|| Reason || Tests ||
| Failed junit tests | hadoop.ha.TestZKFailoverController |
| | hadoop.metrics2.source.TestJvmMetrics |
| | hadoop.hdfs.TestDatanodeRegistration |
| | hadoop.hdfs.TestClientProtocolForPipelineRecovery |
| | hadoop.hdfs.TestRollingUpgrade |
| | hadoop.hdfs.TestReconstructStripedFile |
| | hadoop.hdfs.server.balancer.TestBalancerRPCDelay |
| | hadoop.hdfs.TestSafeModeWithStripedFileWithRandomECPolicy |
| | hadoop.hdfs.server.balancer.TestBalancer |
| | hadoop.hdfs.TestDFSStripedOutputStreamWithRandomECPolicy |
| | hadoop.hdfs.server.balancer.TestBalancerWithMultipleNameNodes |
| | hadoop.hdfs.TestDecommissionWithStripedBackoffMonitor |
| | hadoop.hdfs.server.blockmanagement.TestBlockInfoStriped |
| | hadoop.hdfs.tools.TestDFSZKFailoverController |
| | hadoop.hdfs.TestReconstructStripedFileWithRandomECPolicy |
\\
\\
|| Subsystem || Report/Notes ||
| Docker | ClientAPI=1.41 ServerAPI=1.41 base: https://ci-hadoop.apache.org/job/PreCommit-HDFS-Build/495/artifact/out/Dockerfile |
| JIRA Issue | HDFS-14013 |
| JIRA Patch URL | https://issues.apache.org/jira/secure/attachment/13021062/HDFS-14013.001.patch |
| Optional Tests | dupname asflicense compile javac javadoc mvninstall mvnsite unit shadedclient findbugs checkstyle |
| uname | Linux 4556bd285e18 4.15.0-101-generic #102-Ubuntu SMP Mon May 11 10:07:26 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux |
| Build tool | maven |
| Personality | personality/hadoop.sh |
| git revision | trunk / 7b7c0019f42 |
| Default Java | Private Build-1.8.0_282-8u282-b08-0ubuntu1~20.04-b08 |
| Multi-JDK versions | /usr/lib/jvm/java-11-openjdk-amd64:Ubuntu-11.0.10+9-Ubuntu-0ubuntu1.20.04 /usr/lib/jvm/java-8-openjdk-amd64:Private Build-1.8.0_282-8u282-b08-0ubuntu1~20.04-b08 |
| Test Results | https://ci-hadoop.apache.org/job/PreCommit-HDFS-Build/495/testReport/ |
| Max. process+thread count | 2022 (vs. ulimit of 5500) |
| modules | C: hadoop-common-project/hadoop-common hadoop-hdfs-project/hadoop-hdfs U: . |
| Console output | https://ci-hadoop.apache.org/job/PreCommit-HDFS-Build/495/console |
| versions | git=2.25.1 maven=3.6.3 findbugs=4.0.6 |
| Powered by | Apache Yetus 0.13.0-SNAPSHOT https://yetus.apache.org |
This message was automatically generated.
> HDFS ZKFC on standby NN not starting with credentials stored in hdfs
> --------------------------------------------------------------------
>
> Key: HDFS-14013
> URL: https://issues.apache.org/jira/browse/HDFS-14013
> Project: Hadoop HDFS
> Issue Type: Bug
> Components: hdfs
> Affects Versions: 3.1.1
> Reporter: Krzysztof Adamski
> Assignee: Stephen O'Donnell
> Priority: Major
> Labels: zkfc
> Attachments: HDFS-14013.001.patch, hadoop-hdfs-zkfc-server1.log
>
>
> HDFS ZKFailoverController does not start correctly on the standby NameNode when
> the credential provider is stored in HDFS. Removing the credential provider
> entry from core-site.xml helps. See the full exception stack attached.
> It looks like it only checks the credentials against the NameNode on the same
> host instead of redirecting to the active one. It may make sense to delay the
> credential check until after the active NameNode is elected, and to redirect to
> the active NameNode as well (see the sketch after the stack trace below).
>
>
> {code:java}
> 2018-10-22 08:17:09,251 FATAL tools.DFSZKFailoverController (DFSZKFailoverController.java:main(197)) - DFSZKFailOverController exiting due to earlier exception java.io.IOException: Configuration problem with provider path.
> 2018-10-22 08:17:09,252 DEBUG util.ExitUtil (ExitUtil.java:terminate(209)) - Exiting with status 1: java.io.IOException: Configuration problem with provider path.
> 1: java.io.IOException: Configuration problem with provider path.
>     at org.apache.hadoop.util.ExitUtil.terminate(ExitUtil.java:265)
>     at org.apache.hadoop.hdfs.tools.DFSZKFailoverController.main(DFSZKFailoverController.java:199)
> Caused by: java.io.IOException: Configuration problem with provider path.
>     at org.apache.hadoop.conf.Configuration.getPasswordFromCredentialProviders(Configuration.java:2363)
>     at org.apache.hadoop.conf.Configuration.getPassword(Configuration.java:2282)
>     at org.apache.hadoop.security.SecurityUtil.getZKAuthInfos(SecurityUtil.java:732)
>     at org.apache.hadoop.ha.ZKFailoverController.initZK(ZKFailoverController.java:343)
>     at org.apache.hadoop.ha.ZKFailoverController.doRun(ZKFailoverController.java:194)
>     at org.apache.hadoop.ha.ZKFailoverController.access$000(ZKFailoverController.java:60)
>     at org.apache.hadoop.ha.ZKFailoverController$1.run(ZKFailoverController.java:175)
>     at org.apache.hadoop.ha.ZKFailoverController$1.run(ZKFailoverController.java:171)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:360)
>     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1710)
>     at org.apache.hadoop.security.SecurityUtil.doAsLoginUserOrFatal(SecurityUtil.java:480)
>     at org.apache.hadoop.ha.ZKFailoverController.run(ZKFailoverController.java:171)
>     at org.apache.hadoop.hdfs.tools.DFSZKFailoverController.main(DFSZKFailoverController.java:195)
> Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.ipc.StandbyException): Operation category READ is not supported in state standby. Visit https://s.apache.org/sbnn-error
>     at org.apache.hadoop.hdfs.server.namenode.ha.StandbyState.checkOperation(StandbyState.java:88)
>     at org.apache.hadoop.hdfs.server.namenode.NameNode$NameNodeHAContext.checkOperation(NameNode.java:1951)
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkOperation(FSNamesystem.java:1427)
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getFileInfo(FSNamesystem.java:3100)
>     at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.getFileInfo(NameNodeRpcServer.java:1154)
>     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.getFileInfo(ClientNamenodeProtocolServerSideTranslatorPB.java:966)
>     at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:524)
>     at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1025)
>     at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:876)
>     at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:822)
>     at java.security.AccessController.doPrivileged(Native Method)
>     at javax.security.auth.Subject.doAs(Subject.java:422)
>     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
>     at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2682)
>     at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1497)
>     at org.apache.hadoop.ipc.Client.call(Client.java:1443)
>     at org.apache.hadoop.ipc.Client.call(Client.java:1353)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:228)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:116)
>     at com.sun.proxy.$Proxy9.getFileInfo(Unknown Source)
>     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:900)
>     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>     at java.lang.reflect.Method.invoke(Method.java:498)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:422)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:165)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:157)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
>     at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:359)
>     at com.sun.proxy.$Proxy10.getFileInfo(Unknown Source)
>     at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:1654)
>     at org.apache.hadoop.hdfs.DistributedFileSystem$29.doCall(DistributedFileSystem.java:1583)
>     at org.apache.hadoop.hdfs.DistributedFileSystem$29.doCall(DistributedFileSystem.java:1580)
>     at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
>     at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1595)
>     at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1734)
>     at org.apache.hadoop.security.alias.JavaKeyStoreProvider.keystoreExists(JavaKeyStoreProvider.java:65)
>     at org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider.locateKeystore(AbstractJavaKeyStoreProvider.java:319)
>     at org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider.<init>(AbstractJavaKeyStoreProvider.java:86)
>     at org.apache.hadoop.security.alias.JavaKeyStoreProvider.<init>(JavaKeyStoreProvider.java:49)
>     at org.apache.hadoop.security.alias.JavaKeyStoreProvider.<init>(JavaKeyStoreProvider.java:41)
>     at org.apache.hadoop.security.alias.JavaKeyStoreProvider$Factory.createProvider(JavaKeyStoreProvider.java:100)
>     at org.apache.hadoop.security.alias.CredentialProviderFactory.getProviders(CredentialProviderFactory.java:73)
>     at org.apache.hadoop.conf.Configuration.getPasswordFromCredentialProviders(Configuration.java:2344)
>     ... 13 more
> {code}
>
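For context on the failure mode quoted above: the stack trace shows that resolving hadoop.security.credential.provider.path triggers JavaKeyStoreProvider.keystoreExists, which issues a getFileInfo RPC against the local NameNode; while that NameNode is still in standby state, the call fails with StandbyException before the ZKFC can ever join the election. Below is a minimal illustrative sketch of that call chain, assuming a hypothetical jceks URI; it is not the committed HDFS-14013 patch. It also shows ProviderUtils.excludeIncompatibleCredentialProviders, an existing Hadoop API (used elsewhere, e.g. by filesystem connectors) for stripping providers backed by the same filesystem, which is one possible way to break this kind of circular dependency.

{code:java}
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class ZkfcCredentialPathSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Hypothetical core-site.xml setting: the keystore lives *inside* the HDFS
    // cluster that the ZKFC is supposed to protect.
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "jceks://hdfs@mycluster/security/zk-auth.jceks");

    // Reproduces the quoted call chain: getPassword ->
    // getPasswordFromCredentialProviders -> JavaKeyStoreProvider.keystoreExists
    // -> DistributedFileSystem.getFileStatus. If the local NameNode is in
    // standby state and the client does not fail over, this throws the
    // "Operation category READ is not supported in state standby" exception
    // seen above. "ha.zookeeper.auth" is the key the ZKFC resolves through
    // SecurityUtil.getZKAuthInfos.
    char[] zkAuth = conf.getPassword("ha.zookeeper.auth");
    System.out.println("resolved: " + (zkAuth == null ? "none" : "ok"));

    // One possible direction for a fix: before resolving the password, drop
    // credential providers that are backed by the very filesystem whose
    // availability depends on this daemon, so ZKFC startup cannot deadlock
    // on a standby NameNode.
    Configuration zkfcConf = ProviderUtils.excludeIncompatibleCredentialProviders(
        conf, DistributedFileSystem.class);
    char[] safeZkAuth = zkfcConf.getPassword("ha.zookeeper.auth");
    System.out.println("resolved after exclusion: "
        + (safeZkAuth == null ? "none" : "ok"));
  }
}
{code}

With the HDFS-backed provider excluded, getPassword falls back to any remaining providers or to the plain configuration value (or returns null), so the ZKFC can finish initZK and join the election instead of dying at startup. Whether to exclude the provider, delay the check until an active NameNode exists, or retry against the active NameNode is the design question this issue raises.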
--
This message was sent by Atlassian Jira
(v8.3.4#803005)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]