[ https://issues.apache.org/jira/browse/HDFS-13138?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16374300#comment-16374300 ]

genericqa commented on HDFS-13138:
----------------------------------

| (x) *{color:red}-1 overall{color}* |
\\
\\
|| Vote || Subsystem || Runtime || Comment ||
| {color:blue}0{color} | {color:blue} reexec {color} | {color:blue} 11m 
16s{color} | {color:blue} Docker mode activated. {color} |
|| || || || {color:brown} Prechecks {color} ||
| {color:green}+1{color} | {color:green} @author {color} | {color:green}  0m  
0s{color} | {color:green} The patch does not contain any @author tags. {color} |
| {color:green}+1{color} | {color:green} test4tests {color} | {color:green}  0m 
 0s{color} | {color:green} The patch appears to include 1 new or modified test 
files. {color} |
|| || || || {color:brown} branch-2.7 Compile Tests {color} ||
| {color:green}+1{color} | {color:green} mvninstall {color} | {color:green}  8m 
 2s{color} | {color:green} branch-2.7 passed {color} |
| {color:green}+1{color} | {color:green} compile {color} | {color:green}  1m  
6s{color} | {color:green} branch-2.7 passed {color} |
| {color:green}+1{color} | {color:green} checkstyle {color} | {color:green}  0m 
27s{color} | {color:green} branch-2.7 passed {color} |
| {color:green}+1{color} | {color:green} mvnsite {color} | {color:green}  1m  
3s{color} | {color:green} branch-2.7 passed {color} |
| {color:green}+1{color} | {color:green} findbugs {color} | {color:green}  3m  
0s{color} | {color:green} branch-2.7 passed {color} |
| {color:green}+1{color} | {color:green} javadoc {color} | {color:green}  1m 
48s{color} | {color:green} branch-2.7 passed {color} |
|| || || || {color:brown} Patch Compile Tests {color} ||
| {color:green}+1{color} | {color:green} mvninstall {color} | {color:green}  0m 
55s{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} compile {color} | {color:green}  1m  
1s{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} javac {color} | {color:green}  1m  
1s{color} | {color:green} the patch passed {color} |
| {color:orange}-0{color} | {color:orange} checkstyle {color} | {color:orange}  
0m 23s{color} | {color:orange} hadoop-hdfs-project/hadoop-hdfs: The patch 
generated 1 new + 184 unchanged - 0 fixed = 185 total (was 184) {color} |
| {color:green}+1{color} | {color:green} mvnsite {color} | {color:green}  0m 
58s{color} | {color:green} the patch passed {color} |
| {color:red}-1{color} | {color:red} whitespace {color} | {color:red}  0m  
0s{color} | {color:red} The patch has 60 line(s) that end in whitespace. Use 
git apply --whitespace=fix <<patch_file>>. Refer 
https://git-scm.com/docs/git-apply {color} |
| {color:green}+1{color} | {color:green} findbugs {color} | {color:green}  3m 
14s{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} javadoc {color} | {color:green}  1m 
46s{color} | {color:green} the patch passed {color} |
|| || || || {color:brown} Other Tests {color} ||
| {color:red}-1{color} | {color:red} unit {color} | {color:red}205m 30s{color} 
| {color:red} hadoop-hdfs in the patch failed. {color} |
| {color:red}-1{color} | {color:red} asflicense {color} | {color:red}  2m 
38s{color} | {color:red} The patch generated 246 ASF License warnings. {color} |
| {color:black}{color} | {color:black} {color} | {color:black}244m 49s{color} | 
{color:black} {color} |
\\
\\
|| Reason || Tests ||
| Unreaped Processes | hadoop-hdfs:52 |
| Failed junit tests | hadoop.hdfs.TestFsShellPermission |
|   | hadoop.hdfs.TestDistributedFileSystem |
| Timed out junit tests | org.apache.hadoop.hdfs.TestEncryptionZones |
|   | org.apache.hadoop.hdfs.TestLeaseRecovery2 |
|   | org.apache.hadoop.hdfs.tools.TestDFSAdminWithHA |
|   | org.apache.hadoop.hdfs.qjournal.client.TestQuorumJournalManager |
|   | org.apache.hadoop.hdfs.tools.offlineImageViewer.TestOfflineImageViewer |
|   | org.apache.hadoop.hdfs.tools.offlineEditsViewer.TestOfflineEditsViewer |
|   | org.apache.hadoop.hdfs.TestDatanodeRegistration |
|   | org.apache.hadoop.hdfs.TestDFSClientFailover |
|   | org.apache.hadoop.hdfs.TestSetrepDecreasing |
|   | org.apache.hadoop.hdfs.TestReplaceDatanodeOnFailure |
|   | org.apache.hadoop.hdfs.qjournal.server.TestJournalNode |
|   | org.apache.hadoop.hdfs.TestQuota |
|   | org.apache.hadoop.hdfs.TestDataTransferKeepalive |
|   | org.apache.hadoop.hdfs.TestDatanodeDeath |
|   | org.apache.hadoop.hdfs.TestFileLengthOnClusterRestart |
|   | org.apache.hadoop.hdfs.TestFileAppend |
|   | org.apache.hadoop.hdfs.TestPread |
|   | org.apache.hadoop.hdfs.TestSafeMode |
|   | org.apache.hadoop.hdfs.TestFileAppend4 |
|   | org.apache.hadoop.hdfs.TestDFSFinalize |
|   | org.apache.hadoop.hdfs.qjournal.client.TestQJMWithFaults |
|   | org.apache.hadoop.hdfs.TestDFSUpgradeFromImage |
|   | org.apache.hadoop.hdfs.web.TestWebHdfsTokens |
|   | org.apache.hadoop.hdfs.security.TestDelegationToken |
|   | org.apache.hadoop.hdfs.TestFileCorruption |
|   | org.apache.hadoop.hdfs.TestCrcCorruption |
|   | org.apache.hadoop.hdfs.TestFileCreationDelete |
|   | org.apache.hadoop.hdfs.TestEncryptionZonesWithKMS |
|   | org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM |
|   | org.apache.hadoop.hdfs.TestBlockStoragePolicy |
|   | org.apache.hadoop.hdfs.TestRollingUpgrade |
|   | org.apache.hadoop.hdfs.TestLease |
|   | org.apache.hadoop.hdfs.qjournal.TestNNWithQJM |
|   | org.apache.hadoop.hdfs.web.TestWebHDFS |
|   | org.apache.hadoop.hdfs.tools.TestDFSHAAdminMiniCluster |
|   | org.apache.hadoop.hdfs.tools.TestGetGroups |
|   | org.apache.hadoop.hdfs.web.TestWebHDFSXAttr |
|   | org.apache.hadoop.hdfs.TestRollingUpgradeRollback |
|   | org.apache.hadoop.hdfs.TestBlockReaderLocal |
|   | org.apache.hadoop.hdfs.web.TestWebHdfsWithMultipleNameNodes |
|   | org.apache.hadoop.hdfs.security.TestDelegationTokenForProxyUser |
|   | org.apache.hadoop.hdfs.web.TestWebHDFSForHA |
|   | org.apache.hadoop.hdfs.TestBalancerBandwidth |
|   | org.apache.hadoop.hdfs.TestEncryptedTransfer |
|   | org.apache.hadoop.hdfs.TestDFSShell |
|   | org.apache.hadoop.hdfs.web.TestHftpFileSystem |
|   | org.apache.hadoop.hdfs.TestPersistBlocks |
|   | org.apache.hadoop.hdfs.tools.TestDebugAdmin |
|   | org.apache.hadoop.hdfs.TestGetFileChecksum |
|   | org.apache.hadoop.hdfs.web.TestWebHDFSAcl |
\\
\\
|| Subsystem || Report/Notes ||
| Docker | Client=17.05.0-ce Server=17.05.0-ce Image:yetus/hadoop:ea57d10 |
| JIRA Issue | HDFS-13138 |
| JIRA Patch URL | 
https://issues.apache.org/jira/secure/attachment/12911690/HDFS-13138.003.branch-2.7.patch
 |
| Optional Tests |  asflicense  compile  javac  javadoc  mvninstall  mvnsite  
unit  shadedclient  findbugs  checkstyle  |
| uname | Linux 6e61e9814b50 3.13.0-139-generic #188-Ubuntu SMP Tue Jan 9 
14:43:09 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux |
| Build tool | maven |
| Personality | /testptch/patchprocess/precommit/personality/provided.sh |
| git revision | branch-2.7 / 1f2ab8b |
| maven | version: Apache Maven 3.0.5 |
| Default Java | 1.7.0_151 |
| findbugs | v3.0.0 |
| checkstyle | 
https://builds.apache.org/job/PreCommit-HDFS-Build/23166/artifact/out/diff-checkstyle-hadoop-hdfs-project_hadoop-hdfs.txt
 |
| whitespace | 
https://builds.apache.org/job/PreCommit-HDFS-Build/23166/artifact/out/whitespace-eol.txt
 |
| Unreaped Processes Log | 
https://builds.apache.org/job/PreCommit-HDFS-Build/23166/artifact/out/patch-unit-hadoop-hdfs-project_hadoop-hdfs-reaper.txt
 |
| unit | 
https://builds.apache.org/job/PreCommit-HDFS-Build/23166/artifact/out/patch-unit-hadoop-hdfs-project_hadoop-hdfs.txt
 |
|  Test Results | 
https://builds.apache.org/job/PreCommit-HDFS-Build/23166/testReport/ |
| asflicense | 
https://builds.apache.org/job/PreCommit-HDFS-Build/23166/artifact/out/patch-asflicense-problems.txt
 |
| Max. process+thread count | 9931 (vs. ulimit of 10000) |
| modules | C: hadoop-hdfs-project/hadoop-hdfs U: 
hadoop-hdfs-project/hadoop-hdfs |
| Console output | 
https://builds.apache.org/job/PreCommit-HDFS-Build/23166/console |
| Powered by | Apache Yetus 0.8.0-SNAPSHOT   http://yetus.apache.org |


This message was automatically generated.



> webhdfs of federated namenode does not  work properly
> -----------------------------------------------------
>
>                 Key: HDFS-13138
>                 URL: https://issues.apache.org/jira/browse/HDFS-13138
>             Project: Hadoop HDFS
>          Issue Type: Bug
>          Components: webhdfs
>    Affects Versions: 2.7.1, 3.0.0
>            Reporter: KWON BYUNGCHANG
>            Priority: Major
>         Attachments: HDFS-13138.001.branch-2.7.patch, HDFS-13138.001.patch, 
> HDFS-13138.002.branch-2.7.patch, HDFS-13138.002.patch, 
> HDFS-13138.003.branch-2.7.patch, HDFS-13138.003.patch
>
>
> My cluster has multiple namenodes using HDFS Federation.
> WebHDFS on a namenode that is not the defaultFS does not work properly:
> when I uploaded a file to a non-defaultFS namenode using WebHDFS,
> the uploaded file was found on the defaultFS namenode.
>  
> I think the root cause is that
>   clientNamenodeAddress of a non-defaultFS namenode is always fs.defaultFS.
>  
> [https://github.com/apache/hadoop/blob/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java#L462]
>  
> {code:java}
> /**
>    * Set the namenode address that will be used by clients to access this
>    * namenode or name service. This needs to be called before the config
>    * is overriden.
>    */
>   public void setClientNamenodeAddress(Configuration conf) {
>     String nnAddr = conf.get(FS_DEFAULT_NAME_KEY);
>     if (nnAddr == null) {
>       // default fs is not set.
>       clientNamenodeAddress = null;
>       return;
>     }
>     LOG.info("{} is {}", FS_DEFAULT_NAME_KEY, nnAddr);
>     URI nnUri = URI.create(nnAddr);
>     String nnHost = nnUri.getHost();
>     if (nnHost == null) {
>       clientNamenodeAddress = null;
>       return;
>     }
>     if (DFSUtilClient.getNameServiceIds(conf).contains(nnHost)) {
>       // host name is logical
>       clientNamenodeAddress = nnHost;
>     } else if (nnUri.getPort() > 0) {
>       // physical address with a valid port
>       clientNamenodeAddress = nnUri.getAuthority();
>     } else {
>       // the port is missing or 0. Figure out real bind address later.
>       clientNamenodeAddress = null;
>       return;
>     }
>     LOG.info("Clients are to use {} to access"
>         + " this namenode/service.", clientNamenodeAddress );
>   }
> {code}
>  
> So WebHDFS redirects the client to a datanode carrying the wrong
> namenoderpcaddress parameter, and the file finally ends up on the namenode of
> fs.defaultFS.
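> To make the failure mode concrete, here is a rough illustration of the flow
> (hostnames, ports and any omitted query parameters are made up, not taken from a
> real trace): the client writes through the ns2 namenode, but because
> clientNamenodeAddress was derived from fs.defaultFS (hdfs://ns1), the redirect
> carries ns1 in namenoderpcaddress, so the datanode creates the file via ns1.
> {noformat}
> PUT http://nn-of-ns2.example.com:50070/webhdfs/v1/user/foo/bar.txt?op=CREATE
>
> HTTP/1.1 307 Temporary Redirect
> Location: http://dn1.example.com:50075/webhdfs/v1/user/foo/bar.txt?op=CREATE&namenoderpcaddress=ns1&...
> {noformat}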
>  
> The workaround is to
>   configure fs.defaultFS of each namenode to its own nameservice
>   (a small demonstration follows the examples below),
> e.g.
>   hdfs://ns1  has fs.defaultFS=hdfs://ns1
>   hdfs://ns2  has fs.defaultFS=hdfs://ns2
>   ....
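> To show why this works, here is a minimal stand-alone sketch (not part of the
> attached patches; the class name is made up) that mirrors the parsing done in
> setClientNamenodeAddress() quoted above, using Configuration#getTrimmedStrings
> in place of DFSUtilClient.getNameServiceIds to keep it self-contained:
> {code:java}
> import java.net.URI;
> import java.util.Arrays;
>
> import org.apache.hadoop.conf.Configuration;
>
> // Hypothetical demo class, not from the patch: re-runs the logic of
> // setClientNamenodeAddress() to show the effect of the workaround.
> public class ClientNamenodeAddressDemo {
>   public static void main(String[] args) {
>     Configuration conf = new Configuration();
>     // Federated cluster with two nameservices.
>     conf.set("dfs.nameservices", "ns1,ns2");
>     // Workaround: on the ns2 namenode, fs.defaultFS points at its own nameservice.
>     conf.set("fs.defaultFS", "hdfs://ns2");
>
>     String nnAddr = conf.get("fs.defaultFS");
>     String nnHost = URI.create(nnAddr).getHost();   // "ns2"
>
>     // Same membership check that DFSUtilClient.getNameServiceIds(conf)
>     // performs in the snippet above.
>     boolean isLogicalName = Arrays
>         .asList(conf.getTrimmedStrings("dfs.nameservices"))
>         .contains(nnHost);
>
>     String clientNamenodeAddress = isLogicalName ? nnHost : null;
>     // Prints "ns2". With the cluster-wide default (fs.defaultFS=hdfs://ns1)
>     // it would print "ns1" on every namenode, which is the bug reported here.
>     System.out.println("clientNamenodeAddress = " + clientNamenodeAddress);
>   }
> }
> {code}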
>  
>  
>  
>  



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

---------------------------------------------------------------------
To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org
For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
