[ https://issues.apache.org/jira/browse/TEZ-3864?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Ayush Saxena resolved TEZ-3864.
-------------------------------
    Resolution: Information Provided

Tez failed to integrate with Hadoop (2.8.2)
-------------------------------------------

                Key: TEZ-3864
                URL: https://issues.apache.org/jira/browse/TEZ-3864
            Project: Apache Tez
         Issue Type: Bug
   Affects Versions: 0.9.0
           Reporter: Shen Yinjie
           Priority: Major

When I integrated Tez (0.9.0) with Hadoop (2.8.2), the Tez service check (orderedwordcount) always failed when running:

    hadoop --config /etc/hadoop/conf jar /usr/lib/tez/tez-examples*.jar orderedwordcount /tmp/tezsmokeinput/sample-tez-test /tmp/tezsmokeoutput/

None of the containers ran successfully; the container logs just print exceptions as follows:

"....TaskAttempt 2 failed, info=[Error: Error while running task ( failure ) : java.lang.RuntimeException: java.io.IOException: Failed on local exception: java.nio.channels.ClosedByInterruptException; Host Details : local host is: "wjf1-hc/xx.xx.xx.xx"; destination host is: "wjf1-hc":8020;
    at org.apache.hadoop.mapreduce.split.TezGroupedSplitsInputFormat$TezGroupedSplitsRecordReader.initNextRecordReader(TezGroupedSplitsInputFormat.java:209)
    at org.apache.hadoop.mapreduce.split.TezGroupedSplitsInputFormat$TezGroupedSplitsRecordReader.initialize(TezGroupedSplitsInputFormat.java:156)
    at org.apache.tez.mapreduce.lib.MRReaderMapReduce.setupNewRecordReader(MRReaderMapReduce.java:157)
    at org.apache.tez.mapreduce.lib.MRReaderMapReduce.setSplit(MRReaderMapReduce.java:88)
    at org.apache.tez.mapreduce.input.MRInput.initFromEventInternal(MRInput.java:703)
    at org.apache.tez.mapreduce.input.MRInput.processSplitEvent(MRInput.java:631)
    at org.apache.tez.mapreduce.input.MRInput.handleEvents(MRInput.java:590)
    at org.apache.tez.runtime.LogicalIOProcessorRuntimeTask.handleEvent(LogicalIOProcessorRuntimeTask.java:719)
    at org.apache.tez.runtime.LogicalIOProcessorRuntimeTask.access$600(LogicalIOProcessorRuntimeTask.java:106)
    at org.apache.tez.runtime.LogicalIOProcessorRuntimeTask$1.runInternal(LogicalIOProcessorRuntimeTask.java:796)
    at org.apache.tez.common.RunnableWithNdc.run(RunnableWithNdc.java:35)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.io.IOException: Failed on local exception: java.nio.channels.ClosedByInterruptException; Host Details : local host is: "wjf1-hc/xx.xx.xx.xx"; destination host is: "wjf1-hc":8020;
    at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:785)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1499)
    at org.apache.hadoop.ipc.Client.call(Client.java:1441)
    at org.apache.hadoop.ipc.Client.call(Client.java:1351)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:235)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:116)
    at com.sun.proxy.$Proxy14.getBlockLocations(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getBlockLocations(ClientNamenodeProtocolTranslatorPB.java:259)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:409)
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:163)
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:155)
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:95)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:346)
    at com.sun.proxy.$Proxy15.getBlockLocations(Unknown Source)
    at org.apache.hadoop.hdfs.DFSClient.callGetBlockLocations(DFSClient.java:830)
    at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:819)
    at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:808)
    at org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocksAndGetLastBlockLength(DFSInputStream.java:319)
    at org.apache.hadoop.hdfs.DFSInputStream.openInfo(DFSInputStream.java:281)
    at org.apache.hadoop.hdfs.DFSInputStream.<init>(DFSInputStream.java:270)
    at org.apache.hadoop.hdfs.DFSClient.open(DFSClient.java:1119)
    at org.apache.hadoop.hdfs.DistributedFileSystem$4.doCall(DistributedFileSystem.java:343)
    at org.apache.hadoop.hdfs.DistributedFileSystem$4.doCall(DistributedFileSystem.java:339)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.open(DistributedFileSystem.java:351)
    at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:787)
    at org.apache.hadoop.mapreduce.lib.input.LineRecordReader.initialize(LineRecordReader.java:85)
    at org.apache.hadoop.mapreduce.split.TezGroupedSplitsInputFormat$TezGroupedSplitsRecordReader.initNextRecordReader(TezGroupedSplitsInputFormat.java:207)
    ... 11 more
Caused by: java.nio.channels.ClosedByInterruptException
    at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:202)
    at sun.nio.ch.SocketChannelImpl.connect(SocketChannelImpl.java:659)
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:192)
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:531)
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:495)
    at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:687)
    at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:783)
    at org.apache.hadoop.ipc.Client$Connection.access$3500(Client.java:415)
    at org.apache.hadoop.ipc.Client.getConnection(Client.java:1556)
    at org.apache.hadoop.ipc.Client.call(Client.java:1387)
    ... 40 more
[... the errorMessage field then repeats the identical stack trace, again ending in java.nio.channels.ClosedByInterruptException against "wjf1-hc":8020 ...]
......."

It seems the task tries to connect to the NameNode, but some error occurs while doing so. My Hadoop cluster has NameNode federation enabled.
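Two illustrative sketches (not part of the original report). First, the root cause at the bottom of the trace, java.nio.channels.ClosedByInterruptException, is only thrown when the thread blocked in an interruptible channel operation (here the RPC connect to the NameNode) gets interrupted, so the trace tells us the task thread was interrupted mid-connect; why it was interrupted (for example, the attempt being killed or timing out) is the open question. A minimal, hypothetical Java demo of that mechanism (the unroutable address 10.255.255.1 is just a placeholder chosen so the connect stays blocked long enough to be interrupted):

    import java.net.InetSocketAddress;
    import java.nio.channels.ClosedByInterruptException;
    import java.nio.channels.SocketChannel;

    // Interrupting a thread blocked in SocketChannel.connect() closes the
    // channel and throws ClosedByInterruptException, the same exception type
    // seen at the bottom of the Tez container trace.
    public class InterruptedConnectDemo {
        public static void main(String[] args) throws Exception {
            Thread worker = new Thread(() -> {
                try (SocketChannel ch = SocketChannel.open()) {
                    // Blocking connect to an address assumed to be unreachable.
                    ch.connect(new InetSocketAddress("10.255.255.1", 8020));
                } catch (ClosedByInterruptException e) {
                    System.out.println("Connect was interrupted: " + e);
                } catch (Exception e) {
                    System.out.println("Failed for another reason: " + e);
                }
            });
            worker.start();
            Thread.sleep(1000);   // give the connect time to start blocking
            worker.interrupt();   // interrupting the thread closes the channel mid-connect
            worker.join();
        }
    }

Second, since the failing call is the split reader opening its input file (FileSystem.open -> DFSClient.open -> getBlockLocations against wjf1-hc:8020), the NameNode/federation side can be checked independently of Tez by running the same open path from a plain Hadoop client on a worker node. A minimal sketch, assuming /etc/hadoop/conf (and therefore the cluster's fs.defaultFS, e.g. a viewfs mount table when federation is on) is on the classpath, and reusing the smoke-test input path from the report:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Opens the smoke-test input through whatever fs.defaultFS the cluster
    // configuration resolves to, mirroring the FileSystem.open() ->
    // DFSClient.open() path in the stack trace.
    public class NameNodeOpenCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();  // picks up core-site.xml / hdfs-site.xml from the classpath
            System.out.println("fs.defaultFS = " + conf.get("fs.defaultFS"));

            // Path taken from the failing orderedwordcount run; adjust as needed.
            Path input = new Path("/tmp/tezsmokeinput/sample-tez-test");
            try (FileSystem fs = FileSystem.get(conf);
                 FSDataInputStream in = fs.open(input)) {
                System.out.println("Opened " + input + ", first byte = " + in.read());
            }
        }
    }

If this standalone check also fails to reach wjf1-hc:8020, the problem is more likely in the client configuration or network than in Tez itself; if it succeeds, the interruption more likely comes from the task-attempt lifecycle (e.g. the attempt being killed while its record reader is still initializing).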