[ 
https://issues.apache.org/jira/browse/HADOOP-2669?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12570895#action_12570895
 ] 

Pete Wyckoff commented on HADOOP-2669:
--------------------------------------

org.apache.hadoop.util.DiskChecker$DiskErrorException: Could not find any valid local directory for 
task_200802191501_0724_r_000007_0/map_94.out
        at 
org.apache.hadoop.fs.LocalDirAllocator$AllocatorPerContext.getLocalPathForWrite(LocalDirAllocator.java:313)
        at 
org.apache.hadoop.fs.LocalDirAllocator.getLocalPathForWrite(LocalDirAllocator.java:124)
        at 
org.apache.hadoop.mapred.MapOutputFile.getInputFileForWrite(MapOutputFile.java:133)
        at 
org.apache.hadoop.mapred.ReduceTask$ReduceCopier$InMemFSMergeThread.run(ReduceTask.java:1314)

2008-02-20 05:17:29,319 INFO org.apache.hadoop.mapred.ReduceTask: 
task_200802191501_0724_r_000007_0 done copying 
task_200802191501_0724_m_000237_0 output from hadoopX.facebook.com..
2008-02-20 05:17:29,319 INFO org.apache.hadoop.mapred.ReduceTask: 
task_200802191501_0724_r_000007_0 Copying task_200802191501_0724_m_000146_0 
output from hadoopA.facebook.com..
2008-02-20 05:17:29,322 ERROR org.apache.hadoop.mapred.ReduceTask: Map output 
copy failure: java.lang.NullPointerException
        at 
org.apache.hadoop.fs.InMemoryFileSystem$RawInMemoryFileSystem$InMemoryOutputStream.close(InMemoryFileSystem.java:161)
        at 
org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:49)
        at 
org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:64)
        at 
org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.close(ChecksumFileSystem.java:332)
        at 
org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:49)
        at 
org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:64)
        at 
org.apache.hadoop.mapred.MapOutputLocation.getFile(MapOutputLocation.java:253)
        at 
org.apache.hadoop.mapred.ReduceTask$ReduceCopier$MapOutputCopier.copyOutput(ReduceTask.java:714)
        at 
org.apache.hadoop.mapred.ReduceTask$ReduceCopier$MapOutputCopier.run(ReduceTask.java:666)

2008-02-20 05:17:29,322 INFO org.apache.hadoop.mapred.ReduceTask: 
task_200802191501_0724_r_000007_0 Copying task_200802191501_0724_m_000055_0 
output from hadoopY.facebook.com..
2008-02-20 05:17:29,322 WARN org.apache.hadoop.mapred.TaskTracker: Error 
running child
java.io.IOException: task_200802191501_0724_r_000007_0The reduce copier failed
        at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:254)
        at 
org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:1804)
2008-02-20 05:17:29,324 ERROR org.apache.hadoop.mapred.ReduceTask: Map output 
copy failure: java.lang.NullPointerException
        at 
org.apache.hadoop.fs.InMemoryFileSystem$RawInMemoryFileSystem$InMemoryOutputStream.close(InMemoryFileSystem.java:161)
        at 
org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:49)
        at 
org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:64)
        at 
org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.close(ChecksumFileSystem.java:332)
        at 
org.apache.hadoop.fs.FSDataOutputStream$PositionCache.close(FSDataOutputStream.java:49)
        at 
org.apache.hadoop.fs.FSDataOutputStream.close(FSDataOutputStream.java:64)
        at 
org.apache.hadoop.mapred.MapOutputLocation.getFile(MapOutputLocation.java:253)
        at 
org.apache.hadoop.mapred.ReduceTask$ReduceCopier$MapOutputCopier.copyOutput(ReduceTask.java:714)
        at 
org.apache.hadoop.mapred.ReduceTask$ReduceCopier$MapOutputCopier.run(ReduceTask.java:666)

2008-02-20 05:17:29,325 INFO org.apache.hadoop.mapred.ReduceTask: 
task_200802191501_0724_r_000007_0 Copying task_200802191501_0724_m_000120_0 
output from hadoopZ.facebook.com..


> DFS client lost lease during writing into DFS files
> ---------------------------------------------------
>
>                 Key: HADOOP-2669
>                 URL: https://issues.apache.org/jira/browse/HADOOP-2669
>             Project: Hadoop Core
>          Issue Type: Bug
>          Components: dfs
>            Reporter: Runping Qi
>
> I have a program that reads a block compressed sequence file, does some 
> processing on the records and writes the
> processed records into another block compressed sequence file.
> During execution of the program, I got the following exception: 
> org.apache.hadoop.ipc.RemoteException: 
> org.apache.hadoop.dfs.LeaseExpiredException: No lease on xxxxx/part-00000
>         at 
> org.apache.hadoop.dfs.FSNamesystem.getAdditionalBlock(FSNamesystem.java:976)
>         at org.apache.hadoop.dfs.NameNode.addBlock(NameNode.java:293)
>         at sun.reflect.GeneratedMethodAccessor47.invoke(Unknown Source)
>         at 
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
>         at java.lang.reflect.Method.invoke(Method.java:597)
>         at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:379)
>         at org.apache.hadoop.ipc.Server$Handler.run(Server.java:596)
>         at org.apache.hadoop.ipc.Client.call(Client.java:482)
>         at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:184)
>         at org.apache.hadoop.dfs.$Proxy0.addBlock(Unknown Source)
>         at sun.reflect.GeneratedMethodAccessor3.invoke(Unknown Source)
>         at 
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
>         at java.lang.reflect.Method.invoke(Method.java:597)
>         at 
> org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
>         at 
> org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
>         at org.apache.hadoop.dfs.$Proxy0.addBlock(Unknown Source)
>         at 
> org.apache.hadoop.dfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:1554)
>         at 
> org.apache.hadoop.dfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:1500)
>         at 
> org.apache.hadoop.dfs.DFSClient$DFSOutputStream.endBlock(DFSClient.java:1626)
>         at 
> org.apache.hadoop.dfs.DFSClient$DFSOutputStream.writeChunk(DFSClient.java:1602)
>         at 
> org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunk(FSOutputSummer.java:140)
>         at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:100)
>         at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:86)
>         at 
> org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:39)
>         at java.io.DataOutputStream.write(DataOutputStream.java:90)
>         at 
> org.apache.hadoop.io.SequenceFile$BlockCompressWriter.writeBuffer(SequenceFile.java:1181)
>         at 
> org.apache.hadoop.io.SequenceFile$BlockCompressWriter.sync(SequenceFile.java:1198)
>         at 
> org.apache.hadoop.io.SequenceFile$BlockCompressWriter.append(SequenceFile.java:1248)
>         at 
> org.apache.hadoop.mapred.SequenceFileOutputFormat$1.write(SequenceFileOutputFormat.java:69)
>      

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.

Reply via email to