[ https://issues.apache.org/jira/browse/HDDS-874?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Arpit Agarwal resolved HDDS-874.
--------------------------------
    Resolution: Abandoned

This was never looked at, and it is likely obsolete by now. We have made many 
improvements to Ozone write-path reliability over the past year. Please reopen 
this issue if the problem still occurs.

> ozonefs write operation failed
> ------------------------------
>
>                 Key: HDDS-874
>                 URL: https://issues.apache.org/jira/browse/HDDS-874
>             Project: Hadoop Distributed Data Store
>          Issue Type: Bug
>          Components: Ozone Filesystem
>            Reporter: Nilotpal Nandi
>            Priority: Major
>
> Here is the error thrown on the console while trying to write a file to the 
> Ozone filesystem:
> {noformat}
> [root@ctr-e139-1542663976389-11261-01-000005 test_files]# ozone fs -copyFromLocal 5GB /
> 2018-11-27 04:24:14,117 WARN scm.XceiverClientRatis: 3 way commit failed
> java.util.concurrent.TimeoutException
>  at java.util.concurrent.CompletableFuture.timedGet(CompletableFuture.java:1771)
>  at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1915)
>  at org.apache.hadoop.hdds.scm.XceiverClientRatis.watchForCommit(XceiverClientRatis.java:162)
>  at org.apache.hadoop.hdds.scm.storage.ChunkOutputStream.watchForCommit(ChunkOutputStream.java:303)
>  at org.apache.hadoop.hdds.scm.storage.ChunkOutputStream.handleFullBuffer(ChunkOutputStream.java:289)
>  at org.apache.hadoop.hdds.scm.storage.ChunkOutputStream.write(ChunkOutputStream.java:193)
>  at org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream$ChunkOutputStreamEntry.write(ChunkGroupOutputStream.java:695)
>  at org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream.handleWrite(ChunkGroupOutputStream.java:292)
>  at org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream.write(ChunkGroupOutputStream.java:251)
>  at org.apache.hadoop.fs.ozone.OzoneFSOutputStream.write(OzoneFSOutputStream.java:47)
>  at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:57)
>  at java.io.DataOutputStream.write(DataOutputStream.java:107)
>  at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:96)
>  at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:68)
>  at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:129)
>  at org.apache.hadoop.fs.shell.CommandWithDestination$TargetFileSystem.writeStreamToFile(CommandWithDestination.java:485)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.copyStreamToTarget(CommandWithDestination.java:407)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.copyFileToTarget(CommandWithDestination.java:342)
>  at org.apache.hadoop.fs.shell.CopyCommands$CopyFromLocal.copyFile(CopyCommands.java:357)
>  at org.apache.hadoop.fs.shell.CopyCommands$CopyFromLocal.copyFileToTarget(CopyCommands.java:365)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.processPath(CommandWithDestination.java:277)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.processPath(CommandWithDestination.java:262)
>  at org.apache.hadoop.fs.shell.Command.processPathInternal(Command.java:367)
>  at org.apache.hadoop.fs.shell.Command.processPaths(Command.java:331)
>  at org.apache.hadoop.fs.shell.Command.processPathArgument(Command.java:304)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.processPathArgument(CommandWithDestination.java:257)
>  at org.apache.hadoop.fs.shell.Command.processArgument(Command.java:286)
>  at org.apache.hadoop.fs.shell.Command.processArguments(Command.java:270)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.processArguments(CommandWithDestination.java:228)
>  at org.apache.hadoop.fs.shell.CopyCommands$Put.processArguments(CopyCommands.java:295)
>  at org.apache.hadoop.fs.shell.CopyCommands$CopyFromLocal.processArguments(CopyCommands.java:385)
>  at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:120)
>  at org.apache.hadoop.fs.shell.Command.run(Command.java:177)
>  at org.apache.hadoop.fs.FsShell.run(FsShell.java:327)
>  at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76)
>  at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:90)
>  at org.apache.hadoop.fs.FsShell.main(FsShell.java:390)
> 2018-11-27 04:24:17,497 INFO scm.XceiverClientRatis: Could not commit 146 to all the nodes.Committed by majority.
> 2018-11-27 04:24:55,867 WARN scm.XceiverClientRatis: 3 way commit failed
> java.util.concurrent.TimeoutException
>  at java.util.concurrent.CompletableFuture.timedGet(CompletableFuture.java:1771)
>  at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:1915)
>  at org.apache.hadoop.hdds.scm.XceiverClientRatis.watchForCommit(XceiverClientRatis.java:162)
>  at org.apache.hadoop.hdds.scm.storage.ChunkOutputStream.watchForCommit(ChunkOutputStream.java:303)
>  at org.apache.hadoop.hdds.scm.storage.ChunkOutputStream.handleFullBuffer(ChunkOutputStream.java:289)
>  at org.apache.hadoop.hdds.scm.storage.ChunkOutputStream.write(ChunkOutputStream.java:193)
>  at org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream$ChunkOutputStreamEntry.write(ChunkGroupOutputStream.java:695)
>  at org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream.handleWrite(ChunkGroupOutputStream.java:292)
>  at org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream.write(ChunkGroupOutputStream.java:251)
>  at org.apache.hadoop.fs.ozone.OzoneFSOutputStream.write(OzoneFSOutputStream.java:47)
>  at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:57)
>  at java.io.DataOutputStream.write(DataOutputStream.java:107)
>  at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:96)
>  at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:68)
>  at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:129)
>  at org.apache.hadoop.fs.shell.CommandWithDestination$TargetFileSystem.writeStreamToFile(CommandWithDestination.java:485)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.copyStreamToTarget(CommandWithDestination.java:407)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.copyFileToTarget(CommandWithDestination.java:342)
>  at org.apache.hadoop.fs.shell.CopyCommands$CopyFromLocal.copyFile(CopyCommands.java:357)
>  at org.apache.hadoop.fs.shell.CopyCommands$CopyFromLocal.copyFileToTarget(CopyCommands.java:365)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.processPath(CommandWithDestination.java:277)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.processPath(CommandWithDestination.java:262)
>  at org.apache.hadoop.fs.shell.Command.processPathInternal(Command.java:367)
>  at org.apache.hadoop.fs.shell.Command.processPaths(Command.java:331)
>  at org.apache.hadoop.fs.shell.Command.processPathArgument(Command.java:304)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.processPathArgument(CommandWithDestination.java:257)
>  at org.apache.hadoop.fs.shell.Command.processArgument(Command.java:286)
>  at org.apache.hadoop.fs.shell.Command.processArguments(Command.java:270)
>  at org.apache.hadoop.fs.shell.CommandWithDestination.processArguments(CommandWithDestination.java:228)
>  at org.apache.hadoop.fs.shell.CopyCommands$Put.processArguments(CopyCommands.java:295)
>  at org.apache.hadoop.fs.shell.CopyCommands$CopyFromLocal.processArguments(CopyCommands.java:385)
>  at org.apache.hadoop.fs.shell.FsCommand.processRawArguments(FsCommand.java:120)
>  at org.apache.hadoop.fs.shell.Command.run(Command.java:177)
>  at org.apache.hadoop.fs.FsShell.run(FsShell.java:327)
>  at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76)
>  at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:90)
>  at org.apache.hadoop.fs.FsShell.main(FsShell.java:390)
> 2018-11-27 04:24:55,990 INFO scm.XceiverClientRatis: Could not commit 146 to all the nodes.Committed by majority.
> {noformat}



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
