[ https://issues.apache.org/jira/browse/HDDS-10949?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Wei-Chiu Chuang resolved HDDS-10949.
------------------------------------
Resolution: Duplicate
> IndexOutOfBoundsException encountered while running hive-write in LR setup
> --------------------------------------------------------------------------
>
> Key: HDDS-10949
> URL: https://issues.apache.org/jira/browse/HDDS-10949
> Project: Apache Ozone
> Issue Type: Bug
> Reporter: Jyotirmoy Sinha
> Priority: Major
>
> An IndexOutOfBoundsException is thrown while running the hive-write workload in the LR setup: Hive's MoveTask calls FileSystem.getFileChecksum on an EC file, and the Ozone EC checksum computation fails in ECBlockChecksumComputer.computeCompositeCrc.
> Error stack trace:
> {code:java}
> E java.lang.IndexOutOfBoundsException
> E at java.nio.ByteBuffer.wrap(ByteBuffer.java:375)
> E at org.apache.hadoop.ozone.client.checksum.ECBlockChecksumComputer.computeCompositeCrc(ECBlockChecksumComputer.java:163)
> E at org.apache.hadoop.ozone.client.checksum.ECBlockChecksumComputer.compute(ECBlockChecksumComputer.java:65)
> E at org.apache.hadoop.ozone.client.checksum.ECFileChecksumHelper.getBlockChecksumFromChunkChecksums(ECFileChecksumHelper.java:148)
> E at org.apache.hadoop.ozone.client.checksum.ECFileChecksumHelper.checksumBlock(ECFileChecksumHelper.java:106)
> E at org.apache.hadoop.ozone.client.checksum.ECFileChecksumHelper.checksumBlocks(ECFileChecksumHelper.java:73)
> E at org.apache.hadoop.ozone.client.checksum.BaseFileChecksumHelper.compute(BaseFileChecksumHelper.java:220)
> E at org.apache.hadoop.fs.ozone.OzoneClientUtils.getFileChecksumWithCombineMode(OzoneClientUtils.java:223)
> E at org.apache.hadoop.fs.ozone.BasicRootedOzoneClientAdapterImpl.getFileChecksum(BasicRootedOzoneClientAdapterImpl.java:1271)
> E at org.apache.hadoop.fs.ozone.BasicRootedOzoneFileSystem.getFileChecksum(BasicRootedOzoneFileSystem.java:996)
> E at org.apache.hadoop.fs.FileSystem.getFileChecksum(FileSystem.java:2831)
> E at org.apache.hadoop.hive.ql.metadata.Hive.addInsertNonDirectoryInformation(Hive.java:3703)
> E at org.apache.hadoop.hive.ql.metadata.Hive.addInsertFileInformation(Hive.java:3676)
> E at org.apache.hadoop.hive.ql.metadata.Hive.addWriteNotificationLog(Hive.java:3581)
> E at org.apache.hadoop.hive.ql.metadata.Hive.addWriteNotificationLog(Hive.java:3566)
> E at org.apache.hadoop.hive.ql.metadata.Hive.loadTable(Hive.java:3224)
> E at org.apache.hadoop.hive.ql.exec.MoveTask.execute(MoveTask.java:465)
> E at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:213)
> E at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:105)
> E at org.apache.hadoop.hive.ql.Executor.launchTask(Executor.java:357)
> E at org.apache.hadoop.hive.ql.Executor.launchTasks(Executor.java:330)
> E at org.apache.hadoop.hive.ql.Executor.runTasks(Executor.java:246)
> E at org.apache.hadoop.hive.ql.Executor.execute(Executor.java:109)
> E at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:772)
> E at org.apache.hadoop.hive.ql.Driver.run(Driver.java:511)
> E at org.apache.hadoop.hive.ql.Driver.run(Driver.java:505)
> E at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:166)
> E at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:229)
> E at org.apache.hive.service.cli.operation.SQLOperation.access$700(SQLOperation.java:91)
> E at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork$1.run(SQLOperation.java:329)
> E at java.security.AccessController.doPrivileged(Native Method)
> E at javax.security.auth.Subject.doAs(Subject.java:422)
> E at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1898)
> E at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork.run(SQLOperation.java:347)
> E at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
> E at java.util.concurrent.FutureTask.run(FutureTask.java:266)
> E at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
> E at java.util.concurrent.FutureTask.run(FutureTask.java:266)
> E at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> E at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> E at java.lang.Thread.run(Thread.java:748)
> E
> E ERROR : FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.exec.MoveTask. java.lang.IndexOutOfBoundsException
> E at java.nio.ByteBuffer.wrap(ByteBuffer.java:375)
> E at org.apache.hadoop.ozone.client.checksum.ECBlockChecksumComputer.computeCompositeCrc(ECBlockChecksumComputer.java:163)
> E at org.apache.hadoop.ozone.client.checksum.ECBlockChecksumComputer.compute(ECBlockChecksumComputer.java:65)
> E at org.apache.hadoop.ozone.client.checksum.ECFileChecksumHelper.getBlockChecksumFromChunkChecksums(ECFileChecksumHelper.java:148)
> E at org.apache.hadoop.ozone.client.checksum.ECFileChecksumHelper.checksumBlock(ECFileChecksumHelper.java:106)
> E at org.apache.hadoop.ozone.client.checksum.ECFileChecksumHelper.checksumBlocks(ECFileChecksumHelper.java:73)
> E at org.apache.hadoop.ozone.client.checksum.BaseFileChecksumHelper.compute(BaseFileChecksumHelper.java:220)
> E at org.apache.hadoop.fs.ozone.OzoneClientUtils.getFileChecksumWithCombineMode(OzoneClientUtils.java:223)
> E at org.apache.hadoop.fs.ozone.BasicRootedOzoneClientAdapterImpl.getFileChecksum(BasicRootedOzoneClientAdapterImpl.java:1271)
> E at org.apache.hadoop.fs.ozone.BasicRootedOzoneFileSystem.getFileChecksum(BasicRootedOzoneFileSystem.java:996)
> E at org.apache.hadoop.fs.FileSystem.getFileChecksum(FileSystem.java:2831)
> E at org.apache.hadoop.hive.ql.metadata.Hive.addInsertNonDirectoryInformation(Hive.java:3703)
> E at org.apache.hadoop.hive.ql.metadata.Hive.addInsertFileInformation(Hive.java:3676)
> E at org.apache.hadoop.hive.ql.metadata.Hive.addWriteNotificationLog(Hive.java:3581)
> E at org.apache.hadoop.hive.ql.metadata.Hive.addWriteNotificationLog(Hive.java:3566)
> E at org.apache.hadoop.hive.ql.metadata.Hive.loadTable(Hive.java:3224)
> E at org.apache.hadoop.hive.ql.exec.MoveTask.execute(MoveTask.java:465)
> E at org.apache.hadoop.hive.ql.exec.Task.executeTask(Task.java:213)
> E at org.apache.hadoop.hive.ql.exec.TaskRunner.runSequential(TaskRunner.java:105)
> E at org.apache.hadoop.hive.ql.Executor.launchTask(Executor.java:357)
> E at org.apache.hadoop.hive.ql.Executor.launchTasks(Executor.java:330)
> E at org.apache.hadoop.hive.ql.Executor.runTasks(Executor.java:246)
> E at org.apache.hadoop.hive.ql.Executor.execute(Executor.java:109)
> E at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:772)
> E at org.apache.hadoop.hive.ql.Driver.run(Driver.java:511)
> E at org.apache.hadoop.hive.ql.Driver.run(Driver.java:505)
> E at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:166)
> E at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:229)
> E at org.apache.hive.service.cli.operation.SQLOperation.access$700(SQLOperation.java:91)
> E at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork$1.run(SQLOperation.java:329)
> E at java.security.AccessController.doPrivileged(Native Method)
> E at javax.security.auth.Subject.doAs(Subject.java:422)
> E at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1898)
> E at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork.run(SQLOperation.java:347)
> E at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
> E at java.util.concurrent.FutureTask.run(FutureTask.java:266)
> E at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
> E at java.util.concurrent.FutureTask.run(FutureTask.java:266)
> E at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> E at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> E at java.lang.Thread.run(Thread.java:748)
> {code}
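> For reference, ByteBuffer.wrap(byte[], int, int) throws IndexOutOfBoundsException whenever the requested offset/length window falls outside the backing array, which matches the top frame above (ByteBuffer.wrap called from ECBlockChecksumComputer.computeCompositeCrc). The standalone sketch below reproduces only that failure mode; it is not Ozone code, and the chunk/checksum sizes in it are illustrative assumptions, not values taken from the failing run.
> {code:java}
> // Standalone sketch (assumption-based, not Ozone code): demonstrates how
> // ByteBuffer.wrap(array, offset, length) fails when the computed window
> // runs past the end of the checksum byte array.
> import java.nio.ByteBuffer;
>
> public class WrapOutOfBoundsSketch {
>   public static void main(String[] args) {
>     // Hypothetical chunk-checksum buffer: 4-byte CRCs exist for only 3 sub-chunks.
>     byte[] chunkChecksumBytes = new byte[4 * 3];
>     int bytesPerCrc = 4;
>     int assumedSubChunks = 5; // caller assumes checksums for 5 sub-chunks are present
>
>     for (int i = 0; i < assumedSubChunks; i++) {
>       int offset = i * bytesPerCrc;
>       // For i >= 3, offset + bytesPerCrc > chunkChecksumBytes.length, and wrap()
>       // throws IndexOutOfBoundsException, as in the trace above.
>       ByteBuffer crc = ByteBuffer.wrap(chunkChecksumBytes, offset, bytesPerCrc);
>       System.out.println("sub-chunk " + i + " crc = " + Integer.toHexString(crc.getInt()));
>     }
>   }
> }
> {code}
> If computeCompositeCrc derives its slice offsets from the expected stripe layout rather than from the checksum bytes actually returned for a short or partial last stripe, a mismatch of this kind would surface exactly as the exception above; that is stated here as a hypothesis only.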