[ 
https://issues.apache.org/jira/browse/PIG-5450?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17832318#comment-17832318
 ] 

Koji Noguchi commented on PIG-5450:
-----------------------------------

Weird full trace.
{noformat}
2024-03-27 10:50:40,088 [task-result-getter-0] WARN 
org.apache.spark.scheduler.TaskSetManager - Lost task 0.0 in stage 0.0 (TID 0) 
(gsrd238n05.red.ygrid.yahoo.com executor 1): org.apache.spark.SparkException: 
Task failed while writing rows
at 
org.apache.spark.internal.io.SparkHadoopWriter$.executeTask(SparkHadoopWriter.scala:163)
at 
org.apache.spark.internal.io.SparkHadoopWriter$.$anonfun$write$1(SparkHadoopWriter.scala:88)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:131)
at 
org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:506)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1491)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:509)
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.VerifyError: Bad return type
Exception Details:
Location:
org/apache/orc/impl/TypeUtils.createColumn(Lorg/apache/orc/TypeDescription;Lorg/apache/orc/TypeDescription$RowBatchVersion;I)Lorg/apache/hadoop/hive/ql/exec/vector/ColumnVector;
 @117: areturn
Reason:
Type 'org/apache/hadoop/hive/ql/exec/vector/DateColumnVector' (current frame, 
stack[0]) is not assignable to 
'org/apache/hadoop/hive/ql/exec/vector/ColumnVector' (from method signature)
Current Frame:
bci: @117
flags: { }
locals: { 'org/apache/orc/TypeDescription', 
'org/apache/orc/TypeDescription$RowBatchVersion', integer }
stack: { 'org/apache/hadoop/hive/ql/exec/vector/DateColumnVector' }
Bytecode:
0x0000000: b200 022a b600 03b6 0004 2eaa 0000 0181
0x0000010: 0000 0001 0000 0013 0000 0059 0000 0059
0x0000020: 0000 0059 0000 0059 0000 0059 0000 0062
0x0000030: 0000 006b 0000 006b 0000 0074 0000 0074
0x0000040: 0000 007d 0000 00ad 0000 00ad 0000 00ad
0x0000050: 0000 00ad 0000 00b6 0000 00f7 0000 0138
0x0000060: 0000 0155 bb00 0559 1cb7 0006 b0bb 0007
0x0000070: 591c b700 08b0 bb00 0959 1cb7 000a b0bb
0x0000080: 000b 591c b700 0cb0 2ab6 000d 3e2a b600
0x0000090: 0e36 042b b200 0fa5 0009 1d10 12a4 000f
0x00000a0: bb00 1159 1c1d 1504 b700 12b0 bb00 1359
0x00000b0: 1c1d 1504 b700 14b0 bb00 1559 1cb7 0016
0x00000c0: b02a b600 174e 2db9 0018 0100 bd00 193a
0x00000d0: 0403 3605 1505 1904 bea2 001e 1904 1505
0x00000e0: 2d15 05b9 001a 0200 c000 102b 1cb8 001b
0x00000f0: 5384 0501 a7ff e0bb 001c 591c 1904 b700
0x0000100: 1db0 2ab6 0017 4e2d b900 1801 00bd 0019
0x0000110: 3a04 0336 0515 0519 04be a200 1e19 0415
0x0000120: 052d 1505 b900 1a02 00c0 0010 2b1c b800
0x0000130: 1b53 8405 01a7 ffe0 bb00 1e59 1c19 04b7
0x0000140: 001f b02a b600 174e bb00 2059 1c2d 03b9
0x0000150: 001a 0200 c000 102b 1cb8 001b b700 21b0
0x0000160: 2ab6 0017 4ebb 0022 591c 2d03 b900 1a02
0x0000170: 00c0 0010 2b1c b800 1b2d 04b9 001a 0200
0x0000180: c000 102b 1cb8 001b b700 23b0 bb00 2459
0x0000190: bb00 2559 b700 2612 27b6 0028 2ab6 0003
0x00001a0: b600 29b6 002a b700 2bbf
Stackmap Table:
same_frame_extended(@100)
same_frame(@109)
same_frame(@118)
same_frame(@127)
same_frame(@136)
append_frame(@160,Integer,Integer)
same_frame(@172)
chop_frame(@184,2)
same_frame(@193)
append_frame(@212,Object[_75],Object[_76],Integer)
chop_frame(@247,1)
chop_frame(@258,2)
append_frame(@277,Object[_75],Object[_76],Integer)
chop_frame(@312,1)
chop_frame(@323,2)
same_frame(@352)
same_frame(@396)

at org.apache.orc.TypeDescription.createRowBatch(TypeDescription.java:483)
at org.apache.hadoop.hive.ql.io.orc.WriterImpl.<init>(WriterImpl.java:100)
at org.apache.hadoop.hive.ql.io.orc.OrcFile.createWriter(OrcFile.java:334)
at 
org.apache.hadoop.hive.ql.io.orc.OrcNewOutputFormat$OrcRecordWriter.write(OrcNewOutputFormat.java:51)
at 
org.apache.hadoop.hive.ql.io.orc.OrcNewOutputFormat$OrcRecordWriter.write(OrcNewOutputFormat.java:37)
at org.apache.pig.builtin.OrcStorage.putNext(OrcStorage.java:249)
at 
org.apache.pig.backend.hadoop.executionengine.physicalLayer.relationalOperators.StoreFuncDecorator.putNext(StoreFuncDecorator.java:75)
at 
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigOutputFormat$PigRecordWriter.write(PigOutputFormat.java:146)
at 
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigOutputFormat$PigRecordWriter.write(PigOutputFormat.java:98)
at 
org.apache.spark.internal.io.HadoopMapReduceWriteConfigUtil.write(SparkHadoopWriter.scala:368)
at 
org.apache.spark.internal.io.SparkHadoopWriter$.$anonfun$executeTask$1(SparkHadoopWriter.scala:138)
at 
org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1525)
at 
org.apache.spark.internal.io.SparkHadoopWriter$.executeTask(SparkHadoopWriter.scala:135)
... 9 more
Suppressed: 
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException):
 Failed to CREATE_FILE 
/user/gtrain/.Trash/240327160000/81550086/h3/pige2e_spark/output_pige2e/_temporary/1/_temporary/attempt_1694019138198_2588853_m_000134_0/out/gtrain-1711536593-orc.conf/Orc_1.out.intermediate/_temporary/1494317182/_temporary/attempt_202403271050267949716090078319975_0003_r_000000_0/part-r-00000
 for gtrain:DFSClient_attempt__0001_m_000001_1_-1444003586_41@10.205.130.22 on 
10.205.130.22 because 
gtrain:DFSClient_attempt__0001_m_000001_1_-1444003586_41@10.205.130.22 is 
already the current lease holder.
at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.recoverLeaseInternal(FSNamesystem.java:2442)
at 
org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.startFile(FSDirWriteFileOp.java:340)
at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2339)
at 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2269)
at 
org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:787)
at 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:429)
at 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at 
org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:530)
at 
org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:500)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1081)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1006)
at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:939)
at java.security.AccessController.doPrivileged(Native Method)
at 
org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:2215)
at 
org.apache.hadoop.security.UserGroupInformation.doAsPrivileged(UserGroupInformation.java:2203)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2880)

at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1569)
at org.apache.hadoop.ipc.Client.call(Client.java:1515)
at org.apache.hadoop.ipc.Client.call(Client.java:1412)
at 
org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:232)
at 
org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:120)
at com.sun.proxy.$Proxy18.create(Unknown Source)
at 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.create(ClientNamenodeProtocolTranslatorPB.java:303)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:510)
at 
org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:211)
at 
org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:203)
at 
org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:139)
at 
org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:445)
at com.sun.proxy.$Proxy19.create(Unknown Source)
at 
org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:268)
at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1246)
at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1181)
at 
org.apache.hadoop.hdfs.DistributedFileSystem$8.doCall(DistributedFileSystem.java:476)
at 
org.apache.hadoop.hdfs.DistributedFileSystem$8.doCall(DistributedFileSystem.java:473)
at 
org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at 
org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:487)
at 
org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:414)
at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:1031)
at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:1012)
at org.apache.orc.impl.PhysicalFsWriter.<init>(PhysicalFsWriter.java:115)
at org.apache.orc.impl.WriterImpl.<init>(WriterImpl.java:168)
at org.apache.hadoop.hive.ql.io.orc.WriterImpl.<init>(WriterImpl.java:94)
at org.apache.hadoop.hive.ql.io.orc.OrcFile.createWriter(OrcFile.java:334)
at 
org.apache.hadoop.hive.ql.io.orc.OrcNewOutputFormat$OrcRecordWriter.close(OrcNewOutputFormat.java:65)
at 
org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigOutputFormat$PigRecordWriter.close(PigOutputFormat.java:157)
at 
org.apache.spark.internal.io.HadoopMapReduceWriteConfigUtil.closeWriter(SparkHadoopWriter.scala:373)
at 
org.apache.spark.internal.io.SparkHadoopWriter$.$anonfun$executeTask$2(SparkHadoopWriter.scala:150)
at 
org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1536)
... 10 more
 {noformat}

> Pig-on-Spark3 E2E ORC test failing with java.lang.VerifyError: Bad return type
> ------------------------------------------------------------------------------
>
>                 Key: PIG-5450
>                 URL: https://issues.apache.org/jira/browse/PIG-5450
>             Project: Pig
>          Issue Type: Bug
>          Components: spark
>            Reporter: Koji Noguchi
>            Assignee: Koji Noguchi
>            Priority: Major
>
> {noformat}
> Caused by: java.lang.VerifyError: Bad return type
> Exception Details:
> Location:
> org/apache/orc/impl/TypeUtils.createColumn(Lorg/apache/orc/TypeDescription;Lorg/apache/orc/TypeDescription$RowBatchVersion;I)Lorg/apache/hadoop/hive/ql/exec/vector/ColumnVector;
>  @117: areturn
> Reason:
> Type 'org/apache/hadoop/hive/ql/exec/vector/DateColumnVector' (current frame, 
> stack[0]) is not assignable to 
> 'org/apache/hadoop/hive/ql/exec/vector/ColumnVector' (from method signature)
>  {noformat}



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

Reply via email to