[ https://issues.apache.org/jira/browse/HIVE-23790?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Aasha Medhi reassigned HIVE-23790:
----------------------------------

> The error message length of 2000 is exceeded for scheduled query
> ----------------------------------------------------------------
>
>                 Key: HIVE-23790
>                 URL: https://issues.apache.org/jira/browse/HIVE-23790
>             Project: Hive
>          Issue Type: Task
>            Reporter: Aasha Medhi
>            Assignee: Aasha Medhi
>            Priority: Major
>
> {code:java}
> 2020-07-01 08:24:23,916 ERROR org.apache.thrift.server.TThreadPoolServer: [pool-7-thread-189]: Error occurred during processing of message.
> org.datanucleus.exceptions.NucleusUserException: Attempt to store value "FAILED: Execution Error, return code 30045 from org.apache.hadoop.hive.ql.exec.repl.DirCopyTask. Permission denied: user=hive, access=WRITE, inode="/":hdfs:supergroup:drwxr-xr-x
>     at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:496)
>     at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:336)
>     at org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer$RangerAccessControlEnforcer.checkDefaultEnforcer(RangerHdfsAuthorizer.java:626)
>     at org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer$RangerAccessControlEnforcer.checkRangerPermission(RangerHdfsAuthorizer.java:388)
>     at org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer$RangerAccessControlEnforcer.checkPermissionWithContext(RangerHdfsAuthorizer.java:229)
>     at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:239)
>     at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1908)
>     at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkPermission(FSDirectory.java:1892)
>     at org.apache.hadoop.hdfs.server.namenode.FSDirectory.checkAncestorAccess(FSDirectory.java:1851)
>     at org.apache.hadoop.hdfs.server.namenode.FSDirMkdirOp.mkdirs(FSDirMkdirOp.java:60)
>     at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:3226)
>     at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.mkdirs(NameNodeRpcServer.java:1130)
>     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.mkdirs(ClientNamenodeProtocolServerSideTranslatorPB.java:729)
>     at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>     at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:528)
>     at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1070)
>     at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:985)
>     at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:913)
>     at java.base/java.security.AccessController.doPrivileged(Native Method)
>     at java.base/javax.security.auth.Subject.doAs(Subject.java:423)
>     at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1876)
>     at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2882)
> " in column ""ERROR_MESSAGE"" that has maximum length of 2000. Please correct your data!
>     at org.datanucleus.store.rdbms.mapping.datastore.CharRDBMSMapping.setString(CharRDBMSMapping.java:254) ~[datanucleus-rdbms-4.1.19.jar:?]
>     at org.datanucleus.store.rdbms.mapping.java.SingleFieldMapping.setString(SingleFieldMapping.java:180) ~[datanucleus-rdbms-4.1.19.jar:?]
>     at org.datanucleus.store.rdbms.fieldmanager.ParameterSetter.storeStringField(ParameterSetter.java:158) ~[datanucleus-rdbms-4.1.19.jar:?]
>     at org.datanucleus.state.AbstractStateManager.providedStringField(AbstractStateManager.java:1448) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.state.StateManagerImpl.providedStringField(StateManagerImpl.java:120) ~[datanucleus-core-4.1.17.jar:?]
>     at org.apache.hadoop.hive.metastore.model.MScheduledExecution.dnProvideField(MScheduledExecution.java) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at org.apache.hadoop.hive.metastore.model.MScheduledExecution.dnProvideFields(MScheduledExecution.java) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at org.datanucleus.state.StateManagerImpl.provideFields(StateManagerImpl.java:1170) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.store.rdbms.request.UpdateRequest.execute(UpdateRequest.java:326) ~[datanucleus-rdbms-4.1.19.jar:?]
>     at org.datanucleus.store.rdbms.RDBMSPersistenceHandler.updateObjectInTable(RDBMSPersistenceHandler.java:409) ~[datanucleus-rdbms-4.1.19.jar:?]
>     at org.datanucleus.store.rdbms.RDBMSPersistenceHandler.updateObject(RDBMSPersistenceHandler.java:383) ~[datanucleus-rdbms-4.1.19.jar:?]
>     at org.datanucleus.state.StateManagerImpl.flush(StateManagerImpl.java:4585) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.flush.FlushOrdered.execute(FlushOrdered.java:106) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.ExecutionContextImpl.flushInternal(ExecutionContextImpl.java:4055) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.ExecutionContextThreadedImpl.flushInternal(ExecutionContextThreadedImpl.java:450) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.ExecutionContextImpl.markDirty(ExecutionContextImpl.java:3811) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.ExecutionContextThreadedImpl.markDirty(ExecutionContextThreadedImpl.java:422) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.state.StateManagerImpl.postWriteField(StateManagerImpl.java:4374) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.state.StateManagerImpl.replaceField(StateManagerImpl.java:3047) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.state.StateManagerImpl.updateField(StateManagerImpl.java:1865) ~[datanucleus-core-4.1.17.jar:?]
>     at org.datanucleus.state.StateManagerImpl.setObjectField(StateManagerImpl.java:1766) ~[datanucleus-core-4.1.17.jar:?]
>     at org.apache.hadoop.hive.metastore.model.MScheduledQuery.dnSetactiveExecution(MScheduledQuery.java) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at org.apache.hadoop.hive.metastore.model.MScheduledQuery.setActiveExecution(MScheduledQuery.java:117) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at org.apache.hadoop.hive.metastore.ObjectStore.scheduledQueryProgress(ObjectStore.java:13488) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at jdk.internal.reflect.GeneratedMethodAccessor200.invoke(Unknown Source) ~[?:?]
>     at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
>     at java.lang.reflect.Method.invoke(Method.java:566) ~[?:?]
>     at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:97) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at com.sun.proxy.$Proxy27.scheduledQueryProgress(Unknown Source) ~[?:?]
>     at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.scheduled_query_progress(HiveMetaStore.java:10022) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at jdk.internal.reflect.GeneratedMethodAccessor199.invoke(Unknown Source) ~[?:?]
>     at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
>     at java.lang.reflect.Method.invoke(Method.java:566) ~[?:?]
>     at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000.7.2.1.0-246]
>     at com.sun.proxy.$Proxy29.scheduled_query_progress(Unknown Source) ~[?:?]
>     at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$scheduled_query_progress.getResult(ThriftHiveMetastore.java:21821) ~[hive-exec-3.1.3000.7.2.1.0-246.jar:3.1.3000
> {code}
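>
> The overflow happens because the full DirCopyTask failure, stack trace included, is persisted as the scheduled query's error message, while the ERROR_MESSAGE column holds at most 2000 characters. One possible guard is to trim the message on the write path before it reaches the metastore. Below is a minimal sketch; the class and method names are illustrative placeholders, not from a committed patch, and only the 2000-character limit is taken from the exception above:
> {code:java}
> // Hypothetical helper, sketched for illustration only. The limit mirrors the
> // maximum length of the ERROR_MESSAGE column reported by DataNucleus; the
> // class and method names are placeholders, not part of any committed Hive patch.
> public final class ScheduledQueryErrorMessages {
>
>   private static final int MAX_ERROR_MESSAGE_LENGTH = 2000;
>   private static final String TRUNCATION_MARKER = "...";
>
>   private ScheduledQueryErrorMessages() {
>   }
>
>   /**
>    * Trims an error message so it fits the ERROR_MESSAGE column, keeping the
>    * leading part (the actual error) and dropping the tail (usually the
>    * appended stack trace).
>    */
>   public static String abbreviate(String message) {
>     if (message == null || message.length() <= MAX_ERROR_MESSAGE_LENGTH) {
>       return message;
>     }
>     return message.substring(0, MAX_ERROR_MESSAGE_LENGTH - TRUNCATION_MARKER.length())
>         + TRUNCATION_MARKER;
>   }
> }
> {code}
> Trimming before the update keeps the persisted message within the limit for every backing database; the alternative of widening the column would require a schema upgrade script for each supported metastore database.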