[ https://issues.apache.org/jira/browse/IMPALA-13277?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17906891#comment-17906891 ]

Riza Suminto commented on IMPALA-13277:
---------------------------------------

I recently hit a similar HMS error and filed IMPALA-13620 for it.

Attached is [^hive-metastore.log], which contains such an InvalidOperationException. More specifically:
{noformat}
2024-12-18T15:37:16,438  WARN [TThreadPoolServer WorkerProcess-29] metastore.ObjectStore: Failed to get database hive.functional_kudu, returning NoSuchObjectException
org.apache.hadoop.hive.metastore.api.MetaException: objectstore - Connection is not available, request timed out after 30000ms.
        at org.apache.hadoop.hive.metastore.ObjectStore$GetHelper.run(ObjectStore.java:4310) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore.getDatabaseInternal(ObjectStore.java:903) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore.getDatabase(ObjectStore.java:875) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at sun.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_382]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_382]
        at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:97) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.sun.proxy.$Proxy33.getDatabase(Unknown Source) ~[?:?]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.get_database_core(HiveMetaStore.java:1600) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.getTableInternal(HiveMetaStore.java:3901) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.get_table_req(HiveMetaStore.java:3873) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at sun.reflect.GeneratedMethodAccessor6.invoke(Unknown Source) ~[?:?]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_382]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_382]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:160) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:121) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.sun.proxy.$Proxy34.get_table_req(Unknown Source) ~[?:?]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$get_table_req.getResult(ThriftHiveMetastore.java:20303) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$get_table_req.getResult(ThriftHiveMetastore.java:20282) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:38) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:111) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:107) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0_382]
        at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0_382]
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1910) ~[hadoop-common-3.1.1.7.3.1.0-160.jar:?]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:119) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:250) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0_382]
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0_382]
        at java.lang.Thread.run(Thread.java:750) ~[?:1.8.0_382]
2024-12-18T15:37:16,438 ERROR [TThreadPoolServer WorkerProcess-36] metastore.ObjectStore:
javax.jdo.JDODataStoreException: objectstore - Connection is not available, request timed out after 30000ms.
        at org.datanucleus.api.jdo.NucleusJDOHelper.getJDOExceptionForNucleusException(NucleusJDOHelper.java:542) ~[datanucleus-api-jdo-5.2.8.jar:?]
        at org.datanucleus.api.jdo.JDOQuery.executeInternal(JDOQuery.java:456) ~[datanucleus-api-jdo-5.2.8.jar:?]
        at org.datanucleus.api.jdo.JDOQuery.execute(JDOQuery.java:290) ~[datanucleus-api-jdo-5.2.8.jar:?]
        at org.apache.hadoop.hive.metastore.ObjectStore.getMDatabase(ObjectStore.java:858) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore.getJDODatabase(ObjectStore.java:911) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore$1.getJdoResult(ObjectStore.java:901) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore$1.getJdoResult(ObjectStore.java:893) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore$GetHelper.run(ObjectStore.java:4302) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore.getDatabaseInternal(ObjectStore.java:903) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore.getDatabase(ObjectStore.java:875) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at sun.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_382]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_382]
        at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:97) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.sun.proxy.$Proxy33.getDatabase(Unknown Source) ~[?:?]
        at org.apache.hadoop.hive.metastore.HiveAlterHandler.alterTable(HiveAlterHandler.java:174) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.alter_table_core(HiveMetaStore.java:6395) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.alter_table_req(HiveMetaStore.java:6330) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source) ~[?:?]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_382]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_382]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:160) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:121) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.sun.proxy.$Proxy34.alter_table_req(Unknown Source) ~[?:?]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$alter_table_req.getResult(ThriftHiveMetastore.java:20559) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$alter_table_req.getResult(ThriftHiveMetastore.java:20538) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:38) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:111) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:107) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0_382]
        at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0_382]
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1910) ~[hadoop-common-3.1.1.7.3.1.0-160.jar:?]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:119) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:250) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0_382]
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0_382]
        at java.lang.Thread.run(Thread.java:750) ~[?:1.8.0_382]
Caused by: java.sql.SQLTransientConnectionException: objectstore - Connection is not available, request timed out after 30000ms.
        at com.zaxxer.hikari.pool.HikariPool.createTimeoutException(HikariPool.java:602) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.zaxxer.hikari.pool.HikariPool.getConnection(HikariPool.java:195) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.zaxxer.hikari.pool.HikariPool.getConnection(HikariPool.java:145) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.zaxxer.hikari.HikariDataSource.getConnection(HikariDataSource.java:85) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.datanucleus.store.rdbms.ConnectionFactoryImpl$ManagedConnectionImpl.getConnection(ConnectionFactoryImpl.java:413) ~[datanucleus-rdbms-5.2.10.jar:?]
        at org.datanucleus.store.rdbms.ConnectionFactoryImpl$ManagedConnectionImpl.getXAResource(ConnectionFactoryImpl.java:379) ~[datanucleus-rdbms-5.2.10.jar:?]
        at org.datanucleus.store.connection.ConnectionManagerImpl.allocateManagedConnection(ConnectionManagerImpl.java:422) ~[datanucleus-core-5.2.10.jar:?]
        at org.datanucleus.store.connection.ConnectionManagerImpl.getConnection(ConnectionManagerImpl.java:213) ~[datanucleus-core-5.2.10.jar:?]
        at org.datanucleus.store.connection.ConnectionManager.getConnection(ConnectionManager.java:62) ~[datanucleus-core-5.2.10.jar:?]
        at org.datanucleus.store.rdbms.query.JDOQLQuery.performExecute(JDOQLQuery.java:611) ~[datanucleus-rdbms-5.2.10.jar:?]
        at org.datanucleus.store.query.Query.executeQuery(Query.java:1975) ~[datanucleus-core-5.2.10.jar:?]
        at org.datanucleus.store.query.Query.executeWithArray(Query.java:1864) ~[datanucleus-core-5.2.10.jar:?]
        at org.datanucleus.api.jdo.JDOQuery.executeInternal(JDOQuery.java:433) ~[datanucleus-api-jdo-5.2.8.jar:?]
        ... 35 more
2024-12-18T15:37:16,454  INFO [TThreadPoolServer WorkerProcess-30] metastore.HiveMetaStore: 33: source:127.0.0.1 alter_table: hive.functional_kudu.binary_tbl_big newtbl=binary_tbl_big
2024-12-18T15:37:16,454  INFO [TThreadPoolServer WorkerProcess-30] HiveMetaStore.audit: ugi=rsuminto    ip=127.0.0.1    cmd=source:127.0.0.1 alter_table: hive.functional_kudu.binary_tbl_big newtbl=binary_tbl_big
2024-12-18T15:37:16,461  INFO [TThreadPoolServer WorkerProcess-43] metastore.ObjectStore: Updating table level column statistics for table=hive.functional_kudu.timestamp_primary_key colName=id
2024-12-18T15:37:16,461  INFO [TThreadPoolServer WorkerProcess-43] metastore.ObjectStore: Updating table level column statistics for table=hive.functional_kudu.timestamp_primary_key colName=t
2024-12-18T15:37:16,461  INFO [TThreadPoolServer WorkerProcess-48] metastore.HiveMetaStore: 54: source:127.0.0.1 alter_table: hive.functional_kudu.nullrows newtbl=nullrows
2024-12-18T15:37:16,453  WARN [TThreadPoolServer WorkerProcess-36] metastore.ObjectStore: Failed to get database hive.functional_kudu, returning NoSuchObjectException
org.apache.hadoop.hive.metastore.api.MetaException: objectstore - Connection is not available, request timed out after 30000ms.
        at org.apache.hadoop.hive.metastore.ObjectStore$GetHelper.run(ObjectStore.java:4310) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore.getDatabaseInternal(ObjectStore.java:903) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.ObjectStore.getDatabase(ObjectStore.java:875) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at sun.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_382]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_382]
        at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:97) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.sun.proxy.$Proxy33.getDatabase(Unknown Source) ~[?:?]
        at org.apache.hadoop.hive.metastore.HiveAlterHandler.alterTable(HiveAlterHandler.java:174) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.alter_table_core(HiveMetaStore.java:6395) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.alter_table_req(HiveMetaStore.java:6330) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source) ~[?:?]
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:1.8.0_382]
        at java.lang.reflect.Method.invoke(Method.java:498) ~[?:1.8.0_382]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:160) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:121) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at com.sun.proxy.$Proxy34.alter_table_req(Unknown Source) ~[?:?]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$alter_table_req.getResult(ThriftHiveMetastore.java:20559) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$alter_table_req.getResult(ThriftHiveMetastore.java:20538) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:38) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:111) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:107) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at java.security.AccessController.doPrivileged(Native Method) ~[?:1.8.0_382]
        at javax.security.auth.Subject.doAs(Subject.java:422) ~[?:1.8.0_382]
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1910) ~[hadoop-common-3.1.1.7.3.1.0-160.jar:?]
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:119) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:250) ~[hive-exec-3.1.3000.7.3.1.0-160.jar:3.1.3000.7.3.1.0-160]
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) ~[?:1.8.0_382]
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) ~[?:1.8.0_382]
        at java.lang.Thread.run(Thread.java:750) ~[?:1.8.0_382]
2024-12-18T15:37:16,462  INFO [TThreadPoolServer WorkerProcess-48] HiveMetaStore.audit: ugi=rsuminto    ip=127.0.0.1    cmd=source:127.0.0.1 alter_table: hive.functional_kudu.nullrows newtbl=nullrows
2024-12-18T15:37:16,462  INFO [TThreadPoolServer WorkerProcess-43] metastore.ObjectStore: Updating table level column statistics for table=hive.functional_kudu.timestamp_primary_key colName=tkey
2024-12-18T15:37:16,463  INFO [TThreadPoolServer WorkerProcess-48] metastore.HiveMetaStore: 54: Opening raw store with implementation class:org.apache.hadoop.hive.metastore.ObjectStore
2024-12-18T15:37:16,463  INFO [TThreadPoolServer WorkerProcess-48] metastore.ObjectStore: RawStore: org.apache.hadoop.hive.metastore.ObjectStore@32546df2, with PersistenceManager: null will be shutdown
2024-12-18T15:37:16,463  INFO [TThreadPoolServer WorkerProcess-48] metastore.ObjectStore: RawStore: org.apache.hadoop.hive.metastore.ObjectStore@32546df2, with PersistenceManager: org.datanucleus.api.jdo.JDOPersistenceManager@3b439342 created in the thread with id: 110
2024-12-18T15:37:16,464  INFO [TThreadPoolServer WorkerProcess-41] metastore.ObjectStore: Updating table level column statistics for table=hive.functional_kudu.nullescapedtable colName=a
2024-12-18T15:37:16,464  INFO [TThreadPoolServer WorkerProcess-41] metastore.ObjectStore: Updating table level column statistics for table=hive.functional_kudu.nullescapedtable colName=b
2024-12-18T15:37:16,466  INFO [TThreadPoolServer WorkerProcess-41] metastore.ObjectStore: Updating table level column statistics for table=hive.functional_kudu.nullescapedtable colName=c
2024-12-18T15:37:16,466 ERROR [TThreadPoolServer WorkerProcess-36] metastore.HiveAlterHandler: Failed to alter table hive.functional_kudu.binary_tbl
2024-12-18T15:37:16,466  INFO [TThreadPoolServer WorkerProcess-49] metastore.HiveMetaStore: 55: source:127.0.0.1 alter_table: hive.functional_kudu.alltypesaggnonulls newtbl=alltypesaggnonulls
2024-12-18T15:37:16,466  INFO [TThreadPoolServer WorkerProcess-41] metastore.ObjectStore: Updating table level column statistics for table=hive.functional_kudu.nullescapedtable colName=d
2024-12-18T15:37:16,466  INFO [TThreadPoolServer WorkerProcess-49] HiveMetaStore.audit: ugi=rsuminto    ip=127.0.0.1    cmd=source:127.0.0.1 alter_table: hive.functional_kudu.alltypesaggnonulls newtbl=alltypesaggnonulls
2024-12-18T15:37:16,466 ERROR [TThreadPoolServer WorkerProcess-36] metastore.RetryingHMSHandler: InvalidOperationException(message:Unable to change partition or table. Database functional_kudu does not exist Check metastore logs for detailed stack.functional_kudu: objectstore - Connection is not available, request timed out after 30000ms.)
        at org.apache.hadoop.hive.metastore.HiveAlterHandler.alterTable(HiveAlterHandler.java:480)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.alter_table_core(HiveMetaStore.java:6395)
        at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.alter_table_req(HiveMetaStore.java:6330)
        at sun.reflect.GeneratedMethodAccessor37.invoke(Unknown Source)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:160)
        at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:121)
        at com.sun.proxy.$Proxy34.alter_table_req(Unknown Source)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$alter_table_req.getResult(ThriftHiveMetastore.java:20559)
        at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$alter_table_req.getResult(ThriftHiveMetastore.java:20538)
        at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:38)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:111)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor$1.run(TUGIBasedProcessor.java:107)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1910)
        at org.apache.hadoop.hive.metastore.TUGIBasedProcessor.process(TUGIBasedProcessor.java:119)
        at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:250)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:750){noformat}
I think it is misleading to propagate
{noformat}
javax.jdo.JDODataStoreException: objectstore - Connection is not available, request timed out after 30000ms.
{noformat}
back to the client (Impala CatalogD) as an InvalidOperationException.
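For context, the translation chain visible in the log above is: HikariCP cannot hand out a JDBC connection within 30s (30000ms is HikariCP's default connectionTimeout), DataNucleus surfaces that as a JDODataStoreException/MetaException, ObjectStore.getDatabase() flattens it into NoSuchObjectException because its signature allows nothing else (hence the "Failed to get database ..., returning NoSuchObjectException" WARN), and HiveAlterHandler.alterTable() finally wraps it as the InvalidOperationException the client sees. Here is a minimal, self-contained sketch of that chain; the class and method names mirror the stack frames above, but the bodies are illustrative paraphrases, not the actual Hive source:
{code:java}
// Sketch only: stand-in exception types; the real ones live in
// org.apache.hadoop.hive.metastore.api and javax.jdo.
public class MisleadingErrorChain {

  static class MetaException extends Exception {
    MetaException(String msg) { super(msg); }
  }
  static class NoSuchObjectException extends Exception {
    NoSuchObjectException(String msg) { super(msg); }
  }
  static class InvalidOperationException extends Exception {
    InvalidOperationException(String msg) { super(msg); }
  }

  // Stand-in for ObjectStore.getDatabaseInternal(): the JDO query fails because
  // the HikariCP pool cannot provide a connection within connectionTimeout.
  static Object getDatabaseInternal(String name) throws MetaException {
    throw new MetaException(
        "objectstore - Connection is not available, request timed out after 30000ms.");
  }

  // ObjectStore.getDatabase() may only throw NoSuchObjectException, so the
  // transient MetaException is flattened into "no such database".
  static Object getDatabase(String name) throws NoSuchObjectException {
    try {
      return getDatabaseInternal(name);
    } catch (MetaException e) {
      throw new NoSuchObjectException(name + ": " + e.getMessage());
    }
  }

  // HiveAlterHandler.alterTable() wraps that again as InvalidOperationException,
  // which is what the Thrift client (Impala CatalogD) ultimately receives.
  static void alterTable(String dbname) throws InvalidOperationException {
    try {
      getDatabase(dbname);
    } catch (NoSuchObjectException e) {
      throw new InvalidOperationException(
          "Unable to change partition or table. Database " + dbname
          + " does not exist Check metastore logs for detailed stack." + e.getMessage());
    }
  }

  public static void main(String[] args) {
    try {
      alterTable("functional_kudu");
    } catch (InvalidOperationException e) {
      // A transient pool timeout now reads like a permanent "database missing" error.
      System.out.println(e.getMessage());
    }
  }
}
{code}
By the time the error reaches CatalogD, nothing in the exception type or message marks the failure as a retryable connection-pool timeout rather than a genuinely missing database, so a transient condition is reported as a permanent one.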

> test_create_cache_many_tables fails with 'InvalidOperationException: Alter 
> table in REMOTE database is not allowed'
> -------------------------------------------------------------------------------------------------------------------
>
>                 Key: IMPALA-13277
>                 URL: https://issues.apache.org/jira/browse/IMPALA-13277
>             Project: IMPALA
>          Issue Type: Bug
>            Reporter: Andrew Sherman
>            Priority: Critical
>         Attachments: hive-metastore.log
>
>
> This is weird as the client-side errors do not show up in Impala logs.
> h3. Error Message
> {code}
> /data/jenkins/workspace/impala-cdw-master-staging-exhaustive-redhat8-shard2/repos/Impala/tests/stress/test_ddl_stress.py:83: in test_create_cache_many_tables
>     self.client.execute("alter table %s set cached in 'testPool'" % tbl_name)
> common/impala_connection.py:220: in execute
>     fetch_profile_after_close=fetch_profile_after_close)
> beeswax/impala_beeswax.py:191: in execute
>     handle = self.__execute_query(query_string.strip(), user=user)
> beeswax/impala_beeswax.py:384: in __execute_query
>     self.wait_for_finished(handle)
> beeswax/impala_beeswax.py:405: in wait_for_finished
>     raise ImpalaBeeswaxException("Query aborted:" + error_log, None)
> E ImpalaBeeswaxException: ImpalaBeeswaxException:
> E Query aborted:ImpalaRuntimeException: Error making 'alter_table' RPC to Hive Metastore:
> E CAUSED BY: InvalidOperationException: Alter table in REMOTE database is not allowed
> {code}
> h3. Stacktrace
> {code}
> /data/jenkins/workspace/impala-cdw-master-staging-exhaustive-redhat8-shard2/repos/Impala/tests/stress/test_ddl_stress.py:83: in test_create_cache_many_tables
>     self.client.execute("alter table %s set cached in 'testPool'" % tbl_name)
> common/impala_connection.py:220: in execute
>     fetch_profile_after_close=fetch_profile_after_close)
> beeswax/impala_beeswax.py:191: in execute
>     handle = self.__execute_query(query_string.strip(), user=user)
> beeswax/impala_beeswax.py:384: in __execute_query
>     self.wait_for_finished(handle)
> beeswax/impala_beeswax.py:405: in wait_for_finished
>     raise ImpalaBeeswaxException("Query aborted:" + error_log, None)
> E ImpalaBeeswaxException: ImpalaBeeswaxException:
> E Query aborted:ImpalaRuntimeException: Error making 'alter_table' RPC to Hive Metastore:
> E CAUSED BY: InvalidOperationException: Alter table in REMOTE database is not allowed
> {code}
> h3. Standard Error
> {code}
> SET client_identifier=stress/test_ddl_stress.py::TestDdlStress::()::test_create_cache_many_tables[protocol:beeswax|exec_option:{'test_replan':1;'batch_size':0;'num_nodes':0;'disable_codegen_rows_threshold':0;'disable_codegen':False;'abort_on_error':1;'exec_single_node_rows_thr;
> -- connecting to: localhost:21000
> -- 2024-08-04 18:43:00,007 INFO MainThread: Could not connect to ('::1', 21000, 0, 0)
> Traceback (most recent call last):
>   File "/data/jenkins/workspace/impala-cdw-master-staging-exhaustive-redhat8-shard2/repos/Impala/infra/python/env-gcc10.4.0/lib/python2.7/site-packages/thrift/transport/TSocket.py", line 137, in open
>     handle.connect(sockaddr)
>   File "/data/jenkins/workspace/impala-cdw-master-staging-exhaustive-redhat8-shard2/Impala-Toolchain/toolchain-packages-gcc10.4.0/python-2.7.16/lib/python2.7/socket.py", line 228, in meth
>     return getattr(self._sock,name)(*args)
> error: [Errno 111] Connection refused
> -- connecting to localhost:21050 with impyla
> -- 2024-08-04 18:43:00,008 INFO MainThread: Could not connect to ('::1', 21050, 0, 0)
> Traceback (most recent call last):
>   File "/data/jenkins/workspace/impala-cdw-master-staging-exhaustive-redhat8-shard2/repos/Impala/infra/python/env-gcc10.4.0/lib/python2.7/site-packages/thrift/transport/TSocket.py", line 137, in open
>     handle.connect(sockaddr)
>   File "/data/jenkins/workspace/impala-cdw-master-staging-exhaustive-redhat8-shard2/Impala-Toolchain/toolchain-packages-gcc10.4.0/python-2.7.16/lib/python2.7/socket.py", line 228, in meth
>     return getattr(self._sock,name)(*args)
> error: [Errno 111] Connection refused
> -- 2024-08-04 18:43:00,063 INFO MainThread: Closing active operation
> -- connecting to localhost:28000 with impyla
> -- 2024-08-04 18:43:00,124 INFO MainThread: Closing active operation
> -- connecting to localhost:11050 with impyla
> SET client_identifier=stress/test_ddl_stress.py::TestDdlStress::()::test_create_cache_many_tables[protocol:beeswax|exec_option:{'test_replan':1;'batch_size':0;'num_nodes':0;'disable_codegen_rows_threshold':0;'disable_codegen':False;'abort_on_error':1;'exec_single_node_rows_thr;
> SET test_replan=1;
> SET batch_size=0;
> SET num_nodes=0;
> SET disable_codegen_rows_threshold=0;
> SET disable_codegen=False;
> SET abort_on_error=1;
> SET exec_single_node_rows_threshold=0;
> -- executing against localhost:21000
> create database if not exists test_ddl_stress_db;
> -- 2024-08-04 18:43:00,211 INFO MainThread: Started query f14af7ee53c2341d:70d9cb0800000000
> -- executing against localhost:21000
> drop table if exists test_ddl_stress_db.test_256da409_0;
> -- 2024-08-04 18:43:00,264 INFO MainThread: Started query a844dd28ea288ff0:f8b2fbb700000000
> -- executing against localhost:21000
> drop table if exists test_ddl_stress_db.test_256da409_0_part;
> -- 2024-08-04 18:43:00,317 INFO MainThread: Started query 7f481d60aa5b4765:233bcdb900000000
> -- executing against localhost:21000
> create table test_ddl_stress_db.test_256da409_0 (i int);
> -- 2024-08-04 18:43:00,369 INFO MainThread: Started query 8645b931286dd1fa:5d30c54f00000000
> -- executing against localhost:21000
> create table test_ddl_stress_db.test_256da409_0_part (i int) partitioned by (j int);
> -- 2024-08-04 18:43:00,472 INFO MainThread: Started query 71499a290e05d245:674852ae00000000
> -- executing against localhost:21000
> insert overwrite table test_ddl_stress_db.test_256da409_0 select int_col from functional.alltypestiny;
> -- 2024-08-04 18:43:04,659 INFO MainThread: Started query 9446c240b6a02509:f5e640ab00000000
> -- executing against localhost:21000
> insert overwrite table test_ddl_stress_db.test_256da409_0_part partition(j) values (1, 1), (2, 2), (3, 3), (4, 4), (4, 4);
> -- 2024-08-04 18:43:10,674 INFO MainThread: Started query 4b409c71a4da4d7e:b2aa597b00000000
> -- executing against localhost:21000
> alter table test_ddl_stress_db.test_256da409_0 set cached in 'testPool';
> -- 2024-08-04 18:43:11,689 INFO MainThread: Started query 464e2738f8d0ced3:06ef54ba00000000
> {code}


