[ 
https://issues.apache.org/jira/browse/HIVE-26759?focusedWorklogId=827077&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-827077
 ]

ASF GitHub Bot logged work on HIVE-26759:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 18/Nov/22 08:37
            Start Date: 18/Nov/22 08:37
    Worklog Time Spent: 10m 
      Work Description: deniskuzZ commented on code in PR #3782:
URL: https://github.com/apache/hive/pull/3782#discussion_r1026154231


##########
standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnQueries.java:
##########
@@ -18,36 +18,34 @@
 package org.apache.hadoop.hive.metastore.txn;
 
 public class TxnQueries {
-  public static final String SHOW_COMPACTION_ORDERBY_CLAUSE = 
-    " ORDER BY CASE " +
-    "   WHEN \"CC_END\" > \"CC_START\" and \"CC_END\" > \"CC_COMMIT_TIME\" " +
-    "     THEN \"CC_END\" " +
-    "   WHEN \"CC_START\" > \"CC_COMMIT_TIME\" " +
-    "     THEN \"CC_START\" " +
-    "   ELSE \"CC_COMMIT_TIME\" " +
+  public static final String SHOW_COMPACTION_ORDERBY_CLAUSE = "  ORDER BY CASE " +
+    " WHEN \"CC_END\" > \"CC_START\" and \"CC_END\" > \"CC_COMMIT_TIME\" " +
+    " THEN \"CC_END\" " +
+    " WHEN \"CC_START\" > \"CC_COMMIT_TIME\" " +
+    " THEN \"CC_START\" " +
+    " ELSE \"CC_COMMIT_TIME\" " +
     " END desc ," +
     " \"CC_ENQUEUE_TIME\" asc";
 
-  public static final String SHOW_COMPACTION_QUERY = 
-    "SELECT XX.* FROM ( SELECT " +
-    "  \"CQ_DATABASE\" AS CC_DATABASE, \"CQ_TABLE\" AS CC_TABLE, \"CQ_PARTITION\" AS CC_PARTITION, " +
-    "  \"CQ_STATE\" AS CC_STATE, \"CQ_TYPE\" AS CC_TYPE, \"CQ_WORKER_ID\" AS CC_WORKER_ID, " +
-    "  \"CQ_START\" AS CC_START, -1 \"CC_END\", \"CQ_RUN_AS\" AS CC_RUN_AS, " +
-    "  \"CQ_HADOOP_JOB_ID\" AS CC_HADOOP_JOB_ID, \"CQ_ID\" AS CC_ID, \"CQ_ERROR_MESSAGE\" AS CC_ERROR_MESSAGE, " +
-    "  \"CQ_ENQUEUE_TIME\" AS CC_ENQUEUE_TIME, \"CQ_WORKER_VERSION\" AS CC_WORKER_VERSION, " +
-    "  \"CQ_INITIATOR_ID\" AS CC_INITIATOR_ID, \"CQ_INITIATOR_VERSION\" AS CC_INITIATOR_VERSION, " +
-    "  \"CQ_CLEANER_START\" AS CC_CLEANER_START, \"CQ_POOL_NAME\" AS CC_POOL_NAME, \"CQ_TXN_ID\" AS CC_TXN_ID, " +
-    "  \"CQ_NEXT_TXN_ID\" AS CC_NEXT_TXN_ID, \"CQ_COMMIT_TIME\" AS CC_COMMIT_TIME, " +
-    "  \"CQ_HIGHEST_WRITE_ID\" AS CC_HIGHEST_WRITE_ID " +
+  public static final String SHOW_COMPACTION_QUERY = "SELECT XX.* FROM ( SELECT " +
+    " \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\", " +
+    " \"CQ_STATE\" AS \"CC_STATE\", \"CQ_TYPE\" AS \"CC_TYPE\", \"CQ_WORKER_ID\" AS \"CC_WORKER_ID\", " +
+    " \"CQ_START\" AS \"CC_START\", -1 AS \"CC_END\", \"CQ_RUN_AS\" AS \"CC_RUN_AS\", " +
+    " \"CQ_HADOOP_JOB_ID\" AS \"CC_HADOOP_JOB_ID\", \"CQ_ID\" AS \"CC_ID\", \"CQ_ERROR_MESSAGE\" AS \"CC_ERROR_MESSAGE\", " +
+    " \"CQ_ENQUEUE_TIME\" AS \"CC_ENQUEUE_TIME\", \"CQ_WORKER_VERSION\" AS \"CC_WORKER_VERSION\", " +
+    " \"CQ_INITIATOR_ID\" AS \"CC_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\" AS \"CC_INITIATOR_VERSION\", " +
+    " \"CQ_CLEANER_START\" AS \"CC_CLEANER_START\", \"CQ_POOL_NAME\" AS \"CC_POOL_NAME\", \"CQ_TXN_ID\" AS \"CC_TXN_ID\", " +
+    " \"CQ_NEXT_TXN_ID\" AS \"CC_NEXT_TXN_ID\", \"CQ_COMMIT_TIME\" AS \"CC_COMMIT_TIME\", " +
+    " \"CQ_HIGHEST_WRITE_ID\" AS \"CC_HIGHEST_WRITE_ID\" " +
     "FROM " +
-    "  \"COMPACTION_QUEUE\" " +
+    " \"COMPACTION_QUEUE\" " +
     "UNION ALL " +
     "SELECT " +
-    "  \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", \"CC_WORKER_ID\", " +
-    "  \"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\", \"CC_ERROR_MESSAGE\", " +
-    "  \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", " +
-    "   -1 , \"CC_POOL_NAME\", \"CC_TXN_ID\", \"CC_NEXT_TXN_ID\", \"CC_COMMIT_TIME\", " +
-    "  \"CC_HIGHEST_WRITE_ID\"" +
+    " \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", \"CC_WORKER_ID\", " +
+    " \"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\", \"CC_ERROR_MESSAGE\", " +
+    " \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", " +
+    "  -1 AS \"CC_NEXT_TXN_ID\", \"CC_POOL_NAME\", \"CC_TXN_ID\", \"CC_NEXT_TXN_ID\", \"CC_COMMIT_TIME\", " +

Review Comment:
   There are 2 "CC_NEXT_TXN_ID" declarations in this SELECT list: the -1 placeholder is aliased as "CC_NEXT_TXN_ID", and the real "CC_NEXT_TXN_ID" column is selected two positions later.
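
   A minimal sketch of the collision (the table name and intended alias below are my reading, not stated in the patch: the CC_* branch presumably selects from COMPLETED_COMPACTIONS, and the -1 placeholder lines up positionally with "CC_CLEANER_START" in the first branch):

       -- As written, the second UNION ALL branch declares "CC_NEXT_TXN_ID" twice:
       SELECT -1 AS "CC_NEXT_TXN_ID", "CC_POOL_NAME", "CC_TXN_ID",
              "CC_NEXT_TXN_ID", "CC_COMMIT_TIME", "CC_HIGHEST_WRITE_ID"
       FROM "COMPLETED_COMPACTIONS";

       -- Presumed intent: the -1 stands in for the cleaner-start column that
       -- this table lacks, keeping the branch aligned with the
       -- COMPACTION_QUEUE branch, where this position is "CC_CLEANER_START":
       SELECT -1 AS "CC_CLEANER_START", "CC_POOL_NAME", "CC_TXN_ID",
              "CC_NEXT_TXN_ID", "CC_COMMIT_TIME", "CC_HIGHEST_WRITE_ID"
       FROM "COMPLETED_COMPACTIONS";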





Issue Time Tracking
-------------------

    Worklog Id:     (was: 827077)
    Time Spent: 20m  (was: 10m)

> ERROR: column "CC_START" does not exist, when Postgres is used as Hive 
> metastore
> --------------------------------------------------------------------------------
>
>                 Key: HIVE-26759
>                 URL: https://issues.apache.org/jira/browse/HIVE-26759
>             Project: Hive
>          Issue Type: Bug
>          Components: Metastore
>    Affects Versions: 4.0.0-alpha-2
>            Reporter: Akshat Mathur
>            Assignee: Akshat Mathur
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 20m
>  Remaining Estimate: 0h
>
> This error occurs when Postgres is used as the Hive Metastore database.
> hive-site.xml:
>  
> {code:java}
> <?xml version="1.0"?>
> <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
> <configuration>
>     <!-- UPSTREAM -->
>     <property>
>         <name>hive.server2.logging.operation.level</name>
>         <value>NONE</value>
>     </property>
>     <property>
>         <name>hive.log4j.file</name>
>         <value>hive-log4j.properties</value>
>     </property>
>     <property>
>         <name>metastore.log4j.file</name>
>         <value>metastore-log4j.properties</value>
>     </property>
>     <!-- Intellij -->
>     <property>
>         <name>hive.jar.path</name>
>         <value>/Users/am/Desktop/work/upstream/hive/ql/target/hive-exec-4.0.0-SNAPSHOT.jar</value>
>         <description>The location of hive_cli.jar that is used when submitting jobs in a separate jvm.</description>
>     </property>
>     <property>
>         <name>hive.hadoop.classpath</name>
>         <value>/Users/am/Desktop/work/upstream/hive/ql/target/hive-exec-4.0.0-SNAPSHOT.jar</value>
>     </property>
>     <property>
>         <name>hive.metastore.local</name>
>         <value>false</value>
>     </property>
>     <property>
>         <name>hive.metastore.uris</name>
>         <value>thrift://localhost:9083</value>
>     </property>
>     <property>
>         <name>hive.metastore.warehouse.dir</name>
>         <value>/Users/am/Desktop/work/hivestuff/warehouse</value>
>     </property>
>     <property>
>         <name>hive.server2.metrics.enabled</name>
>         <value>true</value>
>     </property>
>  
>     <property>
>         <name>spark.eventLog.enabled</name>
>         <value>true</value>
>     </property>
>     <property>
>         <name>spark.eventLog.dir</name>
>         <value>/tmp/hive</value>
>     </property>
>     <!-- Intellij -->
>     
>     <property>
>         <name>metastore.metastore.event.db.notification.api.auth</name>
>         <value>false</value>
>     </property>
>     <property>
>         <name>hive.metastore.schema.verification</name>
>         <value>false</value>
>     </property>
>     <property>
>         <name>datanucleus.autoCreateTables</name>
>         <value>true</value>
>     </property>
>   
>     <property>
>         <name>hive.exec.scratchdir</name>
>         <value>/tmp/hive-${user.name}</value>
>     </property>
>         <property>
>             <name>javax.jdo.option.ConnectionURL</name>
>             <value>jdbc:postgresql://localhost:5432/hive_metastore</value>
>             <description>JDBC connect string for a JDBC metastore</description>
>         </property>
>         <property>
>             <name>javax.jdo.option.ConnectionDriverName</name>
>             <value>org.postgresql.Driver</value>
>         </property>
>         <property>
>             <name>javax.jdo.option.ConnectionUserName</name>
>             <value>hive</value>
>         </property>
>         <property>
>             <name>javax.jdo.option.ConnectionPassword</name>
>             <value>hive</value>
>         </property>
>     <property>
>         <name>datanucleus.schema.autoCreateAll</name>
>         <value>true</value>
>     </property>
>     <property>
>         <name>hive.server2.enable.doAs</name>
>         <value>false</value>
>         <description></description>
>     </property>
>     <property>
>         <name>hive.server2.enable.impersonation</name>
>         <value>false</value>
>         <description></description>
>     </property>
>     <property>
>         <name>dfs.namenode.acls.enabled</name>
>         <value>false</value>
>     </property>
>     <!-- FAIR SCHEDULER -->
>     <!-- These following lines are needed to use ACID features -->
>     <!-- BEGIN -->
>     <!--
>     <property>
>       <name>hive.enforce.bucketing</name>
>       <value>true</value>
>     </property>
>     <property>
>       <name>hive.support.concurrency</name>
>       <value>true</value>
>     </property>
>     <property>
>       <name>hive.exec.dynamic.partition.mode</name>
>       <value>nonstrict</value>
>     </property>
>     <property>
>       <name>hive.txn.manager</name>
>       <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
>     </property>
>     <property>
>       <name>hive.lock.manager</name>
>       <value>org.apache.hadoop.hive.ql.lockmgr.DbLockManager</value>
>     </property>
>     <property>
>       <name>hive.compactor.initiator.on</name>
>       <value>true</value>
>     </property>
>     <property>
>       <name>hive.compactor.worker.threads</name>
>       <value>2</value>
>     </property>
>     -->
>     <!-- END -->
>     <property>
>         <name>hive.server2.webui.explain.output</name>
>         <value>true</value>
>     </property>
>     <property>
>         <name>hive.server2.webui.show.graph</name>
>         <value>true</value>
>     </property>
>     <property>
>         <name>hive.server2.webui.show.stats</name>
>         <value>true</value>
>     </property>
>     <property>
>         <name>hive.server2.webui.max.graph.size</name>
>         <value>40</value>
>     </property>
>     <!-- ACID -->
>     <property>
>         <name>hive.txn.manager</name>
>         <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
>     </property>
>     <property>
>         <name>hive.compactor.initiator.on</name>
>         <value>true</value>
>     </property>
>     <property>
>         <name>hive.compactor.worker.threads</name>
>         <value>3</value>
>     </property>
>     <property>
>         <name>metastore.compactor.worker.threads</name>
>         <value>4</value>
>     </property>
>     <property>
>         <name>hive.support.concurrency</name>
>         <value>true</value>
>     </property>
>     <property>
>         <name>hive.exec.dynamic.partition.mode</name>
>         <value>nonstrict</value>
>     </property>
>     <property>
>         <name>hive.lock.manager</name>
>         <value>org.apache.hadoop.hive.ql.lockmgr.DbLockManager</value>
>     </property>
>     <property>
>         <name>hive.compactor.crud.query.based</name>
>         <value>true</value>
>     </property>
>     <property>
>         <name>hive.metastore.runworker.in</name>
>         <value>hs2</value>
>     </property>
>     <!-- Random -->
>     <!--     <property>
>             <name>hive.users.in.admin.role</name>
>             <value>karencoppage</value>
>         </property> -->
>     <!--Timestamp-->
>     <!--     <property>
>             <name>hive.parquet.write.int64.timestamp</name>
>             <value>true</value>
>         </property>
>      -->
>     <!--for WebUI explain plan-->
>     <!--     <property>
>             <name>hive.server2.webui.max.historic.queries</name>
>             <value>40</value>
>         </property> -->
>     <!--     <property>
>             <name></name>
>             <value></value>
>         </property>
>      -->
> </configuration>
>  {code}
>  
>  
> The following stack trace appears when the HMS service is started:
> {code:java}
> [Thread-5] ERROR org.apache.hadoop.hive.ql.txn.compactor.Initiator - Initiator loop caught unexpected exception this time through the loop
> org.apache.hadoop.hive.metastore.api.MetaException: Unable to select from transaction database org.postgresql.util.PSQLException: ERROR: column "CC_START" does not exist
>   Position: 1215
>     at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2676)
>     at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2366)
>     at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:356)
>     at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:490)
>     at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:408)
>     at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:181)
>     at org.postgresql.jdbc.PgPreparedStatement.executeQuery(PgPreparedStatement.java:133)
>     at com.zaxxer.hikari.pool.ProxyPreparedStatement.executeQuery(ProxyPreparedStatement.java:52)
>     at com.zaxxer.hikari.pool.HikariProxyPreparedStatement.executeQuery(HikariProxyPreparedStatement.java)
>     at org.apache.hadoop.hive.metastore.txn.TxnHandler.showCompact(TxnHandler.java:3894)
>     at org.apache.hadoop.hive.ql.txn.compactor.Initiator.run(Initiator.java:154)
>     at org.apache.hadoop.hive.metastore.txn.TxnHandler.showCompact(TxnHandler.java:3946) ~[classes/:?]
>     at org.apache.hadoop.hive.ql.txn.compactor.Initiator.run(Initiator.java:154) ~[hive-exec-4.0.0-SNAPSHOT.jar:4.0.0-SNAPSHOT] {code}
> The error disappears when Derby is used as the HMS database.
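
The Postgres-only failure is consistent with identifier case folding: the old query declared its aliases unquoted (e.g. CC_START), which Postgres folds to lower case, while the ORDER BY clause references them quoted as "CC_START". A minimal, self-contained reproduction (illustrative sketch, runnable against any Postgres instance):

{code:sql}
-- Postgres folds the unquoted alias to cc_start, so the quoted,
-- case-sensitive reference in ORDER BY finds no matching column:
SELECT 1 AS CC_START ORDER BY "CC_START";
-- ERROR:  column "CC_START" does not exist

-- Quoting the alias, as the patch does, preserves its case:
SELECT 1 AS "CC_START" ORDER BY "CC_START";  -- returns one row
{code}

Derby folds unquoted identifiers to upper case instead, so the same unquoted aliases happen to match "CC_START" there, which is why the error does not reproduce with Derby.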



--
This message was sent by Atlassian Jira
(v8.20.10#820010)
