This is an automated email from the ASF dual-hosted git repository.

dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 88fb57621ee HIVE-26563 Add extra columns in Show Compactions output 
and sort the output (Kirti Ruge, reviewed by Denys Kuzmenko)
88fb57621ee is described below

commit 88fb57621eead67f94d8fb12187187c01bc26abb
Author: rkirtir <[email protected]>
AuthorDate: Tue Oct 18 15:22:16 2022 +0530

    HIVE-26563 Add extra columns in Show Compactions output and sort the output 
(Kirti Ruge, reviewed by Denys Kuzmenko)
    
    Closes #3625
---
 .../ql/txn/compactor/CompactionPoolOnTezTest.java  |  12 +-
 .../ql/txn/compactor/TestCrudCompactorOnTez.java   |  16 +-
 .../show/compactions/ShowCompactionsDesc.java      |   4 +-
 .../show/compactions/ShowCompactionsOperation.java |  16 +
 .../apache/hadoop/hive/ql/TestTxnCommands2.java    |  55 +++
 .../clientpositive/llap/dbtxnmgr_compact1.q.out    |   6 +-
 .../clientpositive/llap/dbtxnmgr_showlocks.q.out   |   4 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp    |  88 +++++
 .../src/gen/thrift/gen-cpp/hive_metastore_types.h  |  40 +-
 .../metastore/api/ShowCompactResponseElement.java  | 402 ++++++++++++++++++++-
 .../metastore/ShowCompactResponseElement.php       |  96 +++++
 .../src/gen/thrift/gen-py/hive_metastore/ttypes.py |  50 ++-
 .../src/gen/thrift/gen-rb/hive_metastore_types.rb  |  10 +-
 .../hadoop/hive/metastore/txn/TxnQueries.java      |  51 +++
 .../src/main/thrift/hive_metastore.thrift          |   9 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java      | 168 ++++-----
 16 files changed, 905 insertions(+), 122 deletions(-)

diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionPoolOnTezTest.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionPoolOnTezTest.java
index 9d96a0dc7cf..a366ea315fe 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionPoolOnTezTest.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionPoolOnTezTest.java
@@ -35,6 +35,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.regex.Pattern;
+import java.io.*;
 
 import static 
org.apache.hadoop.hive.ql.txn.compactor.TestCompactorBase.executeStatementOnDriver;
 
@@ -137,9 +138,9 @@ public class CompactionPoolOnTezTest extends 
CompactorOnTezTest {
     List results = new ArrayList();
     driver.getResults(results);
     Assert.assertEquals(3, results.size());
-    
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
 host\tWorker\tEnqueue Time\tStart Time\tDuration(ms)\tHadoopJobId\tError 
message\tInitiator host\tInitiator\tPool name",
-        results.get(0));
-    Pattern p = Pattern.compile("(1|2)\tdefault\t(compaction_test|table2)\t 
--- \tMAJOR\tinitiated.*(pool1|default)");
+    
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
 host\tWorker\tEnqueue Time\tStart Time\tDuration(ms)" +
+       "\tHadoopJobId\tError message\tInitiator host\tInitiator\tPool 
name\tTxnId\tNext TxnId\tCommit Time\tHighest WriteID", results.get(0));
+    Pattern p = Pattern.compile("(1|2)\tdefault\t(compaction_test|table2)\t 
--- \tMAJOR\tinitiated.*(pool1|default).*");
     for(int i = 1; i < results.size(); i++) {
       Assert.assertTrue(p.matcher(results.get(i).toString()).matches());
     }
@@ -162,9 +163,10 @@ public class CompactionPoolOnTezTest extends 
CompactorOnTezTest {
     List results = new ArrayList();
     driver.getResults(results);
     Assert.assertEquals(2, results.size());
-    
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
 host\tWorker\tEnqueue Time\tStart Time\tDuration(ms)\tHadoopJobId\tError 
message\tInitiator host\tInitiator\tPool name",
+    
Assert.assertEquals("CompactionId\tDatabase\tTable\tPartition\tType\tState\tWorker
 host\tWorker\tEnqueue Time\tStart Time\tDuration(ms)" +
+       "\tHadoopJobId\tError message\tInitiator host\tInitiator\tPool 
name\tTxnId\tNext TxnId\tCommit Time\tHighest WriteID",
         results.get(0));
-    Pattern p = Pattern.compile("1|2\tdefault\tcompaction_test\t --- 
\tMAJOR\tinitiated.*pool1");
+    Pattern p = Pattern.compile("1|2\tdefault\tcompaction_test\t --- 
\tMAJOR\tinitiated.*pool1.*");
     Assert.assertTrue(p.matcher(results.get(1).toString()).matches());
   }
 
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
index 9d41bb72f44..6f4ddbff5d0 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
@@ -231,12 +231,12 @@ public class TestCrudCompactorOnTez extends 
CompactorOnTezTest {
     if (2 != compacts.size()) {
       Assert.fail("Expecting 2 rows and found " + compacts.size() + " files " 
+ compacts);
     }
-    Assert.assertEquals("did not initiate", compacts.get(0).getState());
+    Assert.assertEquals("refused", compacts.get(0).getState());
     Assert.assertTrue(compacts.get(0).getErrorMessage()
-            .startsWith("Caught exception while trying to determine if we 
should compact"));
-    Assert.assertEquals("refused", compacts.get(1).getState());
-    Assert.assertTrue(compacts.get(1).getErrorMessage()
             .startsWith("Query based Minor compaction is not possible for full 
acid tables having raw format (non-acid) data in them."));
+    Assert.assertEquals("did not initiate", compacts.get(1).getState());
+    Assert.assertTrue(compacts.get(1).getErrorMessage()
+            .startsWith("Caught exception while trying to determine if we 
should compact "));
   }
 
   @Test
@@ -301,10 +301,10 @@ public class TestCrudCompactorOnTez extends 
CompactorOnTezTest {
     if (2 != compacts.size()) {
       Assert.fail("Expecting 2 rows and found " + compacts.size() + " files " 
+ compacts);
     }
-    Assert.assertEquals("did not initiate", compacts.get(0).getState());
-    Assert.assertTrue(compacts.get(0).getErrorMessage().startsWith("Caught 
exception while trying to determine if we should compact"));
-    Assert.assertEquals("refused", compacts.get(1).getState());
-    Assert.assertTrue(compacts.get(1).getErrorMessage().startsWith("Query 
based Minor compaction is not possible for full acid tables having raw format 
(non-acid) data in them."));
+    Assert.assertEquals("refused", compacts.get(0).getState());
+    Assert.assertTrue(compacts.get(0).getErrorMessage().startsWith("Query 
based Minor compaction is not possible for full acid tables having raw format 
(non-acid) data in them."));
+    Assert.assertEquals("did not initiate", compacts.get(1).getState());
+    Assert.assertTrue(compacts.get(1).getErrorMessage().startsWith("Caught 
exception while trying to determine if we should compact"));
   }
 
   /**
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
index c6a087e3e7b..be98e172917 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsDesc.java
@@ -33,8 +33,8 @@ public class ShowCompactionsDesc implements DDLDesc, 
Serializable {
 
   // @formatter:off
   public static final String SCHEMA =
-      
"compactionid,dbname,tabname,partname,type,state,workerhost,workerid,enqueuetime,starttime,duration,hadoopjobid,errormessage,initiatorhost,initiatorid,poolname#"
 +
-      
"string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string";
+      
"compactionid,dbname,tabname,partname,type,state,workerhost,workerid,enqueuetime,starttime,duration,hadoopjobid,errormessage,initiatorhost,initiatorid,poolname,txnid,nexttxnid,committime,hightestwriteid#"
 +
+      
"string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string";
   // @formatter:on
 
   private String resFile;
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
index 8a85e4bef10..7f26fa9eb55 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/show/compactions/ShowCompactionsOperation.java
@@ -100,6 +100,14 @@ public class ShowCompactionsOperation extends 
DDLOperation<ShowCompactionsDesc>
     os.writeBytes("Initiator");
     os.write(Utilities.tabCode);
     os.writeBytes("Pool name");
+    os.write(Utilities.tabCode);
+    os.writeBytes("TxnId");
+    os.write(Utilities.tabCode);
+    os.writeBytes("Next TxnId");
+    os.write(Utilities.tabCode);
+    os.writeBytes("Commit Time");
+    os.write(Utilities.tabCode);
+    os.writeBytes("Highest WriteID");
     os.write(Utilities.newLineCode);
   }
 
@@ -137,6 +145,14 @@ public class ShowCompactionsOperation extends 
DDLOperation<ShowCompactionsDesc>
     os.writeBytes(getThreadIdFromId(e.getInitiatorId()));
     os.write(Utilities.tabCode);
     os.writeBytes(e.getPoolName());
+    os.write(Utilities.tabCode);
+    os.writeBytes(e.isSetTxnId() ? Long.toString(e.getTxnId()) : NO_VAL);
+    os.write(Utilities.tabCode);
+    os.writeBytes(e.isSetNextTxnId() ? Long.toString(e.getNextTxnId()) : 
NO_VAL);
+    os.write(Utilities.tabCode);
+    os.writeBytes(e.isSetCommitTime() ? Long.toString(e.getCommitTime()) : 
NO_VAL);
+    os.write(Utilities.tabCode);
+    os.writeBytes(e.isSetHightestWriteId() ? 
Long.toString(e.getHightestWriteId()) : NO_VAL);
     os.write(Utilities.newLineCode);
   }
 }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 392a06b648c..f6ddf891100 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -3356,6 +3356,61 @@ public class TestTxnCommands2 extends 
TxnCommandsBaseForTests {
     Assert.assertFalse(fs.exists(new Path(tablePath + oldDelta4)));
   }
 
+  @Test
+  public void testShowCompactionOrder() throws Exception {
+
+    d.destroy();
+    hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+    d = new Driver(hiveConf);
+    //generate some compaction history
+    runStatementOnDriver("drop database if exists mydb1 cascade");
+    runStatementOnDriver("create database mydb1");
+
+    runStatementOnDriver("create table mydb1.tbl0 " + "(a int, b int) 
partitioned by (p string) clustered by (a) into " +
+            BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES 
('transactional'='true')");
+    runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+            " 
values(1,2,'p1'),(3,4,'p1'),(1,2,'p2'),(3,4,'p2'),(1,2,'p3'),(3,4,'p3')");
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p1') 
compact 'MAJOR'");
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p2') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+    TestTxnCommands2.runCleaner(hiveConf);
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION(p='p3') 
compact 'MAJOR'");
+    runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+            " 
values(4,5,'p1'),(6,7,'p1'),(4,5,'p2'),(6,7,'p2'),(4,5,'p3'),(6,7,'p3')");
+    TestTxnCommands2.runWorker(hiveConf);
+    TestTxnCommands2.runCleaner(hiveConf);
+    runStatementOnDriver("insert into mydb1.tbl0" + " PARTITION(p) " +
+            " 
values(11,12,'p1'),(13,14,'p1'),(11,12,'p2'),(13,14,'p2'),(11,12,'p3'),(13,14,'p3')");
+    runStatementOnDriver("alter table mydb1.tbl0" + " PARTITION (p='p1')  
compact 'MINOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+
+    runStatementOnDriver("create table mydb1.tbl1 " + "(a int, b int) 
partitioned by (ds string) clustered by (a) into " +
+            BUCKET_COUNT + " buckets stored as orc TBLPROPERTIES 
('transactional'='true')");
+    runStatementOnDriver("insert into mydb1.tbl1" + " PARTITION(ds) " +
+            " 
values(1,2,'today'),(3,4,'today'),(1,2,'tomorrow'),(3,4,'tomorrow'),(1,2,'yesterday'),(3,4,'yesterday')");
+    runStatementOnDriver("alter table mydb1.tbl1" + " PARTITION(ds='today') 
compact 'MAJOR'");
+    TestTxnCommands2.runWorker(hiveConf);
+
+    runStatementOnDriver("create table T (a int, b int) stored as orc 
TBLPROPERTIES ('transactional'='true')");
+    runStatementOnDriver("insert into T values(0,2)");//makes delta_1_1 in T1
+    runStatementOnDriver("insert into T values(1,4)");//makes delta_2_2 in T2
+
+    //create failed compaction attempt so that compactor txn is aborted
+    HiveConf.setBoolVar(hiveConf, 
HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
+    runStatementOnDriver("alter table T compact 'minor'");
+    TestTxnCommands2.runWorker(hiveConf);
+    // Verify  compaction order
+    List<ShowCompactResponseElement> compacts =
+            txnHandler.showCompact(new ShowCompactRequest()).getCompacts();
+    Assert.assertEquals(6, compacts.size());
+    Assert.assertEquals(TxnStore.INITIATED_RESPONSE, 
compacts.get(0).getState());
+    Assert.assertEquals(TxnStore.REFUSED_RESPONSE, compacts.get(1).getState());
+    Assert.assertEquals(TxnStore.CLEANING_RESPONSE, 
compacts.get(2).getState());
+    Assert.assertEquals(TxnStore.CLEANING_RESPONSE, 
compacts.get(3).getState());
+    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, 
compacts.get(4).getState());
+    Assert.assertEquals(TxnStore.REFUSED_RESPONSE, compacts.get(5).getState());
+
+  }
   private void compactPartition(String table, CompactionType type, String 
partition)
       throws Exception {
     CompactionRequest compactionRequest = new CompactionRequest("default", 
table, type);
diff --git a/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out 
b/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
index 24f3b1340c4..2a0f87b3932 100644
--- a/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
+++ b/ql/src/test/results/clientpositive/llap/dbtxnmgr_compact1.q.out
@@ -53,9 +53,9 @@ PREHOOK: query: show compactions
 PREHOOK: type: SHOW COMPACTIONS
 POSTHOOK: query: show compactions
 POSTHOOK: type: SHOW COMPACTIONS
-CompactionId   Database        Table   Partition       Type    State   Worker 
host     Worker  Enqueue Time    Start Time      Duration(ms)    HadoopJobId    
 Error message   Initiator host  Initiator       Pool name
-1      default t1_n153  ---    MAJOR   initiated        ---     ---    
#Masked#         ---     ---     ---     ---    #Masked#        manual  default
-2      default t2_n153  ---    MINOR   initiated        ---     ---    
#Masked#         ---     ---     ---     ---    #Masked#        manual  default
+CompactionId   Database        Table   Partition       Type    State   Worker 
host     Worker  Enqueue Time    Start Time      Duration(ms)    HadoopJobId    
 Error message   Initiator host  Initiator       Pool name       TxnId   Next 
TxnId      Commit Time     Highest WriteID
+1      default t1_n153  ---    MAJOR   initiated        ---     ---    
#Masked#         ---     ---     ---     ---    #Masked#        manual  default 
0       0       0        --- 
+2      default t2_n153  ---    MINOR   initiated        ---     ---    
#Masked#         ---     ---     ---     ---    #Masked#        manual  default 
0       0       0        --- 
 PREHOOK: query: drop table T1_n153
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@t1_n153
diff --git a/ql/src/test/results/clientpositive/llap/dbtxnmgr_showlocks.q.out 
b/ql/src/test/results/clientpositive/llap/dbtxnmgr_showlocks.q.out
index ad2b8eceb56..728cec09ba9 100644
--- a/ql/src/test/results/clientpositive/llap/dbtxnmgr_showlocks.q.out
+++ b/ql/src/test/results/clientpositive/llap/dbtxnmgr_showlocks.q.out
@@ -149,8 +149,8 @@ PREHOOK: query: show compactions
 PREHOOK: type: SHOW COMPACTIONS
 POSTHOOK: query: show compactions
 POSTHOOK: type: SHOW COMPACTIONS
-CompactionId   Database        Table   Partition       Type    State   Worker 
host     Worker  Enqueue Time    Start Time      Duration(ms)    HadoopJobId    
 Error message   Initiator host  Initiator       Pool name
-1      default partitioned_acid_table  p=abc   MINOR   initiated        ---    
 ---    #Masked#         ---     ---     ---     ---    #Masked#        manual  
default
+CompactionId   Database        Table   Partition       Type    State   Worker 
host     Worker  Enqueue Time    Start Time      Duration(ms)    HadoopJobId    
 Error message   Initiator host  Initiator       Pool name       TxnId   Next 
TxnId      Commit Time     Highest WriteID
+1      default partitioned_acid_table  p=abc   MINOR   initiated        ---    
 ---    #Masked#         ---     ---     ---     ---    #Masked#        manual  
default 0       0       0        --- 
 PREHOOK: query: drop table partitioned_acid_table
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@partitioned_acid_table
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 01123063683..b76805eda02 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -28308,6 +28308,26 @@ void ShowCompactResponseElement::__set_poolName(const 
std::string& val) {
   this->poolName = val;
 __isset.poolName = true;
 }
+
+void ShowCompactResponseElement::__set_nextTxnId(const int64_t val) {
+  this->nextTxnId = val;
+__isset.nextTxnId = true;
+}
+
+void ShowCompactResponseElement::__set_txnId(const int64_t val) {
+  this->txnId = val;
+__isset.txnId = true;
+}
+
+void ShowCompactResponseElement::__set_commitTime(const int64_t val) {
+  this->commitTime = val;
+__isset.commitTime = true;
+}
+
+void ShowCompactResponseElement::__set_hightestWriteId(const int64_t val) {
+  this->hightestWriteId = val;
+__isset.hightestWriteId = true;
+}
 std::ostream& operator<<(std::ostream& out, const ShowCompactResponseElement& 
obj)
 {
   obj.printTo(out);
@@ -28502,6 +28522,38 @@ uint32_t 
ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol*
           xfer += iprot->skip(ftype);
         }
         break;
+      case 21:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->nextTxnId);
+          this->__isset.nextTxnId = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 22:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->txnId);
+          this->__isset.txnId = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 23:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->commitTime);
+          this->__isset.commitTime = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 24:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->hightestWriteId);
+          this->__isset.hightestWriteId = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -28623,6 +28675,26 @@ uint32_t 
ShowCompactResponseElement::write(::apache::thrift::protocol::TProtocol
     xfer += oprot->writeString(this->poolName);
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.nextTxnId) {
+    xfer += oprot->writeFieldBegin("nextTxnId", 
::apache::thrift::protocol::T_I64, 21);
+    xfer += oprot->writeI64(this->nextTxnId);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.txnId) {
+    xfer += oprot->writeFieldBegin("txnId", ::apache::thrift::protocol::T_I64, 
22);
+    xfer += oprot->writeI64(this->txnId);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.commitTime) {
+    xfer += oprot->writeFieldBegin("commitTime", 
::apache::thrift::protocol::T_I64, 23);
+    xfer += oprot->writeI64(this->commitTime);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.hightestWriteId) {
+    xfer += oprot->writeFieldBegin("hightestWriteId", 
::apache::thrift::protocol::T_I64, 24);
+    xfer += oprot->writeI64(this->hightestWriteId);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -28650,6 +28722,10 @@ void swap(ShowCompactResponseElement &a, 
ShowCompactResponseElement &b) {
   swap(a.initiatorVersion, b.initiatorVersion);
   swap(a.cleanerStart, b.cleanerStart);
   swap(a.poolName, b.poolName);
+  swap(a.nextTxnId, b.nextTxnId);
+  swap(a.txnId, b.txnId);
+  swap(a.commitTime, b.commitTime);
+  swap(a.hightestWriteId, b.hightestWriteId);
   swap(a.__isset, b.__isset);
 }
 
@@ -28674,6 +28750,10 @@ 
ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponse
   initiatorVersion = other998.initiatorVersion;
   cleanerStart = other998.cleanerStart;
   poolName = other998.poolName;
+  nextTxnId = other998.nextTxnId;
+  txnId = other998.txnId;
+  commitTime = other998.commitTime;
+  hightestWriteId = other998.hightestWriteId;
   __isset = other998.__isset;
 }
 ShowCompactResponseElement& ShowCompactResponseElement::operator=(const 
ShowCompactResponseElement& other999) {
@@ -28697,6 +28777,10 @@ ShowCompactResponseElement& 
ShowCompactResponseElement::operator=(const ShowComp
   initiatorVersion = other999.initiatorVersion;
   cleanerStart = other999.cleanerStart;
   poolName = other999.poolName;
+  nextTxnId = other999.nextTxnId;
+  txnId = other999.txnId;
+  commitTime = other999.commitTime;
+  hightestWriteId = other999.hightestWriteId;
   __isset = other999.__isset;
   return *this;
 }
@@ -28723,6 +28807,10 @@ void ShowCompactResponseElement::printTo(std::ostream& 
out) const {
   out << ", " << "initiatorVersion="; (__isset.initiatorVersion ? (out << 
to_string(initiatorVersion)) : (out << "<null>"));
   out << ", " << "cleanerStart="; (__isset.cleanerStart ? (out << 
to_string(cleanerStart)) : (out << "<null>"));
   out << ", " << "poolName="; (__isset.poolName ? (out << to_string(poolName)) 
: (out << "<null>"));
+  out << ", " << "nextTxnId="; (__isset.nextTxnId ? (out << 
to_string(nextTxnId)) : (out << "<null>"));
+  out << ", " << "txnId="; (__isset.txnId ? (out << to_string(txnId)) : (out 
<< "<null>"));
+  out << ", " << "commitTime="; (__isset.commitTime ? (out << 
to_string(commitTime)) : (out << "<null>"));
+  out << ", " << "hightestWriteId="; (__isset.hightestWriteId ? (out << 
to_string(hightestWriteId)) : (out << "<null>"));
   out << ")";
 }
 
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 053dc986d18..be0410bf880 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -11200,7 +11200,7 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b);
 std::ostream& operator<<(std::ostream& out, const ShowCompactRequest& obj);
 
 typedef struct _ShowCompactResponseElement__isset {
-  _ShowCompactResponseElement__isset() : partitionname(false), 
workerid(false), start(false), runAs(false), hightestTxnId(false), 
metaInfo(false), endTime(false), hadoopJobId(true), id(false), 
errorMessage(false), enqueueTime(false), workerVersion(false), 
initiatorId(false), initiatorVersion(false), cleanerStart(false), 
poolName(false) {}
+  _ShowCompactResponseElement__isset() : partitionname(false), 
workerid(false), start(false), runAs(false), hightestTxnId(false), 
metaInfo(false), endTime(false), hadoopJobId(true), id(false), 
errorMessage(false), enqueueTime(false), workerVersion(false), 
initiatorId(false), initiatorVersion(false), cleanerStart(false), 
poolName(false), nextTxnId(false), txnId(false), commitTime(false), 
hightestWriteId(false) {}
   bool partitionname :1;
   bool workerid :1;
   bool start :1;
@@ -11217,6 +11217,10 @@ typedef struct _ShowCompactResponseElement__isset {
   bool initiatorVersion :1;
   bool cleanerStart :1;
   bool poolName :1;
+  bool nextTxnId :1;
+  bool txnId :1;
+  bool commitTime :1;
+  bool hightestWriteId :1;
 } _ShowCompactResponseElement__isset;
 
 class ShowCompactResponseElement : public virtual ::apache::thrift::TBase {
@@ -11243,7 +11247,11 @@ class ShowCompactResponseElement : public virtual 
::apache::thrift::TBase {
                                  initiatorId(),
                                  initiatorVersion(),
                                  cleanerStart(0),
-                                 poolName() {
+                                 poolName(),
+                                 nextTxnId(0),
+                                 txnId(0),
+                                 commitTime(0),
+                                 hightestWriteId(0) {
   }
 
   virtual ~ShowCompactResponseElement() noexcept;
@@ -11271,6 +11279,10 @@ class ShowCompactResponseElement : public virtual 
::apache::thrift::TBase {
   std::string initiatorVersion;
   int64_t cleanerStart;
   std::string poolName;
+  int64_t nextTxnId;
+  int64_t txnId;
+  int64_t commitTime;
+  int64_t hightestWriteId;
 
   _ShowCompactResponseElement__isset __isset;
 
@@ -11314,6 +11326,14 @@ class ShowCompactResponseElement : public virtual 
::apache::thrift::TBase {
 
   void __set_poolName(const std::string& val);
 
+  void __set_nextTxnId(const int64_t val);
+
+  void __set_txnId(const int64_t val);
+
+  void __set_commitTime(const int64_t val);
+
+  void __set_hightestWriteId(const int64_t val);
+
   bool operator == (const ShowCompactResponseElement & rhs) const
   {
     if (!(dbname == rhs.dbname))
@@ -11388,6 +11408,22 @@ class ShowCompactResponseElement : public virtual 
::apache::thrift::TBase {
       return false;
     else if (__isset.poolName && !(poolName == rhs.poolName))
       return false;
+    if (__isset.nextTxnId != rhs.__isset.nextTxnId)
+      return false;
+    else if (__isset.nextTxnId && !(nextTxnId == rhs.nextTxnId))
+      return false;
+    if (__isset.txnId != rhs.__isset.txnId)
+      return false;
+    else if (__isset.txnId && !(txnId == rhs.txnId))
+      return false;
+    if (__isset.commitTime != rhs.__isset.commitTime)
+      return false;
+    else if (__isset.commitTime && !(commitTime == rhs.commitTime))
+      return false;
+    if (__isset.hightestWriteId != rhs.__isset.hightestWriteId)
+      return false;
+    else if (__isset.hightestWriteId && !(hightestWriteId == 
rhs.hightestWriteId))
+      return false;
     return true;
   }
   bool operator != (const ShowCompactResponseElement &rhs) const {
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
index ac6cb282d15..ee0f4bf5018 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
@@ -31,6 +31,10 @@ package org.apache.hadoop.hive.metastore.api;
   private static final org.apache.thrift.protocol.TField 
INITIATOR_VERSION_FIELD_DESC = new 
org.apache.thrift.protocol.TField("initiatorVersion", 
org.apache.thrift.protocol.TType.STRING, (short)18);
   private static final org.apache.thrift.protocol.TField 
CLEANER_START_FIELD_DESC = new 
org.apache.thrift.protocol.TField("cleanerStart", 
org.apache.thrift.protocol.TType.I64, (short)19);
   private static final org.apache.thrift.protocol.TField POOL_NAME_FIELD_DESC 
= new org.apache.thrift.protocol.TField("poolName", 
org.apache.thrift.protocol.TType.STRING, (short)20);
+  private static final org.apache.thrift.protocol.TField 
NEXT_TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("nextTxnId", 
org.apache.thrift.protocol.TType.I64, (short)21);
+  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = 
new org.apache.thrift.protocol.TField("txnId", 
org.apache.thrift.protocol.TType.I64, (short)22);
+  private static final org.apache.thrift.protocol.TField 
COMMIT_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("commitTime", 
org.apache.thrift.protocol.TType.I64, (short)23);
+  private static final org.apache.thrift.protocol.TField 
HIGHTEST_WRITE_ID_FIELD_DESC = new 
org.apache.thrift.protocol.TField("hightestWriteId", 
org.apache.thrift.protocol.TType.I64, (short)24);
 
   private static final org.apache.thrift.scheme.SchemeFactory 
STANDARD_SCHEME_FACTORY = new ShowCompactResponseElementStandardSchemeFactory();
   private static final org.apache.thrift.scheme.SchemeFactory 
TUPLE_SCHEME_FACTORY = new ShowCompactResponseElementTupleSchemeFactory();
@@ -55,6 +59,10 @@ package org.apache.hadoop.hive.metastore.api;
   private @org.apache.thrift.annotation.Nullable java.lang.String 
initiatorVersion; // optional
   private long cleanerStart; // optional
   private @org.apache.thrift.annotation.Nullable java.lang.String poolName; // 
optional
+  private long nextTxnId; // optional
+  private long txnId; // optional
+  private long commitTime; // optional
+  private long hightestWriteId; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -81,7 +89,11 @@ package org.apache.hadoop.hive.metastore.api;
     INITIATOR_ID((short)17, "initiatorId"),
     INITIATOR_VERSION((short)18, "initiatorVersion"),
     CLEANER_START((short)19, "cleanerStart"),
-    POOL_NAME((short)20, "poolName");
+    POOL_NAME((short)20, "poolName"),
+    NEXT_TXN_ID((short)21, "nextTxnId"),
+    TXN_ID((short)22, "txnId"),
+    COMMIT_TIME((short)23, "commitTime"),
+    HIGHTEST_WRITE_ID((short)24, "hightestWriteId");
 
     private static final java.util.Map<java.lang.String, _Fields> byName = new 
java.util.HashMap<java.lang.String, _Fields>();
 
@@ -137,6 +149,14 @@ package org.apache.hadoop.hive.metastore.api;
           return CLEANER_START;
         case 20: // POOL_NAME
           return POOL_NAME;
+        case 21: // NEXT_TXN_ID
+          return NEXT_TXN_ID;
+        case 22: // TXN_ID
+          return TXN_ID;
+        case 23: // COMMIT_TIME
+          return COMMIT_TIME;
+        case 24: // HIGHTEST_WRITE_ID
+          return HIGHTEST_WRITE_ID;
         default:
           return null;
       }
@@ -184,8 +204,12 @@ package org.apache.hadoop.hive.metastore.api;
   private static final int __ID_ISSET_ID = 3;
   private static final int __ENQUEUETIME_ISSET_ID = 4;
   private static final int __CLEANERSTART_ISSET_ID = 5;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = 
{_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID,_Fields.ERROR_MESSAGE,_Fields.ENQUEUE_TIME,_Fields.WORKER_VERSION,_Fields.INITIATOR_ID,_Fields.INITIATOR_VERSION,_Fields.CLEANER_START,_Fields.POOL_NAME};
+  private static final int __NEXTTXNID_ISSET_ID = 6;
+  private static final int __TXNID_ISSET_ID = 7;
+  private static final int __COMMITTIME_ISSET_ID = 8;
+  private static final int __HIGHTESTWRITEID_ISSET_ID = 9;
+  private short __isset_bitfield = 0;
+  private static final _Fields optionals[] = 
{_Fields.PARTITIONNAME,_Fields.WORKERID,_Fields.START,_Fields.RUN_AS,_Fields.HIGHTEST_TXN_ID,_Fields.META_INFO,_Fields.END_TIME,_Fields.HADOOP_JOB_ID,_Fields.ID,_Fields.ERROR_MESSAGE,_Fields.ENQUEUE_TIME,_Fields.WORKER_VERSION,_Fields.INITIATOR_ID,_Fields.INITIATOR_VERSION,_Fields.CLEANER_START,_Fields.POOL_NAME,_Fields.NEXT_TXN_ID,_Fields.TXN_ID,_Fields.COMMIT_TIME,_Fields.HIGHTEST_WRITE_ID};
   public static final java.util.Map<_Fields, 
org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = 
new java.util.EnumMap<_Fields, 
org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -229,6 +253,14 @@ package org.apache.hadoop.hive.metastore.api;
         new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.POOL_NAME, new 
org.apache.thrift.meta_data.FieldMetaData("poolName", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.NEXT_TXN_ID, new 
org.apache.thrift.meta_data.FieldMetaData("nextTxnId", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.TXN_ID, new 
org.apache.thrift.meta_data.FieldMetaData("txnId", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.COMMIT_TIME, new 
org.apache.thrift.meta_data.FieldMetaData("commitTime", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.HIGHTEST_WRITE_ID, new 
org.apache.thrift.meta_data.FieldMetaData("hightestWriteId", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
     
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ShowCompactResponseElement.class,
 metaDataMap);
   }
@@ -304,6 +336,10 @@ package org.apache.hadoop.hive.metastore.api;
     if (other.isSetPoolName()) {
       this.poolName = other.poolName;
     }
+    this.nextTxnId = other.nextTxnId;
+    this.txnId = other.txnId;
+    this.commitTime = other.commitTime;
+    this.hightestWriteId = other.hightestWriteId;
   }
 
   public ShowCompactResponseElement deepCopy() {
@@ -339,6 +375,14 @@ package org.apache.hadoop.hive.metastore.api;
     setCleanerStartIsSet(false);
     this.cleanerStart = 0;
     this.poolName = null;
+    setNextTxnIdIsSet(false);
+    this.nextTxnId = 0;
+    setTxnIdIsSet(false);
+    this.txnId = 0;
+    setCommitTimeIsSet(false);
+    this.commitTime = 0;
+    setHightestWriteIdIsSet(false);
+    this.hightestWriteId = 0;
   }
 
   @org.apache.thrift.annotation.Nullable
@@ -817,6 +861,94 @@ package org.apache.hadoop.hive.metastore.api;
     }
   }
 
+  public long getNextTxnId() {
+    return this.nextTxnId;
+  }
+
+  public void setNextTxnId(long nextTxnId) {
+    this.nextTxnId = nextTxnId;
+    setNextTxnIdIsSet(true);
+  }
+
+  public void unsetNextTxnId() {
+    __isset_bitfield = 
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, 
__NEXTTXNID_ISSET_ID);
+  }
+
+  /** Returns true if field nextTxnId is set (has been assigned a value) and 
false otherwise */
+  public boolean isSetNextTxnId() {
+    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, 
__NEXTTXNID_ISSET_ID);
+  }
+
+  public void setNextTxnIdIsSet(boolean value) {
+    __isset_bitfield = 
org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __NEXTTXNID_ISSET_ID, 
value);
+  }
+
+  public long getTxnId() {
+    return this.txnId;
+  }
+
+  public void setTxnId(long txnId) {
+    this.txnId = txnId;
+    setTxnIdIsSet(true);
+  }
+
+  public void unsetTxnId() {
+    __isset_bitfield = 
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnId is set (has been assigned a value) and false 
otherwise */
+  public boolean isSetTxnId() {
+    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, 
__TXNID_ISSET_ID);
+  }
+
+  public void setTxnIdIsSet(boolean value) {
+    __isset_bitfield = 
org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, 
value);
+  }
+
+  public long getCommitTime() {
+    return this.commitTime;
+  }
+
+  public void setCommitTime(long commitTime) {
+    this.commitTime = commitTime;
+    setCommitTimeIsSet(true);
+  }
+
+  public void unsetCommitTime() {
+    __isset_bitfield = 
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, 
__COMMITTIME_ISSET_ID);
+  }
+
+  /** Returns true if field commitTime is set (has been assigned a value) and 
false otherwise */
+  public boolean isSetCommitTime() {
+    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, 
__COMMITTIME_ISSET_ID);
+  }
+
+  public void setCommitTimeIsSet(boolean value) {
+    __isset_bitfield = 
org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __COMMITTIME_ISSET_ID, 
value);
+  }
+
+  public long getHightestWriteId() {
+    return this.hightestWriteId;
+  }
+
+  public void setHightestWriteId(long hightestWriteId) {
+    this.hightestWriteId = hightestWriteId;
+    setHightestWriteIdIsSet(true);
+  }
+
+  public void unsetHightestWriteId() {
+    __isset_bitfield = 
org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, 
__HIGHTESTWRITEID_ISSET_ID);
+  }
+
+  /** Returns true if field hightestWriteId is set (has been assigned a value) 
and false otherwise */
+  public boolean isSetHightestWriteId() {
+    return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, 
__HIGHTESTWRITEID_ISSET_ID);
+  }
+
+  public void setHightestWriteIdIsSet(boolean value) {
+    __isset_bitfield = 
org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, 
__HIGHTESTWRITEID_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, 
@org.apache.thrift.annotation.Nullable java.lang.Object value) {
     switch (field) {
     case DBNAME:
@@ -979,6 +1111,38 @@ package org.apache.hadoop.hive.metastore.api;
       }
       break;
 
+    case NEXT_TXN_ID:
+      if (value == null) {
+        unsetNextTxnId();
+      } else {
+        setNextTxnId((java.lang.Long)value);
+      }
+      break;
+
+    case TXN_ID:
+      if (value == null) {
+        unsetTxnId();
+      } else {
+        setTxnId((java.lang.Long)value);
+      }
+      break;
+
+    case COMMIT_TIME:
+      if (value == null) {
+        unsetCommitTime();
+      } else {
+        setCommitTime((java.lang.Long)value);
+      }
+      break;
+
+    case HIGHTEST_WRITE_ID:
+      if (value == null) {
+        unsetHightestWriteId();
+      } else {
+        setHightestWriteId((java.lang.Long)value);
+      }
+      break;
+
     }
   }
 
@@ -1045,6 +1209,18 @@ package org.apache.hadoop.hive.metastore.api;
     case POOL_NAME:
       return getPoolName();
 
+    case NEXT_TXN_ID:
+      return getNextTxnId();
+
+    case TXN_ID:
+      return getTxnId();
+
+    case COMMIT_TIME:
+      return getCommitTime();
+
+    case HIGHTEST_WRITE_ID:
+      return getHightestWriteId();
+
     }
     throw new java.lang.IllegalStateException();
   }
@@ -1096,6 +1272,14 @@ package org.apache.hadoop.hive.metastore.api;
       return isSetCleanerStart();
     case POOL_NAME:
       return isSetPoolName();
+    case NEXT_TXN_ID:
+      return isSetNextTxnId();
+    case TXN_ID:
+      return isSetTxnId();
+    case COMMIT_TIME:
+      return isSetCommitTime();
+    case HIGHTEST_WRITE_ID:
+      return isSetHightestWriteId();
     }
     throw new java.lang.IllegalStateException();
   }
@@ -1293,6 +1477,42 @@ package org.apache.hadoop.hive.metastore.api;
         return false;
     }
 
+    boolean this_present_nextTxnId = true && this.isSetNextTxnId();
+    boolean that_present_nextTxnId = true && that.isSetNextTxnId();
+    if (this_present_nextTxnId || that_present_nextTxnId) {
+      if (!(this_present_nextTxnId && that_present_nextTxnId))
+        return false;
+      if (this.nextTxnId != that.nextTxnId)
+        return false;
+    }
+
+    boolean this_present_txnId = true && this.isSetTxnId();
+    boolean that_present_txnId = true && that.isSetTxnId();
+    if (this_present_txnId || that_present_txnId) {
+      if (!(this_present_txnId && that_present_txnId))
+        return false;
+      if (this.txnId != that.txnId)
+        return false;
+    }
+
+    boolean this_present_commitTime = true && this.isSetCommitTime();
+    boolean that_present_commitTime = true && that.isSetCommitTime();
+    if (this_present_commitTime || that_present_commitTime) {
+      if (!(this_present_commitTime && that_present_commitTime))
+        return false;
+      if (this.commitTime != that.commitTime)
+        return false;
+    }
+
+    boolean this_present_hightestWriteId = true && this.isSetHightestWriteId();
+    boolean that_present_hightestWriteId = true && that.isSetHightestWriteId();
+    if (this_present_hightestWriteId || that_present_hightestWriteId) {
+      if (!(this_present_hightestWriteId && that_present_hightestWriteId))
+        return false;
+      if (this.hightestWriteId != that.hightestWriteId)
+        return false;
+    }
+
     return true;
   }
 
@@ -1380,6 +1600,22 @@ package org.apache.hadoop.hive.metastore.api;
     if (isSetPoolName())
       hashCode = hashCode * 8191 + poolName.hashCode();
 
+    hashCode = hashCode * 8191 + ((isSetNextTxnId()) ? 131071 : 524287);
+    if (isSetNextTxnId())
+      hashCode = hashCode * 8191 + 
org.apache.thrift.TBaseHelper.hashCode(nextTxnId);
+
+    hashCode = hashCode * 8191 + ((isSetTxnId()) ? 131071 : 524287);
+    if (isSetTxnId())
+      hashCode = hashCode * 8191 + 
org.apache.thrift.TBaseHelper.hashCode(txnId);
+
+    hashCode = hashCode * 8191 + ((isSetCommitTime()) ? 131071 : 524287);
+    if (isSetCommitTime())
+      hashCode = hashCode * 8191 + 
org.apache.thrift.TBaseHelper.hashCode(commitTime);
+
+    hashCode = hashCode * 8191 + ((isSetHightestWriteId()) ? 131071 : 524287);
+    if (isSetHightestWriteId())
+      hashCode = hashCode * 8191 + 
org.apache.thrift.TBaseHelper.hashCode(hightestWriteId);
+
     return hashCode;
   }
 
@@ -1591,6 +1827,46 @@ package org.apache.hadoop.hive.metastore.api;
         return lastComparison;
       }
     }
+    lastComparison = java.lang.Boolean.compare(isSetNextTxnId(), 
other.isSetNextTxnId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNextTxnId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nextTxnId, 
other.nextTxnId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = java.lang.Boolean.compare(isSetTxnId(), 
other.isSetTxnId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, 
other.txnId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = java.lang.Boolean.compare(isSetCommitTime(), 
other.isSetCommitTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCommitTime()) {
+      lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.commitTime, other.commitTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = java.lang.Boolean.compare(isSetHightestWriteId(), 
other.isSetHightestWriteId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHightestWriteId()) {
+      lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.hightestWriteId, 
other.hightestWriteId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -1779,6 +2055,30 @@ package org.apache.hadoop.hive.metastore.api;
       }
       first = false;
     }
+    if (isSetNextTxnId()) {
+      if (!first) sb.append(", ");
+      sb.append("nextTxnId:");
+      sb.append(this.nextTxnId);
+      first = false;
+    }
+    if (isSetTxnId()) {
+      if (!first) sb.append(", ");
+      sb.append("txnId:");
+      sb.append(this.txnId);
+      first = false;
+    }
+    if (isSetCommitTime()) {
+      if (!first) sb.append(", ");
+      sb.append("commitTime:");
+      sb.append(this.commitTime);
+      first = false;
+    }
+    if (isSetHightestWriteId()) {
+      if (!first) sb.append(", ");
+      sb.append("hightestWriteId:");
+      sb.append(this.hightestWriteId);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -2000,6 +2300,38 @@ package org.apache.hadoop.hive.metastore.api;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
             }
             break;
+          case 21: // NEXT_TXN_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.nextTxnId = iprot.readI64();
+              struct.setNextTxnIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
+            }
+            break;
+          case 22: // TXN_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnId = iprot.readI64();
+              struct.setTxnIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
+            }
+            break;
+          case 23: // COMMIT_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.commitTime = iprot.readI64();
+              struct.setCommitTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
+            }
+            break;
+          case 24: // HIGHTEST_WRITE_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.hightestWriteId = iprot.readI64();
+              struct.setHightestWriteIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
         }
@@ -2133,6 +2465,26 @@ package org.apache.hadoop.hive.metastore.api;
           oprot.writeFieldEnd();
         }
       }
+      if (struct.isSetNextTxnId()) {
+        oprot.writeFieldBegin(NEXT_TXN_ID_FIELD_DESC);
+        oprot.writeI64(struct.nextTxnId);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetTxnId()) {
+        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
+        oprot.writeI64(struct.txnId);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetCommitTime()) {
+        oprot.writeFieldBegin(COMMIT_TIME_FIELD_DESC);
+        oprot.writeI64(struct.commitTime);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetHightestWriteId()) {
+        oprot.writeFieldBegin(HIGHTEST_WRITE_ID_FIELD_DESC);
+        oprot.writeI64(struct.hightestWriteId);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -2203,7 +2555,19 @@ package org.apache.hadoop.hive.metastore.api;
       if (struct.isSetPoolName()) {
         optionals.set(15);
       }
-      oprot.writeBitSet(optionals, 16);
+      if (struct.isSetNextTxnId()) {
+        optionals.set(16);
+      }
+      if (struct.isSetTxnId()) {
+        optionals.set(17);
+      }
+      if (struct.isSetCommitTime()) {
+        optionals.set(18);
+      }
+      if (struct.isSetHightestWriteId()) {
+        optionals.set(19);
+      }
+      oprot.writeBitSet(optionals, 20);
       if (struct.isSetPartitionname()) {
         oprot.writeString(struct.partitionname);
       }
@@ -2252,6 +2616,18 @@ package org.apache.hadoop.hive.metastore.api;
       if (struct.isSetPoolName()) {
         oprot.writeString(struct.poolName);
       }
+      if (struct.isSetNextTxnId()) {
+        oprot.writeI64(struct.nextTxnId);
+      }
+      if (struct.isSetTxnId()) {
+        oprot.writeI64(struct.txnId);
+      }
+      if (struct.isSetCommitTime()) {
+        oprot.writeI64(struct.commitTime);
+      }
+      if (struct.isSetHightestWriteId()) {
+        oprot.writeI64(struct.hightestWriteId);
+      }
     }
 
     @Override
@@ -2265,7 +2641,7 @@ package org.apache.hadoop.hive.metastore.api;
       struct.setTypeIsSet(true);
       struct.state = iprot.readString();
       struct.setStateIsSet(true);
-      java.util.BitSet incoming = iprot.readBitSet(16);
+      java.util.BitSet incoming = iprot.readBitSet(20);
       if (incoming.get(0)) {
         struct.partitionname = iprot.readString();
         struct.setPartitionnameIsSet(true);
@@ -2330,6 +2706,22 @@ package org.apache.hadoop.hive.metastore.api;
         struct.poolName = iprot.readString();
         struct.setPoolNameIsSet(true);
       }
+      if (incoming.get(16)) {
+        struct.nextTxnId = iprot.readI64();
+        struct.setNextTxnIdIsSet(true);
+      }
+      if (incoming.get(17)) {
+        struct.txnId = iprot.readI64();
+        struct.setTxnIdIsSet(true);
+      }
+      if (incoming.get(18)) {
+        struct.commitTime = iprot.readI64();
+        struct.setCommitTimeIsSet(true);
+      }
+      if (incoming.get(19)) {
+        struct.hightestWriteId = iprot.readI64();
+        struct.setHightestWriteIdIsSet(true);
+      }
     }
   }
 
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactResponseElement.php
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactResponseElement.php
index fd2ef1d47c0..a6a8f0967d9 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactResponseElement.php
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ShowCompactResponseElement.php
@@ -122,6 +122,26 @@ class ShowCompactResponseElement
             'isRequired' => false,
             'type' => TType::STRING,
         ),
+        21 => array(
+            'var' => 'nextTxnId',
+            'isRequired' => false,
+            'type' => TType::I64,
+        ),
+        22 => array(
+            'var' => 'txnId',
+            'isRequired' => false,
+            'type' => TType::I64,
+        ),
+        23 => array(
+            'var' => 'commitTime',
+            'isRequired' => false,
+            'type' => TType::I64,
+        ),
+        24 => array(
+            'var' => 'hightestWriteId',
+            'isRequired' => false,
+            'type' => TType::I64,
+        ),
     );
 
     /**
@@ -204,6 +224,22 @@ class ShowCompactResponseElement
      * @var string
      */
     public $poolName = null;
+    /**
+     * @var int
+     */
+    public $nextTxnId = null;
+    /**
+     * @var int
+     */
+    public $txnId = null;
+    /**
+     * @var int
+     */
+    public $commitTime = null;
+    /**
+     * @var int
+     */
+    public $hightestWriteId = null;
 
     public function __construct($vals = null)
     {
@@ -268,6 +304,18 @@ class ShowCompactResponseElement
             if (isset($vals['poolName'])) {
                 $this->poolName = $vals['poolName'];
             }
+            if (isset($vals['nextTxnId'])) {
+                $this->nextTxnId = $vals['nextTxnId'];
+            }
+            if (isset($vals['txnId'])) {
+                $this->txnId = $vals['txnId'];
+            }
+            if (isset($vals['commitTime'])) {
+                $this->commitTime = $vals['commitTime'];
+            }
+            if (isset($vals['hightestWriteId'])) {
+                $this->hightestWriteId = $vals['hightestWriteId'];
+            }
         }
     }
 
@@ -430,6 +478,34 @@ class ShowCompactResponseElement
                         $xfer += $input->skip($ftype);
                     }
                     break;
+                case 21:
+                    if ($ftype == TType::I64) {
+                        $xfer += $input->readI64($this->nextTxnId);
+                    } else {
+                        $xfer += $input->skip($ftype);
+                    }
+                    break;
+                case 22:
+                    if ($ftype == TType::I64) {
+                        $xfer += $input->readI64($this->txnId);
+                    } else {
+                        $xfer += $input->skip($ftype);
+                    }
+                    break;
+                case 23:
+                    if ($ftype == TType::I64) {
+                        $xfer += $input->readI64($this->commitTime);
+                    } else {
+                        $xfer += $input->skip($ftype);
+                    }
+                    break;
+                case 24:
+                    if ($ftype == TType::I64) {
+                        $xfer += $input->readI64($this->hightestWriteId);
+                    } else {
+                        $xfer += $input->skip($ftype);
+                    }
+                    break;
                 default:
                     $xfer += $input->skip($ftype);
                     break;
@@ -544,6 +620,26 @@ class ShowCompactResponseElement
             $xfer += $output->writeString($this->poolName);
             $xfer += $output->writeFieldEnd();
         }
+        if ($this->nextTxnId !== null) {
+            $xfer += $output->writeFieldBegin('nextTxnId', TType::I64, 21);
+            $xfer += $output->writeI64($this->nextTxnId);
+            $xfer += $output->writeFieldEnd();
+        }
+        if ($this->txnId !== null) {
+            $xfer += $output->writeFieldBegin('txnId', TType::I64, 22);
+            $xfer += $output->writeI64($this->txnId);
+            $xfer += $output->writeFieldEnd();
+        }
+        if ($this->commitTime !== null) {
+            $xfer += $output->writeFieldBegin('commitTime', TType::I64, 23);
+            $xfer += $output->writeI64($this->commitTime);
+            $xfer += $output->writeFieldEnd();
+        }
+        if ($this->hightestWriteId !== null) {
+            $xfer += $output->writeFieldBegin('hightestWriteId', TType::I64, 
24);
+            $xfer += $output->writeI64($this->hightestWriteId);
+            $xfer += $output->writeFieldEnd();
+        }
         $xfer += $output->writeFieldStop();
         $xfer += $output->writeStructEnd();
         return $xfer;
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 5a73be1a3bd..dd4990d9d17 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -16176,11 +16176,15 @@ class ShowCompactResponseElement(object):
      - initiatorVersion
      - cleanerStart
      - poolName
+     - nextTxnId
+     - txnId
+     - commitTime
+     - hightestWriteId
 
     """
 
 
-    def __init__(self, dbname=None, tablename=None, partitionname=None, 
type=None, state=None, workerid=None, start=None, runAs=None, 
hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId="None", id=None, 
errorMessage=None, enqueueTime=None, workerVersion=None, initiatorId=None, 
initiatorVersion=None, cleanerStart=None, poolName=None,):
+    def __init__(self, dbname=None, tablename=None, partitionname=None, 
type=None, state=None, workerid=None, start=None, runAs=None, 
hightestTxnId=None, metaInfo=None, endTime=None, hadoopJobId="None", id=None, 
errorMessage=None, enqueueTime=None, workerVersion=None, initiatorId=None, 
initiatorVersion=None, cleanerStart=None, poolName=None, nextTxnId=None, 
txnId=None, commitTime=None, hightestWriteId=None,):
         self.dbname = dbname
         self.tablename = tablename
         self.partitionname = partitionname
@@ -16201,6 +16205,10 @@ class ShowCompactResponseElement(object):
         self.initiatorVersion = initiatorVersion
         self.cleanerStart = cleanerStart
         self.poolName = poolName
+        self.nextTxnId = nextTxnId
+        self.txnId = txnId
+        self.commitTime = commitTime
+        self.hightestWriteId = hightestWriteId
 
     def read(self, iprot):
         if iprot._fast_decode is not None and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None:
@@ -16311,6 +16319,26 @@ class ShowCompactResponseElement(object):
                     self.poolName = iprot.readString().decode('utf-8', 
errors='replace') if sys.version_info[0] == 2 else iprot.readString()
                 else:
                     iprot.skip(ftype)
+            elif fid == 21:
+                if ftype == TType.I64:
+                    self.nextTxnId = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 22:
+                if ftype == TType.I64:
+                    self.txnId = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 23:
+                if ftype == TType.I64:
+                    self.commitTime = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
+            elif fid == 24:
+                if ftype == TType.I64:
+                    self.hightestWriteId = iprot.readI64()
+                else:
+                    iprot.skip(ftype)
             else:
                 iprot.skip(ftype)
             iprot.readFieldEnd()
@@ -16401,6 +16429,22 @@ class ShowCompactResponseElement(object):
             oprot.writeFieldBegin('poolName', TType.STRING, 20)
             oprot.writeString(self.poolName.encode('utf-8') if 
sys.version_info[0] == 2 else self.poolName)
             oprot.writeFieldEnd()
+        if self.nextTxnId is not None:
+            oprot.writeFieldBegin('nextTxnId', TType.I64, 21)
+            oprot.writeI64(self.nextTxnId)
+            oprot.writeFieldEnd()
+        if self.txnId is not None:
+            oprot.writeFieldBegin('txnId', TType.I64, 22)
+            oprot.writeI64(self.txnId)
+            oprot.writeFieldEnd()
+        if self.commitTime is not None:
+            oprot.writeFieldBegin('commitTime', TType.I64, 23)
+            oprot.writeI64(self.commitTime)
+            oprot.writeFieldEnd()
+        if self.hightestWriteId is not None:
+            oprot.writeFieldBegin('hightestWriteId', TType.I64, 24)
+            oprot.writeI64(self.hightestWriteId)
+            oprot.writeFieldEnd()
         oprot.writeFieldStop()
         oprot.writeStructEnd()
 
@@ -31092,6 +31136,10 @@ ShowCompactResponseElement.thrift_spec = (
     (18, TType.STRING, 'initiatorVersion', 'UTF8', None, ),  # 18
     (19, TType.I64, 'cleanerStart', None, None, ),  # 19
     (20, TType.STRING, 'poolName', 'UTF8', None, ),  # 20
+    (21, TType.I64, 'nextTxnId', None, None, ),  # 21
+    (22, TType.I64, 'txnId', None, None, ),  # 22
+    (23, TType.I64, 'commitTime', None, None, ),  # 23
+    (24, TType.I64, 'hightestWriteId', None, None, ),  # 24
 )
 all_structs.append(ShowCompactResponse)
 ShowCompactResponse.thrift_spec = (
diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 686fabaa695..429abfd0617 100644
--- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -4695,6 +4695,10 @@ class ShowCompactResponseElement
   INITIATORVERSION = 18
   CLEANERSTART = 19
   POOLNAME = 20
+  NEXTTXNID = 21
+  TXNID = 22
+  COMMITTIME = 23
+  HIGHTESTWRITEID = 24
 
   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
@@ -4716,7 +4720,11 @@ class ShowCompactResponseElement
     INITIATORID => {:type => ::Thrift::Types::STRING, :name => 'initiatorId', 
:optional => true},
     INITIATORVERSION => {:type => ::Thrift::Types::STRING, :name => 
'initiatorVersion', :optional => true},
     CLEANERSTART => {:type => ::Thrift::Types::I64, :name => 'cleanerStart', 
:optional => true},
-    POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', 
:optional => true}
+    POOLNAME => {:type => ::Thrift::Types::STRING, :name => 'poolName', 
:optional => true},
+    NEXTTXNID => {:type => ::Thrift::Types::I64, :name => 'nextTxnId', 
:optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :optional => 
true},
+    COMMITTIME => {:type => ::Thrift::Types::I64, :name => 'commitTime', 
:optional => true},
+    HIGHTESTWRITEID => {:type => ::Thrift::Types::I64, :name => 
'hightestWriteId', :optional => true}
   }
 
   def struct_fields; FIELDS; end
diff --git 
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnQueries.java
 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnQueries.java
new file mode 100644
index 00000000000..9cb9285f88f
--- /dev/null
+++ 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnQueries.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn;
+
+public class TxnQueries {
+  public static final String SHOW_COMPACTION_ORDERBY_CLAUSE = "  ORDER BY  
CASE " +
+    " WHEN CC_END > CC_START and CC_END > CC_COMMIT_TIME " +
+    " THEN CC_END " +
+    " WHEN CC_START > CC_COMMIT_TIME " +
+    " THEN CC_START " +
+    " ELSE CC_COMMIT_TIME " +
+    " END desc ," +
+    " CC_ENQUEUE_TIME asc";
+
+  public static final String SHOW_COMPACTION_QUERY = "SELECT XX.* FROM ( 
SELECT " +
+    " \"CQ_DATABASE\" AS CC_DATABASE, \"CQ_TABLE\" AS CC_TABLE, 
\"CQ_PARTITION\" AS CC_PARTITION, " +
+    " \"CQ_STATE\" AS CC_STATE, \"CQ_TYPE\" AS CC_TYPE, \"CQ_WORKER_ID\" AS 
CC_WORKER_ID, " +
+    " \"CQ_START\" AS CC_START, -1 \"CC_END\", \"CQ_RUN_AS\" AS CC_RUN_AS, " +
+    " \"CQ_HADOOP_JOB_ID\" AS CC_HADOOP_JOB_ID, \"CQ_ID\" AS CC_ID, 
\"CQ_ERROR_MESSAGE\" AS CC_ERROR_MESSAGE, " +
+    " \"CQ_ENQUEUE_TIME\" AS CC_ENQUEUE_TIME, \"CQ_WORKER_VERSION\" AS 
CC_WORKER_VERSION, " +
+    " \"CQ_INITIATOR_ID\" AS CC_INITIATOR_ID, \"CQ_INITIATOR_VERSION\" AS 
CC_INITIATOR_VERSION, " +
+    " \"CQ_CLEANER_START\" AS CC_CLEANER_START, \"CQ_POOL_NAME\" AS 
CC_POOL_NAME, \"CQ_TXN_ID\" AS CC_TXN_ID, " +
+    " \"CQ_NEXT_TXN_ID\" AS CC_NEXT_TXN_ID, \"CQ_COMMIT_TIME\" AS 
CC_COMMIT_TIME, " +
+    " \"CQ_HIGHEST_WRITE_ID\" AS CC_HIGHEST_WRITE_ID " +
+    "FROM " +
+    " \"COMPACTION_QUEUE\" " +
+    "UNION ALL " +
+    "SELECT " +
+    " \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", 
\"CC_TYPE\", \"CC_WORKER_ID\", " +
+    " \"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", 
\"CC_ID\", \"CC_ERROR_MESSAGE\", " +
+    " \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", 
\"CC_INITIATOR_VERSION\", " +
+    "  -1 , \"CC_POOL_NAME\", \"CC_TXN_ID\", \"CC_NEXT_TXN_ID\", 
\"CC_COMMIT_TIME\", " +
+    " \"CC_HIGHEST_WRITE_ID\"" +
+    "FROM " +
+    " \"COMPLETED_COMPACTIONS\" ) XX ";
+}
diff --git 
a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift 
b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 880865d4ec8..41b0ef57b4d 100644
--- 
a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ 
b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -1367,8 +1367,13 @@ struct ShowCompactResponseElement {
     16: optional string workerVersion,
     17: optional string initiatorId,
     18: optional string initiatorVersion,
-    19: optional i64 cleanerStart
-    20: optional string poolName
+    19: optional i64 cleanerStart,
+    20: optional string poolName,
+    21: optional i64 nextTxnId,
+    22: optional i64 txnId,
+    23: optional i64 commitTime,
+    24: optional i64 hightestWriteId
+
 }
 
 struct ShowCompactResponse {
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 7929e9dc1d1..7023b2f8e3f 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -167,7 +167,10 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Splitter;
 
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+import static org.apache.commons.lang3.StringUtils.isBlank;
 import static org.apache.commons.lang3.StringUtils.repeat;
+import static org.apache.commons.lang3.StringUtils.EMPTY;
 import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
 import static 
org.apache.hadoop.hive.metastore.txn.TxnUtils.executeQueriesInBatchNoCount;
 import static 
org.apache.hadoop.hive.metastore.txn.TxnUtils.executeQueriesInBatch;
@@ -3905,103 +3908,68 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
   @RetrySemantics.ReadOnly
   @SuppressWarnings("squid:S2095")
   public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws 
MetaException {
-    ShowCompactResponse response = new ShowCompactResponse(new ArrayList<>());
-    Connection dbConn = null;
-    PreparedStatement stmt = null;
     try {
-      try {
-        StringBuilder sb =new StringBuilder(2048);
-        sb.append(
-            "SELECT " +
-            "  \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", \"CQ_STATE\", 
\"CQ_TYPE\", \"CQ_WORKER_ID\", " +
-            "  \"CQ_START\", -1 \"CC_END\", \"CQ_RUN_AS\", 
\"CQ_HADOOP_JOB_ID\", \"CQ_ID\", \"CQ_ERROR_MESSAGE\", " +
-            "  \"CQ_ENQUEUE_TIME\", \"CQ_WORKER_VERSION\", 
\"CQ_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\", " +
-            "  \"CQ_CLEANER_START\", \"CQ_POOL_NAME\"" +
-            "FROM " +
-                "  \"COMPACTION_QUEUE\" "
-        );
-        if 
(org.apache.commons.lang3.StringUtils.isNotBlank(rqst.getPoolName())) {
-          sb.append("WHERE \"CQ_POOL_NAME\" = ? ");
-        }
-        sb.append(
-            "UNION ALL " +
-            "SELECT " +
-            "  \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", 
\"CC_TYPE\", \"CC_WORKER_ID\", " +
-            "  \"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", 
\"CC_ID\", \"CC_ERROR_MESSAGE\", " +
-            "  \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", 
\"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", " +
-            "  -1 , \"CC_POOL_NAME\"" +
-            "FROM " +
-            "  \"COMPLETED_COMPACTIONS\" "
-        );
-        if 
(org.apache.commons.lang3.StringUtils.isNotBlank(rqst.getPoolName())) {
-          sb.append("WHERE \"CC_POOL_NAME\" = ?");
-        }
-        //todo: sort by cq_id?
-        //what I want is order by cc_end desc, cc_start asc (but derby has a 
bug https://issues.apache.org/jira/browse/DERBY-6013)
-        //to sort so that currently running jobs are at the end of the list 
(bottom of screen)
-        //and currently running ones are in sorted by start time
-        //w/o order by likely currently running compactions will be first (LHS 
of Union)
-
-        String query = sb.toString();
-        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
-        stmt = dbConn.prepareStatement(query);
-        if 
(org.apache.commons.lang3.StringUtils.isNotBlank(rqst.getPoolName())) {
-          stmt.setString(1, rqst.getPoolName());
-          stmt.setString(2, rqst.getPoolName());
-        }
-
-        LOG.debug("Going to execute query <{}>", query);
-        ResultSet rs = stmt.executeQuery();
-        while (rs.next()) {
-          ShowCompactResponseElement e = new ShowCompactResponseElement();
-          e.setDbname(rs.getString(1));
-          e.setTablename(rs.getString(2));
-          e.setPartitionname(rs.getString(3));
-          e.setState(compactorStateToResponse(rs.getString(4).charAt(0)));
-          try {
-            e.setType(dbCompactionType2ThriftType(rs.getString(5).charAt(0)));
-          } catch (MetaException ex) {
-            //do nothing to handle RU/D if we add another status
-          }
-          e.setWorkerid(rs.getString(6));
-          long start = rs.getLong(7);
-          if (!rs.wasNull()) {
-            e.setStart(start);
-          }
-          long endTime = rs.getLong(8);
-          if (endTime != -1) {
-            e.setEndTime(endTime);
-          }
-          e.setRunAs(rs.getString(9));
-          e.setHadoopJobId(rs.getString(10));
-          e.setId(rs.getLong(11));
-          e.setErrorMessage(rs.getString(12));
-          long enqueueTime = rs.getLong(13);
-          if (!rs.wasNull()) {
-            e.setEnqueueTime(enqueueTime);
-          }
-          e.setWorkerVersion(rs.getString(14));
-          e.setInitiatorId(rs.getString(15));
-          e.setInitiatorVersion(rs.getString(16));
-          long cleanerStart = rs.getLong(17);
-          if (!rs.wasNull() && (cleanerStart != -1)) {
-            e.setCleanerStart(cleanerStart);
-          }
-          String poolName = rs.getString(18);
-          if (org.apache.commons.lang3.StringUtils.isBlank(poolName)) {
-            e.setPoolName(DEFAULT_POOL_NAME);
-          } else {
-            e.setPoolName(poolName);
+      ShowCompactResponse response = new ShowCompactResponse(new 
ArrayList<>());
+      StringBuilder query = new 
StringBuilder(TxnQueries.SHOW_COMPACTION_QUERY).
+        append(getShowCompactionFilterClause(rqst)).
+        append(TxnQueries.SHOW_COMPACTION_ORDERBY_CLAUSE);
+      try (Connection dbConn = 
getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+        PreparedStatement stmt = 
sqlGenerator.prepareStmtWithParameters(dbConn, query.toString(),
+          getShowCompactionQueryParamList(rqst))) {
+          LOG.debug("Going to execute query <{}>", query);
+          try (ResultSet rs = stmt.executeQuery()) {
+            while (rs.next()) {
+              ShowCompactResponseElement e = new ShowCompactResponseElement();
+              e.setDbname(rs.getString(1));
+              e.setTablename(rs.getString(2));
+              e.setPartitionname(rs.getString(3));
+              e.setState(compactorStateToResponse(rs.getString(4).charAt(0)));
+              try {
+                
e.setType(dbCompactionType2ThriftType(rs.getString(5).charAt(0)));
+              } catch (MetaException ex) {
+                //do nothing to handle RU/D if we add another status
+              }
+              e.setWorkerid(rs.getString(6));
+              long start = rs.getLong(7);
+              if (!rs.wasNull()) {
+                e.setStart(start);
+              }
+              long endTime = rs.getLong(8);
+              if (endTime != -1) {
+                e.setEndTime(endTime);
+              }
+              e.setRunAs(rs.getString(9));
+              e.setHadoopJobId(rs.getString(10));
+              e.setId(rs.getLong(11));
+              e.setErrorMessage(rs.getString(12));
+              long enqueueTime = rs.getLong(13);
+              if (!rs.wasNull()) {
+                e.setEnqueueTime(enqueueTime);
+              }
+              e.setWorkerVersion(rs.getString(14));
+              e.setInitiatorId(rs.getString(15));
+              e.setInitiatorVersion(rs.getString(16));
+              long cleanerStart = rs.getLong(17);
+              if (!rs.wasNull() && (cleanerStart != -1)) {
+                e.setCleanerStart(cleanerStart);
+              }
+              String poolName = rs.getString(18);
+              if (isBlank(poolName)) {
+                e.setPoolName(DEFAULT_POOL_NAME);
+              } else {
+                e.setPoolName(poolName);
+              }
+              e.setTxnId(rs.getLong(19));
+              e.setNextTxnId(rs.getLong(20));
+              e.setCommitTime(rs.getLong(21));
+              e.setHightestWriteId(rs.getLong(22));
+              response.addToCompacts(e);
+            }
           }
-          response.addToCompacts(e);
-        }
       } catch (SQLException e) {
         checkRetryable(e, "showCompact(" + rqst + ")");
         throw new MetaException("Unable to select from transaction database " +
             StringUtils.stringifyException(e));
-      } finally {
-        closeStmt(stmt);
-        closeDbConn(dbConn);
       }
       return response;
     } catch (RetryException e) {
@@ -4009,6 +3977,24 @@ abstract class TxnHandler implements TxnStore, 
TxnStore.MutexAPI {
     }
   }
 
+  private List<String> getShowCompactionQueryParamList(ShowCompactRequest 
request) throws MetaException {
+    String poolName = request.getPoolName();
+    List<String> params = new ArrayList<>();
+    if (isNotBlank(poolName)) {
+      params.add(poolName);
+    }
+    return params;
+  }
+
+  private String getShowCompactionFilterClause(ShowCompactRequest request) {
+    StringBuilder filter = new StringBuilder();
+    String poolName = request.getPoolName();
+    if (isNotBlank(poolName)) {
+      filter.append("\"CC_POOL_NAME\"=?");
+    }
+    return filter.length() > 0 ? " WHERE " + filter.toString() : EMPTY;
+  }
+
   /**
    * We assume this is only called by metadata cache server to know if there 
are new base/delta files should be read.
    * The query filters compactions by state and only returns SUCCEEDED or 
READY_FOR_CLEANING compactions because

Reply via email to