[ https://issues.apache.org/jira/browse/HIVE-25115?focusedWorklogId=691177&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-691177 ]

ASF GitHub Bot logged work on HIVE-25115:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 06/Dec/21 16:26
            Start Date: 06/Dec/21 16:26
    Worklog Time Spent: 10m 
      Work Description: klcopp commented on a change in pull request #2825:
URL: https://github.com/apache/hive/pull/2825#discussion_r763165482



##########
File path: itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
##########
@@ -1621,6 +1621,7 @@ public void mmTableOpenWriteId() throws Exception {
     verifyFooBarResult(tblName, 2);
     verifyHasBase(table.getSd(), fs, "base_0000005_v0000016");
     runCleaner(conf);
+    runCleaner(conf);

Review comment:
       Wherever the cleaner runs twice, I think you should add a comment explaining why.
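
Editor's note: a self-contained toy model (not Hive code) of why the test calls runCleaner(conf) twice, assuming, as the query change below suggests, that the patched cleaner only picks up the entry with the lowest CQ_HIGHEST_WRITE_ID per table/partition on each cycle:

import java.util.ArrayDeque;
import java.util.Queue;

// Toy model only: cleanerCycle() stands in for one runCleaner(conf) pass,
// and it consumes a single "ready for cleaning" entry, mirroring the
// per-table/partition MIN(CQ_HIGHEST_WRITE_ID) selection in the new query.
public class DoubleCleanerSketch {
  private static final Queue<String> READY = new ArrayDeque<>();

  static void cleanerCycle() {
    String entry = READY.poll();
    if (entry != null) {
      System.out.println("cleaned: " + entry);
    }
  }

  public static void main(String[] args) {
    READY.add("compaction #1 (CQ_HIGHEST_WRITE_ID = 5)");
    READY.add("compaction #2 (CQ_HIGHEST_WRITE_ID = 7)");
    cleanerCycle(); // first runCleaner(conf): handles only the older entry
    cleanerCycle(); // second runCleaner(conf): drains the remaining entry
  }
}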

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
##########
@@ -323,57 +323,62 @@ public void markCompacted(CompactionInfo info) throws MetaException {
   @Override
   @RetrySemantics.ReadOnly
   public List<CompactionInfo> findReadyToClean(long minOpenTxnWaterMark, long retentionTime) throws MetaException {
-    Connection dbConn = null;
-    List<CompactionInfo> rc = new ArrayList<>();
-
-    Statement stmt = null;
-    ResultSet rs = null;
     try {
-      try {
-        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
-        stmt = dbConn.createStatement();
+      List<CompactionInfo> rc = new ArrayList<>();
+      
+      try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+           Statement stmt = dbConn.createStatement()) {
         /*
          * By filtering on minOpenTxnWaterMark, we only clean up after every transaction that could see
          * the uncompacted deltas has committed. This way the cleaner can clean up everything that was made obsolete by this compaction.
          */
-        String s = "SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", 
\"CQ_PARTITION\", "
-                + "\"CQ_TYPE\", \"CQ_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\" FROM 
\"COMPACTION_QUEUE\" WHERE \"CQ_STATE\" = '"
-                + READY_FOR_CLEANING + "'";
+        String whereClause = " WHERE \"CQ_STATE\" = '" + READY_FOR_CLEANING + 
"'";
         if (minOpenTxnWaterMark > 0) {
-          s = s + " AND (\"CQ_NEXT_TXN_ID\" <= " + minOpenTxnWaterMark + " OR 
\"CQ_NEXT_TXN_ID\" IS NULL)";
+          whereClause += " AND (\"CQ_NEXT_TXN_ID\" <= " + minOpenTxnWaterMark 
+ " OR \"CQ_NEXT_TXN_ID\" IS NULL)";
         }
         if (retentionTime > 0) {
-          s = s + " AND \"CQ_COMMIT_TIME\" < (" + getEpochFn(dbProduct) + " - 
" + retentionTime + ")";
+          whereClause += " AND \"CQ_COMMIT_TIME\" < (" + getEpochFn(dbProduct) 
+ " - " + retentionTime + ")";
         }
-        s = s + " ORDER BY \"CQ_HIGHEST_WRITE_ID\", \"CQ_ID\"";
+        String s = "SELECT \"CQ_ID\", \"cq1\".\"CQ_DATABASE\", 
\"cq1\".\"CQ_TABLE\", \"cq1\".\"CQ_PARTITION\"," +
+          "   \"CQ_TYPE\", \"CQ_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\", 
\"CQ_TBLPROPERTIES\"" +
+          "  FROM \"COMPACTION_QUEUE\" \"cq1\" " +
+          "INNER JOIN (" +
+          "  SELECT MIN(\"CQ_HIGHEST_WRITE_ID\") \"WRITE_ID\", 
\"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\"" +
+          "  FROM \"COMPACTION_QUEUE\"" 
+          + whereClause + 
+          "  GROUP BY \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\") \"cq2\" 
" +
+          "ON \"cq1\".\"CQ_DATABASE\" = \"cq2\".\"CQ_DATABASE\""+
+          "  AND \"cq1\".\"CQ_TABLE\" = \"cq2\".\"CQ_TABLE\""+
+          "  AND (\"cq1\".\"CQ_PARTITION\" = \"cq2\".\"CQ_PARTITION\"" +
+          "    OR \"cq1\".\"CQ_PARTITION\" IS NULL AND 
\"cq2\".\"CQ_PARTITION\" IS NULL)"
+          + whereClause + 
+          "  AND \"CQ_HIGHEST_WRITE_ID\" = \"WRITE_ID\"" +
+          "  ORDER BY \"CQ_ID\"";
         LOG.debug("Going to execute query <" + s + ">");
-        rs = stmt.executeQuery(s);
 
-        while (rs.next()) {
-          CompactionInfo info = new CompactionInfo();
-          info.id = rs.getLong(1);
-          info.dbname = rs.getString(2);
-          info.tableName = rs.getString(3);
-          info.partName = rs.getString(4);
-          info.type = dbCompactionType2ThriftType(rs.getString(5).charAt(0));
-          info.runAs = rs.getString(6);
-          info.highestWriteId = rs.getLong(7);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Found ready to clean: " + info.toString());
+        try (ResultSet rs = stmt.executeQuery(s)) {
+          while (rs.next()) {
+            CompactionInfo info = new CompactionInfo();
+            info.id = rs.getLong(1);
+            info.dbname = rs.getString(2);
+            info.tableName = rs.getString(3);
+            info.partName = rs.getString(4);
+            info.type = dbCompactionType2ThriftType(rs.getString(5).charAt(0));
+            info.runAs = rs.getString(6);
+            info.highestWriteId = rs.getLong(7);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Found ready to clean: " + info.toString());
+            }
+            rc.add(info);
           }
-          rc.add(info);
         }
         return rc;
       } catch (SQLException e) {
         LOG.error("Unable to select next element for cleaning, " + 
e.getMessage());
-        LOG.debug("Going to rollback");
-        rollbackDBConn(dbConn);

Review comment:
       Is this covered by the close() method?
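
Editor's note: a minimal, self-contained sketch (not the patch itself) of the try-with-resources semantics in question; the JDBC URL is a hypothetical placeholder. Per the Connection.close() javadoc, closing a connection with an active transaction is implementation-defined, which is why the removal of the explicit rollback is worth asking about:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class CloseSemanticsSketch {
  // jdbcUrl is a placeholder, e.g. an in-memory test database.
  static void readOnlyQuery(String jdbcUrl) throws SQLException {
    // try-with-resources closes stmt, then dbConn, on both the normal and
    // the exceptional path, so nothing leaks even without a finally block.
    try (Connection dbConn = DriverManager.getConnection(jdbcUrl);
         Statement stmt = dbConn.createStatement()) {
      dbConn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
      stmt.execute("SELECT 1");
    }
    // Caveat: per the JDBC spec, close() with an active transaction is
    // implementation-defined (some drivers roll back, some commit), so
    // close() is not a guaranteed substitute for an explicit rollback.
  }
}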

##########
File path: standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
##########
@@ -323,57 +323,62 @@ public void markCompacted(CompactionInfo info) throws MetaException {
   @Override
   @RetrySemantics.ReadOnly
   public List<CompactionInfo> findReadyToClean(long minOpenTxnWaterMark, long retentionTime) throws MetaException {
-    Connection dbConn = null;
-    List<CompactionInfo> rc = new ArrayList<>();
-
-    Statement stmt = null;
-    ResultSet rs = null;
     try {
-      try {
-        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
-        stmt = dbConn.createStatement();
+      List<CompactionInfo> rc = new ArrayList<>();
+      
+      try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+           Statement stmt = dbConn.createStatement()) {
         /*
          * By filtering on minOpenTxnWaterMark, we only clean up after every transaction that could see
          * the uncompacted deltas has committed. This way the cleaner can clean up everything that was made obsolete by this compaction.
          */
-        String s = "SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", 
\"CQ_PARTITION\", "
-                + "\"CQ_TYPE\", \"CQ_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\" FROM 
\"COMPACTION_QUEUE\" WHERE \"CQ_STATE\" = '"
-                + READY_FOR_CLEANING + "'";
+        String whereClause = " WHERE \"CQ_STATE\" = '" + READY_FOR_CLEANING + 
"'";
         if (minOpenTxnWaterMark > 0) {
-          s = s + " AND (\"CQ_NEXT_TXN_ID\" <= " + minOpenTxnWaterMark + " OR 
\"CQ_NEXT_TXN_ID\" IS NULL)";
+          whereClause += " AND (\"CQ_NEXT_TXN_ID\" <= " + minOpenTxnWaterMark 
+ " OR \"CQ_NEXT_TXN_ID\" IS NULL)";
         }
         if (retentionTime > 0) {
-          s = s + " AND \"CQ_COMMIT_TIME\" < (" + getEpochFn(dbProduct) + " - 
" + retentionTime + ")";
+          whereClause += " AND \"CQ_COMMIT_TIME\" < (" + getEpochFn(dbProduct) 
+ " - " + retentionTime + ")";
         }
-        s = s + " ORDER BY \"CQ_HIGHEST_WRITE_ID\", \"CQ_ID\"";
+        String s = "SELECT \"CQ_ID\", \"cq1\".\"CQ_DATABASE\", 
\"cq1\".\"CQ_TABLE\", \"cq1\".\"CQ_PARTITION\"," +
+          "   \"CQ_TYPE\", \"CQ_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\", 
\"CQ_TBLPROPERTIES\"" +
+          "  FROM \"COMPACTION_QUEUE\" \"cq1\" " +
+          "INNER JOIN (" +

Review comment:
       Sorry, why is this join needed? Why not just the MIN(CQ_HIGHEST_WRITE_ID) + GROUP BY?
   Could you also write a short comment about what this query is doing and why?
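
Editor's note, not the author's reply: the likely reason is that standard SQL only allows grouped columns and aggregates in the SELECT list of a GROUP BY query, so a bare MIN + GROUP BY cannot also return CQ_ID, CQ_TYPE, CQ_RUN_AS, and the other per-row fields of the minimal entry; joining back on the group keys (or a window function) is the usual workaround. A sketch, with column names taken from the patch:

public class MinPerGroupSketch {
  // Rejected by standard SQL engines: CQ_ID and CQ_TYPE are neither grouped
  // nor aggregated, so there is no single value to return per group.
  static final String INVALID =
      "SELECT \"CQ_ID\", \"CQ_TYPE\", MIN(\"CQ_HIGHEST_WRITE_ID\")" +
      "  FROM \"COMPACTION_QUEUE\"" +
      " GROUP BY \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\"";
  // Hence the patch's pattern: aggregate in a subquery, then INNER JOIN back
  // to COMPACTION_QUEUE on the group keys plus CQ_HIGHEST_WRITE_ID = WRITE_ID.
}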





Issue Time Tracking
-------------------

    Worklog Id:     (was: 691177)
    Time Spent: 3h 50m  (was: 3h 40m)

> Compaction queue entries may accumulate in "ready for cleaning" state
> ---------------------------------------------------------------------
>
>                 Key: HIVE-25115
>                 URL: https://issues.apache.org/jira/browse/HIVE-25115
>             Project: Hive
>          Issue Type: Improvement
>            Reporter: Karen Coppage
>            Assignee: Denys Kuzmenko
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 3h 50m
>  Remaining Estimate: 0h
>
> If the Cleaner does not delete any files, the compaction queue entry is
> thrown back to the queue and remains in "ready for cleaning" state.
> Problem: if two compactions run on the same table and enter "ready for
> cleaning" state at the same time, only one cleaning will remove obsolete
> files; the other entry will remain in the queue in "ready for cleaning" state.
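
Editor's note: a hedged toy illustration of the accumulation described above; the names are illustrative, not Hive APIs. Cleaning for the newer compaction removes every file the older entry would have removed, so the older entry finds no work and is re-queued indefinitely:

import java.util.ArrayList;
import java.util.List;

public class ReadyForCleaningSketch {
  public static void main(String[] args) {
    // Files made obsolete by both compactions of the same table.
    List<String> obsoleteFiles = new ArrayList<>(List.of("delta_1_5", "delta_6_7"));
    obsoleteFiles.clear(); // cleaning for the newer entry deletes everything
    // The older entry's cleaning now deletes nothing, so the pre-patch
    // handler throws it back to the queue in "ready for cleaning" state.
    boolean stuck = obsoleteFiles.isEmpty();
    System.out.println("older entry re-queued: " + stuck);
  }
}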


