deniskuzZ commented on code in PR #4855:
URL: https://github.com/apache/hive/pull/4855#discussion_r1414163930


##########
ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java:
##########
@@ -499,67 +368,24 @@ protected Boolean findNextCompactionAndExecute(boolean 
collectGenericStats, bool
       }
     }
 
-    if (computeStats) {
-       statsUpdater.gatherStats(ci, conf, runJobAsSelf(ci.runAs) ? ci.runAs : 
table.getOwner(),
-              CompactorUtil.getCompactorJobQueueName(conf, ci, table), msc);
-    }
-    return true;
+    return compactionResult;
   }
 
-  /**
-   * Just AcidUtils.getAcidState, but with impersonation if needed.
-   */
-  private AcidDirectory getAcidStateForWorker(CompactionInfo ci, 
StorageDescriptor sd,
-          ValidCompactorWriteIdList tblValidWriteIds) throws IOException, 
InterruptedException {
-    if (runJobAsSelf(ci.runAs)) {
-      return AcidUtils.getAcidState(null, new Path(sd.getLocation()), conf,
-              tblValidWriteIds, Ref.from(false), true);
-    }
+  protected void computeStats(CompactionInfo ci, Table table, boolean 
computeStats) {
 
-    UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs, 
UserGroupInformation.getLoginUser());
-    try {
-      return ugi.doAs((PrivilegedExceptionAction<AcidDirectory>) () ->
-              AcidUtils.getAcidState(null, new Path(sd.getLocation()), conf, 
tblValidWriteIds,
-                      Ref.from(false), true));
-    } finally {
-      try {
-        FileSystem.closeAllForUGI(ugi);
-      } catch (IOException exception) {
-        LOG.error("Could not clean up file-system handles for UGI: " + ugi + " 
for " + ci.getFullPartitionName(),
-                exception);
-      }
-    }
-  }
-
-  private void cleanupResultDirs(StorageDescriptor sd, ValidWriteIdList 
writeIds, CompactionType ctype, AcidDirectory dir) {
-    // result directory for compactor to write new files
-    Path resultDir = QueryCompactor.Util.getCompactionResultDir(sd, writeIds, 
conf,
-        ctype == CompactionType.MAJOR, false, false, dir);
-    LOG.info("Deleting result directories created by the compactor:\n");
-    try {
-      FileSystem fs = resultDir.getFileSystem(conf);
-      LOG.info(resultDir.toString());
-      fs.delete(resultDir, true);
-
-      if (ctype == CompactionType.MINOR) {
-        Path deleteDeltaDir = QueryCompactor.Util.getCompactionResultDir(sd, 
writeIds, conf,
-            false, true, false, dir);
-
-        LOG.info(deleteDeltaDir.toString());
-        fs.delete(deleteDeltaDir, true);
-      }
-    } catch (IOException ex) {
-      LOG.error("Caught exception while cleaning result directories:", ex);
+    if (computeStats) {
+      statsUpdater.gatherStats(ci, conf, runJobAsSelf(ci.runAs) ? ci.runAs : 
table.getOwner(),
+          CompactorUtil.getCompactorJobQueueName(conf, ci, table), msc);
     }
   }
 
-  private void failCompactionIfSetForTest() {
+  protected void failCompactionIfSetForTest() {
     if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && 
conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION)) {
       throw new 
RuntimeException(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION.name() + "=true");
     }
   }
 
-  private void markFailed(CompactionInfo ci, String errorMessage) {
+  protected void markFailed(CompactionInfo ci, String errorMessage) {

Review Comment:
   This should stay private; the executor should return the exception instead.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to