This is an automated email from the ASF dual-hosted git repository.

dmollitor pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 390ad7d  HIVE-23099: Improve Logger for Operation Child Classes (David Mollitor, reviewed by Ashutosh Chauhan)
390ad7d is described below

commit 390ad7d3a0dd40aeee04a17c71695784b38e7094
Author: David Mollitor <david.molli...@cloudera.com>
AuthorDate: Thu May 14 09:50:00 2020 -0400

    HIVE-23099: Improve Logger for Operation Child Classes (David Mollitor, reviewed by Ashutosh Chauhan)
---
 .../cli/operation/GetCrossReferenceOperation.java  | 14 +++++-----
 .../cli/operation/HiveCommandOperation.java        | 22 ++++++++--------
 .../hive/service/cli/operation/Operation.java      | 12 ++++-----
 .../hive/service/cli/operation/SQLOperation.java   | 30 +++++++++++-----------
 4 files changed, 38 insertions(+), 40 deletions(-)

diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
index 37f5b60..398af8a 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java
@@ -116,7 +116,7 @@ public class GetCrossReferenceOperation extends MetadataOperation {
     this.foreignSchemaName = foreignSchema;
     this.foreignTableName = foreignTable;
     this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion(), false);
-    LOG.info("Starting GetCrossReferenceOperation with the following parameters:"
+    log.info("Starting GetCrossReferenceOperation with the following parameters:"
         + " parentCatalogName={}, parentSchemaName={}, parentTableName={}, foreignCatalog={}, "
         + "foreignSchema={}, foreignTable={}", parentCatalogName, parentSchemaName,
         parentTableName, foreignCatalog, foreignSchema, foreignTable);
@@ -125,7 +125,7 @@ public class GetCrossReferenceOperation extends MetadataOperation {
   @Override
   public void runInternal() throws HiveSQLException {
     setState(OperationState.RUNNING);
-    LOG.info("Fetching cross reference metadata");
+    log.info("Fetching cross reference metadata");
     try {
       IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
      ForeignKeysRequest fkReq = new ForeignKeysRequest(parentSchemaName, parentTableName, foreignSchemaName, foreignTableName);
@@ -141,16 +141,16 @@ public class GetCrossReferenceOperation extends MetadataOperation {
            fk.getKey_seq(), fk.getUpdate_rule(), fk.getDelete_rule(), fk.getFk_name(),
             fk.getPk_name(), 0};
         rowSet.addRow(rowData);
-        if (LOG.isDebugEnabled()) {
+        if (log.isDebugEnabled()) {
          String debugMessage = getDebugMessage("cross reference", RESULT_SET_SCHEMA);
-          LOG.debug(debugMessage, rowData);
+          log.debug(debugMessage, rowData);
         }
       }
-      if (LOG.isDebugEnabled() && rowSet.numRows() == 0) {
-        LOG.debug("No cross reference metadata has been returned.");
+      if (log.isDebugEnabled() && rowSet.numRows() == 0) {
+        log.debug("No cross reference metadata has been returned.");
       }
       setState(OperationState.FINISHED);
-      LOG.info("Fetching cross reference metadata has been successfully finished");
+      log.info("Fetching cross reference metadata has been successfully finished");
     } catch (Exception e) {
       setState(OperationState.ERROR);
       throw new HiveSQLException(e);
diff --git a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
index c83273b..3a0506b 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
@@ -67,8 +67,8 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
 
   private void setupSessionIO(SessionState sessionState) {
     try {
-      LOG.info("Putting temp output to file " + sessionState.getTmpOutputFile().toString()
-          + " and error output to file " + sessionState.getTmpErrOutputFile().toString());
+      log.info("Putting temp output to file " + sessionState.getTmpOutputFile()
+          + " and error output to file " + sessionState.getTmpErrOutputFile());
       sessionState.in = null; // hive server's session input stream is not used
      // open a per-session file in auto-flush mode for writing temp results and tmp error output
       sessionState.out = new SessionStream(
@@ -78,10 +78,10 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
           new FileOutputStream(sessionState.getTmpErrOutputFile()), true,
           StandardCharsets.UTF_8.name());
     } catch (IOException e) {
-      LOG.error("Error in creating temp output file ", e);
+      log.error("Error in creating temp output file", e);
 
       // Close file streams to avoid resource leaking
-      ServiceUtils.cleanup(LOG, parentSession.getSessionState().out, parentSession.getSessionState().err);
+      ServiceUtils.cleanup(log, parentSession.getSessionState().out, parentSession.getSessionState().err);
 
       try {
         sessionState.in = null;
@@ -90,7 +90,7 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
         sessionState.err =
             new SessionStream(System.err, true, StandardCharsets.UTF_8.name());
       } catch (UnsupportedEncodingException ee) {
-        LOG.error("Error creating PrintStream", e);
+        log.error("Error creating PrintStream", e);
         sessionState.out = null;
         sessionState.err = null;
       }
@@ -99,7 +99,7 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
 
 
   private void tearDownSessionIO() {
-    ServiceUtils.cleanup(LOG, parentSession.getSessionState().out, parentSession.getSessionState().err);
+    ServiceUtils.cleanup(log, parentSession.getSessionState().out, parentSession.getSessionState().err);
   }
 
   @Override
@@ -120,7 +120,7 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
         resultSchema = new TableSchema();
       }
       if (response.getMessage() != null) {
-        LOG.info(response.getMessage());
+        log.info(response.getMessage());
       }
     } catch (CommandProcessorException e) {
       setState(OperationState.ERROR);
@@ -189,8 +189,7 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
       try {
         resultReader = new BufferedReader(new FileReader(tmp));
       } catch (FileNotFoundException e) {
-        LOG.error("File " + tmp + " not found. ", e);
-        throw new HiveSQLException(e);
+        throw new HiveSQLException("File " + tmp + " not found", e);
       }
     }
     List<String> results = new ArrayList<String>();
@@ -205,8 +204,7 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
           results.add(line);
         }
       } catch (IOException e) {
-        LOG.error("Reading temp results encountered an exception: ", e);
-        throw new HiveSQLException(e);
+        throw new HiveSQLException("Unable to read line from file", e);
       }
     }
     return results;
@@ -221,7 +219,7 @@ public class HiveCommandOperation extends ExecuteStatementOperation {
 
   private void resetResultReader() {
     if (resultReader != null) {
-      ServiceUtils.cleanup(LOG, resultReader);
+      ServiceUtils.cleanup(log, resultReader);
       resultReader = null;
     }
   }
diff --git a/service/src/java/org/apache/hive/service/cli/operation/Operation.java b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
index 3df8f6d..58f5492 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/Operation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
@@ -61,7 +61,7 @@ public abstract class Operation {
   private volatile MetricsScope currentStateScope;
   private final OperationHandle opHandle;
  public static final FetchOrientation DEFAULT_FETCH_ORIENTATION = FetchOrientation.FETCH_NEXT;
-  public static final Logger LOG = LoggerFactory.getLogger(Operation.class.getName());
+  protected final Logger log = LoggerFactory.getLogger(getClass());
   protected Boolean hasResultSet = null;
   protected volatile HiveSQLException operationException;
   protected volatile Future<?> backgroundHandle;
@@ -140,7 +140,7 @@ public abstract class Operation {
     try {
       taskStatus = getTaskStatus();
     } catch (HiveSQLException sqlException) {
-      LOG.error("Error getting task status for " + opHandle.toString(), sqlException);
+      log.error("Error getting task status for {}", opHandle, sqlException);
     }
    return new OperationStatus(state, taskStatus, operationStart, operationComplete, hasResultSet, operationException);
   }
@@ -240,7 +240,7 @@ public abstract class Operation {
     createOperationLog();
     LogUtils.registerLoggingContext(queryState.getConf());
 
-    LOG.info(
+    log.info(
        "[opType={}, queryId={}, startTime={}, sessionId={}, createTime={}, userName={}, ipAddress={}]",
         opHandle.getOperationType(),
         queryState.getQueryId(),
@@ -304,11 +304,11 @@ public abstract class Operation {
     LogUtils.stopQueryAppender(LogDivertAppenderForTest.TEST_QUERY_ROUTING_APPENDER, queryId);
     if (isOperationLogEnabled) {
       if (opHandle == null) {
-        LOG.warn("Operation seems to be in invalid state, opHandle is null");
+        log.warn("Operation seems to be in invalid state, opHandle is null");
         return;
       }
       if (operationLog == null) {
-        LOG.warn("Operation [ " + opHandle.getHandleIdentifier() + " ] " + "logging is enabled, "
+        log.warn("Operation [ " + opHandle.getHandleIdentifier() + " ] " + "logging is enabled, "
             + "but its OperationLog object cannot be found. "
             + "Perhaps the operation has already terminated.");
       } else {
@@ -316,7 +316,7 @@ public abstract class Operation {
          scheduledExecutorService.schedule(new OperationLogCleaner(operationLog), operationLogCleanupDelayMs,
             TimeUnit.MILLISECONDS);
         } else {
-          LOG.info("Closing operation log {} without delay", operationLog);
+          log.info("Closing operation log {} without delay", operationLog);
           operationLog.close();
         }
       }
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index 75b84d3..811f377 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -148,7 +148,7 @@ public class SQLOperation extends ExecuteStatementOperation {
       sessionState.err =
           new SessionStream(System.err, true, StandardCharsets.UTF_8.name());
     } catch (UnsupportedEncodingException e) {
-        LOG.error("Error creating PrintStream", e);
+      log.error("Error creating PrintStream", e);
         sessionState.out = null;
         sessionState.info = null;
         sessionState.err = null;
@@ -172,10 +172,10 @@ public class SQLOperation extends ExecuteStatementOperation {
         timeoutExecutor.schedule(() -> {
           try {
             final String queryId = queryState.getQueryId();
-            LOG.info("Query timed out after: " + queryTimeout + " seconds. Cancelling the execution now: " + queryId);
+            log.info("Query timed out after: " + queryTimeout + " seconds. Cancelling the execution now: " + queryId);
             SQLOperation.this.cancel(OperationState.TIMEDOUT);
           } catch (HiveSQLException e) {
-            LOG.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e);
+            log.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e);
           }
           return null;
         }, queryTimeout, TimeUnit.SECONDS);
@@ -215,7 +215,7 @@ public class SQLOperation extends ExecuteStatementOperation {
       OperationState opState = getStatus().getState();
       // Operation may have been cancelled by another thread
       if (opState.isTerminal()) {
-        LOG.info("Not running the query. Operation is already in terminal state: " + opState
+        log.info("Not running the query. Operation is already in terminal state: " + opState
            + ", perhaps cancelled due to query timeout or by another thread.");
         return;
       }
@@ -233,7 +233,7 @@ public class SQLOperation extends ExecuteStatementOperation {
           || (getStatus().getState() == OperationState.TIMEDOUT)
           || (getStatus().getState() == OperationState.CLOSED)
           || (getStatus().getState() == OperationState.FINISHED)) {
-        LOG.warn("Ignore exception in terminal state", e);
+        log.warn("Ignore exception in terminal state", e);
         return;
       }
       setState(OperationState.ERROR);
@@ -324,7 +324,7 @@ public class SQLOperation extends ExecuteStatementOperation {
           } catch (HiveSQLException e) {
            // TODO: why do we invent our own error path op top of the one from Future.get?
             setOperationException(e);
-            LOG.error("Error running hive query", e);
+            log.error("Error running hive query", e);
           } finally {
             LogUtils.unregisterLoggingContext();
 
@@ -341,7 +341,7 @@ public class SQLOperation extends ExecuteStatementOperation {
         currentUGI.doAs(doAsAction);
       } catch (Exception e) {
         setOperationException(new HiveSQLException(e));
-        LOG.error("Error running hive query as user : " + currentUGI.getShortUserName(), e);
+        log.error("Error running hive query as user : " + currentUGI.getShortUserName(), e);
       } finally {
         /**
         * We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup
@@ -383,9 +383,9 @@ public class SQLOperation extends ExecuteStatementOperation {
         boolean success = backgroundHandle.cancel(true);
         String queryId = queryState.getQueryId();
         if (success) {
-          LOG.info("The running operation has been successfully interrupted: " + queryId);
+          log.info("The running operation has been successfully interrupted: " + queryId);
         } else if (state == OperationState.CANCELED) {
-          LOG.info("The running operation could not be cancelled, typically because it has already completed normally: " + queryId);
+          log.info("The running operation could not be cancelled, typically because it has already completed normally: " + queryId);
         }
       }
     }
@@ -398,7 +398,7 @@ public class SQLOperation extends ExecuteStatementOperation {
 
     SessionState ss = SessionState.get();
     if (ss == null) {
-      LOG.warn("Operation seems to be in invalid state, SessionState is null");
+      log.warn("Operation seems to be in invalid state, SessionState is null");
     } else {
       ss.deleteTmpOutputFile();
       ss.deleteTmpErrOutputFile();
@@ -415,12 +415,12 @@ public class SQLOperation extends ExecuteStatementOperation {
     String queryId = null;
     if (stateAfterCancel == OperationState.CANCELED) {
       queryId = queryState.getQueryId();
-      LOG.info("Cancelling the query execution: " + queryId);
+      log.info("Cancelling the query execution: " + queryId);
     }
     cleanup(stateAfterCancel);
     cleanupOperationLog(operationLogCleanupDelayMs);
     if (stateAfterCancel == OperationState.CANCELED) {
-      LOG.info("Successfully cancelled the query: " + queryId);
+      log.info("Successfully cancelled the query: " + queryId);
     }
   }
 
@@ -471,7 +471,7 @@ public class SQLOperation extends ExecuteStatementOperation {
       driver.setMaxRows(capacity);
       if (driver.getResults(convey)) {
         if (convey.size() == capacity) {
-          LOG.info("Result set buffer filled to capacity [{}]", capacity);
+          log.info("Result set buffer filled to capacity [{}]", capacity);
         }
         return decode(convey, rowSet);
       }
@@ -548,8 +548,8 @@ public class SQLOperation extends ExecuteStatementOperation {
           final String names = fieldSchemas.stream().map(i -> i.getName()).collect(Collectors.joining(","));
           final String types = fieldSchemas.stream().map(i -> i.getType()).collect(Collectors.joining(","));
 
-          LOG.debug("Column names: " + names);
-          LOG.debug("Column types: " + types);
+          log.debug("Column names: {}", names);
+          log.debug("Column types: {}", types);
 
           props.setProperty(serdeConstants.LIST_COLUMNS, names);
           props.setProperty(serdeConstants.LIST_COLUMN_TYPES, types);
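
A note on the central change, for readers skimming the patch: Operation.java
replaces the shared static logger (named after Operation) with a protected
instance logger resolved via getClass(), so messages emitted from inherited
methods are attributed to the concrete subclass (SQLOperation,
HiveCommandOperation, and so on) and can be filtered per class. Below is a
minimal standalone sketch of the pattern; the names BaseOperation,
ChildOperation, and InstanceLoggerDemo are illustrative, not from the patch.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    abstract class BaseOperation {
      // getClass() is virtual, so each instance gets a logger named after
      // its concrete class, e.g. "ChildOperation", not "BaseOperation".
      protected final Logger log = LoggerFactory.getLogger(getClass());

      void run() {
        // Attributed to the subclass with no logging code in the child.
        log.info("running {}", getClass().getSimpleName());
      }
    }

    class ChildOperation extends BaseOperation {
    }

    public class InstanceLoggerDemo {
      public static void main(String[] args) {
        new ChildOperation().run();  // logger name: ChildOperation
      }
    }

The trade-off is one logger lookup per Operation instance rather than one per
class; LoggerFactory caches loggers, so this is normally negligible.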
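The patch also converts several concatenated messages to SLF4J parameterized
logging (for example, log.debug("Column names: {}", names)), which defers
message construction until the level check passes, and it relies on SLF4J
treating a trailing Throwable as a stack trace even when placeholders are
present, as in log.error("Error getting task status for {}", opHandle,
sqlException). A small self-contained sketch of both behaviors; the class
name and values are invented for illustration:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ParamLoggingDemo {
      private static final Logger log =
          LoggerFactory.getLogger(ParamLoggingDemo.class);

      public static void main(String[] args) {
        String names = "id,name";
        // "Column names: " + names would build the string even with DEBUG
        // off; the placeholder form formats only if DEBUG is enabled.
        log.debug("Column names: {}", names);
        // A trailing Throwable is rendered as a stack trace while earlier
        // arguments still fill the {} placeholders.
        log.error("Error getting task status for {}", "op-1",
            new Exception("boom"));
      }
    }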
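Finally, HiveCommandOperation drops two log-and-rethrow sites: rather than
logging the error and rethrowing the bare cause (which typically logs the
same failure twice), the context now travels in the HiveSQLException message
and is logged once wherever the exception is ultimately handled. A sketch of
the resulting shape, with WrappedException standing in for HiveSQLException:

    import java.io.BufferedReader;
    import java.io.FileNotFoundException;
    import java.io.FileReader;

    public class RethrowDemo {
      static class WrappedException extends Exception {
        WrappedException(String message, Throwable cause) {
          super(message, cause);
        }
      }

      static BufferedReader open(String path) throws WrappedException {
        try {
          return new BufferedReader(new FileReader(path));
        } catch (FileNotFoundException e) {
          // No log call here: the path context rides along in the message,
          // and the catcher decides whether and where to log it.
          throw new WrappedException("File " + path + " not found", e);
        }
      }
    }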
