Repository: incubator-impala
Updated Branches:
  refs/heads/master 5a158dbcd -> b1edca2a5


Bracketing Java logging output with log level checks.

This avoids building log-message strings and other intermediate objects when the
corresponding log level is disabled, which improves performance.
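To illustrate the pattern (a minimal sketch, assuming an org.slf4j.Logger named
LOG; the class and message below are illustrative, not taken from this change):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLoggingExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedLoggingExample.class);

      void addPrivilege(String name) {
        // Unguarded: the argument is concatenated (allocating a StringBuilder
        // and a String) on every call, even when TRACE is disabled.
        LOG.trace("Adding privilege: " + name);

        // Guarded: isTraceEnabled() is a cheap level comparison, so the
        // message is only built when it would actually be emitted.
        if (LOG.isTraceEnabled()) {
          LOG.trace("Adding privilege: " + name);
        }
      }
    }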

Change-Id: Ie0f5123dbf2caf3b03183c76820599920baa9785
Reviewed-on: http://gerrit.cloudera.org:8080/5284
Reviewed-by: Marcel Kornacker <mar...@cloudera.com>
Tested-by: Internal Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/352833b8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/352833b8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/352833b8

Branch: refs/heads/master
Commit: 352833b8cfcf5e0246f322fec1ee9b7612e0ed6a
Parents: 5a158db
Author: Marcel Kornacker <mar...@cloudera.com>
Authored: Wed Nov 30 12:51:20 2016 -0800
Committer: Internal Jenkins <cloudera-hud...@gerrit.cloudera.org>
Committed: Thu Dec 1 04:42:38 2016 +0000

----------------------------------------------------------------------
 .../impala/catalog/AuthorizationPolicy.java     | 22 ++++++---
 .../impala/catalog/CatalogServiceCatalog.java   | 34 +++++++++----
 .../java/org/apache/impala/catalog/Column.java  |  4 +-
 .../apache/impala/catalog/DataSourceTable.java  |  4 +-
 .../org/apache/impala/catalog/HdfsTable.java    | 50 +++++++++++++------
 .../apache/impala/catalog/ImpaladCatalog.java   | 16 ++++--
 .../impala/catalog/MetaStoreClientPool.java     |  4 +-
 .../impala/catalog/PartitionStatsUtil.java      |  2 +-
 .../java/org/apache/impala/catalog/Table.java   |  2 +-
 .../org/apache/impala/catalog/TableLoader.java  |  2 +-
 .../apache/impala/common/FileSystemUtil.java    | 26 ++++++----
 .../ExternalDataSourceExecutor.java             |  4 +-
 .../apache/impala/planner/AggregationNode.java  | 12 +++--
 .../apache/impala/planner/AnalyticEvalNode.java |  6 ++-
 .../apache/impala/planner/AnalyticPlanner.java  | 16 ++++--
 .../impala/planner/DataSourceScanNode.java      |  4 +-
 .../impala/planner/DistributedPlanner.java      | 30 +++++++-----
 .../apache/impala/planner/HBaseScanNode.java    |  8 ++-
 .../impala/planner/HdfsPartitionFilter.java     |  6 ++-
 .../org/apache/impala/planner/HdfsScanNode.java | 28 +++++++----
 .../org/apache/impala/planner/JoinNode.java     |  4 +-
 .../org/apache/impala/planner/KuduScanNode.java |  4 +-
 .../apache/impala/planner/ParallelPlanner.java  | 15 ++----
 .../java/org/apache/impala/planner/Planner.java | 16 +++---
 .../impala/planner/RuntimeFilterGenerator.java  |  6 ++-
 .../org/apache/impala/planner/SelectNode.java   |  4 +-
 .../impala/planner/SingleNodePlanner.java       | 32 ++++++++----
 .../org/apache/impala/planner/SortNode.java     | 12 +++--
 .../org/apache/impala/planner/UnionNode.java    |  4 +-
 .../impala/service/CatalogOpExecutor.java       | 51 +++++++++++++-------
 .../org/apache/impala/service/Frontend.java     | 14 ++++--
 .../org/apache/impala/service/JniFrontend.java  |  6 ++-
 .../impala/service/KuduCatalogOpExecutor.java   | 18 ++++---
 .../org/apache/impala/service/MetadataOp.java   | 10 ++--
 .../apache/impala/util/FsPermissionChecker.java |  7 ++-
 .../org/apache/impala/util/HdfsCachingUtil.java | 32 ++++++++----
 .../org/apache/impala/util/MetaStoreUtil.java   |  6 ++-
 .../apache/impala/util/RequestPoolService.java  | 19 +++++---
 .../apache/impala/util/SentryPolicyService.java | 41 ++++++++++------
 39 files changed, 388 insertions(+), 193 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
index 946ca30..56fc2f7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
+++ b/fe/src/main/java/org/apache/impala/catalog/AuthorizationPolicy.java
@@ -116,15 +116,19 @@ public class AuthorizationPolicy implements PrivilegeCache {
    */
   public synchronized void addPrivilege(RolePrivilege privilege)
       throws CatalogException {
-    LOG.trace("Adding privilege: " + privilege.getName() +
-        " role ID: " + privilege.getRoleId());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Adding privilege: " + privilege.getName() +
+          " role ID: " + privilege.getRoleId());
+    }
     Role role = getRole(privilege.getRoleId());
     if (role == null) {
      throw new CatalogException(String.format("Error adding privilege: %s. Role ID " +
           "'%d' does not exist.", privilege.getName(), privilege.getRoleId()));
     }
-    LOG.trace("Adding privilege: " + privilege.getName() + " to role: " +
-        role.getName() + "ID: " + role.getId());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Adding privilege: " + privilege.getName() + " to role: " +
+          role.getName() + "ID: " + role.getId());
+    }
     role.addPrivilege(privilege);
   }
 
@@ -141,8 +145,10 @@ public class AuthorizationPolicy implements PrivilegeCache {
      throw new CatalogException(String.format("Error removing privilege: %s. Role ID " +
           "'%d' does not exist.", privilege.getName(), privilege.getRoleId()));
     }
-    LOG.trace("Removing privilege: '" + privilege.getName() + "' from Role ID: 
" +
-        privilege.getRoleId() + " Role Name: " + role.getName());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Removing privilege: '" + privilege.getName() + "' from Role 
ID: " +
+          privilege.getRoleId() + " Role Name: " + role.getName());
+    }
     return role.removePrivilege(privilege.getName());
   }
 
@@ -275,7 +281,9 @@ public class AuthorizationPolicy implements PrivilegeCache {
         for (RolePrivilege privilege: role.getPrivileges()) {
           String authorizeable = privilege.getName();
           if (authorizeable == null) {
-            LOG.trace("Ignoring invalid privilege: " + privilege.getName());
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("Ignoring invalid privilege: " + privilege.getName());
+            }
             continue;
           }
           privileges.add(authorizeable);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
index 7997412..e7c84da 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
@@ -194,7 +194,9 @@ public class CatalogServiceCatalog extends Catalog {
     }
 
     public void run() {
-      LOG.trace("Reloading cache pool names from HDFS");
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Reloading cache pool names from HDFS");
+      }
       // Map of cache pool name to CachePoolInfo. Stored in a map to allow Set operations
       // to be performed on the keys.
       Map<String, CachePoolInfo> currentCachePools = Maps.newHashMap();
@@ -296,8 +298,10 @@ public class CatalogServiceCatalog extends Catalog {
               try {
                 catalogTbl.setTable(tbl.toThrift());
               } catch (Exception e) {
-                LOG.debug(String.format("Error calling toThrift() on table 
%s.%s: %s",
-                    db.getName(), tblName, e.getMessage()), e);
+                if (LOG.isDebugEnabled()) {
+                  LOG.debug(String.format("Error calling toThrift() on table 
%s.%s: %s",
+                      db.getName(), tblName, e.getMessage()), e);
+                }
                 continue;
               }
               catalogTbl.setCatalog_version(tbl.getCatalogVersion());
@@ -519,7 +523,9 @@ public class CatalogServiceCatalog extends Catalog {
   private void loadFunctionsFromDbParams(Db db,
       org.apache.hadoop.hive.metastore.api.Database msDb) {
     if (msDb == null || msDb.getParameters() == null) return;
-    LOG.info("Loading native functions for database: " + db.getName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading native functions for database: " + db.getName());
+    }
     TCompactProtocol.Factory protocolFactory = new TCompactProtocol.Factory();
     for (String key: msDb.getParameters().keySet()) {
       if (!key.startsWith(Db.FUNCTION_INDEX_PREFIX)) continue;
@@ -545,7 +551,9 @@ public class CatalogServiceCatalog extends Catalog {
   private void loadJavaFunctions(Db db,
       List<org.apache.hadoop.hive.metastore.api.Function> functions) {
     Preconditions.checkNotNull(functions);
-    LOG.info("Loading Java functions for database: " + db.getName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading Java functions for database: " + db.getName());
+    }
     for (org.apache.hadoop.hive.metastore.api.Function function: functions) {
       try {
         for (Function fn: extractFunctions(db.getName(), function)) {
@@ -880,7 +888,9 @@ public class CatalogServiceCatalog extends Catalog {
    * Throws a CatalogException if there is an error loading table metadata.
    */
   public Table reloadTable(Table tbl) throws CatalogException {
-    LOG.debug(String.format("Refreshing table metadata: %s", 
tbl.getFullName()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Refreshing table metadata: %s", 
tbl.getFullName()));
+    }
     TTableName tblName = new TTableName(tbl.getDb().getName().toLowerCase(),
         tbl.getName().toLowerCase());
     Db db = tbl.getDb();
@@ -1009,8 +1019,10 @@ public class CatalogServiceCatalog extends Catalog {
     Preconditions.checkNotNull(updatedObjects);
     updatedObjects.first = null;
     updatedObjects.second = null;
-    LOG.debug(String.format("Invalidating table metadata: %s.%s",
-        tableName.getDb_name(), tableName.getTable_name()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Invalidating table metadata: %s.%s",
+          tableName.getDb_name(), tableName.getTable_name()));
+    }
     String dbName = tableName.getDb_name();
     String tblName = tableName.getTable_name();
 
@@ -1249,8 +1261,10 @@ public class CatalogServiceCatalog extends Catalog {
       String partitionName = hdfsPartition == null
           ? HdfsTable.constructPartitionName(partitionSpec)
           : hdfsPartition.getPartitionName();
-      LOG.debug(String.format("Refreshing Partition metadata: %s %s",
-          hdfsTable.getFullName(), partitionName));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Refreshing Partition metadata: %s %s",
+            hdfsTable.getFullName(), partitionName));
+      }
       try (MetaStoreClient msClient = getMetaStoreClient()) {
         org.apache.hadoop.hive.metastore.api.Partition hmsPartition = null;
         try {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/Column.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Column.java b/fe/src/main/java/org/apache/impala/catalog/Column.java
index 0830f61..e01fa0a 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Column.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Column.java
@@ -68,7 +68,9 @@ public class Column {
 
   public boolean updateStats(ColumnStatisticsData statsData) {
     boolean statsDataCompatibleWithColType = stats_.update(type_, statsData);
-    LOG.debug("col stats: " + name_ + " #distinct=" + 
stats_.getNumDistinctValues());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("col stats: " + name_ + " #distinct=" + 
stats_.getNumDistinctValues());
+    }
     return statsDataCompatibleWithColType;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java b/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
index dab0c05..7370806 100644
--- a/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java
@@ -163,7 +163,9 @@ public class DataSourceTable extends Table {
     Preconditions.checkNotNull(msTbl);
     msTable_ = msTbl;
     clearColumns();
-    LOG.debug("load table: " + db_.getName() + "." + name_);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("load table: " + db_.getName() + "." + name_);
+    }
     String dataSourceName = getRequiredTableProperty(msTbl, TBL_PROP_DATA_SRC_NAME, null);
     String location = getRequiredTableProperty(msTbl, TBL_PROP_LOCATION, dataSourceName);
     String className = getRequiredTableProperty(msTbl, TBL_PROP_CLASS, dataSourceName);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 21b5359..66e28f8 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -370,7 +370,9 @@ public class HdfsTable extends Table {
     Preconditions.checkNotNull(fd);
     Preconditions.checkNotNull(perFsFileBlocks);
     Preconditions.checkArgument(!file.isDirectory());
-    LOG.debug("load block md for " + name_ + " file " + fd.getFileName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("load block md for " + name_ + " file " + fd.getFileName());
+    }
 
     if (!FileSystemUtil.hasGetFileBlockLocations(fs)) {
       synthesizeBlockMetadata(fs, fd, fileFormat);
@@ -463,8 +465,10 @@ public class HdfsTable extends Table {
       // part of the FileSystem interface, so we'll need to downcast.
       if (!(fs instanceof DistributedFileSystem)) continue;
 
-      LOG.trace("Loading disk ids for: " + getFullName() + ". nodes: " +
-          hostIndex_.size() + ". filesystem: " + fsKey);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loading disk ids for: " + getFullName() + ". nodes: " +
+            hostIndex_.size() + ". filesystem: " + fsKey);
+      }
       DistributedFileSystem dfs = (DistributedFileSystem)fs;
       FileBlocksInfo blockLists = perFsFileBlocks.get(fsKey);
       Preconditions.checkNotNull(blockLists);
@@ -1083,7 +1087,9 @@ public class HdfsTable extends Table {
       // Load partition and file metadata
       if (reuseMetadata) {
         // Incrementally update this table's partitions and file metadata
-        LOG.debug("incremental update for table: " + db_.getName() + "." + 
name_);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("incremental update for table: " + db_.getName() + "." + 
name_);
+        }
        Preconditions.checkState(partitionsToUpdate == null || loadFileMetadata);
         updateMdFromHmsTable(msTbl);
         if (msTbl.getPartitionKeysSize() == 0) {
@@ -1093,7 +1099,9 @@ public class HdfsTable extends Table {
         }
       } else {
         // Load all partitions from Hive Metastore, including file metadata.
-        LOG.debug("load table from Hive Metastore: " + db_.getName() + "." + 
name_);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("load table from Hive Metastore: " + db_.getName() + "." + 
name_);
+        }
         List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions =
             Lists.newArrayList();
         msPartitions.addAll(MetaStoreUtil.fetchAllPartitions(
@@ -1133,7 +1141,9 @@ public class HdfsTable extends Table {
    * Updates the file metadata of an unpartitioned HdfsTable.
    */
   private void updateUnpartitionedTableFileMd() throws CatalogException {
-    LOG.debug("update unpartitioned table: " + name_);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("update unpartitioned table: " + name_);
+    }
     resetPartitions();
     org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
     Preconditions.checkNotNull(msTbl);
@@ -1156,7 +1166,9 @@ public class HdfsTable extends Table {
    */
   private void updatePartitionsFromHms(IMetaStoreClient client,
       Set<String> partitionsToUpdate, boolean loadFileMetadata) throws 
Exception {
-    LOG.debug("sync table partitions: " + name_);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("sync table partitions: " + name_);
+    }
     org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
     Preconditions.checkNotNull(msTbl);
     Preconditions.checkState(msTbl.getPartitionKeysSize() != 0);
@@ -1415,8 +1427,10 @@ public class HdfsTable extends Table {
       IMetaStoreClient client) throws Exception {
     Preconditions.checkNotNull(partitions);
     if (partitions.isEmpty()) return;
-    LOG.info(String.format("Incrementally updating %d/%d partitions.",
-        partitions.size(), partitionMap_.size()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Incrementally updating %d/%d partitions.",
+          partitions.size(), partitionMap_.size()));
+    }
     Set<String> partitionNames = Sets.newHashSet();
     for (HdfsPartition part: partitions) {
       partitionNames.add(part.getPartitionName());
@@ -1469,8 +1483,10 @@ public class HdfsTable extends Table {
   private void loadPartitionFileMetadata(List<HdfsPartition> partitions)
       throws Exception {
     Preconditions.checkNotNull(partitions);
-    LOG.info(String.format("loading file metadata for %d partitions",
-        partitions.size()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("loading file metadata for %d partitions",
+          partitions.size()));
+    }
     org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable();
     Preconditions.checkNotNull(msTbl);
     HdfsStorageDescriptor fileFormatDescriptor =
@@ -1782,8 +1798,10 @@ public class HdfsTable extends Table {
       List<List<String>> partitionsNotInHms) throws IOException {
     if (depth == partitionKeys.size()) {
       if (existingPartitions.contains(partitionExprs)) {
-        LOG.trace(String.format("Skip recovery of path '%s' because it already 
exists " +
-            "in metastore", path.toString()));
+        if (LOG.isTraceEnabled()) {
+          LOG.trace(String.format("Skip recovery of path '%s' because it 
already "
+              + "exists in metastore", path.toString()));
+        }
       } else {
         partitionsNotInHms.add(partitionValues);
         existingPartitions.add(partitionExprs);
@@ -1837,8 +1855,10 @@ public class HdfsTable extends Table {
           }
         }
       } catch (Exception ex) {
-        LOG.debug(String.format("Invalid partition value (%s) for Type (%s).",
-            partName[1], type.toSql()));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format("Invalid partition value (%s) for Type 
(%s).",
+              partName[1], type.toSql()));
+        }
         return null;
       }
     } else {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
index a59f997..6a20fcc 100644
--- a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java
@@ -237,8 +237,10 @@ public class ImpaladCatalog extends Catalog {
       throws TableLoadingException, DatabaseNotFoundException {
     // This item is out of date and should not be applied to the catalog.
     if (catalogDeltaLog_.wasObjectRemovedAfter(catalogObject)) {
-      LOG.debug(String.format("Skipping update because a matching object was 
removed " +
-          "in a later catalog version: %s", catalogObject));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Skipping update because a matching object was 
removed " +
+            "in a later catalog version: %s", catalogObject));
+      }
       return;
     }
 
@@ -354,8 +356,10 @@ public class ImpaladCatalog extends Catalog {
       throws TableLoadingException {
     Db db = getDb(thriftTable.db_name);
     if (db == null) {
-      LOG.debug("Parent database of table does not exist: " +
-          thriftTable.db_name + "." + thriftTable.tbl_name);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Parent database of table does not exist: " +
+            thriftTable.db_name + "." + thriftTable.tbl_name);
+      }
       return;
     }
 
@@ -369,7 +373,9 @@ public class ImpaladCatalog extends Catalog {
     function.setCatalogVersion(catalogVersion);
     Db db = getDb(function.getFunctionName().getDb());
     if (db == null) {
-      LOG.debug("Parent database of function does not exist: " + 
function.getName());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Parent database of function does not exist: " + 
function.getName());
+      }
       return;
     }
     Function existingFn = db.getFunction(fn.getSignature());

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java b/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
index 29e5df9..76db6f7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
+++ b/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java
@@ -79,7 +79,9 @@ public class MetaStoreClientPool {
     * connection to the HMS before giving up and failing out with an exception.
      */
     private MetaStoreClient(HiveConf hiveConf, int cnxnTimeoutSec) {
-      LOG.debug("Creating MetaStoreClient. Pool Size = " + clientPool_.size());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Creating MetaStoreClient. Pool Size = " + 
clientPool_.size());
+      }
 
       long retryDelaySeconds = hiveConf.getTimeVar(
          HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java b/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
index 8285e9b..4444de6 100644
--- a/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
+++ b/fe/src/main/java/org/apache/impala/catalog/PartitionStatsUtil.java
@@ -112,7 +112,7 @@ public class PartitionStatsUtil {
        partition.putToParameters(INCREMENTAL_STATS_CHUNK_PREFIX + i, chunks.get(i));
       }
     } catch (TException e) {
-      LOG.info("Error saving partition stats: ", e);
+      LOG.error("Error saving partition stats: ", e);
       // TODO: What to throw here?
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/Table.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java
index 7bde786..214413b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -154,7 +154,7 @@ public abstract class Table implements CatalogObject {
    * the correctness of the system.
    */
   protected void loadAllColumnStats(IMetaStoreClient client) {
-    LOG.debug("Loading column stats for table: " + name_);
+    if (LOG.isDebugEnabled()) LOG.debug("Loading column stats for table: " + 
name_);
     List<ColumnStatisticsObj> colStats;
 
    // We need to only query those columns which may have stats; asking HMS for other

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/TableLoader.java b/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
index c3ae2ba..1d0d54e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
+++ b/fe/src/main/java/org/apache/impala/catalog/TableLoader.java
@@ -55,7 +55,7 @@ public class TableLoader {
    */
   public Table load(Db db, String tblName) {
     String fullTblName = db.getName() + "." + tblName;
-    LOG.info("Loading metadata for: " + fullTblName);
+    if (LOG.isDebugEnabled()) LOG.debug("Loading metadata for: " + 
fullTblName);
     Table table;
     // turn all exceptions into TableLoadingException
     try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
index c81e27f..f167771 100644
--- a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
+++ b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java
@@ -57,7 +57,7 @@ public class FileSystemUtil {
     for (FileStatus fStatus: fs.listStatus(directory)) {
       // Only delete files that are not hidden.
       if (fStatus.isFile() && !isHiddenFile(fStatus.getPath().getName())) {
-        LOG.debug("Removing: " + fStatus.getPath());
+        if (LOG.isDebugEnabled()) LOG.debug("Removing: " + fStatus.getPath());
         fs.delete(fStatus.getPath(), false);
         ++numFilesDeleted;
       }
@@ -123,7 +123,9 @@ public class FileSystemUtil {
     int numFilesMoved = 0;
     for (FileStatus fStatus: sourceFs.listStatus(sourceDir)) {
       if (fStatus.isDirectory()) {
-        LOG.debug("Skipping copy of directory: " + fStatus.getPath());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Skipping copy of directory: " + fStatus.getPath());
+        }
         continue;
       } else if (isHiddenFile(fStatus.getPath().getName())) {
         continue;
@@ -180,8 +182,10 @@ public class FileSystemUtil {
     // non-distributed filesystem.
     if (!doRename) doRename = !destIsDfs && sameFileSystem;
     if (doRename) {
-      LOG.debug(String.format(
-          "Moving '%s' to '%s'", sourceFile.toString(), destFile.toString()));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format(
+            "Moving '%s' to '%s'", sourceFile.toString(), 
destFile.toString()));
+      }
       // Move (rename) the file.
       destFs.rename(sourceFile, destFile);
       return;
@@ -192,13 +196,17 @@ public class FileSystemUtil {
      // encryption zones. A move would return an error from the NN because a move is a
      // metadata-only operation and the files would not be encrypted/decrypted properly
       // on the DNs.
-      LOG.info(String.format(
-          "Copying source '%s' to '%s' because HDFS encryption zones are 
different.",
-          sourceFile, destFile));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format(
+            "Copying source '%s' to '%s' because HDFS encryption zones are 
different.",
+            sourceFile, destFile));
+      }
     } else {
       Preconditions.checkState(!sameFileSystem);
-      LOG.info(String.format("Copying '%s' to '%s' between filesystems.",
-          sourceFile, destFile));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Copying '%s' to '%s' between filesystems.",
+            sourceFile, destFile));
+      }
     }
     FileUtil.copy(sourceFs, sourceFile, destFs, destFile, true, true, CONF);
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java b/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
index 49d9426..7e8859a 100644
--- a/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
+++ b/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java
@@ -157,7 +157,9 @@ public class ExternalDataSourceExecutor {
        if (initString_ != null && initString_.startsWith(CACHE_CLASS_PREFIX)) {
           cachedClasses_.put(cacheMapKey, c);
         }
-        LOG.info("Loaded jar for class {} at path {}", className_, jarPath_);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Loaded jar for class {} at path {}", className_, 
jarPath_);
+        }
         numClassCacheMisses_++;
       } else {
         numClassCacheHits_++;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AggregationNode.java b/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
index 57dbd8f..07c51f1 100644
--- a/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/AggregationNode.java
@@ -181,10 +181,14 @@ public class AggregationNode extends PlanNode {
     cardinality_ = aggInfo_.getGroupingExprs().isEmpty() ? 1 :
       Expr.getNumDistinctValues(aggInfo_.getGroupingExprs());
     // take HAVING predicate into account
-    LOG.trace("Agg: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Agg: cardinality=" + Long.toString(cardinality_));
+    }
     if (cardinality_ > 0) {
       cardinality_ = Math.round((double) cardinality_ * computeSelectivity());
-      LOG.trace("sel=" + Double.toString(computeSelectivity()));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("sel=" + Double.toString(computeSelectivity()));
+      }
     }
     // if we ended up with an overflow, the estimate is certain to be wrong
     if (cardinality_ < 0) cardinality_ = -1;
@@ -199,7 +203,9 @@ public class AggregationNode extends PlanNode {
       }
     }
     cardinality_ = capAtLimit(cardinality_);
-    LOG.trace("stats Agg: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("stats Agg: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java b/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
index 6e07e79..408680b 100644
--- a/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/AnalyticEvalNode.java
@@ -121,7 +121,9 @@ public class AnalyticEvalNode extends PlanNode {
     // do this at the end so it can take all conjuncts into account
     computeStats(analyzer);
 
-    LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+    }
 
     // point fn calls, partition and ordering exprs at our input
     ExprSubstitutionMap childSmap = getCombinedChildSmap();
@@ -129,7 +131,7 @@ public class AnalyticEvalNode extends PlanNode {
    substitutedPartitionExprs_ = Expr.substituteList(partitionExprs_, childSmap,
         analyzer, false);
    orderByElements_ = OrderByElement.substitute(orderByElements_, childSmap, analyzer);
-    LOG.trace("evalnode: " + debugString());
+    if (LOG.isTraceEnabled()) LOG.trace("evalnode: " + debugString());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java b/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
index e44fc0b..6d726ec 100644
--- a/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/AnalyticPlanner.java
@@ -310,7 +310,9 @@ public class AnalyticPlanner {
 
     SortInfo sortInfo = new SortInfo(
        Expr.substituteList(sortExprs, sortSmap, analyzer_, false), isAsc, nullsFirst);
-    LOG.trace("sortinfo exprs: " + 
Expr.debugString(sortInfo.getOrderingExprs()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("sortinfo exprs: " + 
Expr.debugString(sortInfo.getOrderingExprs()));
+    }
     sortInfo.setMaterializedTupleInfo(sortTupleDesc, sortSlotExprs);
     return sortInfo;
   }
@@ -373,7 +375,9 @@ public class AnalyticPlanner {
       sortTupleId = sortNode.tupleIds_.get(0);
       bufferedTupleDesc =
          analyzer_.getDescTbl().copyTupleDescriptor(sortTupleId, "buffered-tuple");
-      LOG.trace("desctbl: " + analyzer_.getDescTbl().debugString());
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("desctbl: " + analyzer_.getDescTbl().debugString());
+      }
 
       List<SlotDescriptor> inputSlots = 
analyzer_.getTupleDesc(sortTupleId).getSlots();
       List<SlotDescriptor> bufferedSlots = bufferedTupleDesc.getSlots();
@@ -399,7 +403,9 @@ public class AnalyticPlanner {
         partitionByEq = createNullMatchingEquals(
            Expr.substituteList(windowGroup.partitionByExprs, sortSmap, analyzer_, false),
             sortTupleId, bufferedSmap);
-        LOG.trace("partitionByEq: " + partitionByEq.debugString());
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("partitionByEq: " + partitionByEq.debugString());
+        }
       }
       Expr orderByEq = null;
       if (!windowGroup.orderByElements.isEmpty()) {
@@ -407,7 +413,9 @@ public class AnalyticPlanner {
             OrderByElement.getOrderByExprs(OrderByElement.substitute(
                 windowGroup.orderByElements, sortSmap, analyzer_)),
             sortTupleId, bufferedSmap);
-        LOG.trace("orderByEq: " + orderByEq.debugString());
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("orderByEq: " + orderByEq.debugString());
+        }
       }
 
       root = new AnalyticEvalNode(ctx_.getNextNodeId(), root,

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java b/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
index 307e67e..22bdb49 100644
--- a/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java
@@ -272,7 +272,9 @@ public class DataSourceScanNode extends ScanNode {
     LOG.debug("computeStats DataSourceScan: cardinality=" + 
Long.toString(cardinality_));
 
     numNodes_ = table_.getNumNodes();
-    LOG.debug("computeStats DataSourceScan: #nodes=" + 
Integer.toString(numNodes_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats DataSourceScan: #nodes=" + 
Integer.toString(numNodes_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
index 24d7caa..3cfc787 100644
--- a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java
@@ -78,9 +78,11 @@ public class DistributedPlanner {
       Preconditions.checkState(!queryStmt.hasOffset());
       isPartitioned = true;
     }
-    LOG.debug("create plan fragments");
     long perNodeMemLimit = ctx_.getQueryOptions().mem_limit;
-    LOG.debug("memlimit=" + Long.toString(perNodeMemLimit));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("create plan fragments");
+      LOG.debug("memlimit=" + Long.toString(perNodeMemLimit));
+    }
    createPlanFragments(singleNodePlan, isPartitioned, perNodeMemLimit, fragments);
     return fragments;
   }
@@ -435,10 +437,12 @@ public class DistributedPlanner {
         broadcastCost = 2 * rhsDataSize * leftChildFragment.getNumNodes();
       }
     }
-    LOG.debug("broadcast: cost=" + Long.toString(broadcastCost));
-    LOG.debug("card=" + Long.toString(rhsTree.getCardinality()) + " row_size="
-        + Float.toString(rhsTree.getAvgRowSize()) + " #nodes="
-        + Integer.toString(leftChildFragment.getNumNodes()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("broadcast: cost=" + Long.toString(broadcastCost));
+      LOG.debug("card=" + Long.toString(rhsTree.getCardinality()) + " 
row_size="
+          + Float.toString(rhsTree.getAvgRowSize()) + " #nodes="
+          + Integer.toString(leftChildFragment.getNumNodes()));
+    }
 
     // repartition: both left- and rightChildFragment are partitioned on the
    // join exprs, and a hash table is built with the rightChildFragment's output.
@@ -465,12 +469,14 @@ public class DistributedPlanner {
       double rhsNetworkCost = (rhsHasCompatPartition) ? 0.0 : rhsDataSize;
      partitionCost = Math.round(lhsNetworkCost + rhsNetworkCost + rhsDataSize);
     }
-    LOG.debug("partition: cost=" + Long.toString(partitionCost));
-    LOG.debug("lhs card=" + Long.toString(lhsTree.getCardinality()) + " 
row_size="
-        + Float.toString(lhsTree.getAvgRowSize()));
-    LOG.debug("rhs card=" + Long.toString(rhsTree.getCardinality()) + " 
row_size="
-        + Float.toString(rhsTree.getAvgRowSize()));
-    LOG.debug(rhsTree.getExplainString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("partition: cost=" + Long.toString(partitionCost));
+      LOG.debug("lhs card=" + Long.toString(lhsTree.getCardinality()) + " 
row_size="
+          + Float.toString(lhsTree.getAvgRowSize()));
+      LOG.debug("rhs card=" + Long.toString(rhsTree.getCardinality()) + " 
row_size="
+          + Float.toString(rhsTree.getAvgRowSize()));
+      LOG.debug(rhsTree.getExplainString());
+    }
 
     boolean doBroadcast = false;
     // we do a broadcast join if

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java b/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
index e8b26bc..40b44d3 100644
--- a/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java
@@ -216,11 +216,15 @@ public class HBaseScanNode extends ScanNode {
     cardinality_ *= computeSelectivity();
     cardinality_ = Math.max(1, cardinality_);
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats HbaseScan: cardinality=" + 
Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats HbaseScan: cardinality=" + 
Long.toString(cardinality_));
+    }
 
     // TODO: take actual regions into account
     numNodes_ = tbl.getNumNodes();
-    LOG.debug("computeStats HbaseScan: #nodes=" + Integer.toString(numNodes_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats HbaseScan: #nodes=" + 
Integer.toString(numNodes_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
index 8d15425..7368358 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionFilter.java
@@ -119,8 +119,10 @@ public class HdfsPartitionFilter {
     }
 
     Expr literalPredicate = predicate_.substitute(sMap, analyzer, false);
-    LOG.trace("buildPartitionPredicate: " + literalPredicate.toSql() + " " +
-        literalPredicate.debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("buildPartitionPredicate: " + literalPredicate.toSql() + " " +
+          literalPredicate.debugString());
+    }
     Preconditions.checkState(literalPredicate.isConstant());
     return literalPredicate;
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
index 66ed792..a2ad76c 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java
@@ -373,7 +373,9 @@ public class HdfsScanNode extends ScanNode {
   @Override
   public void computeStats(Analyzer analyzer) {
     super.computeStats(analyzer);
-    LOG.debug("collecting partitions for table " + tbl_.getName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("collecting partitions for table " + tbl_.getName());
+    }
     numPartitionsMissingStats_ = 0;
     totalFiles_ = 0;
     totalBytes_ = 0;
@@ -430,17 +432,23 @@ public class HdfsScanNode extends ScanNode {
     }
 
     if (cardinality_ > 0) {
-      LOG.debug("cardinality_=" + Long.toString(cardinality_) +
-                " sel=" + Double.toString(computeSelectivity()));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("cardinality_=" + Long.toString(cardinality_) +
+                  " sel=" + Double.toString(computeSelectivity()));
+      }
       cardinality_ = Math.round(cardinality_ * computeSelectivity());
       // IMPALA-2165: Avoid setting the cardinality to 0 after rounding.
       cardinality_ = Math.max(cardinality_, 1);
     }
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats HdfsScan: cardinality_=" + 
Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats HdfsScan: cardinality_=" + 
Long.toString(cardinality_));
+    }
 
     computeNumNodes(analyzer, cardinality_);
-    LOG.debug("computeStats HdfsScan: #nodes=" + Integer.toString(numNodes_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats HdfsScan: #nodes=" + 
Integer.toString(numNodes_));
+    }
   }
 
   /**
@@ -493,10 +501,12 @@ public class HdfsScanNode extends ScanNode {
    // Tables can reside on 0 nodes (empty table), but a plan node must always be
     // executed on at least one node.
     numNodes_ = (cardinality == 0 || totalNodes == 0) ? 1 : totalNodes;
-    LOG.debug("computeNumNodes totalRanges=" + scanRanges_.size() +
-        " localRanges=" + numLocalRanges + " remoteRanges=" + numRemoteRanges +
-        " localHostSet.size=" + localHostSet.size() +
-        " clusterNodes=" + cluster.numNodes());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeNumNodes totalRanges=" + scanRanges_.size() +
+          " localRanges=" + numLocalRanges + " remoteRanges=" + 
numRemoteRanges +
+          " localHostSet.size=" + localHostSet.size() +
+          " clusterNodes=" + cluster.numNodes());
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/JoinNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/JoinNode.java b/fe/src/main/java/org/apache/impala/planner/JoinNode.java
index 3362047..13cc854 100644
--- a/fe/src/main/java/org/apache/impala/planner/JoinNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/JoinNode.java
@@ -475,7 +475,9 @@ public abstract class JoinNode extends PlanNode {
     }
     cardinality_ = capAtLimit(cardinality_);
     Preconditions.checkState(hasValidStats());
-    LOG.debug("stats Join: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("stats Join: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
index 61f6b28..cca7a6f 100644
--- a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java
@@ -223,7 +223,9 @@ public class KuduScanNode extends ScanNode {
     cardinality_ *= computeSelectivity();
     cardinality_ = Math.min(Math.max(1, cardinality_), kuduTable_.getNumRows());
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("computeStats KuduScan: cardinality=" + 
Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("computeStats KuduScan: cardinality=" + 
Long.toString(cardinality_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java b/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
index 6db4ae4..8f2a1a4 100644
--- a/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/ParallelPlanner.java
@@ -83,13 +83,11 @@ public class ParallelPlanner {
    * Assign fragment's plan id and cohort id to children.
    */
  private void createBuildPlans(PlanFragment fragment, CohortId buildCohortId) {
-    LOG.info("createbuildplans fragment " + fragment.getId().toString());
     List<JoinNode> joins = Lists.newArrayList();
     collectJoins(fragment.getPlanRoot(), joins);
     if (!joins.isEmpty()) {
       List<String> joinIds = Lists.newArrayList();
       for (JoinNode join: joins) joinIds.add(join.getId().toString());
-      LOG.info("collected joins " + Joiner.on(" ").join(joinIds));
 
      if (buildCohortId == null) buildCohortId = cohortIdGenerator_.getNextId();
       for (JoinNode join: joins) createBuildPlan(join, buildCohortId);
@@ -98,8 +96,6 @@ public class ParallelPlanner {
     if (!fragment.getChildren().isEmpty()) {
       List<String> ids = Lists.newArrayList();
      for (PlanFragment c: fragment.getChildren()) ids.add(c.getId().toString());
-      LOG.info("collected children " + Joiner.on(" ").join(ids) + " parent "
-          + fragment.getId().toString());
     }
     for (PlanFragment child: fragment.getChildren()) {
       child.setPlanId(fragment.getPlanId());
@@ -147,7 +143,6 @@ public class ParallelPlanner {
    * Also assigns the new plan a plan id.
    */
   private void createBuildPlan(JoinNode join, CohortId cohortId) {
-    LOG.info("createbuildplan " + join.getId().toString());
     Preconditions.checkNotNull(cohortId);
    // collect all ExchangeNodes on the build side and their corresponding input
     // fragments
@@ -183,8 +178,6 @@ public class ParallelPlanner {
 
     // move input fragments
     for (int i = 0; i < exchNodes.size(); ++i) {
-      LOG.info("re-link fragment " + inputFragments.get(i).getId().toString() 
+ " to "
-          + exchNodes.get(i).getFragment().getId().toString());
      Preconditions.checkState(exchNodes.get(i).getFragment() == buildFragment);
       join.getFragment().removeChild(inputFragments.get(i));
       buildFragment.getChildren().add(inputFragments.get(i));
@@ -196,9 +189,11 @@ public class ParallelPlanner {
     buildFragment.setCohortId(cohortId);
 
     planRoots_.add(buildFragment);
-    LOG.info("new build fragment " + buildFragment.getId().toString());
-    LOG.info("in cohort " + buildFragment.getCohortId().toString());
-    LOG.info("for join node " + join.getId().toString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("new build fragment " + buildFragment.getId().toString());
+      LOG.trace("in cohort " + buildFragment.getCohortId().toString());
+      LOG.trace("for join node " + join.getId().toString());
+    }
     createBuildPlans(buildFragment, null);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/Planner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/Planner.java b/fe/src/main/java/org/apache/impala/planner/Planner.java
index 1762144..40ca682 100644
--- a/fe/src/main/java/org/apache/impala/planner/Planner.java
+++ b/fe/src/main/java/org/apache/impala/planner/Planner.java
@@ -164,9 +164,11 @@ public class Planner {
     }
     rootFragment.setOutputExprs(resultExprs);
 
-    LOG.debug("desctbl: " + ctx_.getRootAnalyzer().getDescTbl().debugString());
-    LOG.debug("resultexprs: " + 
Expr.debugString(rootFragment.getOutputExprs()));
-    LOG.debug("finalize plan fragments");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("desctbl: " + 
ctx_.getRootAnalyzer().getDescTbl().debugString());
+      LOG.debug("resultexprs: " + 
Expr.debugString(rootFragment.getOutputExprs()));
+      LOG.debug("finalize plan fragments");
+    }
     for (PlanFragment fragment: fragments) {
       fragment.finalize(ctx_.getRootAnalyzer());
     }
@@ -214,7 +216,7 @@ public class Planner {
         graph.addTargetColumnLabels(ctx_.getQueryStmt().getColLabels());
         graph.computeLineageGraph(resultExprs, ctx_.getRootAnalyzer());
       }
-      LOG.trace("lineage: " + graph.debugString());
+      if (LOG.isTraceEnabled()) LOG.trace("lineage: " + graph.debugString());
       ctx_.getRootAnalyzer().getTimeline().markEvent("Lineage info computed");
     }
 
@@ -407,8 +409,10 @@ public class Planner {
     request.setPer_host_mem_req(maxPerHostMem);
     request.setPer_host_vcores((short) maxPerHostVcores);
 
-    LOG.debug("Estimated per-host peak memory requirement: " + maxPerHostMem);
-    LOG.debug("Estimated per-host virtual cores requirement: " + 
maxPerHostVcores);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Estimated per-host peak memory requirement: " + 
maxPerHostMem);
+      LOG.debug("Estimated per-host virtual cores requirement: " + 
maxPerHostVcores);
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
index 79cd8b8..133a14e 100644
--- a/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
+++ b/fe/src/main/java/org/apache/impala/planner/RuntimeFilterGenerator.java
@@ -245,7 +245,9 @@ public final class RuntimeFilterGenerator {
      // Ensure that the target expr does not contain TupleIsNull predicates as these
       // can't be evaluated at a scan node.
       targetExpr = TupleIsNullPredicate.unwrapExpr(targetExpr.clone());
-      LOG.trace("Generating runtime filter from predicate " + joinPredicate);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Generating runtime filter from predicate " + joinPredicate);
+      }
       return new RuntimeFilter(idGen.getNextId(), filterSrcNode,
           srcExpr, targetExpr, targetSlots);
     }
@@ -424,7 +426,7 @@ public final class RuntimeFilterGenerator {
     }
     for (RuntimeFilter filter:
          filters.subList(0, Math.min(filters.size(), maxNumFilters))) {
-      LOG.trace("Runtime filter: " + filter.debugString());
+      if (LOG.isTraceEnabled()) LOG.trace("Runtime filter: " + 
filter.debugString());
       filter.assignToPlanNodes();
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/SelectNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SelectNode.java b/fe/src/main/java/org/apache/impala/planner/SelectNode.java
index 7713520..c571c1c 100644
--- a/fe/src/main/java/org/apache/impala/planner/SelectNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/SelectNode.java
@@ -74,7 +74,9 @@ public class SelectNode extends PlanNode {
       Preconditions.checkState(cardinality_ >= 0);
     }
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("stats Select: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("stats Select: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
index f62c236..403a432 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
@@ -141,7 +141,9 @@ public class SingleNodePlanner {
       analyzer.materializeSlots(queryStmt.getBaseTblResultExprs());
     }
 
-    LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("desctbl: " + analyzer.getDescTbl().debugString());
+    }
     PlanNode singleNodePlan = createQueryPlan(queryStmt, analyzer,
         ctx_.getQueryOptions().isDisable_outermost_topn());
     Preconditions.checkNotNull(singleNodePlan);
@@ -363,15 +365,19 @@ public class SingleNodePlanner {
         // use 0 for the size to avoid it becoming the leftmost input
        // TODO: Consider raw size of scanned partitions in the absence of stats.
         candidates.add(new Pair(ref, new Long(0)));
-        LOG.trace("candidate " + ref.getUniqueAlias() + ": 0");
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("candidate " + ref.getUniqueAlias() + ": 0");
+        }
         continue;
       }
       Preconditions.checkState(ref.isAnalyzed());
       long materializedSize =
          (long) Math.ceil(plan.getAvgRowSize() * (double) plan.getCardinality());
       candidates.add(new Pair(ref, new Long(materializedSize)));
-      LOG.trace(
-          "candidate " + ref.getUniqueAlias() + ": " + 
Long.toString(materializedSize));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+            "candidate " + ref.getUniqueAlias() + ": " + 
Long.toString(materializedSize));
+      }
     }
     if (candidates.isEmpty()) return null;
 
@@ -402,7 +408,9 @@ public class SingleNodePlanner {
       List<Pair<TableRef, PlanNode>> refPlans, List<SubplanRef> subplanRefs)
       throws ImpalaException {
 
-    LOG.trace("createJoinPlan: " + leftmostRef.getUniqueAlias());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("createJoinPlan: " + leftmostRef.getUniqueAlias());
+    }
     // the refs that have yet to be joined
     List<Pair<TableRef, PlanNode>> remainingRefs = Lists.newArrayList();
     PlanNode root = null;  // root of accumulated join plan
@@ -458,7 +466,9 @@ public class SingleNodePlanner {
         analyzer.setAssignedConjuncts(root.getAssignedConjuncts());
         PlanNode candidate = createJoinNode(root, entry.second, ref, analyzer);
         if (candidate == null) continue;
-        LOG.trace("cardinality=" + Long.toString(candidate.getCardinality()));
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("cardinality=" + Long.toString(candidate.getCardinality()));
+        }
 
        // Use 'candidate' as the new root; don't consider any other table refs at this
         // position in the plan.
@@ -489,10 +499,12 @@ public class SingleNodePlanner {
       long lhsCardinality = root.getCardinality();
       long rhsCardinality = minEntry.second.getCardinality();
       numOps += lhsCardinality + rhsCardinality;
-      LOG.debug(Integer.toString(i) + " chose " + minEntry.first.getUniqueAlias()
-          + " #lhs=" + Long.toString(lhsCardinality)
-          + " #rhs=" + Long.toString(rhsCardinality)
-          + " #ops=" + Long.toString(numOps));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(Integer.toString(i) + " chose " + minEntry.first.getUniqueAlias()
+            + " #lhs=" + Long.toString(lhsCardinality)
+            + " #rhs=" + Long.toString(rhsCardinality)
+            + " #ops=" + Long.toString(numOps));
+      }
       remainingRefs.remove(minEntry);
       joinedRefs.add(minEntry.first);
       root = newRoot;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/SortNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SortNode.java b/fe/src/main/java/org/apache/impala/planner/SortNode.java
index 3a71f8c..58e04b4 100644
--- a/fe/src/main/java/org/apache/impala/planner/SortNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/SortNode.java
@@ -123,16 +123,20 @@ public class SortNode extends PlanNode {
     info_.substituteOrderingExprs(outputSmap_, analyzer);
     info_.checkConsistency();
 
-    LOG.trace("sort id " + tupleIds_.get(0).toString() + " smap: "
-        + outputSmap_.debugString());
-    LOG.trace("sort input exprs: " + Expr.debugString(resolvedTupleExprs_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("sort id " + tupleIds_.get(0).toString() + " smap: "
+          + outputSmap_.debugString());
+      LOG.trace("sort input exprs: " + Expr.debugString(resolvedTupleExprs_));
+    }
   }
 
   @Override
   protected void computeStats(Analyzer analyzer) {
     super.computeStats(analyzer);
     cardinality_ = capAtLimit(getChild(0).cardinality_);
-    LOG.debug("stats Sort: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("stats Sort: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/planner/UnionNode.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/UnionNode.java b/fe/src/main/java/org/apache/impala/planner/UnionNode.java
index a085973..69e7a37 100644
--- a/fe/src/main/java/org/apache/impala/planner/UnionNode.java
+++ b/fe/src/main/java/org/apache/impala/planner/UnionNode.java
@@ -97,7 +97,9 @@ public class UnionNode extends PlanNode {
     // (VALUES(1 x, 1 y)) b ON (a.x = b.y)). We need to set the correct value.
     if (numNodes_ == -1) numNodes_ = 1;
     cardinality_ = capAtLimit(cardinality_);
-    LOG.debug("stats Union: cardinality=" + Long.toString(cardinality_));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("stats Union: cardinality=" + Long.toString(cardinality_));
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 9748003..52000a9 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -636,7 +636,9 @@ public class CatalogOpExecutor {
 
       // Set the altered view attributes and update the metastore.
       setViewAttributes(params, msTbl);
-      LOG.debug(String.format("Altering view %s", tableName));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Altering view %s", tableName));
+      }
       applyAlterTable(msTbl);
       try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
         tbl.load(true, msClient.getHiveClient(), msTbl);
@@ -665,7 +667,9 @@ public class CatalogOpExecutor {
 
     TableName tableName = table.getTableName();
     Preconditions.checkState(tableName != null && tableName.isFullyQualified());
-    LOG.info(String.format("Updating table stats for: %s", tableName));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Updating table stats for: %s", tableName));
+    }
 
    // Deep copy the msTbl to avoid updating our cache before successfully persisting
     // the results to the metastore.
@@ -761,8 +765,10 @@ public class CatalogOpExecutor {
      // but it is predictable and easy to reason about because it does not depend on the
       // existing state of the metadata. See IMPALA-2201.
       long numRows = partitionStats.stats.num_rows;
-      LOG.debug(String.format("Updating stats for partition %s: numRows=%s",
-          partition.getValuesAsString(), numRows));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Updating stats for partition %s: numRows=%s",
+            partition.getValuesAsString(), numRows));
+      }
       PartitionStatsUtil.partStatsToParameters(partitionStats, partition);
       partition.putToParameters(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
       partition.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
@@ -809,10 +815,12 @@ public class CatalogOpExecutor {
       ColumnStatisticsData colStatsData =
           createHiveColStatsData(entry.getValue(), tableCol.getType());
       if (colStatsData == null) continue;
-      LOG.debug(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " +
-          "maxSize=%s avgSize=%s", colName, entry.getValue().getNum_distinct_values(),
-          entry.getValue().getNum_nulls(), entry.getValue().getMax_size(),
-          entry.getValue().getAvg_size()));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " +
+            "maxSize=%s avgSize=%s", colName, entry.getValue().getNum_distinct_values(),
+            entry.getValue().getNum_nulls(), entry.getValue().getMax_size(),
+            entry.getValue().getAvg_size()));
+      }
       ColumnStatisticsObj colStatsObj = new ColumnStatisticsObj(colName,
           tableCol.getType().toString().toLowerCase(), colStatsData);
       colStats.addToStatsObj(colStatsObj);
@@ -879,8 +887,10 @@ public class CatalogOpExecutor {
     Preconditions.checkState(dbName != null && !dbName.isEmpty(),
         "Null or empty database name passed as argument to Catalog.createDatabase");
     if (params.if_not_exists && catalog_.getDb(dbName) != null) {
-      LOG.debug("Skipping database creation because " + dbName + " already exists and " +
-          "IF NOT EXISTS was specified.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Skipping database creation because " + dbName + " already exists "
+            + "and IF NOT EXISTS was specified.");
+      }
       resp.getResult().setVersion(catalog_.getCatalogVersion());
       return;
     }
@@ -893,7 +903,7 @@ public class CatalogOpExecutor {
     if (params.getLocation() != null) {
       db.setLocationUri(params.getLocation());
     }
-    LOG.debug("Creating database " + dbName);
+    if (LOG.isDebugEnabled()) LOG.debug("Creating database " + dbName);
     Db newDb = null;
     synchronized (metastoreDdlLock_) {
       try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
@@ -905,8 +915,10 @@ public class CatalogOpExecutor {
             throw new ImpalaRuntimeException(
                 String.format(HMS_RPC_ERROR_FORMAT_STR, "createDatabase"), e);
           }
-          LOG.debug(String.format("Ignoring '%s' when creating database %s because " +
-              "IF NOT EXISTS was specified.", e, dbName));
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("Ignoring '%s' when creating database %s because " +
+                "IF NOT EXISTS was specified.", e, dbName));
+          }
           newDb = catalog_.getDb(dbName);
           if (newDb == null) {
             try {
@@ -946,8 +958,10 @@ public class CatalogOpExecutor {
 private void createFunction(TCreateFunctionParams params, TDdlExecResponse resp)
       throws ImpalaException {
     Function fn = Function.fromThrift(params.getFn());
-    LOG.debug(String.format("Adding %s: %s",
-        fn.getClass().getSimpleName(), fn.signatureString()));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Adding %s: %s",
+          fn.getClass().getSimpleName(), fn.signatureString()));
+    }
     boolean isPersistentJavaFn =
         (fn.getBinaryType() == TFunctionBinaryType.JAVA) && fn.isPersistent();
     synchronized (metastoreDdlLock_) {
@@ -982,10 +996,11 @@
             "No compatible function signatures found in class: " + hiveFn.getClassName());
         }
         if (addJavaFunctionToHms(fn.dbName(), hiveFn, params.if_not_exists)) {
-          LOG.info("Funcs size:" + funcs.size());
           for (Function addedFn: funcs) {
-            LOG.info(String.format("Adding function: %s.%s", addedFn.dbName(),
-                addedFn.signatureString()));
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(String.format("Adding function: %s.%s", addedFn.dbName(),
+                  addedFn.signatureString()));
+            }
             Preconditions.checkState(catalog_.addFunction(addedFn));
             addedFunctions.add(buildTCatalogFnObject(addedFn));
           }

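Many of the CatalogOpExecutor call sites above guard String.format() rather than plain concatenation, which saves even more work: format parsing and primitive boxing happen on every unguarded call. A short sketch under the same assumptions as before (hypothetical class, slf4j-style Logger):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FormattedLoggingExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(FormattedLoggingExample.class);

  void logTableStats(String tableName, long numRows) {
    // String.format() parses the format string and boxes numRows on every
    // call; the guard confines that cost to runs where DEBUG is enabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug(String.format("Updating stats for %s: numRows=%d",
          tableName, numRows));
    }
  }
}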
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/Frontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index 5e0307c..a7bec4d 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -839,8 +839,10 @@ public class Frontend {
         return false;
       }
 
-      LOG.trace(String.format("Waiting for table(s) to complete loading: %s",
-          Joiner.on(", ").join(missingTbls)));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(String.format("Waiting for table(s) to complete loading: %s",
+            Joiner.on(", ").join(missingTbls)));
+      }
       getCatalog().waitForCatalogUpdate(MAX_CATALOG_UPDATE_WAIT_TIME_MS);
       missingTbls = getMissingTbls(missingTbls);
       // TODO: Check for query cancellation here.
@@ -879,7 +881,7 @@ public class Frontend {
 
     AnalysisContext analysisCtx = new AnalysisContext(impaladCatalog_, queryCtx,
         authzConfig_);
-    LOG.debug("analyze query " + queryCtx.request.stmt);
+    if (LOG.isDebugEnabled()) LOG.debug("analyze query " + queryCtx.request.stmt);
 
     // Run analysis in a loop until it any of the following events occur:
     // 1) Analysis completes successfully.
@@ -898,8 +900,10 @@ public class Frontend {
 
           // Some tables/views were missing, request and wait for them to load.
           if (!requestTblLoadAndWait(missingTbls, MISSING_TBL_LOAD_WAIT_TIMEOUT_MS)) {
-            LOG.info(String.format("Missing tables were not received in %dms. Load " +
-                "request will be retried.", MISSING_TBL_LOAD_WAIT_TIMEOUT_MS));
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(String.format("Missing tables were not received in %dms. Load " +
+                  "request will be retried.", MISSING_TBL_LOAD_WAIT_TIMEOUT_MS));
+            }
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/JniFrontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
index b343369..29cbd10 100644
--- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java
+++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
@@ -157,7 +157,9 @@ public class JniFrontend {
 
     StringBuilder explainString = new StringBuilder();
     TExecRequest result = frontend_.createExecRequest(queryCtx, explainString);
-    if (explainString.length() > 0) LOG.debug(explainString.toString());
+    if (explainString.length() > 0 && LOG.isDebugEnabled()) {
+      LOG.debug(explainString.toString());
+    }
 
     // TODO: avoid creating serializer for each query?
     TSerializer serializer = new TSerializer(protocolFactory_);
@@ -232,7 +234,7 @@ public class JniFrontend {
     TQueryCtx queryCtx = new TQueryCtx();
     JniUtil.deserializeThrift(protocolFactory_, queryCtx, thriftQueryContext);
     String plan = frontend_.getExplainString(queryCtx);
-    LOG.debug("Explain plan: " + plan);
+    if (LOG.isDebugEnabled()) LOG.debug("Explain plan: " + plan);
     return plan;
   }
 

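The first JniFrontend hunk folds the level check into an existing condition instead of adding a nested block. A minimal sketch of that variant, with the same caveats (hypothetical class, slf4j-style Logger):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CombinedGuardExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(CombinedGuardExample.class);

  void maybeLogExplain(StringBuilder explainString) {
    // Both checks are cheap; short-circuit evaluation skips toString()
    // unless there is output to log and DEBUG is enabled.
    if (explainString.length() > 0 && LOG.isDebugEnabled()) {
      LOG.debug(explainString.toString());
    }
  }
}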
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
index 82fcab8..e3f9a7f 100644
--- a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java
@@ -70,8 +70,10 @@ public class KuduCatalogOpExecutor {
     Preconditions.checkState(!Table.isExternalTable(msTbl));
     String kuduTableName = msTbl.getParameters().get(KuduTable.KEY_TABLE_NAME);
     String masterHosts = msTbl.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
-    LOG.debug(String.format("Creating table '%s' in master '%s'", kuduTableName,
-        masterHosts));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Creating table '%s' in master '%s'", kuduTableName,
+          masterHosts));
+    }
     try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) {
       // TODO: The IF NOT EXISTS case should be handled by Kudu to ensure atomicity.
       // (see KUDU-1710).
@@ -191,8 +193,10 @@ public class KuduCatalogOpExecutor {
     Preconditions.checkState(!Table.isExternalTable(msTbl));
     String tableName = msTbl.getParameters().get(KuduTable.KEY_TABLE_NAME);
     String masterHosts = msTbl.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
-    LOG.debug(String.format("Dropping table '%s' from master '%s'", tableName,
-        masterHosts));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Dropping table '%s' from master '%s'", tableName,
+          masterHosts));
+    }
     try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) {
       Preconditions.checkState(!Strings.isNullOrEmpty(tableName));
       // TODO: The IF EXISTS case should be handled by Kudu to ensure atomicity.
@@ -220,8 +224,10 @@ public class KuduCatalogOpExecutor {
     String kuduTableName = msTblCopy.getParameters().get(KuduTable.KEY_TABLE_NAME);
     Preconditions.checkState(!Strings.isNullOrEmpty(kuduTableName));
     String masterHosts = msTblCopy.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
-    LOG.debug(String.format("Loading schema of table '%s' from master '%s'",
-        kuduTableName, masterHosts));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Loading schema of table '%s' from master '%s'",
+          kuduTableName, masterHosts));
+    }
     try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) {
       if (!kudu.tableExists(kuduTableName)) {
         throw new ImpalaRuntimeException(String.format("Table does not exist in Kudu: " +

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/service/MetadataOp.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/MetadataOp.java b/fe/src/main/java/org/apache/impala/service/MetadataOp.java
index c24f153..16166ee 100644
--- a/fe/src/main/java/org/apache/impala/service/MetadataOp.java
+++ b/fe/src/main/java/org/apache/impala/service/MetadataOp.java
@@ -375,7 +375,9 @@ public class MetadataOp {
         }
       }
     }
-    LOG.debug("Returning " + result.rows.size() + " table columns");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Returning " + result.rows.size() + " table columns");
+    }
     return result;
   }
 
@@ -426,7 +428,9 @@ public class MetadataOp {
       result.rows.add(row);
     }
 
-    LOG.debug("Returning " + result.rows.size() + " schemas");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Returning " + result.rows.size() + " schemas");
+    }
     return result;
   }
 
@@ -479,7 +483,7 @@ public class MetadataOp {
         result.rows.add(row);
       }
     }
-    LOG.debug("Returning " + result.rows.size() + " tables");
+    if (LOG.isDebugEnabled()) LOG.debug("Returning " + result.rows.size() + " tables");
     return result;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java b/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
index 8c7b1cc..579091c 100644
--- a/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
+++ b/fe/src/main/java/org/apache/impala/util/FsPermissionChecker.java
@@ -287,9 +287,12 @@ public class FsPermissionChecker {
     try {
       aclStatus = fs.getAclStatus(path);
     } catch (AclException ex) {
-      LOG.trace("No ACLs retrieved, skipping ACLs check (HDFS will enforce ACLs)", ex);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+            "No ACLs retrieved, skipping ACLs check (HDFS will enforce ACLs)", ex);
+      }
     } catch (UnsupportedOperationException ex) {
-      LOG.trace("No ACLs retrieved, unsupported", ex);
+      if (LOG.isTraceEnabled()) LOG.trace("No ACLs retrieved, unsupported", ex);
     }
     return new Permissions(fs.getFileStatus(path), aclStatus);
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
index 0ee7d28..acbc53a 100644
--- a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java
@@ -130,7 +130,9 @@ public class HdfsCachingUtil {
   public static void uncacheTbl(org.apache.hadoop.hive.metastore.api.Table table)
       throws ImpalaRuntimeException {
     Preconditions.checkNotNull(table);
-    LOG.debug("Uncaching table: " + table.getDbName() + "." + table.getTableName());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Uncaching table: " + table.getDbName() + "." + table.getTableName());
+    }
     Long id = getCacheDirectiveId(table.getParameters());
     if (id == null) return;
     HdfsCachingUtil.removeDirective(id);
@@ -236,8 +238,10 @@ public class HdfsCachingUtil {
 
     bytesNeeded = cacheDir.getStats().getBytesNeeded();
     currentBytesCached = cacheDir.getStats().getBytesCached();
-    LOG.debug(String.format("Waiting on cache directive id: %d. Bytes " +
-        "cached (%d) / needed (%d)", directiveId, currentBytesCached, bytesNeeded));
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Waiting on cache directive id: %d. Bytes " +
+          "cached (%d) / needed (%d)", directiveId, currentBytesCached, bytesNeeded));
+    }
     // All the bytes are cached, just return.
     if (bytesNeeded == currentBytesCached) return;
 
@@ -258,9 +262,11 @@ public class HdfsCachingUtil {
       currentBytesCached = cacheDir.getStats().getBytesCached();
       bytesNeeded = cacheDir.getStats().getBytesNeeded();
       if (currentBytesCached == bytesNeeded) {
-        LOG.debug(String.format("Cache directive id: %d has completed." +
-            "Bytes cached (%d) / needed (%d)", directiveId, currentBytesCached,
-            bytesNeeded));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format("Cache directive id: %d has completed." +
+              "Bytes cached (%d) / needed (%d)", directiveId, currentBytesCached,
+              bytesNeeded));
+        }
         return;
       }
 
@@ -295,7 +301,9 @@ public class HdfsCachingUtil {
         .setPool(poolName)
         .setReplication(replication)
         .setPath(path).build();
-    LOG.debug("Submitting cache directive: " + info.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Submitting cache directive: " + info.toString());
+    }
     try {
       return getDfs().addCacheDirective(info);
     } catch (IOException e) {
@@ -347,7 +355,9 @@ public class HdfsCachingUtil {
         .setPool(poolName)
         .setReplication(replication)
         .setPath(path).build();
-    LOG.debug("Modifying cache directive: " + info.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Modifying cache directive: " + info.toString());
+    }
     try {
       getDfs().modifyCacheDirective(info);
     } catch (IOException e) {
@@ -362,7 +372,7 @@ public class HdfsCachingUtil {
    * directive.
    */
   private static void removeDirective(long directiveId) throws ImpalaRuntimeException {
-    LOG.debug("Removing cache directive id: " + directiveId);
+    if (LOG.isDebugEnabled()) LOG.debug("Removing cache directive id: " + directiveId);
     try {
       getDfs().removeCacheDirective(directiveId);
     } catch (IOException e) {
@@ -379,7 +389,9 @@ public class HdfsCachingUtil {
    */
   private static CacheDirectiveEntry getDirective(long directiveId)
       throws ImpalaRuntimeException {
-    LOG.trace("Getting cache directive id: " + directiveId);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Getting cache directive id: " + directiveId);
+    }
     CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
         .setId(directiveId)
         .build();

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
index 6968f33..95ef700 100644
--- a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
@@ -126,8 +126,10 @@ public class MetaStoreUtil {
   public static List<Partition> fetchPartitionsByName(
       IMetaStoreClient client, List<String> partNames, String dbName, String tblName)
       throws MetaException, TException {
-    LOG.trace(String.format("Fetching %d partitions for: %s.%s using partition " +
-        "batch size: %d", partNames.size(), dbName, tblName, maxPartitionsPerRpc_));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Fetching %d partitions for: %s.%s using partition " +
+          "batch size: %d", partNames.size(), dbName, tblName, maxPartitionsPerRpc_));
+    }
 
     List<org.apache.hadoop.hive.metastore.api.Partition> fetchedPartitions =
         Lists.newArrayList();

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
index c1e8224..94c0c33 100644
--- a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
+++ b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java
@@ -274,9 +274,12 @@ public class RequestPoolService {
     JniUtil.deserializeThrift(protocolFactory_, resolvePoolParams,
         thriftResolvePoolParams);
     TResolveRequestPoolResult result = resolveRequestPool(resolvePoolParams);
-    LOG.info("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}",
-        new Object[] { resolvePoolParams.getRequested_pool(), resolvePoolParams.getUser(),
-                       result.resolved_pool, result.has_access });
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}",
+          new Object[] {
+            resolvePoolParams.getRequested_pool(), resolvePoolParams.getUser(),
+            result.resolved_pool, result.has_access });
+    }
     try {
       return new TSerializer(protocolFactory_).serialize(result);
     } catch (TException e) {
@@ -372,10 +375,12 @@ public class RequestPoolService {
       result.setDefault_query_options(getLlamaPoolConfigValue(currentLlamaConf, pool,
           QUERY_OPTIONS_KEY, ""));
     }
-    LOG.info("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}, " +
-        "max_queued={},  queue_timeout_ms={}, default_query_options={}",
-        new Object[] { pool, result.max_mem_resources, result.max_requests,
-            result.max_queued, result.queue_timeout_ms, result.default_query_options });
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}, " +
+          "max_queued={},  queue_timeout_ms={}, default_query_options={}",
+          new Object[] { pool, result.max_mem_resources, result.max_requests,
+              result.max_queued, result.queue_timeout_ms, result.default_query_options });
+    }
     return result;
   }
 

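RequestPoolService already used {}-style parameterized logging, which defers message formatting until the logger decides to emit. The added guard still helps: the Object[] varargs array is allocated and every argument expression is evaluated at the call site regardless of level. A sketch under the same assumptions as the earlier examples (hypothetical names, slf4j-style Logger):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedGuardExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParameterizedGuardExample.class);

  void logResolvedPool(String requested, String user, String resolved,
      boolean hasAccess) {
    // The {} placeholders defer String building to the logger, but the
    // argument array is still allocated (and hasAccess boxed) at the call
    // site unless the whole call is guarded.
    if (LOG.isDebugEnabled()) {
      LOG.debug("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}",
          new Object[] { requested, user, resolved, hasAccess });
    }
  }
}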
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/352833b8/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java b/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
index 083ad48..a1290cc 100644
--- a/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
+++ b/fe/src/main/java/org/apache/impala/util/SentryPolicyService.java
@@ -110,8 +110,10 @@ public class SentryPolicyService {
    */
   public void dropRole(User requestingUser, String roleName, boolean ifExists)
       throws ImpalaException {
-    LOG.trace(String.format("Dropping role: %s on behalf of: %s", roleName,
-        requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Dropping role: %s on behalf of: %s", roleName,
+          requestingUser.getName()));
+    }
     SentryServiceClient client = new SentryServiceClient();
     try {
       if (ifExists) {
@@ -139,8 +141,10 @@ public class SentryPolicyService {
    */
   public void createRole(User requestingUser, String roleName, boolean ifNotExists)
       throws ImpalaException {
-    LOG.trace(String.format("Creating role: %s on behalf of: %s", roleName,
-        requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Creating role: %s on behalf of: %s", roleName,
+          requestingUser.getName()));
+    }
     SentryServiceClient client = new SentryServiceClient();
     try {
       client.get().createRole(requestingUser.getShortName(), roleName);
@@ -167,8 +171,10 @@ public class SentryPolicyService {
    */
   public void grantRoleToGroup(User requestingUser, String roleName, String groupName)
       throws ImpalaException {
-    LOG.trace(String.format("Granting role '%s' to group '%s' on behalf of: %s",
-        roleName, groupName, requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Granting role '%s' to group '%s' on behalf of: %s",
+          roleName, groupName, requestingUser.getName()));
+    }
     SentryServiceClient client = new SentryServiceClient();
     try {
       client.get().grantRoleToGroup(requestingUser.getShortName(), groupName, roleName);
@@ -193,8 +199,10 @@ public class SentryPolicyService {
    */
   public void revokeRoleFromGroup(User requestingUser, String roleName, String groupName)
       throws ImpalaException {
-    LOG.trace(String.format("Revoking role '%s' from group '%s' on behalf of: %s",
-        roleName, groupName, requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Revoking role '%s' from group '%s' on behalf of: %s",
+          roleName, groupName, requestingUser.getName()));
+    }
     SentryServiceClient client = new SentryServiceClient();
     try {
       client.get().revokeRoleFromGroup(requestingUser.getShortName(),
@@ -231,9 +239,12 @@ public class SentryPolicyService {
     Preconditions.checkState(!privileges.isEmpty());
     TPrivilege privilege = privileges.get(0);
     TPrivilegeScope scope = privilege.getScope();
-    LOG.trace(String.format("Granting role '%s' '%s' privilege on '%s' on behalf of: %s",
-        roleName, privilege.getPrivilege_level().toString(), scope.toString(),
-        requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format(
+          "Granting role '%s' '%s' privilege on '%s' on behalf of: %s",
+          roleName, privilege.getPrivilege_level().toString(), scope.toString(),
+          requestingUser.getName()));
+    }
     // Verify that all privileges have the same scope.
     for (int i = 1; i < privileges.size(); ++i) {
       Preconditions.checkState(privileges.get(i).getScope() == scope, "All the " +
@@ -306,9 +317,11 @@ public class SentryPolicyService {
     Preconditions.checkState(!privileges.isEmpty());
     TPrivilege privilege = privileges.get(0);
     TPrivilegeScope scope = privilege.getScope();
-    LOG.trace(String.format("Revoking from role '%s' '%s' privilege on '%s' on " +
-        "behalf of: %s", roleName, privilege.getPrivilege_level().toString(),
-        scope.toString(), requestingUser.getName()));
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Revoking from role '%s' '%s' privilege on '%s' on " +
+          "behalf of: %s", roleName, privilege.getPrivilege_level().toString(),
+          scope.toString(), requestingUser.getName()));
+    }
     // Verify that all privileges have the same scope.
     for (int i = 1; i < privileges.size(); ++i) {
       Preconditions.checkState(privileges.get(i).getScope() == scope, "All the " +
