Cleanup of logging output, part 2: downgrading 'debug' to 'trace'

All 'debug' output still gets written into the info log. Downgrade these messages to 'trace' to avoid that.
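For reference, the pattern applied throughout the patch is the standard level-guarded logging idiom, moved from the debug level to the trace level. Below is a minimal sketch (not taken from the patch itself), assuming an slf4j-style Logger named LOG and a hypothetical expensiveDebugString() helper standing in for calls like result.debugString():

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LogLevelExample {
      private static final Logger LOG = LoggerFactory.getLogger(LogLevelExample.class);

      // Hypothetical stand-in for expensive helpers such as debugString().
      private static String expensiveDebugString() { return "agg info: ..."; }

      public static void main(String[] args) {
        // Before: guarded at 'debug', so the message still reaches the info log
        // whenever debug logging is on.
        if (LOG.isDebugEnabled()) LOG.debug("agg info:\n" + expensiveDebugString());

        // After: guarded at 'trace', so the message (and the string concatenation
        // that builds it) is skipped unless trace logging is explicitly enabled.
        if (LOG.isTraceEnabled()) LOG.trace("agg info:\n" + expensiveDebugString());
      }
    }

Keeping the isTraceEnabled() guard matters because the argument string is constructed before the logger checks the level; the guard avoids that cost on the normal path.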
Change-Id: If54f9d563be75571c7dc6d99ed13a6e86d9061a9
Reviewed-on: http://gerrit.cloudera.org:8080/5342
Reviewed-by: Alex Behm <[email protected]>
Tested-by: Impala Public Jenkins

Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/694d72ea
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/694d72ea
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/694d72ea

Branch: refs/heads/master
Commit: 694d72ea9bd1543e2bf1b0a2edf2af213f89a35b
Parents: b237d13
Author: Marcel Kornacker <[email protected]>
Authored: Fri Dec 2 17:02:29 2016 -0800
Committer: Impala Public Jenkins <[email protected]>
Committed: Sat Dec 3 09:58:52 2016 +0000

----------------------------------------------------------------------
 .../apache/impala/analysis/AggregateInfo.java | 2 +-
 .../impala/analysis/ComputeStatsStmt.java | 12 ++--
 .../impala/analysis/ExprSubstitutionMap.java | 4 +-
 .../org/apache/impala/analysis/SelectStmt.java | 2 +-
 .../impala/catalog/CatalogServiceCatalog.java | 24 +++----
 .../java/org/apache/impala/catalog/Column.java | 4 +-
 .../apache/impala/catalog/DataSourceTable.java | 4 +-
 .../org/apache/impala/catalog/HdfsTable.java | 32 ++++-----
 .../apache/impala/catalog/ImpaladCatalog.java | 12 ++--
 .../impala/catalog/MetaStoreClientPool.java | 4 +-
 .../java/org/apache/impala/catalog/Table.java | 2 +-
 .../org/apache/impala/catalog/TableLoader.java | 2 +-
 .../apache/impala/catalog/TableLoadingMgr.java | 6 +-
 .../apache/impala/common/FileSystemUtil.java | 18 +++---
 .../ExternalDataSourceExecutor.java | 4 +-
 .../impala/planner/DataSourceScanNode.java | 8 ++-
 .../impala/planner/DistributedPlanner.java | 22 +++----
 .../apache/impala/planner/HBaseScanNode.java | 8 +--
 .../org/apache/impala/planner/HdfsScanNode.java | 20 +++---
 .../org/apache/impala/planner/JoinNode.java | 4 +-
 .../org/apache/impala/planner/KuduScanNode.java | 4 +-
 .../java/org/apache/impala/planner/Planner.java | 14 ++--
 .../org/apache/impala/planner/SelectNode.java | 4 +-
 .../impala/planner/SingleNodePlanner.java | 4 +-
 .../org/apache/impala/planner/SortNode.java | 4 +-
 .../org/apache/impala/planner/UnionNode.java | 4 +-
 .../impala/service/CatalogOpExecutor.java | 68 ++++++++++----------
 .../org/apache/impala/service/Frontend.java | 12 ++--
 .../org/apache/impala/service/JniFrontend.java | 6 +-
 .../impala/service/KuduCatalogOpExecutor.java | 12 ++--
 .../org/apache/impala/service/MetadataOp.java | 10 +--
 .../org/apache/impala/util/HdfsCachingUtil.java | 22 +++----
 .../apache/impala/util/RequestPoolService.java | 6 +-
 33 files changed, 185 insertions(+), 179 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
index 1ce5833..ec88aae 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AggregateInfo.java
@@ -180,7 +180,7 @@ public class AggregateInfo extends AggregateInfoBase {
       Preconditions.checkState(tupleDesc == null);
       result.createDistinctAggInfo(groupingExprs, distinctAggExprs, analyzer);
     }
-    if (LOG.isDebugEnabled()) LOG.debug("agg info:\n" + result.debugString());
+    if
(LOG.isTraceEnabled()) LOG.trace("agg info:\n" + result.debugString()); return result; } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java index 298a17d..84b866b 100644 --- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java +++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java @@ -379,8 +379,8 @@ public class ComputeStatsStmt extends StatementBase { } if (filterPreds.size() == 0 && validPartStats_.size() != 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("No partitions selected for incremental stats update"); + if (LOG.isTraceEnabled()) { + LOG.trace("No partitions selected for incremental stats update"); } analyzer.addWarning("No partitions selected for incremental stats update"); return; @@ -442,12 +442,12 @@ public class ComputeStatsStmt extends StatementBase { } tableStatsQueryStr_ = tableStatsQueryBuilder.toString(); - if (LOG.isDebugEnabled()) LOG.debug("Table stats query: " + tableStatsQueryStr_); + if (LOG.isTraceEnabled()) LOG.trace("Table stats query: " + tableStatsQueryStr_); if (columnStatsSelectList.isEmpty()) { // Table doesn't have any columns that we can compute stats for. - if (LOG.isDebugEnabled()) { - LOG.debug("No supported column types in table " + table_.getTableName() + + if (LOG.isTraceEnabled()) { + LOG.trace("No supported column types in table " + table_.getTableName() + ", no column statistics will be gathered."); } columnStatsQueryStr_ = null; @@ -455,7 +455,7 @@ public class ComputeStatsStmt extends StatementBase { } columnStatsQueryStr_ = columnStatsQueryBuilder.toString(); - if (LOG.isDebugEnabled()) LOG.debug("Column stats query: " + columnStatsQueryStr_); + if (LOG.isTraceEnabled()) LOG.trace("Column stats query: " + columnStatsQueryStr_); } /** http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java b/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java index 83e227e..a3ce06f 100644 --- a/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java +++ b/fe/src/main/java/org/apache/impala/analysis/ExprSubstitutionMap.java @@ -156,8 +156,8 @@ public final class ExprSubstitutionMap { for (int i = 0; i < lhs_.size(); ++i) { for (int j = i + 1; j < lhs_.size(); ++j) { if (lhs_.get(i).equals(lhs_.get(j))) { - if (LOG.isDebugEnabled()) { - LOG.debug("verify: smap=" + this.debugString()); + if (LOG.isTraceEnabled()) { + LOG.trace("verify: smap=" + this.debugString()); } Preconditions.checkState(false); } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java index 264548c..80ffde5 100644 --- a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java +++ b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java @@ -273,7 +273,7 @@ public class SelectStmt extends QueryStmt { } if (aggInfo_ != 
null) { - if (LOG.isDebugEnabled()) LOG.debug("post-analysis " + aggInfo_.debugString()); + if (LOG.isTraceEnabled()) LOG.trace("post-analysis " + aggInfo_.debugString()); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java index e7c84da..6fbcccc 100644 --- a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java +++ b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java @@ -298,8 +298,8 @@ public class CatalogServiceCatalog extends Catalog { try { catalogTbl.setTable(tbl.toThrift()); } catch (Exception e) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Error calling toThrift() on table %s.%s: %s", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Error calling toThrift() on table %s.%s: %s", db.getName(), tblName, e.getMessage()), e); } continue; @@ -523,8 +523,8 @@ public class CatalogServiceCatalog extends Catalog { private void loadFunctionsFromDbParams(Db db, org.apache.hadoop.hive.metastore.api.Database msDb) { if (msDb == null || msDb.getParameters() == null) return; - if (LOG.isDebugEnabled()) { - LOG.debug("Loading native functions for database: " + db.getName()); + if (LOG.isTraceEnabled()) { + LOG.trace("Loading native functions for database: " + db.getName()); } TCompactProtocol.Factory protocolFactory = new TCompactProtocol.Factory(); for (String key: msDb.getParameters().keySet()) { @@ -551,8 +551,8 @@ public class CatalogServiceCatalog extends Catalog { private void loadJavaFunctions(Db db, List<org.apache.hadoop.hive.metastore.api.Function> functions) { Preconditions.checkNotNull(functions); - if (LOG.isDebugEnabled()) { - LOG.debug("Loading Java functions for database: " + db.getName()); + if (LOG.isTraceEnabled()) { + LOG.trace("Loading Java functions for database: " + db.getName()); } for (org.apache.hadoop.hive.metastore.api.Function function: functions) { try { @@ -888,8 +888,8 @@ public class CatalogServiceCatalog extends Catalog { * Throws a CatalogException if there is an error loading table metadata. */ public Table reloadTable(Table tbl) throws CatalogException { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Refreshing table metadata: %s", tbl.getFullName())); + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Refreshing table metadata: %s", tbl.getFullName())); } TTableName tblName = new TTableName(tbl.getDb().getName().toLowerCase(), tbl.getName().toLowerCase()); @@ -1019,8 +1019,8 @@ public class CatalogServiceCatalog extends Catalog { Preconditions.checkNotNull(updatedObjects); updatedObjects.first = null; updatedObjects.second = null; - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Invalidating table metadata: %s.%s", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Invalidating table metadata: %s.%s", tableName.getDb_name(), tableName.getTable_name())); } String dbName = tableName.getDb_name(); @@ -1261,8 +1261,8 @@ public class CatalogServiceCatalog extends Catalog { String partitionName = hdfsPartition == null ? 
HdfsTable.constructPartitionName(partitionSpec) : hdfsPartition.getPartitionName(); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Refreshing Partition metadata: %s %s", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Refreshing Partition metadata: %s %s", hdfsTable.getFullName(), partitionName)); } try (MetaStoreClient msClient = getMetaStoreClient()) { http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/Column.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/Column.java b/fe/src/main/java/org/apache/impala/catalog/Column.java index e01fa0a..b510102 100644 --- a/fe/src/main/java/org/apache/impala/catalog/Column.java +++ b/fe/src/main/java/org/apache/impala/catalog/Column.java @@ -68,8 +68,8 @@ public class Column { public boolean updateStats(ColumnStatisticsData statsData) { boolean statsDataCompatibleWithColType = stats_.update(type_, statsData); - if (LOG.isDebugEnabled()) { - LOG.debug("col stats: " + name_ + " #distinct=" + stats_.getNumDistinctValues()); + if (LOG.isTraceEnabled()) { + LOG.trace("col stats: " + name_ + " #distinct=" + stats_.getNumDistinctValues()); } return statsDataCompatibleWithColType; } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java b/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java index 7370806..a1597b9 100644 --- a/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java +++ b/fe/src/main/java/org/apache/impala/catalog/DataSourceTable.java @@ -163,8 +163,8 @@ public class DataSourceTable extends Table { Preconditions.checkNotNull(msTbl); msTable_ = msTbl; clearColumns(); - if (LOG.isDebugEnabled()) { - LOG.debug("load table: " + db_.getName() + "." + name_); + if (LOG.isTraceEnabled()) { + LOG.trace("load table: " + db_.getName() + "." + name_); } String dataSourceName = getRequiredTableProperty(msTbl, TBL_PROP_DATA_SRC_NAME, null); String location = getRequiredTableProperty(msTbl, TBL_PROP_LOCATION, dataSourceName); http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java index 66e28f8..2a30cec 100644 --- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java +++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java @@ -370,8 +370,8 @@ public class HdfsTable extends Table { Preconditions.checkNotNull(fd); Preconditions.checkNotNull(perFsFileBlocks); Preconditions.checkArgument(!file.isDirectory()); - if (LOG.isDebugEnabled()) { - LOG.debug("load block md for " + name_ + " file " + fd.getFileName()); + if (LOG.isTraceEnabled()) { + LOG.trace("load block md for " + name_ + " file " + fd.getFileName()); } if (!FileSystemUtil.hasGetFileBlockLocations(fs)) { @@ -1087,8 +1087,8 @@ public class HdfsTable extends Table { // Load partition and file metadata if (reuseMetadata) { // Incrementally update this table's partitions and file metadata - if (LOG.isDebugEnabled()) { - LOG.debug("incremental update for table: " + db_.getName() + "." 
+ name_); + if (LOG.isTraceEnabled()) { + LOG.trace("incremental update for table: " + db_.getName() + "." + name_); } Preconditions.checkState(partitionsToUpdate == null || loadFileMetadata); updateMdFromHmsTable(msTbl); @@ -1099,8 +1099,8 @@ public class HdfsTable extends Table { } } else { // Load all partitions from Hive Metastore, including file metadata. - if (LOG.isDebugEnabled()) { - LOG.debug("load table from Hive Metastore: " + db_.getName() + "." + name_); + if (LOG.isTraceEnabled()) { + LOG.trace("load table from Hive Metastore: " + db_.getName() + "." + name_); } List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions = Lists.newArrayList(); @@ -1141,8 +1141,8 @@ public class HdfsTable extends Table { * Updates the file metadata of an unpartitioned HdfsTable. */ private void updateUnpartitionedTableFileMd() throws CatalogException { - if (LOG.isDebugEnabled()) { - LOG.debug("update unpartitioned table: " + name_); + if (LOG.isTraceEnabled()) { + LOG.trace("update unpartitioned table: " + name_); } resetPartitions(); org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(); @@ -1166,8 +1166,8 @@ public class HdfsTable extends Table { */ private void updatePartitionsFromHms(IMetaStoreClient client, Set<String> partitionsToUpdate, boolean loadFileMetadata) throws Exception { - if (LOG.isDebugEnabled()) { - LOG.debug("sync table partitions: " + name_); + if (LOG.isTraceEnabled()) { + LOG.trace("sync table partitions: " + name_); } org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(); Preconditions.checkNotNull(msTbl); @@ -1427,8 +1427,8 @@ public class HdfsTable extends Table { IMetaStoreClient client) throws Exception { Preconditions.checkNotNull(partitions); if (partitions.isEmpty()) return; - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Incrementally updating %d/%d partitions.", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Incrementally updating %d/%d partitions.", partitions.size(), partitionMap_.size())); } Set<String> partitionNames = Sets.newHashSet(); @@ -1483,8 +1483,8 @@ public class HdfsTable extends Table { private void loadPartitionFileMetadata(List<HdfsPartition> partitions) throws Exception { Preconditions.checkNotNull(partitions); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("loading file metadata for %d partitions", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("loading file metadata for %d partitions", partitions.size())); } org.apache.hadoop.hive.metastore.api.Table msTbl = getMetaStoreTable(); @@ -1855,8 +1855,8 @@ public class HdfsTable extends Table { } } } catch (Exception ex) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Invalid partition value (%s) for Type (%s).", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Invalid partition value (%s) for Type (%s).", partName[1], type.toSql())); } return null; http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java index 6a20fcc..521a844 100644 --- a/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java +++ b/fe/src/main/java/org/apache/impala/catalog/ImpaladCatalog.java @@ -237,8 +237,8 @@ public class ImpaladCatalog extends Catalog { throws TableLoadingException, DatabaseNotFoundException { // This item is out of 
date and should not be applied to the catalog. if (catalogDeltaLog_.wasObjectRemovedAfter(catalogObject)) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Skipping update because a matching object was removed " + + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Skipping update because a matching object was removed " + "in a later catalog version: %s", catalogObject)); } return; @@ -356,8 +356,8 @@ public class ImpaladCatalog extends Catalog { throws TableLoadingException { Db db = getDb(thriftTable.db_name); if (db == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Parent database of table does not exist: " + + if (LOG.isTraceEnabled()) { + LOG.trace("Parent database of table does not exist: " + thriftTable.db_name + "." + thriftTable.tbl_name); } return; @@ -373,8 +373,8 @@ public class ImpaladCatalog extends Catalog { function.setCatalogVersion(catalogVersion); Db db = getDb(function.getFunctionName().getDb()); if (db == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Parent database of function does not exist: " + function.getName()); + if (LOG.isTraceEnabled()) { + LOG.trace("Parent database of function does not exist: " + function.getName()); } return; } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java b/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java index 76db6f7..93f8585 100644 --- a/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java +++ b/fe/src/main/java/org/apache/impala/catalog/MetaStoreClientPool.java @@ -79,8 +79,8 @@ public class MetaStoreClientPool { * connection to the HMS before giving up and failing out with an exception. */ private MetaStoreClient(HiveConf hiveConf, int cnxnTimeoutSec) { - if (LOG.isDebugEnabled()) { - LOG.debug("Creating MetaStoreClient. Pool Size = " + clientPool_.size()); + if (LOG.isTraceEnabled()) { + LOG.trace("Creating MetaStoreClient. Pool Size = " + clientPool_.size()); } long retryDelaySeconds = hiveConf.getTimeVar( http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/Table.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java index 214413b..9919dbf 100644 --- a/fe/src/main/java/org/apache/impala/catalog/Table.java +++ b/fe/src/main/java/org/apache/impala/catalog/Table.java @@ -154,7 +154,7 @@ public abstract class Table implements CatalogObject { * the correctness of the system. 
*/ protected void loadAllColumnStats(IMetaStoreClient client) { - if (LOG.isDebugEnabled()) LOG.debug("Loading column stats for table: " + name_); + if (LOG.isTraceEnabled()) LOG.trace("Loading column stats for table: " + name_); List<ColumnStatisticsObj> colStats; // We need to only query those columns which may have stats; asking HMS for other http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/TableLoader.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/TableLoader.java b/fe/src/main/java/org/apache/impala/catalog/TableLoader.java index 1d0d54e..7f17a5c 100644 --- a/fe/src/main/java/org/apache/impala/catalog/TableLoader.java +++ b/fe/src/main/java/org/apache/impala/catalog/TableLoader.java @@ -55,7 +55,7 @@ public class TableLoader { */ public Table load(Db db, String tblName) { String fullTblName = db.getName() + "." + tblName; - if (LOG.isDebugEnabled()) LOG.debug("Loading metadata for: " + fullTblName); + if (LOG.isTraceEnabled()) LOG.trace("Loading metadata for: " + fullTblName); Table table; // turn all exceptions into TableLoadingException try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) { http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java b/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java index 84de5c4..17f962d 100644 --- a/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java +++ b/fe/src/main/java/org/apache/impala/catalog/TableLoadingMgr.java @@ -278,8 +278,10 @@ public class TableLoadingMgr { // Always get the next table from the head of the deque. final TTableName tblName = tableLoadingDeque_.takeFirst(); tableLoadingSet_.remove(tblName); - LOG.debug("Loading next table. Remaining items in queue: " - + tableLoadingDeque_.size()); + if (LOG.isTraceEnabled()) { + LOG.trace("Loading next table. Remaining items in queue: " + + tableLoadingDeque_.size()); + } try { // TODO: Instead of calling "getOrLoad" here we could call "loadAsync". We would // just need to add a mechanism for moving loaded tables into the Catalog. http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java index f167771..71eea88 100644 --- a/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java +++ b/fe/src/main/java/org/apache/impala/common/FileSystemUtil.java @@ -57,7 +57,7 @@ public class FileSystemUtil { for (FileStatus fStatus: fs.listStatus(directory)) { // Only delete files that are not hidden. 
if (fStatus.isFile() && !isHiddenFile(fStatus.getPath().getName())) { - if (LOG.isDebugEnabled()) LOG.debug("Removing: " + fStatus.getPath()); + if (LOG.isTraceEnabled()) LOG.trace("Removing: " + fStatus.getPath()); fs.delete(fStatus.getPath(), false); ++numFilesDeleted; } @@ -123,8 +123,8 @@ public class FileSystemUtil { int numFilesMoved = 0; for (FileStatus fStatus: sourceFs.listStatus(sourceDir)) { if (fStatus.isDirectory()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Skipping copy of directory: " + fStatus.getPath()); + if (LOG.isTraceEnabled()) { + LOG.trace("Skipping copy of directory: " + fStatus.getPath()); } continue; } else if (isHiddenFile(fStatus.getPath().getName())) { @@ -182,8 +182,8 @@ public class FileSystemUtil { // non-distributed filesystem. if (!doRename) doRename = !destIsDfs && sameFileSystem; if (doRename) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format( + if (LOG.isTraceEnabled()) { + LOG.trace(String.format( "Moving '%s' to '%s'", sourceFile.toString(), destFile.toString())); } // Move (rename) the file. @@ -196,15 +196,15 @@ public class FileSystemUtil { // encryption zones. A move would return an error from the NN because a move is a // metadata-only operation and the files would not be encrypted/decrypted properly // on the DNs. - if (LOG.isDebugEnabled()) { - LOG.debug(String.format( + if (LOG.isTraceEnabled()) { + LOG.trace(String.format( "Copying source '%s' to '%s' because HDFS encryption zones are different.", sourceFile, destFile)); } } else { Preconditions.checkState(!sameFileSystem); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Copying '%s' to '%s' between filesystems.", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Copying '%s' to '%s' between filesystems.", sourceFile, destFile)); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java b/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java index 7e8859a..a7624d6 100644 --- a/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java +++ b/fe/src/main/java/org/apache/impala/extdatasource/ExternalDataSourceExecutor.java @@ -157,8 +157,8 @@ public class ExternalDataSourceExecutor { if (initString_ != null && initString_.startsWith(CACHE_CLASS_PREFIX)) { cachedClasses_.put(cacheMapKey, c); } - if (LOG.isDebugEnabled()) { - LOG.debug("Loaded jar for class {} at path {}", className_, jarPath_); + if (LOG.isTraceEnabled()) { + LOG.trace("Loaded jar for class {} at path {}", className_, jarPath_); } numClassCacheMisses_++; } else { http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java b/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java index 22bdb49..520f0f3 100644 --- a/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java +++ b/fe/src/main/java/org/apache/impala/planner/DataSourceScanNode.java @@ -269,11 +269,13 @@ public class DataSourceScanNode extends ScanNode { cardinality_ = Math.max(1, cardinality_); cardinality_ = capAtLimit(cardinality_); - LOG.debug("computeStats DataSourceScan: cardinality=" + 
Long.toString(cardinality_)); + if (LOG.isTraceEnabled()) { + LOG.trace("computeStats DataSourceScan: cardinality=" + Long.toString(cardinality_)); + } numNodes_ = table_.getNumNodes(); - if (LOG.isDebugEnabled()) { - LOG.debug("computeStats DataSourceScan: #nodes=" + Integer.toString(numNodes_)); + if (LOG.isTraceEnabled()) { + LOG.trace("computeStats DataSourceScan: #nodes=" + Integer.toString(numNodes_)); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java index 3cfc787..dd03ac8 100644 --- a/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java +++ b/fe/src/main/java/org/apache/impala/planner/DistributedPlanner.java @@ -79,9 +79,9 @@ public class DistributedPlanner { isPartitioned = true; } long perNodeMemLimit = ctx_.getQueryOptions().mem_limit; - if (LOG.isDebugEnabled()) { - LOG.debug("create plan fragments"); - LOG.debug("memlimit=" + Long.toString(perNodeMemLimit)); + if (LOG.isTraceEnabled()) { + LOG.trace("create plan fragments"); + LOG.trace("memlimit=" + Long.toString(perNodeMemLimit)); } createPlanFragments(singleNodePlan, isPartitioned, perNodeMemLimit, fragments); return fragments; @@ -437,9 +437,9 @@ public class DistributedPlanner { broadcastCost = 2 * rhsDataSize * leftChildFragment.getNumNodes(); } } - if (LOG.isDebugEnabled()) { - LOG.debug("broadcast: cost=" + Long.toString(broadcastCost)); - LOG.debug("card=" + Long.toString(rhsTree.getCardinality()) + " row_size=" + if (LOG.isTraceEnabled()) { + LOG.trace("broadcast: cost=" + Long.toString(broadcastCost)); + LOG.trace("card=" + Long.toString(rhsTree.getCardinality()) + " row_size=" + Float.toString(rhsTree.getAvgRowSize()) + " #nodes=" + Integer.toString(leftChildFragment.getNumNodes())); } @@ -469,13 +469,13 @@ public class DistributedPlanner { double rhsNetworkCost = (rhsHasCompatPartition) ? 
0.0 : rhsDataSize; partitionCost = Math.round(lhsNetworkCost + rhsNetworkCost + rhsDataSize); } - if (LOG.isDebugEnabled()) { - LOG.debug("partition: cost=" + Long.toString(partitionCost)); - LOG.debug("lhs card=" + Long.toString(lhsTree.getCardinality()) + " row_size=" + if (LOG.isTraceEnabled()) { + LOG.trace("partition: cost=" + Long.toString(partitionCost)); + LOG.trace("lhs card=" + Long.toString(lhsTree.getCardinality()) + " row_size=" + Float.toString(lhsTree.getAvgRowSize())); - LOG.debug("rhs card=" + Long.toString(rhsTree.getCardinality()) + " row_size=" + LOG.trace("rhs card=" + Long.toString(rhsTree.getCardinality()) + " row_size=" + Float.toString(rhsTree.getAvgRowSize())); - LOG.debug(rhsTree.getExplainString()); + LOG.trace(rhsTree.getExplainString()); } boolean doBroadcast = false; http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java b/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java index 40b44d3..9e25655 100644 --- a/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java +++ b/fe/src/main/java/org/apache/impala/planner/HBaseScanNode.java @@ -216,14 +216,14 @@ public class HBaseScanNode extends ScanNode { cardinality_ *= computeSelectivity(); cardinality_ = Math.max(1, cardinality_); cardinality_ = capAtLimit(cardinality_); - if (LOG.isDebugEnabled()) { - LOG.debug("computeStats HbaseScan: cardinality=" + Long.toString(cardinality_)); + if (LOG.isTraceEnabled()) { + LOG.trace("computeStats HbaseScan: cardinality=" + Long.toString(cardinality_)); } // TODO: take actual regions into account numNodes_ = tbl.getNumNodes(); - if (LOG.isDebugEnabled()) { - LOG.debug("computeStats HbaseScan: #nodes=" + Integer.toString(numNodes_)); + if (LOG.isTraceEnabled()) { + LOG.trace("computeStats HbaseScan: #nodes=" + Integer.toString(numNodes_)); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java index a2ad76c..0aee399 100644 --- a/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java +++ b/fe/src/main/java/org/apache/impala/planner/HdfsScanNode.java @@ -373,8 +373,8 @@ public class HdfsScanNode extends ScanNode { @Override public void computeStats(Analyzer analyzer) { super.computeStats(analyzer); - if (LOG.isDebugEnabled()) { - LOG.debug("collecting partitions for table " + tbl_.getName()); + if (LOG.isTraceEnabled()) { + LOG.trace("collecting partitions for table " + tbl_.getName()); } numPartitionsMissingStats_ = 0; totalFiles_ = 0; @@ -432,8 +432,8 @@ public class HdfsScanNode extends ScanNode { } if (cardinality_ > 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("cardinality_=" + Long.toString(cardinality_) + + if (LOG.isTraceEnabled()) { + LOG.trace("cardinality_=" + Long.toString(cardinality_) + " sel=" + Double.toString(computeSelectivity())); } cardinality_ = Math.round(cardinality_ * computeSelectivity()); @@ -441,13 +441,13 @@ public class HdfsScanNode extends ScanNode { cardinality_ = Math.max(cardinality_, 1); } cardinality_ = capAtLimit(cardinality_); - if (LOG.isDebugEnabled()) { - LOG.debug("computeStats HdfsScan: cardinality_=" + 
Long.toString(cardinality_)); + if (LOG.isTraceEnabled()) { + LOG.trace("computeStats HdfsScan: cardinality_=" + Long.toString(cardinality_)); } computeNumNodes(analyzer, cardinality_); - if (LOG.isDebugEnabled()) { - LOG.debug("computeStats HdfsScan: #nodes=" + Integer.toString(numNodes_)); + if (LOG.isTraceEnabled()) { + LOG.trace("computeStats HdfsScan: #nodes=" + Integer.toString(numNodes_)); } } @@ -501,8 +501,8 @@ public class HdfsScanNode extends ScanNode { // Tables can reside on 0 nodes (empty table), but a plan node must always be // executed on at least one node. numNodes_ = (cardinality == 0 || totalNodes == 0) ? 1 : totalNodes; - if (LOG.isDebugEnabled()) { - LOG.debug("computeNumNodes totalRanges=" + scanRanges_.size() + + if (LOG.isTraceEnabled()) { + LOG.trace("computeNumNodes totalRanges=" + scanRanges_.size() + " localRanges=" + numLocalRanges + " remoteRanges=" + numRemoteRanges + " localHostSet.size=" + localHostSet.size() + " clusterNodes=" + cluster.numNodes()); http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/JoinNode.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/JoinNode.java b/fe/src/main/java/org/apache/impala/planner/JoinNode.java index 13cc854..8e963ff 100644 --- a/fe/src/main/java/org/apache/impala/planner/JoinNode.java +++ b/fe/src/main/java/org/apache/impala/planner/JoinNode.java @@ -475,8 +475,8 @@ public abstract class JoinNode extends PlanNode { } cardinality_ = capAtLimit(cardinality_); Preconditions.checkState(hasValidStats()); - if (LOG.isDebugEnabled()) { - LOG.debug("stats Join: cardinality=" + Long.toString(cardinality_)); + if (LOG.isTraceEnabled()) { + LOG.trace("stats Join: cardinality=" + Long.toString(cardinality_)); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java index cca7a6f..883a556 100644 --- a/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java +++ b/fe/src/main/java/org/apache/impala/planner/KuduScanNode.java @@ -223,8 +223,8 @@ public class KuduScanNode extends ScanNode { cardinality_ *= computeSelectivity(); cardinality_ = Math.min(Math.max(1, cardinality_), kuduTable_.getNumRows()); cardinality_ = capAtLimit(cardinality_); - if (LOG.isDebugEnabled()) { - LOG.debug("computeStats KuduScan: cardinality=" + Long.toString(cardinality_)); + if (LOG.isTraceEnabled()) { + LOG.trace("computeStats KuduScan: cardinality=" + Long.toString(cardinality_)); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/Planner.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/Planner.java b/fe/src/main/java/org/apache/impala/planner/Planner.java index 40ca682..3369686 100644 --- a/fe/src/main/java/org/apache/impala/planner/Planner.java +++ b/fe/src/main/java/org/apache/impala/planner/Planner.java @@ -164,10 +164,10 @@ public class Planner { } rootFragment.setOutputExprs(resultExprs); - if (LOG.isDebugEnabled()) { - LOG.debug("desctbl: " + ctx_.getRootAnalyzer().getDescTbl().debugString()); - LOG.debug("resultexprs: " + Expr.debugString(rootFragment.getOutputExprs())); - 
LOG.debug("finalize plan fragments"); + if (LOG.isTraceEnabled()) { + LOG.trace("desctbl: " + ctx_.getRootAnalyzer().getDescTbl().debugString()); + LOG.trace("resultexprs: " + Expr.debugString(rootFragment.getOutputExprs())); + LOG.trace("finalize plan fragments"); } for (PlanFragment fragment: fragments) { fragment.finalize(ctx_.getRootAnalyzer()); @@ -409,9 +409,9 @@ public class Planner { request.setPer_host_mem_req(maxPerHostMem); request.setPer_host_vcores((short) maxPerHostVcores); - if (LOG.isDebugEnabled()) { - LOG.debug("Estimated per-host peak memory requirement: " + maxPerHostMem); - LOG.debug("Estimated per-host virtual cores requirement: " + maxPerHostVcores); + if (LOG.isTraceEnabled()) { + LOG.trace("Estimated per-host peak memory requirement: " + maxPerHostMem); + LOG.trace("Estimated per-host virtual cores requirement: " + maxPerHostVcores); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/SelectNode.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/SelectNode.java b/fe/src/main/java/org/apache/impala/planner/SelectNode.java index c571c1c..e09d572 100644 --- a/fe/src/main/java/org/apache/impala/planner/SelectNode.java +++ b/fe/src/main/java/org/apache/impala/planner/SelectNode.java @@ -74,8 +74,8 @@ public class SelectNode extends PlanNode { Preconditions.checkState(cardinality_ >= 0); } cardinality_ = capAtLimit(cardinality_); - if (LOG.isDebugEnabled()) { - LOG.debug("stats Select: cardinality=" + Long.toString(cardinality_)); + if (LOG.isTraceEnabled()) { + LOG.trace("stats Select: cardinality=" + Long.toString(cardinality_)); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java index 403a432..1634dd2 100644 --- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java +++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java @@ -499,8 +499,8 @@ public class SingleNodePlanner { long lhsCardinality = root.getCardinality(); long rhsCardinality = minEntry.second.getCardinality(); numOps += lhsCardinality + rhsCardinality; - if (LOG.isDebugEnabled()) { - LOG.debug(Integer.toString(i) + " chose " + minEntry.first.getUniqueAlias() + if (LOG.isTraceEnabled()) { + LOG.trace(Integer.toString(i) + " chose " + minEntry.first.getUniqueAlias() + " #lhs=" + Long.toString(lhsCardinality) + " #rhs=" + Long.toString(rhsCardinality) + " #ops=" + Long.toString(numOps)); http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/SortNode.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/SortNode.java b/fe/src/main/java/org/apache/impala/planner/SortNode.java index 58e04b4..0533b22 100644 --- a/fe/src/main/java/org/apache/impala/planner/SortNode.java +++ b/fe/src/main/java/org/apache/impala/planner/SortNode.java @@ -134,8 +134,8 @@ public class SortNode extends PlanNode { protected void computeStats(Analyzer analyzer) { super.computeStats(analyzer); cardinality_ = capAtLimit(getChild(0).cardinality_); - if (LOG.isDebugEnabled()) { - LOG.debug("stats Sort: cardinality=" + 
Long.toString(cardinality_)); + if (LOG.isTraceEnabled()) { + LOG.trace("stats Sort: cardinality=" + Long.toString(cardinality_)); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/planner/UnionNode.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/planner/UnionNode.java b/fe/src/main/java/org/apache/impala/planner/UnionNode.java index 69e7a37..a8d2291 100644 --- a/fe/src/main/java/org/apache/impala/planner/UnionNode.java +++ b/fe/src/main/java/org/apache/impala/planner/UnionNode.java @@ -97,8 +97,8 @@ public class UnionNode extends PlanNode { // (VALUES(1 x, 1 y)) b ON (a.x = b.y)). We need to set the correct value. if (numNodes_ == -1) numNodes_ = 1; cardinality_ = capAtLimit(cardinality_); - if (LOG.isDebugEnabled()) { - LOG.debug("stats Union: cardinality=" + Long.toString(cardinality_)); + if (LOG.isTraceEnabled()) { + LOG.trace("stats Union: cardinality=" + Long.toString(cardinality_)); } } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java index b06f415..4087818 100644 --- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java +++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java @@ -636,8 +636,8 @@ public class CatalogOpExecutor { // Set the altered view attributes and update the metastore. setViewAttributes(params, msTbl); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Altering view %s", tableName)); + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Altering view %s", tableName)); } applyAlterTable(msTbl); try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) { @@ -667,8 +667,8 @@ public class CatalogOpExecutor { TableName tableName = table.getTableName(); Preconditions.checkState(tableName != null && tableName.isFullyQualified()); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Updating table stats for: %s", tableName)); + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Updating table stats for: %s", tableName)); } // Deep copy the msTbl to avoid updating our cache before successfully persisting @@ -765,8 +765,8 @@ public class CatalogOpExecutor { // but it is predictable and easy to reason about because it does not depend on the // existing state of the metadata. See IMPALA-2201. 
long numRows = partitionStats.stats.num_rows; - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Updating stats for partition %s: numRows=%s", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Updating stats for partition %s: numRows=%s", partition.getValuesAsString(), numRows)); } PartitionStatsUtil.partStatsToParameters(partitionStats, partition); @@ -815,8 +815,8 @@ public class CatalogOpExecutor { ColumnStatisticsData colStatsData = createHiveColStatsData(entry.getValue(), tableCol.getType()); if (colStatsData == null) continue; - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " + + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Updating column stats for %s: numDVs=%s numNulls=%s " + "maxSize=%s avgSize=%s", colName, entry.getValue().getNum_distinct_values(), entry.getValue().getNum_nulls(), entry.getValue().getMax_size(), entry.getValue().getAvg_size())); @@ -887,8 +887,8 @@ public class CatalogOpExecutor { Preconditions.checkState(dbName != null && !dbName.isEmpty(), "Null or empty database name passed as argument to Catalog.createDatabase"); if (params.if_not_exists && catalog_.getDb(dbName) != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Skipping database creation because " + dbName + " already exists " + if (LOG.isTraceEnabled()) { + LOG.trace("Skipping database creation because " + dbName + " already exists " + "and IF NOT EXISTS was specified."); } resp.getResult().setVersion(catalog_.getCatalogVersion()); @@ -903,7 +903,7 @@ public class CatalogOpExecutor { if (params.getLocation() != null) { db.setLocationUri(params.getLocation()); } - if (LOG.isDebugEnabled()) LOG.debug("Creating database " + dbName); + if (LOG.isTraceEnabled()) LOG.trace("Creating database " + dbName); Db newDb = null; synchronized (metastoreDdlLock_) { try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) { @@ -915,8 +915,8 @@ public class CatalogOpExecutor { throw new ImpalaRuntimeException( String.format(HMS_RPC_ERROR_FORMAT_STR, "createDatabase"), e); } - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Ignoring '%s' when creating database %s because " + + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Ignoring '%s' when creating database %s because " + "IF NOT EXISTS was specified.", e, dbName)); } newDb = catalog_.getDb(dbName); @@ -958,8 +958,8 @@ public class CatalogOpExecutor { private void createFunction(TCreateFunctionParams params, TDdlExecResponse resp) throws ImpalaException { Function fn = Function.fromThrift(params.getFn()); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Adding %s: %s", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Adding %s: %s", fn.getClass().getSimpleName(), fn.signatureString())); } boolean isPersistentJavaFn = @@ -997,8 +997,8 @@ public class CatalogOpExecutor { } if (addJavaFunctionToHms(fn.dbName(), hiveFn, params.if_not_exists)) { for (Function addedFn: funcs) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Adding function: %s.%s", addedFn.dbName(), + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Adding function: %s.%s", addedFn.dbName(), addedFn.signatureString())); } Preconditions.checkState(catalog_.addFunction(addedFn)); @@ -1031,7 +1031,7 @@ public class CatalogOpExecutor { private void createDataSource(TCreateDataSourceParams params, TDdlExecResponse resp) throws ImpalaException { - if (LOG.isDebugEnabled()) { LOG.debug("Adding DATA SOURCE: " + params.toString()); } + if (LOG.isTraceEnabled()) { LOG.trace("Adding 
DATA SOURCE: " + params.toString()); } DataSource dataSource = DataSource.fromThrift(params.getData_source()); if (catalog_.getDataSource(dataSource.getName()) != null) { if (!params.if_not_exists) { @@ -1054,7 +1054,7 @@ public class CatalogOpExecutor { private void dropDataSource(TDropDataSourceParams params, TDdlExecResponse resp) throws ImpalaException { - if (LOG.isDebugEnabled()) { LOG.debug("Drop DATA SOURCE: " + params.toString()); } + if (LOG.isTraceEnabled()) LOG.trace("Drop DATA SOURCE: " + params.toString()); DataSource dataSource = catalog_.getDataSource(params.getData_source()); if (dataSource == null) { if (!params.if_exists) { @@ -1208,7 +1208,7 @@ public class CatalogOpExecutor { Preconditions.checkState(params.getDb() != null && !params.getDb().isEmpty(), "Null or empty database name passed as argument to Catalog.dropDatabase"); - LOG.debug("Dropping database " + params.getDb()); + LOG.trace("Dropping database " + params.getDb()); Db db = catalog_.getDb(params.db); if (db != null && db.numFunctions() > 0 && !params.cascade) { throw new CatalogException("Database " + db.getName() + " is not empty"); @@ -1290,7 +1290,7 @@ public class CatalogOpExecutor { throws ImpalaException { TableName tableName = TableName.fromThrift(params.getTable_name()); Preconditions.checkState(tableName != null && tableName.isFullyQualified()); - LOG.debug(String.format("Dropping table/view %s", tableName)); + LOG.trace(String.format("Dropping table/view %s", tableName)); TCatalogObject removedObject = new TCatalogObject(); synchronized (metastoreDdlLock_) { @@ -1504,13 +1504,13 @@ public class CatalogOpExecutor { if (params.if_not_exists && catalog_.containsTable(tableName.getDb(), tableName.getTbl())) { - LOG.debug(String.format("Skipping table creation because %s already exists and " + + LOG.trace(String.format("Skipping table creation because %s already exists and " + "IF NOT EXISTS was specified.", tableName)); response.getResult().setVersion(catalog_.getCatalogVersion()); return false; } org.apache.hadoop.hive.metastore.api.Table tbl = createMetaStoreTable(params); - LOG.debug(String.format("Creating table %s", tableName)); + LOG.trace(String.format("Creating table %s", tableName)); if (KuduTable.isKuduTable(tbl)) return createKuduTable(tbl, params, response); Preconditions.checkState(params.getColumns().size() > 0, "Empty column list given as argument to Catalog.createTable"); @@ -1680,7 +1680,7 @@ public class CatalogOpExecutor { "Null or empty column list given as argument to DdlExecutor.createView"); if (params.if_not_exists && catalog_.containsTable(tableName.getDb(), tableName.getTbl())) { - LOG.debug(String.format("Skipping view creation because %s already exists and " + + LOG.trace(String.format("Skipping view creation because %s already exists and " + "ifNotExists is true.", tableName)); } @@ -1688,7 +1688,7 @@ public class CatalogOpExecutor { org.apache.hadoop.hive.metastore.api.Table view = new org.apache.hadoop.hive.metastore.api.Table(); setViewAttributes(params, view); - LOG.debug(String.format("Creating view %s", tableName)); + LOG.trace(String.format("Creating view %s", tableName)); createTable(view, params.if_not_exists, null, response); } @@ -1712,7 +1712,7 @@ public class CatalogOpExecutor { if (params.if_not_exists && catalog_.containsTable(tblName.getDb(), tblName.getTbl())) { - LOG.debug(String.format("Skipping table creation because %s already exists and " + + LOG.trace(String.format("Skipping table creation because %s already exists and " + "IF NOT EXISTS was 
specified.", tblName)); response.getResult().setVersion(catalog_.getCatalogVersion()); return; @@ -1767,7 +1767,7 @@ public class CatalogOpExecutor { } // Set the row count of this table to unknown. tbl.putToParameters(StatsSetupConst.ROW_COUNT, "-1"); - LOG.debug(String.format("Creating table %s LIKE %s", tblName, srcTblName)); + LOG.trace(String.format("Creating table %s LIKE %s", tblName, srcTblName)); createTable(tbl, params.if_not_exists, null, response); } @@ -1861,7 +1861,7 @@ public class CatalogOpExecutor { TableName tableName = tbl.getTableName(); if (ifNotExists && catalog_.containsHdfsPartition(tableName.getDb(), tableName.getTbl(), partitionSpec)) { - LOG.debug(String.format("Skipping partition creation because (%s) already exists" + + LOG.trace(String.format("Skipping partition creation because (%s) already exists" + " and ifNotExists is true.", Joiner.on(", ").join(partitionSpec))); return null; } @@ -1916,7 +1916,7 @@ public class CatalogOpExecutor { throw new ImpalaRuntimeException( String.format(HMS_RPC_ERROR_FORMAT_STR, "add_partition"), e); } - LOG.debug(String.format("Ignoring '%s' when adding partition to %s because" + + LOG.trace(String.format("Ignoring '%s' when adding partition to %s because" + " ifNotExists is true.", e, tableName)); } catch (TException e) { throw new ImpalaRuntimeException( @@ -1950,7 +1950,7 @@ public class CatalogOpExecutor { Preconditions.checkState(!partitionSet.isEmpty()); } else { if (partitionSet.isEmpty()) { - LOG.debug(String.format("Ignoring empty partition list when dropping " + + LOG.trace(String.format("Ignoring empty partition list when dropping " + "partitions from %s because ifExists is true.", tableName)); return tbl; } @@ -1985,7 +1985,7 @@ public class CatalogOpExecutor { throw new ImpalaRuntimeException( String.format(HMS_RPC_ERROR_FORMAT_STR, "dropPartition"), e); } - LOG.debug( + LOG.trace( String.format("Ignoring '%s' when dropping partitions from %s because" + " ifExists is true.", e, tableName)); } @@ -2492,7 +2492,7 @@ public class CatalogOpExecutor { updateLastDdlTime(msTbl, msClient); } catch (AlreadyExistsException e) { // This may happen when another client of HMS has added the partitions. 
- LOG.debug(String.format("Ignoring '%s' when adding partition to %s.", e, + LOG.trace(String.format("Ignoring '%s' when adding partition to %s.", e, tableName)); } catch (TException e) { throw new ImpalaRuntimeException( @@ -2853,7 +2853,9 @@ public class CatalogOpExecutor { private long updateLastDdlTime(org.apache.hadoop.hive.metastore.api.Table msTbl, MetaStoreClient msClient) throws MetaException, NoSuchObjectException, TException { Preconditions.checkNotNull(msTbl); - LOG.debug("Updating lastDdlTime for table: " + msTbl.getTableName()); + if (LOG.isTraceEnabled()) { + LOG.trace("Updating lastDdlTime for table: " + msTbl.getTableName()); + } Map<String, String> params = msTbl.getParameters(); long lastDdlTime = calculateDdlTime(msTbl); params.put("transient_lastDdlTime", Long.toString(lastDdlTime)); http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/service/Frontend.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java index a7bec4d..e7eabb1 100644 --- a/fe/src/main/java/org/apache/impala/service/Frontend.java +++ b/fe/src/main/java/org/apache/impala/service/Frontend.java @@ -881,7 +881,7 @@ public class Frontend { AnalysisContext analysisCtx = new AnalysisContext(impaladCatalog_, queryCtx, authzConfig_); - if (LOG.isDebugEnabled()) LOG.debug("analyze query " + queryCtx.request.stmt); + if (LOG.isTraceEnabled()) LOG.trace("analyze query " + queryCtx.request.stmt); // Run analysis in a loop until it any of the following events occur: // 1) Analysis completes successfully. @@ -900,8 +900,8 @@ public class Frontend { // Some tables/views were missing, request and wait for them to load. if (!requestTblLoadAndWait(missingTbls, MISSING_TBL_LOAD_WAIT_TIMEOUT_MS)) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Missing tables were not received in %dms. Load " + + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Missing tables were not received in %dms. 
Load " + "request will be retried.", MISSING_TBL_LOAD_WAIT_TIMEOUT_MS)); } } @@ -984,10 +984,10 @@ public class Frontend { List<PlanFragment> planRoots = Lists.newArrayList(); TQueryExecRequest result = new TQueryExecRequest(); if (isMtExec) { - LOG.debug("create mt plan"); + LOG.trace("create mt plan"); planRoots.addAll(planner.createParallelPlans()); } else { - LOG.debug("create plan"); + LOG.trace("create plan"); planRoots.add(planner.createPlan().get(0)); } @@ -1088,7 +1088,7 @@ public class Frontend { result.setQuery_exec_request(queryExecRequest); if (analysisResult.isQueryStmt()) { // fill in the metadata - LOG.debug("create result set metadata"); + LOG.trace("create result set metadata"); result.stmt_type = TStmtType.QUERY; result.query_exec_request.stmt_type = result.stmt_type; TResultSetMetadata metadata = new TResultSetMetadata(); http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/service/JniFrontend.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java index 29cbd10..fec35d7 100644 --- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java +++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java @@ -157,8 +157,8 @@ public class JniFrontend { StringBuilder explainString = new StringBuilder(); TExecRequest result = frontend_.createExecRequest(queryCtx, explainString); - if (explainString.length() > 0 && LOG.isDebugEnabled()) { - LOG.debug(explainString.toString()); + if (explainString.length() > 0 && LOG.isTraceEnabled()) { + LOG.trace(explainString.toString()); } // TODO: avoid creating serializer for each query? @@ -234,7 +234,7 @@ public class JniFrontend { TQueryCtx queryCtx = new TQueryCtx(); JniUtil.deserializeThrift(protocolFactory_, queryCtx, thriftQueryContext); String plan = frontend_.getExplainString(queryCtx); - if (LOG.isDebugEnabled()) LOG.debug("Explain plan: " + plan); + if (LOG.isTraceEnabled()) LOG.trace("Explain plan: " + plan); return plan; } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java index aa553cf..8c978b4 100644 --- a/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java +++ b/fe/src/main/java/org/apache/impala/service/KuduCatalogOpExecutor.java @@ -70,8 +70,8 @@ public class KuduCatalogOpExecutor { Preconditions.checkState(!Table.isExternalTable(msTbl)); String kuduTableName = msTbl.getParameters().get(KuduTable.KEY_TABLE_NAME); String masterHosts = msTbl.getParameters().get(KuduTable.KEY_MASTER_HOSTS); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Creating table '%s' in master '%s'", kuduTableName, + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Creating table '%s' in master '%s'", kuduTableName, masterHosts)); } try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) { @@ -200,8 +200,8 @@ public class KuduCatalogOpExecutor { Preconditions.checkState(!Table.isExternalTable(msTbl)); String tableName = msTbl.getParameters().get(KuduTable.KEY_TABLE_NAME); String masterHosts = msTbl.getParameters().get(KuduTable.KEY_MASTER_HOSTS); - if (LOG.isDebugEnabled()) { - 
LOG.debug(String.format("Dropping table '%s' from master '%s'", tableName, + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Dropping table '%s' from master '%s'", tableName, masterHosts)); } try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) { @@ -231,8 +231,8 @@ public class KuduCatalogOpExecutor { String kuduTableName = msTblCopy.getParameters().get(KuduTable.KEY_TABLE_NAME); Preconditions.checkState(!Strings.isNullOrEmpty(kuduTableName)); String masterHosts = msTblCopy.getParameters().get(KuduTable.KEY_MASTER_HOSTS); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Loading schema of table '%s' from master '%s'", + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Loading schema of table '%s' from master '%s'", kuduTableName, masterHosts)); } try (KuduClient kudu = KuduUtil.createKuduClient(masterHosts)) { http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/service/MetadataOp.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/service/MetadataOp.java b/fe/src/main/java/org/apache/impala/service/MetadataOp.java index 16166ee..78cb785 100644 --- a/fe/src/main/java/org/apache/impala/service/MetadataOp.java +++ b/fe/src/main/java/org/apache/impala/service/MetadataOp.java @@ -375,8 +375,8 @@ public class MetadataOp { } } } - if (LOG.isDebugEnabled()) { - LOG.debug("Returning " + result.rows.size() + " table columns"); + if (LOG.isTraceEnabled()) { + LOG.trace("Returning " + result.rows.size() + " table columns"); } return result; } @@ -428,8 +428,8 @@ public class MetadataOp { result.rows.add(row); } - if (LOG.isDebugEnabled()) { - LOG.debug("Returning " + result.rows.size() + " schemas"); + if (LOG.isTraceEnabled()) { + LOG.trace("Returning " + result.rows.size() + " schemas"); } return result; } @@ -483,7 +483,7 @@ public class MetadataOp { result.rows.add(row); } } - if (LOG.isDebugEnabled()) LOG.debug("Returning " + result.rows.size() + " tables"); + if (LOG.isTraceEnabled()) LOG.trace("Returning " + result.rows.size() + " tables"); return result; } http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java index acbc53a..a14c810 100644 --- a/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java +++ b/fe/src/main/java/org/apache/impala/util/HdfsCachingUtil.java @@ -130,8 +130,8 @@ public class HdfsCachingUtil { public static void uncacheTbl(org.apache.hadoop.hive.metastore.api.Table table) throws ImpalaRuntimeException { Preconditions.checkNotNull(table); - if (LOG.isDebugEnabled()) { - LOG.debug("Uncaching table: " + table.getDbName() + "." + table.getTableName()); + if (LOG.isTraceEnabled()) { + LOG.trace("Uncaching table: " + table.getDbName() + "." + table.getTableName()); } Long id = getCacheDirectiveId(table.getParameters()); if (id == null) return; @@ -238,8 +238,8 @@ public class HdfsCachingUtil { bytesNeeded = cacheDir.getStats().getBytesNeeded(); currentBytesCached = cacheDir.getStats().getBytesCached(); - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Waiting on cache directive id: %d. Bytes " + + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Waiting on cache directive id: %d. 
Bytes " + "cached (%d) / needed (%d)", directiveId, currentBytesCached, bytesNeeded)); } // All the bytes are cached, just return. @@ -262,8 +262,8 @@ public class HdfsCachingUtil { currentBytesCached = cacheDir.getStats().getBytesCached(); bytesNeeded = cacheDir.getStats().getBytesNeeded(); if (currentBytesCached == bytesNeeded) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Cache directive id: %d has completed." + + if (LOG.isTraceEnabled()) { + LOG.trace(String.format("Cache directive id: %d has completed." + "Bytes cached (%d) / needed (%d)", directiveId, currentBytesCached, bytesNeeded)); } @@ -301,8 +301,8 @@ public class HdfsCachingUtil { .setPool(poolName) .setReplication(replication) .setPath(path).build(); - if (LOG.isDebugEnabled()) { - LOG.debug("Submitting cache directive: " + info.toString()); + if (LOG.isTraceEnabled()) { + LOG.trace("Submitting cache directive: " + info.toString()); } try { return getDfs().addCacheDirective(info); @@ -355,8 +355,8 @@ public class HdfsCachingUtil { .setPool(poolName) .setReplication(replication) .setPath(path).build(); - if (LOG.isDebugEnabled()) { - LOG.debug("Modifying cache directive: " + info.toString()); + if (LOG.isTraceEnabled()) { + LOG.trace("Modifying cache directive: " + info.toString()); } try { getDfs().modifyCacheDirective(info); @@ -372,7 +372,7 @@ public class HdfsCachingUtil { * directive. */ private static void removeDirective(long directiveId) throws ImpalaRuntimeException { - if (LOG.isDebugEnabled()) LOG.debug("Removing cache directive id: " + directiveId); + if (LOG.isTraceEnabled()) LOG.trace("Removing cache directive id: " + directiveId); try { getDfs().removeCacheDirective(directiveId); } catch (IOException e) { http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/694d72ea/fe/src/main/java/org/apache/impala/util/RequestPoolService.java ---------------------------------------------------------------------- diff --git a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java index 94c0c33..1443e68 100644 --- a/fe/src/main/java/org/apache/impala/util/RequestPoolService.java +++ b/fe/src/main/java/org/apache/impala/util/RequestPoolService.java @@ -274,8 +274,8 @@ public class RequestPoolService { JniUtil.deserializeThrift(protocolFactory_, resolvePoolParams, thriftResolvePoolParams); TResolveRequestPoolResult result = resolveRequestPool(resolvePoolParams); - if (LOG.isDebugEnabled()) { - LOG.debug("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}", + if (LOG.isTraceEnabled()) { + LOG.trace("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}", new Object[] { resolvePoolParams.getRequested_pool(), resolvePoolParams.getUser(), result.resolved_pool, result.has_access }); @@ -375,7 +375,7 @@ public class RequestPoolService { result.setDefault_query_options(getLlamaPoolConfigValue(currentLlamaConf, pool, QUERY_OPTIONS_KEY, "")); } - if (LOG.isDebugEnabled()) { + if (LOG.isTraceEnabled()) { LOG.debug("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}, " + "max_queued={}, queue_timeout_ms={}, default_query_options={}", new Object[] { pool, result.max_mem_resources, result.max_requests,
