Author: prasanthj
Date: Mon Feb 23 21:45:59 2015
New Revision: 1661791
URL: http://svn.apache.org/r1661791
Log:
Merge from trunk to llap (Prasanth Jayachandran)
Modified:
hive/branches/llap/ (props changed)
hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
Propchange: hive/branches/llap/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Mon Feb 23 21:45:59 2015
@@ -4,4 +4,4 @@
/hive/branches/spark:1608589-1660298
/hive/branches/tez:1494760-1622766
/hive/branches/vectorization:1466908-1527856
-/hive/trunk:1624170-1661246
+/hive/trunk:1624170-1661790
Modified:
hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
(original)
+++
hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
Mon Feb 23 21:45:59 2015
@@ -794,7 +794,7 @@ public class HiveMetaStoreClient impleme
@Override
public List<Partition> dropPartitions(String dbName, String tblName,
List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean
ignoreProtection,
- boolean ifExists) throws NoSuchObjectException, MetaException,
TException {
+ boolean ifExists, boolean needResult) throws NoSuchObjectException,
MetaException, TException {
RequestPartsSpec rps = new RequestPartsSpec();
List<DropPartitionsExpr> exprs = new
ArrayList<DropPartitionsExpr>(partExprs.size());
for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
@@ -807,11 +807,19 @@ public class HiveMetaStoreClient impleme
DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName,
rps);
req.setDeleteData(deleteData);
req.setIgnoreProtection(ignoreProtection);
- req.setNeedResult(true);
+ req.setNeedResult(needResult);
req.setIfExists(ifExists);
return client.drop_partitions_req(req).getPartitions();
}
+ @Override
+ public List<Partition> dropPartitions(String dbName, String tblName,
+ List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean
ignoreProtection,
+ boolean ifExists) throws NoSuchObjectException, MetaException,
TException {
+ // By default, we need the results from dropPartitions();
+ return dropPartitions(dbName, tblName, partExprs, deleteData,
ignoreProtection, ifExists, true);
+ }
+
/**
* {@inheritDoc}
* @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
Modified:
hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
(original)
+++
hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
Mon Feb 23 21:45:59 2015
@@ -664,6 +664,10 @@ public interface IMetaStoreClient {
List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean
ignoreProtection,
boolean ifExists) throws NoSuchObjectException, MetaException,
TException;
+ List<Partition> dropPartitions(String dbName, String tblName,
+ List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, boolean
ignoreProtection,
+ boolean ifExists, boolean needResults) throws NoSuchObjectException,
MetaException, TException;
+
boolean dropPartition(String db_name, String tbl_name,
String name, boolean deleteData) throws NoSuchObjectException,
MetaException, TException;
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java
Mon Feb 23 21:45:59 2015
@@ -81,7 +81,9 @@ public class AppMasterEventOperator exte
Writable writableRow = serializer.serialize(row, rowInspector);
writableRow.write(buffer);
if (buffer.getLength() > MAX_SIZE) {
- LOG.info("Disabling AM events. Buffer size too large: " +
buffer.getLength());
+ if (isLogInfoEnabled) {
+ LOG.info("Disabling AM events. Buffer size too large: " +
buffer.getLength());
+ }
hasReachedMaxSize = true;
buffer = null;
}
@@ -89,7 +91,7 @@ public class AppMasterEventOperator exte
throw new HiveException(e);
}
- if (LOG.isDebugEnabled()) {
+ if (isLogDebugEnabled) {
LOG.debug("AppMasterEvent: " + row);
}
forward(row, rowInspector);
@@ -116,8 +118,10 @@ public class AppMasterEventOperator exte
InputInitializerEvent.create(vertexName, inputName,
ByteBuffer.wrap(payload, 0, payload.length));
- LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " +
inputName
- + ". Payload size = " + payload.length);
+ if (isLogInfoEnabled) {
+ LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " +
inputName
+ + ". Payload size = " + payload.length);
+ }
context.getTezProcessorContext().sendEvents(Collections.singletonList(event));
}
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
Mon Feb 23 21:45:59 2015
@@ -316,7 +316,9 @@ public abstract class CommonJoinOperator
}
}
- LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " +
totalSz);
+ if (isLogInfoEnabled) {
+ LOG.info("JOIN " + outputObjInspector.getTypeName() + " totalsz = " +
totalSz);
+ }
}
transient boolean newGroupStarted = false;
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/DemuxOperator.java
Mon Feb 23 21:45:59 2015
@@ -176,7 +176,9 @@ public class DemuxOperator extends Opera
}
newChildOperatorsTag[i] = toArray(childOperatorTags);
}
- LOG.info("newChildOperatorsTag " + newChildOperatorsTag);
+ if (isLogInfoEnabled) {
+ LOG.info("newChildOperatorsTag " + newChildOperatorsTag);
+ }
initializeChildren(hconf);
}
@@ -200,12 +202,16 @@ public class DemuxOperator extends Opera
@Override
protected void initializeChildren(Configuration hconf) throws HiveException {
state = State.INIT;
- LOG.info("Operator " + id + " " + getName() + " initialized");
- LOG.info("Initializing children of " + id + " " + getName());
+ if (isLogInfoEnabled) {
+ LOG.info("Operator " + id + " " + getName() + " initialized");
+ LOG.info("Initializing children of " + id + " " + getName());
+ }
for (int i = 0; i < childOperatorsArray.length; i++) {
- LOG.info("Initializing child " + i + " " +
childOperatorsArray[i].getIdentifier() + " " +
- childOperatorsArray[i].getName() +
- " " + childInputObjInspectors[i].length);
+ if (isLogInfoEnabled) {
+ LOG.info("Initializing child " + i + " " +
childOperatorsArray[i].getIdentifier() + " " +
+ childOperatorsArray[i].getName() +
+ " " + childInputObjInspectors[i].length);
+ }
// We need to initialize those MuxOperators first because if we first
// initialize other operators, the states of all parents of those
MuxOperators
// are INIT (including this DemuxOperator),
@@ -229,9 +235,11 @@ public class DemuxOperator extends Opera
}
}
for (int i = 0; i < childOperatorsArray.length; i++) {
- LOG.info("Initializing child " + i + " " +
childOperatorsArray[i].getIdentifier() + " " +
- childOperatorsArray[i].getName() +
- " " + childInputObjInspectors[i].length);
+ if (isLogInfoEnabled) {
+ LOG.info("Initializing child " + i + " " +
childOperatorsArray[i].getIdentifier() + " " +
+ childOperatorsArray[i].getName() +
+ " " + childInputObjInspectors[i].length);
+ }
if (!(childOperatorsArray[i] instanceof MuxOperator)) {
childOperatorsArray[i].initialize(hconf, childInputObjInspectors[i]);
} else {
@@ -255,10 +263,10 @@ public class DemuxOperator extends Opera
endGroupIfNecessary(currentChildIndex);
int oldTag = newTagToOldTag[tag];
- if (isLogInfoEnabled) {
+ if (isLogDebugEnabled) {
cntrs[tag]++;
if (cntrs[tag] == nextCntrs[tag]) {
- LOG.info(id + " (newTag, childIndex, oldTag)=(" + tag + ", " +
currentChildIndex + ", "
+ LOG.debug(id + " (newTag, childIndex, oldTag)=(" + tag + ", " +
currentChildIndex + ", "
+ oldTag + "), forwarding " + cntrs[tag] + " rows");
nextCntrs[tag] = getNextCntr(cntrs[tag]);
}
@@ -291,8 +299,10 @@ public class DemuxOperator extends Opera
int newTag = i;
int oldTag = newTagToOldTag[i];
int childIndex = newTagToChildIndex[newTag];
- LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " +
childIndex + ", "
- + oldTag + "), forwarded " + cntrs[newTag] + " rows");
+ if (isLogInfoEnabled) {
+ LOG.info(id + " (newTag, childIndex, oldTag)=(" + newTag + ", " +
childIndex + ", "
+ + oldTag + "), forwarded " + cntrs[newTag] + " rows");
+ }
}
}
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
Mon Feb 23 21:45:59 2015
@@ -391,7 +391,7 @@ public class FileSinkOperator extends Te
valToPaths.put("", fsp); // special entry for non-DP case
}
}
-
+
final StoragePolicyValue tmpStorage = StoragePolicyValue.lookup(HiveConf
.getVar(hconf,
HIVE_TEMPORARY_TABLE_STORAGE));
if (isTemporary && fsp != null
@@ -702,7 +702,7 @@ public class FileSinkOperator extends Te
fpaths.stat.addToStat(StatsSetupConst.ROW_COUNT, 1);
}
- if (++numRows == cntr) {
+ if ((++numRows == cntr) && isLogInfoEnabled) {
cntr *= 10;
LOG.info(toString() + ": records written - " + numRows);
}
@@ -967,7 +967,7 @@ public class FileSinkOperator extends Te
public void closeOp(boolean abort) throws HiveException {
row_count.set(numRows);
- LOG.info(toString() + ": records written - " + numRows);
+ LOG.info(toString() + ": records written - " + numRows);
if (!bDynParts && !filesCreated) {
createBucketFiles(fsp);
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
Mon Feb 23 21:45:59 2015
@@ -961,12 +961,16 @@ public class GroupByOperator extends Ope
}
hashAggregations.clear();
hashAggregations = null;
- LOG.info("Hash Table completed flushed");
+ if (isLogInfoEnabled) {
+ LOG.info("Hash Table completed flushed");
+ }
return;
}
int oldSize = hashAggregations.size();
- LOG.info("Hash Tbl flush: #hash table = " + oldSize);
+ if (isLogInfoEnabled) {
+ LOG.info("Hash Tbl flush: #hash table = " + oldSize);
+ }
Iterator<Map.Entry<KeyWrapper, AggregationBuffer[]>> iter =
hashAggregations
.entrySet().iterator();
int numDel = 0;
@@ -976,7 +980,9 @@ public class GroupByOperator extends Ope
iter.remove();
numDel++;
if (numDel * 10 >= oldSize) {
- LOG.info("Hash Table flushed: new size = " + hashAggregations.size());
+ if (isLogInfoEnabled) {
+ LOG.info("Hash Table flushed: new size = " + hashAggregations.size());
+ }
return;
}
}
@@ -1015,8 +1021,10 @@ public class GroupByOperator extends Ope
public void flush() throws HiveException{
try {
if (hashAggregations != null) {
- LOG.info("Begin Hash Table flush: size = "
- + hashAggregations.size());
+ if (isLogInfoEnabled) {
+ LOG.info("Begin Hash Table flush: size = "
+ + hashAggregations.size());
+ }
Iterator iter = hashAggregations.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<KeyWrapper, AggregationBuffer[]> m = (Map.Entry) iter
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
Mon Feb 23 21:45:59 2015
@@ -73,7 +73,7 @@ public class HashTableSinkOperator exten
/**
* The filters for join
*/
- private transient List<ExprNodeEvaluator>[] joinFilters;
+ private transient List<ExprNodeEvaluator>[] joinFilters;
private transient int[][] filterMaps;
@@ -103,7 +103,7 @@ public class HashTableSinkOperator exten
protected transient LogHelper console;
private long hashTableScale;
private MapJoinMemoryExhaustionHandler memoryExhaustionHandler;
-
+
public HashTableSinkOperator() {
}
@@ -265,7 +265,9 @@ public class HashTableSinkOperator exten
public void closeOp(boolean abort) throws HiveException {
try {
if (mapJoinTables == null) {
- LOG.debug("mapJoinTables is null");
+ if (isLogDebugEnabled) {
+ LOG.debug("mapJoinTables is null");
+ }
} else {
flushToFile();
}
@@ -280,7 +282,9 @@ public class HashTableSinkOperator exten
protected void flushToFile() throws IOException, HiveException {
// get tmp file URI
Path tmpURI = getExecContext().getLocalWork().getTmpPath();
- LOG.info("Temp URI for side table: " + tmpURI);
+ if (isLogInfoEnabled) {
+ LOG.info("Temp URI for side table: " + tmpURI);
+ }
for (byte tag = 0; tag < mapJoinTables.length; tag++) {
// get the key and value
MapJoinPersistableTableContainer tableContainer = mapJoinTables[tag];
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java
Mon Feb 23 21:45:59 2015
@@ -104,12 +104,12 @@ public class JoinOperator extends Common
storage[alias].clearRows();
}
} else {
- if (sz == nextSz) {
+ if (isLogInfoEnabled && (sz == nextSz)) {
// Print a message if we reached at least 1000 rows for a join
operand
// We won't print a message for the last join operand since the size
// will never goes to joinEmitInterval.
- LOG.info("table " + alias + " has " + sz + " rows for join key "
- + keyObject);
+ LOG.info("table " + alias + " has " + sz + " rows for join key "
+ + keyObject);
nextSz = getNextSize(nextSz);
}
}
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
Mon Feb 23 21:45:59 2015
@@ -110,10 +110,14 @@ public class MapJoinOperator extends Abs
mapJoinTables = (MapJoinTableContainer[]) cache.retrieve(tableKey);
mapJoinTableSerdes = (MapJoinTableContainerSerDe[])
cache.retrieve(serdeKey);
hashTblInitedOnce = true;
- LOG.info("Try to retrieve from cache");
+ if (isLogInfoEnabled) {
+ LOG.info("Try to retrieve from cache");
+ }
if (mapJoinTables == null || mapJoinTableSerdes == null) {
- LOG.info("Did not find tables in cache");
+ if (isLogInfoEnabled) {
+ LOG.info("Did not find tables in cache");
+ }
mapJoinTables = new MapJoinTableContainer[tagLen];
mapJoinTableSerdes = new MapJoinTableContainerSerDe[tagLen];
hashTblInitedOnce = false;
@@ -200,7 +204,9 @@ public class MapJoinOperator extends Abs
* requires changes in the Tez API with regard to finding bucket id and
* also ability to schedule tasks to re-use containers that have cached
the specific bucket.
*/
- LOG.info("This is not bucket map join, so cache");
+ if (isLogInfoEnabled) {
+ LOG.info("This is not bucket map join, so cache");
+ }
cache.cache(tableKey, mapJoinTables);
cache.cache(serdeKey, mapJoinTableSerdes);
}
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
Mon Feb 23 21:45:59 2015
@@ -453,7 +453,7 @@ public class MapOperator extends Operato
builder.append(context.alias);
}
if (isLogDebugEnabled) {
- LOG.info("Processing alias(es) " + builder.toString() + " for file " +
fpath);
+ LOG.debug("Processing alias(es) " + builder.toString() + " for file "
+ fpath);
}
}
// Add alias, table name, and partitions to hadoop conf so that their
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/MuxOperator.java
Mon Feb 23 21:45:59 2015
@@ -214,11 +214,15 @@ public class MuxOperator extends Operato
@Override
protected void initializeChildren(Configuration hconf) throws HiveException {
state = State.INIT;
- LOG.info("Operator " + id + " " + getName() + " initialized");
+ if (isLogInfoEnabled) {
+ LOG.info("Operator " + id + " " + getName() + " initialized");
+ }
if (childOperators == null || childOperators.isEmpty()) {
return;
}
- LOG.info("Initializing children of " + id + " " + getName());
+ if (isLogInfoEnabled) {
+ LOG.info("Initializing children of " + id + " " + getName());
+ }
childOperatorsArray[0].initialize(hconf, outputObjectInspectors);
if (reporter != null) {
childOperatorsArray[0].setReporter(reporter);
@@ -302,8 +306,10 @@ public class MuxOperator extends Operato
@Override
protected void closeOp(boolean abort) throws HiveException {
- for (int i = 0; i < numParents; i++) {
- LOG.info(id + ", tag=" + i + ", forwarded " + cntrs[i] + " rows");
+ if (isLogInfoEnabled) {
+ for (int i = 0; i < numParents; i++) {
+ LOG.info(id + ", tag=" + i + ", forwarded " + cntrs[i] + " rows");
+ }
}
}
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
--- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
(original)
+++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
Mon Feb 23 21:45:59 2015
@@ -343,7 +343,9 @@ public abstract class Operator<T extends
return;
}
- LOG.info("Initializing Self " + this);
+ if (isLogInfoEnabled) {
+ LOG.info("Initializing Self " + this);
+ }
if (inputOIs != null) {
inputObjInspectors = inputOIs;
@@ -391,7 +393,9 @@ public abstract class Operator<T extends
"Internal Hive error during operator initialization.");
}
- LOG.info("Initialization Done " + id + " " + getName());
+ if (isLogInfoEnabled) {
+ LOG.info("Initialization Done " + id + " " + getName());
+ }
}
public void initializeLocalWork(Configuration hconf) throws HiveException {
@@ -416,11 +420,15 @@ public abstract class Operator<T extends
*/
protected void initializeChildren(Configuration hconf) throws HiveException {
state = State.INIT;
- LOG.info("Operator " + id + " " + getName() + " initialized");
+ if (isLogInfoEnabled) {
+ LOG.info("Operator " + id + " " + getName() + " initialized");
+ }
if (childOperators == null || childOperators.isEmpty()) {
return;
}
- LOG.info("Initializing children of " + id + " " + getName());
+ if (isLogInfoEnabled) {
+ LOG.info("Initializing children of " + id + " " + getName());
+ }
for (int i = 0; i < childOperatorsArray.length; i++) {
childOperatorsArray[i].initialize(hconf, outputObjInspector,
childOperatorsTag[i]);
@@ -455,7 +463,9 @@ public abstract class Operator<T extends
*/
protected void initialize(Configuration hconf, ObjectInspector inputOI,
int parentId) throws HiveException {
- LOG.info("Initializing child " + id + " " + getName());
+ if (isLogInfoEnabled) {
+ LOG.info("Initializing child " + id + " " + getName());
+ }
// Double the size of the array if needed
if (parentId >= inputObjInspectors.length) {
int newLength = inputObjInspectors.length * 2;
@@ -565,7 +575,9 @@ public abstract class Operator<T extends
if(parent==null){
continue;
}
- LOG.debug("allInitializedParentsAreClosed? parent.state = " +
parent.state);
+ if (isLogDebugEnabled) {
+ LOG.debug("allInitializedParentsAreClosed? parent.state = " +
parent.state);
+ }
if (!(parent.state == State.CLOSE || parent.state == State.UNINIT)) {
return false;
}
@@ -585,14 +597,18 @@ public abstract class Operator<T extends
// check if all parents are finished
if (!allInitializedParentsAreClosed()) {
- LOG.debug("Not all parent operators are closed. Not closing.");
+ if (isLogDebugEnabled) {
+ LOG.debug("Not all parent operators are closed. Not closing.");
+ }
return;
}
// set state as CLOSE as long as all parents are closed
// state == CLOSE doesn't mean all children are also in state CLOSE
state = State.CLOSE;
- LOG.info(id + " finished. closing... ");
+ if (isLogInfoEnabled) {
+ LOG.info(id + " finished. closing... ");
+ }
// call the operator specific close routine
closeOp(abort);
@@ -606,11 +622,15 @@ public abstract class Operator<T extends
}
for (Operator<? extends OperatorDesc> op : childOperators) {
- LOG.debug("Closing child = " + op);
+ if (isLogDebugEnabled) {
+ LOG.debug("Closing child = " + op);
+ }
op.close(abort);
}
- LOG.info(id + " Close done");
+ if (isLogInfoEnabled) {
+ LOG.info(id + " Close done");
+ }
} catch (HiveException e) {
e.printStackTrace();
throw e;
@@ -856,8 +876,10 @@ public abstract class Operator<T extends
}
public void logStats() {
- for (String e : statsMap.keySet()) {
- LOG.info(e.toString() + ":" + statsMap.get(e).toString());
+ if (isLogInfoEnabled) {
+ for (String e : statsMap.keySet()) {
+ LOG.info(e.toString() + ":" + statsMap.get(e).toString());
+ }
}
}
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
Mon Feb 23 21:45:59 2015
@@ -81,7 +81,9 @@ public class OrcFileMergeOperator extend
if (prevPath == null) {
prevPath = k.getInputPath();
reader = OrcFile.createReader(fs, k.getInputPath());
- LOG.info("ORC merge file input path: " + k.getInputPath());
+ if (isLogInfoEnabled) {
+ LOG.info("ORC merge file input path: " + k.getInputPath());
+ }
}
// store the orc configuration from the first file. All other files
should
@@ -100,7 +102,9 @@ public class OrcFileMergeOperator extend
.version(version)
.rowIndexStride(rowIndexStride)
.inspector(reader.getObjectInspector()));
- LOG.info("ORC merge file output path: " + outPath);
+ if (isLogInfoEnabled) {
+ LOG.info("ORC merge file output path: " + outPath);
+ }
}
if (!checkCompatibility(k)) {
@@ -123,9 +127,11 @@ public class OrcFileMergeOperator extend
outWriter.appendStripe(buffer, 0, buffer.length,
v.getStripeInformation(),
v.getStripeStatistics());
- LOG.info("Merged stripe from file " + k.getInputPath() + " [ offset : "
- + v.getStripeInformation().getOffset() + " length: "
- + v.getStripeInformation().getLength() + " ]");
+ if (isLogInfoEnabled) {
+ LOG.info("Merged stripe from file " + k.getInputPath() + " [ offset : "
+ + v.getStripeInformation().getOffset() + " length: "
+ + v.getStripeInformation().getLength() + " ]");
+ }
// add user metadata to footer in case of any
if (v.isLastStripeInFile()) {
@@ -151,33 +157,43 @@ public class OrcFileMergeOperator extend
private boolean checkCompatibility(OrcFileKeyWrapper k) {
// check compatibility with subsequent files
if ((k.getTypes().get(0).getSubtypesCount() != columnCount)) {
- LOG.info("Incompatible ORC file merge! Column counts does not match for "
- + k.getInputPath());
+ if (isLogInfoEnabled) {
+ LOG.info("Incompatible ORC file merge! Column counts does not match for
"
+ + k.getInputPath());
+ }
return false;
}
if (!k.getCompression().equals(compression)) {
- LOG.info("Incompatible ORC file merge! Compression codec does not match"
+
- " for " + k.getInputPath());
+ if (isLogInfoEnabled) {
+ LOG.info("Incompatible ORC file merge! Compression codec does not
match" +
+ " for " + k.getInputPath());
+ }
return false;
}
if (k.getCompressBufferSize() != compressBuffSize) {
- LOG.info("Incompatible ORC file merge! Compression buffer size does not"
+
- " match for " + k.getInputPath());
+ if (isLogInfoEnabled) {
+ LOG.info("Incompatible ORC file merge! Compression buffer size does
not" +
+ " match for " + k.getInputPath());
+ }
return false;
}
if (!k.getVersion().equals(version)) {
- LOG.info("Incompatible ORC file merge! Version does not match for "
- + k.getInputPath());
+ if (isLogInfoEnabled) {
+ LOG.info("Incompatible ORC file merge! Version does not match for "
+ + k.getInputPath());
+ }
return false;
}
if (k.getRowIndexStride() != rowIndexStride) {
- LOG.info("Incompatible ORC file merge! Row index stride does not match" +
- " for " + k.getInputPath());
+ if (isLogInfoEnabled) {
+ LOG.info("Incompatible ORC file merge! Row index stride does not match"
+
+ " for " + k.getInputPath());
+ }
return false;
}
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
Mon Feb 23 21:45:59 2015
@@ -226,7 +226,7 @@ public class SMBMapJoinOperator extends
public void cleanUpInputFileChangedOp() throws HiveException {
inputFileChanged = true;
}
-
+
protected List<Object> smbJoinComputeKeys(Object row, byte alias) throws
HiveException {
return JoinUtil.computeKeys(row, joinKeys[alias],
joinKeysObjectInspectors[alias]);
@@ -265,8 +265,8 @@ public class SMBMapJoinOperator extends
byte alias = (byte) tag;
// compute keys and values as StandardObjects
- List<Object> key = smbJoinComputeKeys(row, alias);
-
+ List<Object> key = smbJoinComputeKeys(row, alias);
+
List<Object> value = getFilteredValue(alias, row);
@@ -527,7 +527,9 @@ public class SMBMapJoinOperator extends
BucketMatcher bucketMatcher =
ReflectionUtils.newInstance(bucketMatcherCls, null);
getExecContext().setFileId(bucketMatcherCxt.createFileId(currentInputPath.toString()));
- LOG.info("set task id: " + getExecContext().getFileId());
+ if (isLogInfoEnabled) {
+ LOG.info("set task id: " + getExecContext().getFileId());
+ }
bucketMatcher.setAliasBucketFileNameMapping(bucketMatcherCxt
.getAliasBucketFileNameMapping());
@@ -751,7 +753,9 @@ public class SMBMapJoinOperator extends
}
Integer current = top();
if (current == null) {
- LOG.info("MergeQueue forwarded " + counter + " rows");
+ if (isLogInfoEnabled) {
+ LOG.info("MergeQueue forwarded " + counter + " rows");
+ }
return null;
}
counter++;
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java
Mon Feb 23 21:45:59 2015
@@ -303,10 +303,11 @@ public class ScriptOperator extends Oper
}
void displayBrokenPipeInfo() {
- LOG
- .info("The script did not consume all input data. This is considered
as an error.");
- LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString()
- + "=true; to ignore it.");
+ if (isLogInfoEnabled) {
+ LOG.info("The script did not consume all input data. This is considered
as an error.");
+ LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString()
+ + "=true; to ignore it.");
+ }
return;
}
@@ -347,10 +348,12 @@ public class ScriptOperator extends Oper
}
String[] wrappedCmdArgs = addWrapper(cmdArgs);
- LOG.info("Executing " + Arrays.asList(wrappedCmdArgs));
- LOG.info("tablename=" + tableName);
- LOG.info("partname=" + partitionName);
- LOG.info("alias=" + alias);
+ if (isLogInfoEnabled) {
+ LOG.info("Executing " + Arrays.asList(wrappedCmdArgs));
+ LOG.info("tablename=" + tableName);
+ LOG.info("partname=" + partitionName);
+ LOG.info("alias=" + alias);
+ }
ProcessBuilder pb = new ProcessBuilder(wrappedCmdArgs);
Map<String, String> env = pb.environment();
@@ -442,8 +445,7 @@ public class ScriptOperator extends Oper
+ StringUtils.stringifyException(e2));
}
setDone(true);
- LOG
- .warn("Got broken pipe during write: ignoring exception and
setting operator to done");
+ LOG.warn("Got broken pipe during write: ignoring exception and setting
operator to done");
} else {
LOG.error("Error in writing to script: " + e.getMessage());
if (isBrokenPipeException(e)) {
@@ -666,7 +668,9 @@ public class ScriptOperator extends Oper
long now = System.currentTimeMillis();
// reporter is a member variable of the Operator class.
if (now - lastReportTime > 60 * 1000 && reporter != null) {
- LOG.info("ErrorStreamProcessor calling reporter.progress()");
+ if (isLogInfoEnabled) {
+ LOG.info("ErrorStreamProcessor calling reporter.progress()");
+ }
lastReportTime = now;
reporter.progress();
}
@@ -721,7 +725,9 @@ public class ScriptOperator extends Oper
}
proc.processLine(row);
}
- LOG.info("StreamThread " + name + " done");
+ if (isLogInfoEnabled) {
+ LOG.info("StreamThread " + name + " done");
+ }
} catch (Throwable th) {
scriptError = th;
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java
Mon Feb 23 21:45:59 2015
@@ -59,7 +59,9 @@ public class SelectOperator extends Oper
eval = ExprNodeEvaluatorFactory.toCachedEvals(eval);
}
output = new Object[eval.length];
- LOG.info("SELECT " + inputObjInspectors[0].getTypeName());
+ if (isLogInfoEnabled) {
+ LOG.info("SELECT " + inputObjInspectors[0].getTypeName());
+ }
outputObjInspector = initEvaluatorsAndReturnStruct(eval,
conf.getOutputColumnNames(),
inputObjInspectors[0]);
initializeChildren(hconf);
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
Mon Feb 23 21:45:59 2015
@@ -153,7 +153,9 @@ public class TableScanOperator extends O
values.add(o == null ? defaultPartitionName : o.toString());
}
partitionSpecs = FileUtils.makePartName(conf.getPartColumns(), values);
- LOG.info("Stats Gathering found a new partition spec = " +
partitionSpecs);
+ if (isLogInfoEnabled) {
+ LOG.info("Stats Gathering found a new partition spec = " +
partitionSpecs);
+ }
}
// find which column contains the raw data size (both partitioned and
non partitioned
int uSizeColumn = -1;
@@ -279,7 +281,9 @@ public class TableScanOperator extends O
StatsPublisher statsPublisher = Utilities.getStatsPublisher(jc);
if (!statsPublisher.connect(jc)) {
// just return, stats gathering should not block the main query.
- LOG.info("StatsPublishing error: cannot connect to database.");
+ if (isLogInfoEnabled) {
+ LOG.info("StatsPublishing error: cannot connect to database.");
+ }
if (isStatsReliable) {
throw new
HiveException(ErrorMsg.STATSPUBLISHER_CONNECTION_ERROR.getErrorCodedMsg());
}
@@ -307,7 +311,9 @@ public class TableScanOperator extends O
throw new
HiveException(ErrorMsg.STATSPUBLISHER_PUBLISHING_ERROR.getErrorCodedMsg());
}
}
- LOG.info("publishing : " + key + " : " + statsToPublish.toString());
+ if (isLogInfoEnabled) {
+ LOG.info("publishing : " + key + " : " + statsToPublish.toString());
+ }
}
if (!statsPublisher.closeConnection()) {
if (isStatsReliable) {
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java
Mon Feb 23 21:45:59 2015
@@ -111,7 +111,7 @@ public class UnionOperator extends Opera
// to
// create ObjectInspectors.
needsTransform[p] = (inputObjInspectors[p] != outputObjInspector);
- if (needsTransform[p]) {
+ if (isLogInfoEnabled && needsTransform[p]) {
LOG.info("Union Operator needs to transform row from parent[" + p
+ "] from " + inputObjInspectors[p] + " to " + outputObjInspector);
}
Modified:
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
URL:
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java?rev=1661791&r1=1661790&r2=1661791&view=diff
==============================================================================
---
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
(original)
+++
hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
Mon Feb 23 21:45:59 2015
@@ -32,7 +32,6 @@ import org.apache.hadoop.hive.ql.exec.Op
import org.apache.hadoop.hive.ql.exec.OperatorUtils;
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
import org.apache.hadoop.hive.ql.parse.GlobalLimitCtx;
@@ -99,28 +98,18 @@ public class GlobalLimitOptimizer implem
// query qualify for the optimization
if (tempGlobalLimit != null && tempGlobalLimit != 0) {
Table tab = ts.getConf().getTableMetadata();
+ Set<FilterOperator> filterOps = OperatorUtils.findOperators(ts,
FilterOperator.class);
if (!tab.isPartitioned()) {
- Set<FilterOperator> filterOps =
- OperatorUtils.findOperators(ts, FilterOperator.class);
if (filterOps.size() == 0) {
globalLimitCtx.enableOpt(tempGlobalLimit);
}
} else {
// check if the pruner only contains partition columns
- if (PartitionPruner.onlyContainsPartnCols(tab,
- opToPartPruner.get(ts))) {
+ if (onlyContainsPartnCols(tab, filterOps)) {
- PrunedPartitionList partsList;
- try {
- String alias = (String) topOps.keySet().toArray()[0];
- partsList = PartitionPruner.prune(ts, pctx, alias);
- } catch (HiveException e) {
- // Has to use full name to make sure it does not conflict with
- // org.apache.commons.lang.StringUtils
-
LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- }
+ String alias = (String) topOps.keySet().toArray()[0];
+ PrunedPartitionList partsList = pctx.getPrunedPartitions(alias,
ts);
// If there is any unknown partition, create a map-reduce job for
// the filter to prune correctly
@@ -138,6 +127,15 @@ public class GlobalLimitOptimizer implem
return pctx;
}
+ private boolean onlyContainsPartnCols(Table table, Set<FilterOperator>
filters) {
+ for (FilterOperator filter : filters) {
+ if (!PartitionPruner.onlyContainsPartnCols(table,
filter.getConf().getPredicate())) {
+ return false;
+ }
+ }
+ return true;
+ }
+
/**
* Check the limit number in all sub queries
*