hive git commit: HIVE-13788 : hive msck listpartitions need to make use of directSQL instead of datanucleus

2016-06-13 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 12ad34d62 -> 696104fab


HIVE-13788 : hive msck listpartitions need to make use of directSQL instead of datanucleus

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/696104fa
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/696104fa
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/696104fa

Branch: refs/heads/master
Commit: 696104fab350a470a745530105d34f1015ded664
Parents: 12ad34d
Author: Hari Subramaniyan 
Authored: Fri Jun 10 14:43:00 2016 -0800
Committer: Ashutosh Chauhan 
Committed: Mon Jun 13 09:42:11 2016 -0700

--
 .../apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java   | 6 +-
 .../apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java   | 6 --
 2 files changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/696104fa/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
index 10fa561..fa3ad0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
+import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.thrift.TException;
 
 /**
@@ -196,8 +198,10 @@ public class HiveMetaStoreChecker {
 
     if (table.isPartitioned()) {
       if (partitions == null || partitions.isEmpty()) {
+        PrunedPartitionList prunedPartList =
+            PartitionPruner.prune(table, null, conf, toString(), null);
         // no partitions specified, let's get all
-        parts = hive.getPartitions(table);
+        parts.addAll(prunedPartList.getPartitions());
       } else {
         // we're interested in specific partitions,
         // don't check for any others

http://git-wip-us.apache.org/repos/asf/hive/blob/696104fa/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
index 02c5a89..26e936e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
@@ -227,7 +227,7 @@ public class PartitionPruner extends Transform {
 
   private static PrunedPartitionList getAllPartsFromCacheOrServer(Table tab,
       String key, boolean unknownPartitions,
       Map<String, PrunedPartitionList> partsCache)  throws SemanticException {
-    PrunedPartitionList ppList = partsCache.get(key);
+    PrunedPartitionList ppList = partsCache == null ? null : partsCache.get(key);
     if (ppList != null) {
       return ppList;
     }
@@ -238,7 +238,9 @@ public class PartitionPruner extends Transform {
       throw new SemanticException(e);
     }
     ppList = new PrunedPartitionList(tab, parts, null, unknownPartitions);
-    partsCache.put(key, ppList);
+    if (partsCache != null) {
+      partsCache.put(key, ppList);
+    }
     return ppList;
   }
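
For context, a minimal standalone sketch (not part of the commit) of the lookup path the checker now uses. The "msck" cache key and the helper class are illustrative assumptions; the prune call with a null expression and null cache mirrors the hunks above:

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
import org.apache.hadoop.hive.ql.parse.SemanticException;

class MsckPartitionFetch {
  // Fetch all partitions of a table through the pruner, which can route the
  // metastore request through directSQL instead of DataNucleus object loading.
  static Set<Partition> allPartitions(Table table, HiveConf conf)
      throws SemanticException {
    // null expression = no filter (all partitions); a null cache is now
    // tolerated thanks to the guards added in getAllPartsFromCacheOrServer.
    PrunedPartitionList pruned =
        PartitionPruner.prune(table, null, conf, "msck", null);
    return new HashSet<Partition>(pruned.getPartitions());
  }
}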
 



hive git commit: HIVE-13900: HiveStatement.executeAsync() may not work properly when hive.server2.async.exec.async.compile is turned on (Aihua Xu, reviewed by Jimmy Xiang)

2016-06-13 Thread aihuaxu
Repository: hive
Updated Branches:
  refs/heads/master 696104fab -> 2f285aea0


HIVE-13900: HiveStatement.executeAsync() may not work properly when hive.server2.async.exec.async.compile is turned on (Aihua Xu, reviewed by Jimmy Xiang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2f285aea
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2f285aea
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2f285aea

Branch: refs/heads/master
Commit: 2f285aea07ddad904058b51c137abe633289794d
Parents: 696104f
Author: Aihua Xu 
Authored: Wed Jun 8 15:14:35 2016 -0400
Committer: Aihua Xu 
Committed: Mon Jun 13 14:30:19 2016 -0400

--
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   | 59 ++--
 .../org/apache/hive/jdbc/HiveStatement.java | 24 +++-
 2 files changed, 78 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2f285aea/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 7243648..b0fa98f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -2734,7 +2734,33 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
   @Test
   public void testSelectExecAsync() throws Exception {
     HiveStatement stmt = (HiveStatement) con.createStatement();
-    ResultSet rs;
+    testSelect(stmt);
+    stmt.close();
+  }
+
+  @Test
+  public void testSelectExecAsync2() throws Exception {
+    HiveStatement stmt = (HiveStatement) con.createStatement();
+
+    stmt.execute("SET hive.driver.parallel.compilation=true");
+    stmt.execute("SET hive.server2.async.exec.async.compile=true");
+
+    testSelect(stmt);
+    stmt.close();
+  }
+
+  @Test
+  public void testSelectExecAsync3() throws Exception {
+    HiveStatement stmt = (HiveStatement) con.createStatement();
+
+    stmt.execute("SET hive.driver.parallel.compilation=true");
+    stmt.execute("SET hive.server2.async.exec.async.compile=false");
+
+    testSelect(stmt);
+    stmt.close();
+  }
+
+  private void testSelect(HiveStatement stmt) throws SQLException {
     // Expected row count of the join query we'll run
     int expectedCount = 1028;
     int rowCount = 0;
@@ -2742,7 +2768,7 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
     stmt.executeAsync("select t1.value as v11, " + "t2.value as v12 from " + tableName
         + " t1 join " + tableName + " t2 on t1.under_col = t2.under_col");
     assertTrue(isResulSet);
-    rs = stmt.getResultSet();
+    ResultSet rs = stmt.getResultSet();
     assertNotNull(rs);
     // ResultSet#next blocks until the async query is complete
     while (rs.next()) {
@@ -2751,7 +2777,6 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
       assertNotNull(value);
     }
     assertEquals(rowCount, expectedCount);
-    stmt.close();
   }
 
   /**
@@ -2789,6 +2814,33 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
   @Test
   public void testInsertOverwriteExecAsync() throws Exception {
     HiveStatement stmt = (HiveStatement) con.createStatement();
+    testInsertOverwrite(stmt);
+    stmt.close();
+  }
+
+  @Test
+  public void testInsertOverwriteExecAsync2() throws Exception {
+    HiveStatement stmt = (HiveStatement) con.createStatement();
+
+    stmt.execute("SET hive.driver.parallel.compilation=true");
+    stmt.execute("SET hive.server2.async.exec.async.compile=true");
+
+    testInsertOverwrite(stmt);
+    stmt.close();
+  }
+
+  @Test
+  public void testInsertOverwriteExecAsync3() throws Exception {
+    HiveStatement stmt = (HiveStatement) con.createStatement();
+
+    stmt.execute("SET hive.driver.parallel.compilation=true");
+    stmt.execute("SET hive.server2.async.exec.async.compile=false");
+
+    testInsertOverwrite(stmt);
+    stmt.close();
+  }
+
+  private void testInsertOverwrite(HiveStatement stmt) throws SQLException {
     String tblName = "testInsertOverwriteExecAsync";
     int rowCount = 0;
     stmt.execute("create table " + tblName + " (col1 int , col2 string)");
@@ -2807,6 +2859,5 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
     }
     assertEquals(rowCount, dataFileRowCount);
     stmt.execute("drop table " + tblName);
-    stmt.close();
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/2f285aea/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
--
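
Since the HiveStatement diff itself is truncated above, a hedged usage sketch of the async API under test may help; the JDBC URL and the query/table are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import org.apache.hive.jdbc.HiveStatement;

public class AsyncExecDemo {
  public static void main(String[] args) throws Exception {
    Connection con =
        DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
    HiveStatement stmt = (HiveStatement) con.createStatement();
    // The mode this patch fixes: the server compiles the query asynchronously.
    stmt.execute("SET hive.server2.async.exec.async.compile=true");
    boolean isResultSet = stmt.executeAsync("select under_col, value from src");
    if (isResultSet) {
      ResultSet rs = stmt.getResultSet();
      // ResultSet#next blocks until the async query is complete.
      while (rs.next()) {
        System.out.println(rs.getString(2));
      }
    }
    stmt.close();
    con.close();
  }
}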

hive git commit: HIVE-13392 disable speculative execution for ACID Compactor (Eugene Koifman, reviewed by Wei Zheng, Alan Gates)

2016-06-13 Thread ekoifman
Repository: hive
Updated Branches:
  refs/heads/master 2f285aea0 -> 00e177614


HIVE-13392 disable speculative execution for ACID Compactor (Eugene Koifman, reviewed by Wei Zheng, Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/00e17761
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/00e17761
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/00e17761

Branch: refs/heads/master
Commit: 00e1776147b2e4d4025167210ff215694170c7d8
Parents: 2f285ae
Author: Eugene Koifman 
Authored: Mon Jun 13 11:37:56 2016 -0700
Committer: Eugene Koifman 
Committed: Mon Jun 13 11:38:26 2016 -0700

--
 .../hive/common/ValidCompactorTxnList.java  | 111 +++
 .../hive/metastore/txn/CompactionInfo.java  |   1 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   1 +
 .../metastore/txn/ValidCompactorTxnList.java| 111 ---
 .../txn/TestValidCompactorTxnList.java  |   1 +
 .../hive/ql/txn/compactor/CompactorMR.java  |   8 +-
 .../apache/hadoop/hive/ql/io/TestAcidUtils.java |   2 +-
 7 files changed, 121 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/00e17761/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java b/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
new file mode 100644
index 000..ad79e2c
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+
+import java.util.Arrays;
+
+/**
+ * An implementation of {@link org.apache.hadoop.hive.common.ValidTxnList} for use by the compactor.
+ * For the purposes of {@link #isTxnRangeValid} this class will view a transaction as valid if it
+ * is committed or aborted.  Additionally it will return none if there are any open transactions
+ * below the max transaction given, since we don't want to compact above open transactions.  For
+ * {@link #isTxnValid} it will still view a transaction as valid only if it is committed.  These
+ * produce the logic we need to assure that the compactor only sees records less than the lowest
+ * open transaction when choosing which files to compact, but that it still ignores aborted
+ * records when compacting.
+ */
+public class ValidCompactorTxnList extends ValidReadTxnList {
+  //TODO: refactor this - minOpenTxn is not needed if we set
+  // highWatermark = Math.min(highWaterMark, minOpenTxn) (assuming there are open txns)
+
+  // The minimum open transaction id
+  private long minOpenTxn;
+
+  public ValidCompactorTxnList() {
+    super();
+    minOpenTxn = -1;
+  }
+
+  /**
+   *
+   * @param exceptions list of all open and aborted transactions
+   * @param minOpen lowest open transaction
+   * @param highWatermark highest committed transaction
+   */
+  public ValidCompactorTxnList(long[] exceptions, long minOpen, long highWatermark) {
+    super(exceptions, highWatermark);
+    minOpenTxn = minOpen;
+  }
+
+  public ValidCompactorTxnList(String value) {
+    super(value);
+  }
+
+  @Override
+  public RangeResponse isTxnRangeValid(long minTxnId, long maxTxnId) {
+    if (highWatermark < minTxnId) {
+      return RangeResponse.NONE;
+    } else if (minOpenTxn < 0) {
+      return highWatermark >= maxTxnId ? RangeResponse.ALL : RangeResponse.NONE;
+    } else {
+      return minOpenTxn > maxTxnId ? RangeResponse.ALL : RangeResponse.NONE;
+    }
+  }
+
+  @Override
+  public String writeToString() {
+    StringBuilder buf = new StringBuilder();
+    buf.append(highWatermark);
+    buf.append(':');
+    buf.append(minOpenTxn);
+    if (exceptions.length == 0) {
+      buf.append(':');
+    } else {
+  
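
A small worked example (assuming the three-argument constructor shown above) of how the compactor's range check behaves once there is an open transaction:

import org.apache.hadoop.hive.common.ValidCompactorTxnList;

public class CompactorTxnListDemo {
  public static void main(String[] args) {
    // Txn 7 is open, txn 9 was aborted, highest committed txn is 12.
    ValidCompactorTxnList txns =
        new ValidCompactorTxnList(new long[] {7, 9}, 7, 12);
    // Entire range below the lowest open txn: compactable.
    System.out.println(txns.isTxnRangeValid(1, 6));  // ALL
    // Range reaches past the lowest open txn: nothing is compacted,
    // because we must not compact above an open transaction.
    System.out.println(txns.isTxnRangeValid(5, 10)); // NONE
  }
}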

hive git commit: HIVE-13392 disable speculative execution for ACID Compactor (Eugene Koifman, reviewed by Wei Zheng, Alan Gates)

2016-06-13 Thread ekoifman
Repository: hive
Updated Branches:
  refs/heads/branch-1 39decb0bf -> 293e22e0e


HIVE-13392 disable speculative execution for ACID Compactor (Eugene Koifman, reviewed by Wei Zheng, Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/293e22e0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/293e22e0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/293e22e0

Branch: refs/heads/branch-1
Commit: 293e22e0eed47ec3f7e0ce4d981366c59b65455c
Parents: 39decb0
Author: Eugene Koifman 
Authored: Mon Jun 13 11:41:30 2016 -0700
Committer: Eugene Koifman 
Committed: Mon Jun 13 11:41:30 2016 -0700

--
 .../hive/common/ValidCompactorTxnList.java  | 111 +++
 .../hive/metastore/txn/CompactionInfo.java  |   1 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   1 +
 .../metastore/txn/ValidCompactorTxnList.java| 111 ---
 .../txn/TestValidCompactorTxnList.java  |   1 +
 .../hive/ql/txn/compactor/CompactorMR.java  |   8 +-
 .../apache/hadoop/hive/ql/io/TestAcidUtils.java |   2 +-
 7 files changed, 121 insertions(+), 114 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/293e22e0/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java b/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
new file mode 100644
index 000..ad79e2c
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/ValidCompactorTxnList.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+
+import java.util.Arrays;
+
+/**
+ * An implementation of {@link org.apache.hadoop.hive.common.ValidTxnList} for use by the compactor.
+ * For the purposes of {@link #isTxnRangeValid} this class will view a transaction as valid if it
+ * is committed or aborted.  Additionally it will return none if there are any open transactions
+ * below the max transaction given, since we don't want to compact above open transactions.  For
+ * {@link #isTxnValid} it will still view a transaction as valid only if it is committed.  These
+ * produce the logic we need to assure that the compactor only sees records less than the lowest
+ * open transaction when choosing which files to compact, but that it still ignores aborted
+ * records when compacting.
+ */
+public class ValidCompactorTxnList extends ValidReadTxnList {
+  //TODO: refactor this - minOpenTxn is not needed if we set
+  // highWatermark = Math.min(highWaterMark, minOpenTxn) (assuming there are open txns)
+
+  // The minimum open transaction id
+  private long minOpenTxn;
+
+  public ValidCompactorTxnList() {
+    super();
+    minOpenTxn = -1;
+  }
+
+  /**
+   *
+   * @param exceptions list of all open and aborted transactions
+   * @param minOpen lowest open transaction
+   * @param highWatermark highest committed transaction
+   */
+  public ValidCompactorTxnList(long[] exceptions, long minOpen, long highWatermark) {
+    super(exceptions, highWatermark);
+    minOpenTxn = minOpen;
+  }
+
+  public ValidCompactorTxnList(String value) {
+    super(value);
+  }
+
+  @Override
+  public RangeResponse isTxnRangeValid(long minTxnId, long maxTxnId) {
+    if (highWatermark < minTxnId) {
+      return RangeResponse.NONE;
+    } else if (minOpenTxn < 0) {
+      return highWatermark >= maxTxnId ? RangeResponse.ALL : RangeResponse.NONE;
+    } else {
+      return minOpenTxn > maxTxnId ? RangeResponse.ALL : RangeResponse.NONE;
+    }
+  }
+
+  @Override
+  public String writeToString() {
+    StringBuilder buf = new StringBuilder();
+    buf.append(highWatermark);
+    buf.append(':');
+    buf.append(minOpenTxn);
+    if (exceptions.length == 0) {
+      buf.append(':');
+    } else {
+  

hive git commit: HIVE-13866 : flatten callstack for directSQL errors (Sergey Shelukhin, reviewed by Ashutosh Chauhan)

2016-06-13 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/master 00e177614 -> b3b4516ac


HIVE-13866 : flatten callstack for directSQL errors (Sergey Shelukhin, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b3b4516a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b3b4516a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b3b4516a

Branch: refs/heads/master
Commit: b3b4516ac5dfc2d47011ae1550dcfa96a1290bfa
Parents: 00e1776
Author: Sergey Shelukhin 
Authored: Mon Jun 13 16:28:09 2016 -0700
Committer: Sergey Shelukhin 
Committed: Mon Jun 13 16:28:09 2016 -0700

--
 .../hadoop/hive/metastore/ObjectStore.java  | 43 +++-
 1 file changed, 42 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b3b4516a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
--
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index f98de13..da188d3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -2696,7 +2696,16 @@ public class ObjectStore implements RawStore, Configurable {
     }
 
     private void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObjectException {
-      LOG.warn("Direct SQL failed" + (allowJdo ? ", falling back to ORM" : ""), ex);
+      String message = null;
+      try {
+        message = generateShorterMessage(ex);
+      } catch (Throwable t) {
+        message = ex.toString() + "; error building a better message: " + t.getMessage();
+      }
+      LOG.warn(message); // Don't log the exception, people just get confused.
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Full DirectSQL callstack for debugging (note: this is not an error)", ex);
+      }
       if (!allowJdo) {
         if (ex instanceof MetaException) {
           throw (MetaException)ex;
@@ -2739,6 +2748,38 @@ public class ObjectStore implements RawStore, Configurable {
       doUseDirectSql = false;
     }
 
+    private String generateShorterMessage(Exception ex) {
+      StringBuilder message = new StringBuilder(
+          "Falling back to ORM path due to direct SQL failure (this is not an error): ");
+      Throwable t = ex;
+      StackTraceElement[] prevStack = null;
+      while (t != null) {
+        message.append(t.getMessage());
+        StackTraceElement[] stack = t.getStackTrace();
+        int uniqueFrames = stack.length - 1;
+        if (prevStack != null) {
+          int n = prevStack.length - 1;
+          while (uniqueFrames >= 0 && n >= 0 && stack[uniqueFrames].equals(prevStack[n])) {
+            uniqueFrames--; n--;
+          }
+        }
+        for (int i = 0; i <= uniqueFrames; ++i) {
+          StackTraceElement ste = stack[i];
+          message.append(" at ").append(ste);
+          if (ste.getMethodName() != null && ste.getMethodName().contains("getSqlResult")
+              && (ste.getFileName() == null || ste.getFileName().contains("ObjectStore"))) {
+            break;
+          }
+        }
+        prevStack = stack;
+        t = t.getCause();
+        if (t != null) {
+          message.append(";\n Caused by: ");
+        }
+      }
+      return message.toString();
+    }
+
     public void disableDirectSql() {
       this.doUseDirectSql = false;
     }
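
The frame de-duplication above walks a cause's stack from the bottom and drops the suffix it shares with its wrapper. A self-contained sketch of that idea (names hypothetical, independent of ObjectStore):

public class StackDedupDemo {
  public static void main(String[] args) {
    try {
      outer();
    } catch (RuntimeException e) {
      StringBuilder msg = new StringBuilder(e.getMessage());
      StackTraceElement[] prevStack = e.getStackTrace();
      Throwable cause = e.getCause();
      StackTraceElement[] stack = cause.getStackTrace();
      // Shared bottom frames are the common call path; skip them.
      int unique = stack.length - 1, n = prevStack.length - 1;
      while (unique >= 0 && n >= 0 && stack[unique].equals(prevStack[n])) {
        unique--; n--;
      }
      msg.append(";\n Caused by: ").append(cause.getMessage());
      for (int i = 0; i <= unique; ++i) {
        msg.append(" at ").append(stack[i]);
      }
      System.out.println(msg);
    }
  }

  private static void outer() {
    try {
      inner();
    } catch (IllegalStateException e) {
      throw new RuntimeException("direct SQL failed", e);
    }
  }

  private static void inner() {
    throw new IllegalStateException("bad underlying column type");
  }
}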



hive git commit: HIVE-13957 : vectorized IN is inconsistent with non-vectorized (at least for decimal in (string)) (Sergey Shelukhin, reviewed by Matt McCline)

2016-06-13 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/master b3b4516ac -> 59e6c83fd


HIVE-13957 : vectorized IN is inconsistent with non-vectorized (at least for decimal in (string)) (Sergey Shelukhin, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/59e6c83f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/59e6c83f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/59e6c83f

Branch: refs/heads/master
Commit: 59e6c83fd91456b7b1e28dcbc61a4d46c7097dd7
Parents: b3b4516
Author: Sergey Shelukhin 
Authored: Mon Jun 13 18:32:12 2016 -0700
Committer: Sergey Shelukhin 
Committed: Mon Jun 13 18:32:12 2016 -0700

--
 .../ql/exec/vector/VectorizationContext.java|  30 --
 .../hive/ql/udf/generic/GenericUDFUtils.java|  52 +++--
 .../clientpositive/vector_string_decimal.q  |  21 
 .../spark/vector_between_in.q.out   |   3 -
 .../clientpositive/tez/vector_between_in.q.out  |   3 -
 .../clientpositive/vector_between_in.q.out  |   3 -
 .../clientpositive/vector_string_decimal.q.out  | 106 +++
 .../hive/serde2/typeinfo/HiveDecimalUtils.java  |   4 +-
 8 files changed, 192 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/59e6c83f/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index f486e9a..c4f47e1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -517,8 +517,8 @@ public class VectorizationContext {
    * Given a udf and its children, return the common type to which the children's type should be
    * cast.
    */
-  private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf, List<ExprNodeDesc> children,
-      TypeInfo returnType) {
+  private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf,
+      List<ExprNodeDesc> children, TypeInfo returnType) throws HiveException {
     TypeInfo commonType;
     if (genericUdf instanceof GenericUDFBaseCompare) {
@@ -530,9 +530,20 @@ public class VectorizationContext {
         commonType = returnType;
       }
     } else if (genericUdf instanceof GenericUDFIn) {
-
-      // Cast to the type of the first child
-      return children.get(0).getTypeInfo();
+      TypeInfo colTi = children.get(0).getTypeInfo();
+      if (colTi.getCategory() != Category.PRIMITIVE) {
+        return colTi; // Handled later, only struct will be supported.
+      }
+      TypeInfo opTi = GenericUDFUtils.deriveInType(children);
+      if (opTi == null || opTi.getCategory() != Category.PRIMITIVE) {
+        throw new HiveException("Cannot vectorize IN() - common type is " + opTi);
+      }
+      if (((PrimitiveTypeInfo)colTi).getPrimitiveCategory() !=
+          ((PrimitiveTypeInfo)opTi).getPrimitiveCategory()) {
+        throw new HiveException("Cannot vectorize IN() - casting a column is not supported. "
+            + "Column type is " + colTi + " but the common type is " + opTi);
+      }
+      return colTi;
     } else {
       // The children type should be converted to return type
       commonType = returnType;
@@ -629,6 +640,7 @@ public class VectorizationContext {
     }
     PrimitiveTypeInfo ptinfo = (PrimitiveTypeInfo) inputTypeInfo;
     int precision = getPrecisionForType(ptinfo);
+    // TODO: precision and scale would be practically invalid for string conversion (38,38)
     int scale = HiveDecimalUtils.getScaleForType(ptinfo);
     return new DecimalTypeInfo(precision, scale);
   }
@@ -1503,8 +1515,8 @@ public class VectorizationContext {
   /**
    * Create a filter or boolean-valued expression for column IN ( <expr list> )
    */
-  private VectorExpression getInExpression(List<ExprNodeDesc> childExpr, VectorExpressionDescriptor.Mode mode, TypeInfo returnType)
-      throws HiveException {
+  private VectorExpression getInExpression(List<ExprNodeDesc> childExpr,
+      VectorExpressionDescriptor.Mode mode, TypeInfo returnType) throws HiveException {
     ExprNodeDesc colExpr = childExpr.get(0);
     List<ExprNodeDesc> inChildren = childExpr.subList(1, childExpr.size());
 
@@ -1512,7 +1524,7 @@ public class VectorizationContext {
     colType = VectorizationContext.mapTypeNameSynonyms(colType);
     TypeInfo colTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(colType);
     Category category = colTypeInfo.getCategory();
-    if (category == Category.STRUCT){
+    if (category == Category.STRUCT) {
       return getStructInExpression(childExpr, colExpr, colTypeInfo, inChildren, mode, returnType);
     } else if (catego
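
To see the inconsistency this fixes from the query side, a hedged sketch (the JDBC URL, table, and column are placeholders) of the query shape covered by the new vector_string_decimal.q test:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DecimalInStringDemo {
  public static void main(String[] args) throws Exception {
    Connection con =
        DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
    Statement stmt = con.createStatement();
    stmt.execute("SET hive.vectorized.execution.enabled=true");
    // Before this patch the vectorized path cast the string constants to the
    // type of the first child (the decimal column), which could disagree with
    // the non-vectorized result; now a common type is derived, and cases that
    // would require casting the column refuse vectorization instead.
    ResultSet rs = stmt.executeQuery(
        "select * from decimal_tbl where dec in ('0.3', '1.5')");
    while (rs.next()) {
      System.out.println(rs.getString(1));
    }
    stmt.close();
    con.close();
  }
}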

[2/2] hive git commit: HIVE-13957 : vectorized IN is inconsistent with non-vectorized (at least for decimal in (string)) (Sergey Shelukhin, reviewed by Matt McCline)

2016-06-13 Thread sershe
HIVE-13957 : vectorized IN is inconsistent with non-vectorized (at least for decimal in (string)) (Sergey Shelukhin, reviewed by Matt McCline)

Conflicts:

ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
ql/src/test/results/clientpositive/spark/vector_between_in.q.out
ql/src/test/results/clientpositive/tez/vector_between_in.q.out
ql/src/test/results/clientpositive/vector_between_in.q.out


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/78bedc8e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/78bedc8e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/78bedc8e

Branch: refs/heads/branch-1
Commit: 78bedc8e212dddae70d8635a7cdaace7275923b3
Parents: 293e22e
Author: Sergey Shelukhin 
Authored: Mon Jun 13 18:32:12 2016 -0700
Committer: Sergey Shelukhin 
Committed: Mon Jun 13 18:48:35 2016 -0700

--
 .../ql/exec/vector/VectorizationContext.java|  30 --
 .../hive/ql/udf/generic/GenericUDFUtils.java|  52 +++--
 .../clientpositive/vector_string_decimal.q  |  21 
 .../spark/vector_between_in.q.out   |   2 -
 .../clientpositive/tez/vector_between_in.q.out  |   2 -
 .../clientpositive/vector_between_in.q.out  |   2 -
 .../clientpositive/vector_string_decimal.q.out  | 106 +++
 .../hive/serde2/typeinfo/HiveDecimalUtils.java  |   4 +-
 8 files changed, 192 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/78bedc8e/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 9caa771..3aa182a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -500,8 +500,8 @@ public class VectorizationContext {
    * Given a udf and its children, return the common type to which the children's type should be
    * cast.
    */
-  private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf, List<ExprNodeDesc> children,
-      TypeInfo returnType) {
+  private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf,
+      List<ExprNodeDesc> children, TypeInfo returnType) throws HiveException {
     TypeInfo commonType;
     if (genericUdf instanceof GenericUDFBaseCompare) {
@@ -513,9 +513,20 @@ public class VectorizationContext {
         commonType = returnType;
       }
     } else if (genericUdf instanceof GenericUDFIn) {
-
-      // Cast to the type of the first child
-      return children.get(0).getTypeInfo();
+      TypeInfo colTi = children.get(0).getTypeInfo();
+      if (colTi.getCategory() != Category.PRIMITIVE) {
+        return colTi; // Handled later, only struct will be supported.
+      }
+      TypeInfo opTi = GenericUDFUtils.deriveInType(children);
+      if (opTi == null || opTi.getCategory() != Category.PRIMITIVE) {
+        throw new HiveException("Cannot vectorize IN() - common type is " + opTi);
+      }
+      if (((PrimitiveTypeInfo)colTi).getPrimitiveCategory() !=
+          ((PrimitiveTypeInfo)opTi).getPrimitiveCategory()) {
+        throw new HiveException("Cannot vectorize IN() - casting a column is not supported. "
+            + "Column type is " + colTi + " but the common type is " + opTi);
+      }
+      return colTi;
     } else {
       // The children type should be converted to return type
       commonType = returnType;
@@ -612,6 +623,7 @@ public class VectorizationContext {
     }
     PrimitiveTypeInfo ptinfo = (PrimitiveTypeInfo) inputTypeInfo;
     int precision = getPrecisionForType(ptinfo);
+    // TODO: precision and scale would be practically invalid for string conversion (38,38)
     int scale = HiveDecimalUtils.getScaleForType(ptinfo);
     return new DecimalTypeInfo(precision, scale);
   }
@@ -1444,8 +1456,8 @@ public class VectorizationContext {
   /**
    * Create a filter or boolean-valued expression for column IN ( <expr list> )
    */
-  private VectorExpression getInExpression(List<ExprNodeDesc> childExpr, Mode mode, TypeInfo returnType)
-      throws HiveException {
+  private VectorExpression getInExpression(List<ExprNodeDesc> childExpr,
+      VectorExpressionDescriptor.Mode mode, TypeInfo returnType) throws HiveException {
     ExprNodeDesc colExpr = childExpr.get(0);
     List<ExprNodeDesc> inChildren = childExpr.subList(1, childExpr.size());
 
@@ -1453,7 +1465,7 @@ public class VectorizationContext {
     colType = VectorizationContext.mapTypeNameSynonyms(colType);
     TypeInfo colTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(colType);
     Category category = colTypeInfo.getCategory();

[1/2] hive git commit: HIVE-13957 : vectorized IN is inconsistent with non-vectorized (at least for decimal in (string)) (Sergey Shelukhin, reviewed by Matt McCline)

2016-06-13 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-1 293e22e0e -> 78bedc8e2
  refs/heads/branch-2.0 54760abdc -> 69440a62a


HIVE-13957 : vectorized IN is inconsistent with non-vectorized (at least for decimal in (string)) (Sergey Shelukhin, reviewed by Matt McCline)

Conflicts:

ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
ql/src/test/results/clientpositive/spark/vector_between_in.q.out
ql/src/test/results/clientpositive/tez/vector_between_in.q.out
ql/src/test/results/clientpositive/vector_between_in.q.out


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/69440a62
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/69440a62
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/69440a62

Branch: refs/heads/branch-2.0
Commit: 69440a62af477e8c237030ca1ed0120c7dc7d787
Parents: 54760ab
Author: Sergey Shelukhin 
Authored: Mon Jun 13 18:32:12 2016 -0700
Committer: Sergey Shelukhin 
Committed: Mon Jun 13 18:48:14 2016 -0700

--
 .../ql/exec/vector/VectorizationContext.java|  30 --
 .../hive/ql/udf/generic/GenericUDFUtils.java|  52 +++--
 .../clientpositive/vector_string_decimal.q  |  21 
 .../spark/vector_between_in.q.out   |   2 -
 .../clientpositive/tez/vector_between_in.q.out  |   2 -
 .../clientpositive/vector_between_in.q.out  |   2 -
 .../clientpositive/vector_string_decimal.q.out  | 106 +++
 .../hive/serde2/typeinfo/HiveDecimalUtils.java  |   4 +-
 8 files changed, 192 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/69440a62/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 1eb960d..6601a87 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -494,8 +494,8 @@ public class VectorizationContext {
    * Given a udf and its children, return the common type to which the children's type should be
    * cast.
    */
-  private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf, List<ExprNodeDesc> children,
-      TypeInfo returnType) {
+  private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf,
+      List<ExprNodeDesc> children, TypeInfo returnType) throws HiveException {
     TypeInfo commonType;
     if (genericUdf instanceof GenericUDFBaseCompare) {
@@ -507,9 +507,20 @@ public class VectorizationContext {
         commonType = returnType;
       }
     } else if (genericUdf instanceof GenericUDFIn) {
-
-      // Cast to the type of the first child
-      return children.get(0).getTypeInfo();
+      TypeInfo colTi = children.get(0).getTypeInfo();
+      if (colTi.getCategory() != Category.PRIMITIVE) {
+        return colTi; // Handled later, only struct will be supported.
+      }
+      TypeInfo opTi = GenericUDFUtils.deriveInType(children);
+      if (opTi == null || opTi.getCategory() != Category.PRIMITIVE) {
+        throw new HiveException("Cannot vectorize IN() - common type is " + opTi);
+      }
+      if (((PrimitiveTypeInfo)colTi).getPrimitiveCategory() !=
+          ((PrimitiveTypeInfo)opTi).getPrimitiveCategory()) {
+        throw new HiveException("Cannot vectorize IN() - casting a column is not supported. "
+            + "Column type is " + colTi + " but the common type is " + opTi);
+      }
+      return colTi;
     } else {
       // The children type should be converted to return type
       commonType = returnType;
@@ -606,6 +617,7 @@ public class VectorizationContext {
     }
     PrimitiveTypeInfo ptinfo = (PrimitiveTypeInfo) inputTypeInfo;
     int precision = getPrecisionForType(ptinfo);
+    // TODO: precision and scale would be practically invalid for string conversion (38,38)
     int scale = HiveDecimalUtils.getScaleForType(ptinfo);
     return new DecimalTypeInfo(precision, scale);
   }
@@ -1496,8 +1508,8 @@ public class VectorizationContext {
   /**
    * Create a filter or boolean-valued expression for column IN ( <expr list> )
    */
-  private VectorExpression getInExpression(List<ExprNodeDesc> childExpr, Mode mode, TypeInfo returnType)
-      throws HiveException {
+  private VectorExpression getInExpression(List<ExprNodeDesc> childExpr,
+      VectorExpressionDescriptor.Mode mode, TypeInfo returnType) throws HiveException {
     ExprNodeDesc colExpr = childExpr.get(0);
     List<ExprNodeDesc> inChildren = childExpr.subList(1, childExpr.size());
 
@@ -1505,7 +1517,7 @@ public class VectorizationContext {
     colType = VectorizationContext.mapTypeNameSynonyms(colType)

hive git commit: HIVE-13827 : LLAPIF: authentication on the output channel (Sergey Shelukhin, reviewed by Jason Dere)

2016-06-13 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/master 59e6c83fd -> 0d69a88b9


HIVE-13827 : LLAPIF: authentication on the output channel (Sergey Shelukhin, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0d69a88b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0d69a88b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0d69a88b

Branch: refs/heads/master
Commit: 0d69a88b9f317e93194e08aee409f0b1b6ccab7c
Parents: 59e6c83
Author: Sergey Shelukhin 
Authored: Mon Jun 13 18:57:35 2016 -0700
Committer: Sergey Shelukhin 
Committed: Mon Jun 13 18:57:35 2016 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   4 +
 .../daemon/rpc/LlapDaemonProtocolProtos.java| 665 ++-
 .../hive/llap/security/SecretManager.java   |  11 +
 .../apache/hadoop/hive/llap/tez/Converters.java |  13 +
 .../src/protobuf/LlapDaemonProtocol.proto   |   6 +
 .../hadoop/hive/llap/LlapBaseInputFormat.java   |  32 +-
 .../hive/llap/daemon/impl/LlapDaemon.java   |  11 +-
 .../daemon/impl/LlapProtocolServerImpl.java |  19 +-
 .../impl/TestLlapDaemonProtocolServerImpl.java  |   2 +-
 .../hadoop/hive/llap/LlapOutputFormat.java  |   3 -
 .../hive/llap/LlapOutputFormatService.java  | 141 ++--
 .../hive/ql/exec/tez/MapRecordProcessor.java|  18 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |  11 +-
 .../ql/udf/generic/GenericUDTFGetSplits.java| 204 +++---
 .../hadoop/hive/llap/TestLlapOutputFormat.java  |  60 +-
 15 files changed, 971 insertions(+), 229 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0d69a88b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 285caa3..761dbb2 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2883,6 +2883,10 @@ public class HiveConf extends Configuration {
         "protocol or ZK paths), similar to how ssh refuses a key with bad access permissions."),
     LLAP_DAEMON_OUTPUT_SERVICE_PORT("hive.llap.daemon.output.service.port", 15003,
         "LLAP daemon output service port"),
+    LLAP_DAEMON_OUTPUT_STREAM_TIMEOUT("hive.llap.daemon.output.stream.timeout", "120s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "The timeout for the client to connect to LLAP output service and start the fragment\n" +
+        "output after sending the fragment. The fragment will fail if its output is not claimed."),
     LLAP_DAEMON_OUTPUT_SERVICE_SEND_BUFFER_SIZE("hive.llap.daemon.output.service.send.buffer.size",
         128 * 1024, "Send buffer size to be used by LLAP daemon output service"),
     LLAP_ENABLE_GRACE_JOIN_IN_LLAP("hive.llap.enable.grace.join.in.llap", false,

http://git-wip-us.apache.org/repos/asf/hive/blob/0d69a88b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
--
diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
index 856ea30..56a1361 100644
--- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
+++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java
@@ -16441,6 +16441,624 @@ public final class LlapDaemonProtocolProtos {
     // @@protoc_insertion_point(class_scope:GetTokenResponseProto)
   }
 
+  public interface LlapOutputSocketInitMessageOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string fragment_id = 1;
+    /**
+     * <code>required string fragment_id = 1;</code>
+     */
+    boolean hasFragmentId();
+    /**
+     * <code>required string fragment_id = 1;</code>
+     */
+    java.lang.String getFragmentId();
+    /**
+     * <code>required string fragment_id = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getFragmentIdBytes();
+
+    // optional bytes token = 2;
+    /**
+     * <code>optional bytes token = 2;</code>
+     */
+    boolean hasToken();
+    /**
+     * <code>optional bytes token = 2;</code>
+     */
+    com.google.protobuf.ByteString getToken();
+  }
+  /**
+   * Protobuf type {@code LlapOutputSocketInitMessage}
+   *
+   * <pre>
+   * The message sent by external client to claim the output from the output socket.
+   * </pre>
+   */
+  public static final class LlapOutputSocketInitMessage extends
+      com.google.protobuf.GeneratedMessage
+      impleme
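
A hedged client-side sketch of the claim handshake this message enables. The helper names and the delimited framing are assumptions; the builder methods follow the fragment_id/token fields declared above:

import java.io.OutputStream;
import java.net.Socket;
import com.google.protobuf.ByteString;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage;

public class OutputClaimSketch {
  // Connect to the LLAP daemon output service and claim a fragment's output.
  static Socket claim(String host, int port, String fragmentId, byte[] token)
      throws Exception {
    Socket sock = new Socket(host, port);
    LlapOutputSocketInitMessage.Builder init =
        LlapOutputSocketInitMessage.newBuilder().setFragmentId(fragmentId);
    if (token != null) {
      // The token is what the daemon authenticates on a secure cluster.
      init.setToken(ByteString.copyFrom(token));
    }
    OutputStream out = sock.getOutputStream();
    init.build().writeDelimitedTo(out); // framing here is an assumption
    out.flush();
    // The daemon verifies the token and then streams the fragment output
    // back on this same socket; note hive.llap.daemon.output.stream.timeout
    // bounds how long it waits for this claim to arrive.
    return sock;
  }
}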

hive git commit: HIVE-13903 : getFunctionInfo should register jars under a checks (Reviewed by Jesus Camacho Rodriguez)

2016-06-13 Thread amareshwari
Repository: hive
Updated Branches:
  refs/heads/master 0d69a88b9 -> b1ad0566e


HIVE-13903 : getFunctionInfo should register jars under a checks (Reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b1ad0566
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b1ad0566
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b1ad0566

Branch: refs/heads/master
Commit: b1ad0566e46733493344329b20545838fdb152a1
Parents: 0d69a88
Author: Rajat Khandelwal 
Authored: Tue Jun 14 12:25:24 2016 +0530
Committer: Amareshwari Sriramadasu 
Committed: Tue Jun 14 12:25:24 2016 +0530

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b1ad0566/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
index 86df74d..a4584e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
@@ -556,7 +556,16 @@ public class Registry {
     // and the current thread may not be able to resolve the UDF. Test for this condition
     // and if necessary load the JARs in this thread.
     if (isNative && info != null && info.isPersistent()) {
-      return registerToSessionRegistry(qualifiedName, info);
+      Class<?> functionClass;
+      try {
+        functionClass = info.getFunctionClass();
+      } catch (Exception e) {
+        return registerToSessionRegistry(qualifiedName, info);
+      }
+      if (functionClass == null) {
+        return registerToSessionRegistry(qualifiedName, info);
+      }
+      return info;
     }
     if (info != null || !isNative) {
       return info; // We have the UDF, or we are in the session registry (or both).


hive git commit: HIVE-13903 : getFunctionInfo should register jars under a checks (Reviewed by Jesus Camacho Rodriguez)

2016-06-13 Thread amareshwari
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 397a51143 -> 0b7780606


HIVE-13903 : getFunctionInfo should register jars under a checks (Reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0b778060
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0b778060
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0b778060

Branch: refs/heads/branch-2.1
Commit: 0b7780606570e062459b19a275b6a117bf171a6b
Parents: 397a511
Author: Rajat Khandelwal 
Authored: Tue Jun 14 12:25:24 2016 +0530
Committer: Amareshwari Sriramadasu 
Committed: Tue Jun 14 12:26:11 2016 +0530

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0b778060/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
index 891514b..42bbab0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
@@ -556,7 +556,16 @@ public class Registry {
     // and the current thread may not be able to resolve the UDF. Test for this condition
     // and if necessary load the JARs in this thread.
     if (isNative && info != null && info.isPersistent()) {
-      return registerToSessionRegistry(qualifiedName, info);
+      Class<?> functionClass;
+      try {
+        functionClass = info.getFunctionClass();
+      } catch (Exception e) {
+        return registerToSessionRegistry(qualifiedName, info);
+      }
+      if (functionClass == null) {
+        return registerToSessionRegistry(qualifiedName, info);
+      }
+      return info;
     }
     if (info != null || !isNative) {
       return info; // We have the UDF, or we are in the session registry (or both).