hive git commit: HIVE-19632: Remove webapps directory from standalone jar (Prasanth Jayachandran reviewed by Thejas Nair)

2018-05-24 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/master a6832e605 -> c358ef5af


HIVE-19632: Remove webapps directory from standalone jar (Prasanth Jayachandran 
reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c358ef5a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c358ef5a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c358ef5a

Branch: refs/heads/master
Commit: c358ef5af146ec3c16430dee12a8fd014e963a71
Parents: a6832e6
Author: Prasanth Jayachandran 
Authored: Thu May 24 23:05:44 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Thu May 24 23:05:44 2018 -0700

--
 druid-handler/pom.xml | 1 +
 jdbc/pom.xml          | 4 ++++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c358ef5a/druid-handler/pom.xml
--
diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
index 12ac6a7..c7e4457 100644
--- a/druid-handler/pom.xml
+++ b/druid-handler/pom.xml
@@ -372,6 +372,7 @@
                 <exclude>META-INF/*.SF</exclude>
                 <exclude>META-INF/*.DSA</exclude>
                 <exclude>META-INF/*.RSA</exclude>
+                <exclude>static/</exclude>
               </excludes>
             </filter>
           </filters>

http://git-wip-us.apache.org/repos/asf/hive/blob/c358ef5a/jdbc/pom.xml
--
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index a148c18..3f55e1b 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -185,6 +185,10 @@
                 <exclude>META-INF/*.DSA</exclude>
                 <exclude>META-INF/*.RSA</exclude>
                 <exclude>core-default.xml</exclude>
+                <exclude>webapps/</exclude>
+                <exclude>hive-webapps/</exclude>
+                <exclude>hbase-webapps/</exclude>
+                <exclude>static/</exclude>
               </excludes>
             </filter>
           </filters>

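A quick way to verify the fix is to scan the shaded jar for the filtered
directories; if the shade filters applied, none should remain. A minimal
sketch (not part of this commit; the jar path is an assumption for
illustration):

import java.util.jar.JarFile;

public class CheckShadedJar {
  public static void main(String[] args) throws Exception {
    // Open the standalone jar produced by the shade plugin (path assumed).
    try (JarFile jar = new JarFile("jdbc/target/hive-jdbc-standalone.jar")) {
      jar.stream()
          .map(e -> e.getName())
          .filter(n -> n.startsWith("webapps/") || n.startsWith("hive-webapps/")
              || n.startsWith("hbase-webapps/") || n.startsWith("static/"))
          .forEach(n -> System.out.println("unexpected entry: " + n));
    }
  }
}

No output means the webapps and static resources were excluded as intended.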


hive git commit: HIVE-19632: Remove webapps directory from standalone jar (Prasanth Jayachandran reviewed by Thejas Nair)

2018-05-24 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-3 fe3b15ee8 -> a93d1bd66


HIVE-19632: Remove webapps directory from standalone jar (Prasanth Jayachandran 
reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a93d1bd6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a93d1bd6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a93d1bd6

Branch: refs/heads/branch-3
Commit: a93d1bd6665d921341ec145b63747f78960e0b78
Parents: fe3b15e
Author: Prasanth Jayachandran 
Authored: Thu May 24 23:05:44 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Thu May 24 23:06:07 2018 -0700

--
 druid-handler/pom.xml | 1 +
 jdbc/pom.xml          | 4 ++++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a93d1bd6/druid-handler/pom.xml
--
diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
index 33bc928..d35a86b 100644
--- a/druid-handler/pom.xml
+++ b/druid-handler/pom.xml
@@ -372,6 +372,7 @@
                 <exclude>META-INF/*.SF</exclude>
                 <exclude>META-INF/*.DSA</exclude>
                 <exclude>META-INF/*.RSA</exclude>
+                <exclude>static/</exclude>
               </excludes>
             </filter>
           </filters>

http://git-wip-us.apache.org/repos/asf/hive/blob/a93d1bd6/jdbc/pom.xml
--
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index 3c23a75..1139a2c 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -185,6 +185,10 @@
                 <exclude>META-INF/*.DSA</exclude>
                 <exclude>META-INF/*.RSA</exclude>
                 <exclude>core-default.xml</exclude>
+                <exclude>webapps/</exclude>
+                <exclude>hive-webapps/</exclude>
+                <exclude>hbase-webapps/</exclude>
+                <exclude>static/</exclude>
               </excludes>
             </filter>
           </filters>



hive git commit: HIVE-19557: stats: filters for dates are not taking advantage of min/max values (Zoltan Haindrich reviewed by Ashutosh Chauhan)

2018-05-24 Thread kgyrtkirk
Repository: hive
Updated Branches:
  refs/heads/master c4c53a646 -> a6832e605


HIVE-19557: stats: filters for dates are not taking advantage of min/max values 
(Zoltan Haindrich reviewed by Ashutosh Chauhan)

Signed-off-by: Zoltan Haindrich 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a6832e60
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a6832e60
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a6832e60

Branch: refs/heads/master
Commit: a6832e60526585ba859ae0ce38c14ea9014a0bb5
Parents: c4c53a6
Author: Zoltan Haindrich 
Authored: Fri May 25 07:00:19 2018 +0200
Committer: Zoltan Haindrich 
Committed: Fri May 25 07:00:19 2018 +0200

--
 .../test/resources/testconfiguration.properties |   1 +
 .../stats/annotation/StatsRulesProcFactory.java |  10 +-
 .../clientpositive/colstats_date_min_max.q  |  30 +++
 .../llap/colstats_date_min_max.q.out| 193 +++
 .../clientpositive/llap/vector_between_in.q.out |  20 +-
 5 files changed, 243 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a6832e60/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index dcdb0fa..d146f92 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -489,6 +489,7 @@ minillaplocal.query.files=\
   cbo_subq_not_in.q,\
   column_table_stats.q,\
   column_table_stats_orc.q,\
+  colstats_date_min_max.q,\
   compare_double_bigint_2.q,\
   constprog_dpp.q,\
   current_date_timestamp.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/a6832e60/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 345595b..c59128d 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -100,6 +100,8 @@ import 
org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
 import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.io.DateWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
@@ -751,8 +753,14 @@ public class StatsRulesProcFactory {
 }
   } else if (colTypeLowerCase.equals(serdeConstants.INT_TYPE_NAME) ||
   colTypeLowerCase.equals(serdeConstants.DATE_TYPE_NAME)) {
+int value;
+if (colTypeLowerCase == serdeConstants.DATE_TYPE_NAME) {
+  DateWritable writableVal = new 
DateWritable(java.sql.Date.valueOf(boundValue));
+  value = writableVal.getDays();
+} else {
+  value = new Integer(boundValue);
+}
 // Date is an integer internally
-int value = new Integer(boundValue);
 int maxValue = cs.getRange().maxValue.intValue();
 int minValue = cs.getRange().minValue.intValue();
 if (upperBound) {

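The key step above converts the date bound to its epoch-day integer, the
same representation Hive keeps for DATE column min/max, so the existing
integer range pruning applies unchanged. A minimal sketch of that
conversion (illustrative, not part of the patch):

import org.apache.hadoop.hive.serde2.io.DateWritable;

public class DateBoundSketch {
  public static void main(String[] args) {
    // '2010-03-01' becomes days since 1970-01-01; comparing this value
    // against the column's min/max decides whether the filter can prune.
    int days = new DateWritable(java.sql.Date.valueOf("2010-03-01")).getDays();
    System.out.println(days); // 14669
  }
}
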
http://git-wip-us.apache.org/repos/asf/hive/blob/a6832e60/ql/src/test/queries/clientpositive/colstats_date_min_max.q
--
diff --git a/ql/src/test/queries/clientpositive/colstats_date_min_max.q 
b/ql/src/test/queries/clientpositive/colstats_date_min_max.q
new file mode 100644
index 000..7f5be6a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/colstats_date_min_max.q
@@ -0,0 +1,30 @@
+set hive.explain.user=true;
+
+create table d1(d date);
+--  tblproperties('transactional'='false');
+
+insert into d1 values
+   ('2010-10-01'),
+   ('2010-10-02'),
+   ('2010-10-03'),
+   ('2010-10-04'),
+   ('2010-10-05'),
+   ('2010-10-06'),
+   ('2010-10-07'),
+   ('2010-10-08'),
+   ('2010-10-09'),
+   ('2010-10-10');
+
+analyze table d1 compute statistics for columns;
+
+desc formatted d1;
+desc formatted d1 d;
+
+explain
+select 'stats: FIL ~0 read',count(1) from d1 where d < '2010-03-01';
+
+explain
+selec

hive git commit: HIVE-19390 : Useless error messages logged for dummy table stats (Ashutosh Chauhan via Jesus Camacho Rodriguez)

2018-05-24 Thread vgarg
Repository: hive
Updated Branches:
  refs/heads/branch-3 b38bef33f -> fe3b15ee8


HIVE-19390 : Useless error messages logged for dummy table stats (Ashutosh 
Chauhan via Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fe3b15ee
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fe3b15ee
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fe3b15ee

Branch: refs/heads/branch-3
Commit: fe3b15ee833eb84de43fae1018ff476ab93a7881
Parents: b38bef3
Author: Ashutosh Chauhan 
Authored: Wed May 2 17:32:00 2018 -0700
Committer: Vineet Garg 
Committed: Thu May 24 21:44:52 2018 -0700

--
 .../java/org/apache/hadoop/hive/ql/stats/StatsUtils.java | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/fe3b15ee/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index cef87f5..952b4ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ColumnStatsList;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.ql.plan.ColStatistics;
 import org.apache.hadoop.hive.ql.plan.ColStatistics.Range;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -1054,8 +1055,8 @@ public class StatsUtils {
   cs.setAvgColLen(getAvgColLenOf(conf,cinfo.getObjectInspector(), 
cinfo.getTypeName()));
 } else if (colTypeLowerCase.equals(serdeConstants.BOOLEAN_TYPE_NAME)) {
 cs.setCountDistint(2);
-cs.setNumTrues(Math.max(1, (long)numRows/2));
-cs.setNumFalses(Math.max(1, (long)numRows/2));
+cs.setNumTrues(Math.max(1, numRows/2));
+cs.setNumFalses(Math.max(1, numRows/2));
 cs.setAvgColLen(JavaDataModel.get().primitive1());
 } else if (colTypeLowerCase.equals(serdeConstants.TIMESTAMP_TYPE_NAME) ||
 colTypeLowerCase.equals(serdeConstants.TIMESTAMPLOCALTZ_TYPE_NAME)) {
@@ -1117,6 +1118,12 @@ public class StatsUtils {
 // Retrieve stats from metastore
 String dbName = table.getDbName();
 String tabName = table.getTableName();
+if (SemanticAnalyzer.DUMMY_DATABASE.equals(dbName) &&
+SemanticAnalyzer.DUMMY_TABLE.equals(tabName)) {
+  // insert into values gets written into insert from select dummy_table
+  // This table is dummy and has no stats
+  return null;
+}
 List stats = null;
 try {
   List colStat = Hive.get().getTableColumnStatistics(



hive git commit: HIVE-19706: Disable TestJdbcWithMiniHS2#testHttpRetryOnServerIdleTimeout (Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)

2018-05-24 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/branch-3 57167734c -> b38bef33f


HIVE-19706: Disable TestJdbcWithMiniHS2#testHttpRetryOnServerIdleTimeout (Jesus 
Camacho Rodriguez, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b38bef33
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b38bef33
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b38bef33

Branch: refs/heads/branch-3
Commit: b38bef33f8d7fd8bf7024b19141b54cb3f60a869
Parents: 5716773
Author: Jesus Camacho Rodriguez 
Authored: Thu May 24 19:22:57 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 19:57:07 2018 -0700

--
 .../src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b38bef33/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index f45fb50..0919390 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -82,6 +82,7 @@ import org.datanucleus.AbstractNucleusContext;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestJdbcWithMiniHS2 {
@@ -1021,6 +1022,7 @@ public class TestJdbcWithMiniHS2 {
* Test for jdbc driver retry on NoHttpResponseException
* @throws Exception
*/
+  @Ignore("Flaky test. Should be re-enabled in HIVE-19706")
   @Test
   public void testHttpRetryOnServerIdleTimeout() throws Exception {
 // Stop HiveServer2

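For reference, the pattern applied here: JUnit 4's @Ignore on a @Test method
keeps the test compiled and listed by the runner but skips execution, and the
reason string is carried into the reports. A minimal sketch:

import org.junit.Ignore;
import org.junit.Test;

public class FlakySketch {
  @Ignore("Flaky test. Should be re-enabled in HIVE-19706")
  @Test
  public void testSometimesFails() {
    // never runs until the @Ignore annotation is removed
  }
}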


hive git commit: HIVE-19706: Disable TestJdbcWithMiniHS2#testHttpRetryOnServerIdleTimeout (Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)

2018-05-24 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 4e4ab7aa3 -> c4c53a646


HIVE-19706: Disable TestJdbcWithMiniHS2#testHttpRetryOnServerIdleTimeout (Jesus 
Camacho Rodriguez, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c4c53a64
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c4c53a64
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c4c53a64

Branch: refs/heads/master
Commit: c4c53a646790ff25f127d4641de430c31635eab6
Parents: 4e4ab7a
Author: Jesus Camacho Rodriguez 
Authored: Thu May 24 19:22:57 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 19:56:34 2018 -0700

--
 .../src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c4c53a64/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index f52338a..a5c41bc 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -82,6 +82,7 @@ import org.datanucleus.AbstractNucleusContext;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestJdbcWithMiniHS2 {
@@ -1021,6 +1022,7 @@ public class TestJdbcWithMiniHS2 {
* Test for jdbc driver retry on NoHttpResponseException
* @throws Exception
*/
+  @Ignore("Flaky test. Should be re-enabled in HIVE-19706")
   @Test
   public void testHttpRetryOnServerIdleTimeout() throws Exception {
 // Stop HiveServer2



hive git commit: HIVE-19588: Several invocation of file listing when creating VectorizedOrcAcidRowBatchReader (Prasanth Jayachandran reviewed by Eugene Koifman)

2018-05-24 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-3 ffd1b7c0d -> 57167734c


HIVE-19588: Several invocation of file listing when creating 
VectorizedOrcAcidRowBatchReader (Prasanth Jayachandran reviewed by Eugene 
Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/57167734
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/57167734
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/57167734

Branch: refs/heads/branch-3
Commit: 57167734c74b810fdd0e41234d51b4744939a7e8
Parents: ffd1b7c
Author: Prasanth Jayachandran 
Authored: Thu May 24 19:11:48 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Thu May 24 19:13:01 2018 -0700

--
 .../hive/llap/io/api/impl/LlapRecordReader.java | 15 +++--
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 24 +--
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  7 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java  | 10 ++-
 .../io/orc/VectorizedOrcAcidRowBatchReader.java | 28 +---
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 70 
 .../TestVectorizedOrcAcidRowBatchReader.java|  2 +-
 7 files changed, 120 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/57167734/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
index 7451ea4..6897336 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
@@ -86,6 +86,8 @@ class LlapRecordReader
   private final IncludesImpl includes;
   private final SearchArgument sarg;
   private final VectorizedRowBatchCtx rbCtx;
+  private final boolean isVectorized;
+  private VectorizedOrcAcidRowBatchReader acidReader;
   private final Object[] partitionValues;
 
   private final LinkedBlockingQueue queue;
@@ -174,6 +176,12 @@ class LlapRecordReader
   partitionValues = null;
 }
 
+this.isVectorized = HiveConf.getBoolVar(jobConf, 
HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
+if (isAcidScan) {
+  this.acidReader = new VectorizedOrcAcidRowBatchReader((OrcSplit) split, 
jobConf, Reporter.NULL, null, rbCtx,
+true);
+}
+
 // Create the consumer of encoded data; it will coordinate decoding to 
CVBs.
 feedback = rp = cvp.createReadPipeline(this, split, includes, sarg, 
counters, includes,
 sourceInputFormat, sourceSerDe, reporter, job, 
mapWork.getPathToPartitionInfo());
@@ -309,8 +317,6 @@ class LlapRecordReader
   counters.incrTimeCounter(LlapIOCounters.CONSUMER_TIME_NS, 
firstReturnTime);
   return false;
 }
-final boolean isVectorized = HiveConf.getBoolVar(jobConf,
-HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
 if (isAcidScan) {
   vrb.selectedInUse = true;
   if (isVectorized) {
@@ -329,10 +335,7 @@ class LlapRecordReader
   inputVrb.cols[ixInVrb] = cvb.cols[ixInReadSet];
 }
 inputVrb.size = cvb.size;
-// TODO: reuse between calls
-@SuppressWarnings("resource")
-VectorizedOrcAcidRowBatchReader acidReader = new 
VectorizedOrcAcidRowBatchReader(
-(OrcSplit)split, jobConf, Reporter.NULL, new 
AcidWrapper(inputVrb), rbCtx, true);
+acidReader.setBaseAndInnerReader(new AcidWrapper(inputVrb));
 acidReader.next(NullWritable.get(), vrb);
   } else {
  // TODO: WTF? The old code seems to just drop the ball here.

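The shape of the fix, sketched with hypothetical stand-in types (the real
classes are OrcSplit, VectorizedOrcAcidRowBatchReader, and AcidWrapper in the
diff above): an object whose constructor performs file listing is built once
and rebound per batch, instead of being re-constructed on every next() call.

class Split {}
class Batch {}

class AcidReader { // hypothetical stand-in
  AcidReader(Split split) { /* expensive: lists delta/base files */ }
  void rebind(Batch batch) { /* cheap: swap in the next batch source */ }
  boolean read() { return true; }
}

class BatchReader {
  private final AcidReader acidReader; // built once in the constructor

  BatchReader(Split split) {
    this.acidReader = new AcidReader(split); // one file listing total
  }

  boolean next(Batch batch) {
    acidReader.rebind(batch); // no per-batch construction, no re-listing
    return acidReader.read();
  }

  public static void main(String[] args) {
    BatchReader r = new BatchReader(new Split());
    r.next(new Batch()); // the constructor cost was paid once, not here
  }
}
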
http://git-wip-us.apache.org/repos/asf/hive/blob/57167734/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 4b9a8a1..51a793f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -313,6 +313,21 @@ public class AcidUtils {
   }
 
   /**
+   * Get the bucket id from the file path
+   * @param bucketFile - bucket file path
+   * @return - bucket id
+   */
+  public static int parseBucketId(Path bucketFile) {
+String filename = bucketFile.getName();
+if (ORIGINAL_PATTERN.matcher(filename).matches() || 
ORIGINAL_PATTERN_COPY.matcher(filename).matches()) {
+  return Integer.parseInt(filename.substring(0, filename.indexOf('_')));
+} else if (filename.startsWith(BUCKET_PREFIX)) {
+  return Integer.parseInt(filename.substring(filename.indexOf('_') + 1));
+}
+return -1;

hive git commit: HIVE-19588: Several invocation of file listing when creating VectorizedOrcAcidRowBatchReader (Prasanth Jayachandran reviewed by Eugene Koifman)

2018-05-24 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-3.0 8230a1d4a -> 6112d57e3


HIVE-19588: Several invocation of file listing when creating 
VectorizedOrcAcidRowBatchReader (Prasanth Jayachandran reviewed by Eugene 
Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6112d57e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6112d57e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6112d57e

Branch: refs/heads/branch-3.0
Commit: 6112d57e36b87e21995aa6347ee83c677074e733
Parents: 8230a1d
Author: Prasanth Jayachandran 
Authored: Thu May 24 19:11:48 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Thu May 24 19:13:27 2018 -0700

--
 .../hive/llap/io/api/impl/LlapRecordReader.java | 15 +++--
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 24 +--
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  7 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java  | 10 ++-
 .../io/orc/VectorizedOrcAcidRowBatchReader.java | 28 +---
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 70 
 .../TestVectorizedOrcAcidRowBatchReader.java|  2 +-
 7 files changed, 120 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/6112d57e/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
index 7451ea4..6897336 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
@@ -86,6 +86,8 @@ class LlapRecordReader
   private final IncludesImpl includes;
   private final SearchArgument sarg;
   private final VectorizedRowBatchCtx rbCtx;
+  private final boolean isVectorized;
+  private VectorizedOrcAcidRowBatchReader acidReader;
   private final Object[] partitionValues;
 
   private final LinkedBlockingQueue queue;
@@ -174,6 +176,12 @@ class LlapRecordReader
   partitionValues = null;
 }
 
+this.isVectorized = HiveConf.getBoolVar(jobConf, 
HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
+if (isAcidScan) {
+  this.acidReader = new VectorizedOrcAcidRowBatchReader((OrcSplit) split, 
jobConf, Reporter.NULL, null, rbCtx,
+true);
+}
+
 // Create the consumer of encoded data; it will coordinate decoding to 
CVBs.
 feedback = rp = cvp.createReadPipeline(this, split, includes, sarg, 
counters, includes,
 sourceInputFormat, sourceSerDe, reporter, job, 
mapWork.getPathToPartitionInfo());
@@ -309,8 +317,6 @@ class LlapRecordReader
   counters.incrTimeCounter(LlapIOCounters.CONSUMER_TIME_NS, 
firstReturnTime);
   return false;
 }
-final boolean isVectorized = HiveConf.getBoolVar(jobConf,
-HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
 if (isAcidScan) {
   vrb.selectedInUse = true;
   if (isVectorized) {
@@ -329,10 +335,7 @@ class LlapRecordReader
   inputVrb.cols[ixInVrb] = cvb.cols[ixInReadSet];
 }
 inputVrb.size = cvb.size;
-// TODO: reuse between calls
-@SuppressWarnings("resource")
-VectorizedOrcAcidRowBatchReader acidReader = new 
VectorizedOrcAcidRowBatchReader(
-(OrcSplit)split, jobConf, Reporter.NULL, new 
AcidWrapper(inputVrb), rbCtx, true);
+acidReader.setBaseAndInnerReader(new AcidWrapper(inputVrb));
 acidReader.next(NullWritable.get(), vrb);
   } else {
  // TODO: WTF? The old code seems to just drop the ball here.

http://git-wip-us.apache.org/repos/asf/hive/blob/6112d57e/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 183515a..e7288f8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -313,6 +313,21 @@ public class AcidUtils {
   }
 
   /**
+   * Get the bucket id from the file path
+   * @param bucketFile - bucket file path
+   * @return - bucket id
+   */
+  public static int parseBucketId(Path bucketFile) {
+String filename = bucketFile.getName();
+if (ORIGINAL_PATTERN.matcher(filename).matches() || 
ORIGINAL_PATTERN_COPY.matcher(filename).matches()) {
+  return Integer.parseInt(filename.substring(0, filename.indexOf('_')));
+} else if (filename.startsWith(BUCKET_PREFIX)) {
+  return Integer.parseInt(filename.substring(filename.indexOf('_') + 1));
+}
+return -1;

hive git commit: HIVE-19588: Several invocation of file listing when creating VectorizedOrcAcidRowBatchReader (Prasanth Jayachandran reviewed by Eugene Koifman)

2018-05-24 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/master fc040d52c -> 4e4ab7aa3


HIVE-19588: Several invocation of file listing when creating 
VectorizedOrcAcidRowBatchReader (Prasanth Jayachandran reviewed by Eugene 
Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4e4ab7aa
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4e4ab7aa
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4e4ab7aa

Branch: refs/heads/master
Commit: 4e4ab7aa3ada0300c3f732ef5fc48c83ebb1ab47
Parents: fc040d5
Author: Prasanth Jayachandran 
Authored: Thu May 24 19:11:48 2018 -0700
Committer: Prasanth Jayachandran 
Committed: Thu May 24 19:11:48 2018 -0700

--
 .../hive/llap/io/api/impl/LlapRecordReader.java | 15 +++--
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 24 +--
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  7 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java  | 10 ++-
 .../io/orc/VectorizedOrcAcidRowBatchReader.java | 28 +---
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 70 
 .../TestVectorizedOrcAcidRowBatchReader.java|  2 +-
 7 files changed, 120 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4e4ab7aa/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
--
diff --git 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
index 7451ea4..6897336 100644
--- 
a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
+++ 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
@@ -86,6 +86,8 @@ class LlapRecordReader
   private final IncludesImpl includes;
   private final SearchArgument sarg;
   private final VectorizedRowBatchCtx rbCtx;
+  private final boolean isVectorized;
+  private VectorizedOrcAcidRowBatchReader acidReader;
   private final Object[] partitionValues;
 
   private final LinkedBlockingQueue queue;
@@ -174,6 +176,12 @@ class LlapRecordReader
   partitionValues = null;
 }
 
+this.isVectorized = HiveConf.getBoolVar(jobConf, 
HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
+if (isAcidScan) {
+  this.acidReader = new VectorizedOrcAcidRowBatchReader((OrcSplit) split, 
jobConf, Reporter.NULL, null, rbCtx,
+true);
+}
+
 // Create the consumer of encoded data; it will coordinate decoding to 
CVBs.
 feedback = rp = cvp.createReadPipeline(this, split, includes, sarg, 
counters, includes,
 sourceInputFormat, sourceSerDe, reporter, job, 
mapWork.getPathToPartitionInfo());
@@ -309,8 +317,6 @@ class LlapRecordReader
   counters.incrTimeCounter(LlapIOCounters.CONSUMER_TIME_NS, 
firstReturnTime);
   return false;
 }
-final boolean isVectorized = HiveConf.getBoolVar(jobConf,
-HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
 if (isAcidScan) {
   vrb.selectedInUse = true;
   if (isVectorized) {
@@ -329,10 +335,7 @@ class LlapRecordReader
   inputVrb.cols[ixInVrb] = cvb.cols[ixInReadSet];
 }
 inputVrb.size = cvb.size;
-// TODO: reuse between calls
-@SuppressWarnings("resource")
-VectorizedOrcAcidRowBatchReader acidReader = new 
VectorizedOrcAcidRowBatchReader(
-(OrcSplit)split, jobConf, Reporter.NULL, new 
AcidWrapper(inputVrb), rbCtx, true);
+acidReader.setBaseAndInnerReader(new AcidWrapper(inputVrb));
 acidReader.next(NullWritable.get(), vrb);
   } else {
  // TODO: WTF? The old code seems to just drop the ball here.

http://git-wip-us.apache.org/repos/asf/hive/blob/4e4ab7aa/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index d84d0ee..7fce67f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -315,6 +315,21 @@ public class AcidUtils {
   }
 
   /**
+   * Get the bucket id from the file path
+   * @param bucketFile - bucket file path
+   * @return - bucket id
+   */
+  public static int parseBucketId(Path bucketFile) {
+String filename = bucketFile.getName();
+if (ORIGINAL_PATTERN.matcher(filename).matches() || 
ORIGINAL_PATTERN_COPY.matcher(filename).matches()) {
+  return Integer.parseInt(filename.substring(0, filename.indexOf('_')));
+} else if (filename.startsWith(BUCKET_PREFIX)) {
+  return Integer.parseInt(filename.substring(filename.indexOf('_') + 1));
+}
+return -1;
+  }

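A usage sketch for the new AcidUtils.parseBucketId helper (paths are
illustrative; the behavior follows the implementation above):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidUtils;

public class BucketIdSketch {
  public static void main(String[] args) {
    // delta/base files are named bucket_NNNNN: the id follows the underscore
    System.out.println(AcidUtils.parseBucketId(
        new Path("/warehouse/t/delta_0000001_0000001/bucket_00003"))); // 3
    // original files are named NNNNN_M: the id precedes the first underscore
    System.out.println(AcidUtils.parseBucketId(
        new Path("/warehouse/t/00003_0"))); // 3
    // any other name yields -1
  }
}
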
[01/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/branch-3 678e9fee1 -> ffd1b7c0d


http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/skewjoinopt5.q
--
diff --git a/ql/src/test/queries/clientpositive/skewjoinopt5.q 
b/ql/src/test/queries/clientpositive/skewjoinopt5.q
index 3024903..6469e29 100644
--- a/ql/src/test/queries/clientpositive/skewjoinopt5.q
+++ b/ql/src/test/queries/clientpositive/skewjoinopt5.q
@@ -1,22 +1,22 @@
 set hive.mapred.mode=nonstrict;
 set hive.optimize.skewjoin.compiletime = true;
 
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n100(key STRING, val STRING)
 SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n100;
 
-CREATE TABLE T2(key STRING, val STRING)
+CREATE TABLE T2_n63(key STRING, val STRING)
 SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n63;
 
 -- One of the tables is skewed by 2 columns, and the other table is
 -- skewed by one column. Ths join is performed on the first skewed column
 -- adding a order by at the end to make the results deterministic
 
 EXPLAIN
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key;
+SELECT a.*, b.* FROM T1_n100 a JOIN T2_n63 b ON a.key = b.key;
 
-SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key
+SELECT a.*, b.* FROM T1_n100 a JOIN T2_n63 b ON a.key = b.key
 ORDER BY a.key, b.key, a.val, b.val;



[52/58] [abbrv] hive git commit: HIVE-19637: Add slow test report script to testutils (Prasanth Jayachandran reviewed by Jesus Camacho Rodriguez)

2018-05-24 Thread jcamacho
HIVE-19637: Add slow test report script to testutils (Prasanth Jayachandran 
reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fa6bad55
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fa6bad55
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fa6bad55

Branch: refs/heads/branch-3
Commit: fa6bad55ed612dff98488deb3f7bc6c4b2fdcf22
Parents: 9bf28a3
Author: Prasanth Jayachandran 
Authored: Mon May 21 13:51:44 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 17:54:28 2018 -0700

--
 testutils/gen-report.py | 240 +++
 1 file changed, 240 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/fa6bad55/testutils/gen-report.py
--
diff --git a/testutils/gen-report.py b/testutils/gen-report.py
new file mode 100644
index 000..791da5c
--- /dev/null
+++ b/testutils/gen-report.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from BeautifulSoup import BeautifulSoup
+import urllib2
+import xmltodict
+import json
+import Queue
+from threading import Thread
+from collections import OrderedDict
+import itertools
+from ascii_graph import Pyasciigraph
+import sys
+import argparse
+import os
+
+# default build that is used against apache hive precommit test report
+REPORTS_DIR = "/tmp/slow-test-reports"
+BUILD_NUMBER = 830
+TOP_K = 25
+json_dumps = []
+
+# parallel xml report downloader
+class ReportDownloader(Thread):
+   def __init__(self, q):
+   Thread.__init__(self)
+   self.q = q
+
+   def run(self):
+   while True:
+   # Get the work from the queue and expand the tuple
+   link = self.q.get()
+   xmlFile = urllib2.urlopen(link)
+   xmlData = xmlFile.read()
+   xmlSoup = BeautifulSoup(xmlData)
+   d = xmltodict.parse(xmlData, xml_attribs=True)
+   d['testsuite'].pop('properties', None)
+   json_dumps.append(d)
+   self.q.task_done()
+
+def get_links(rootUrl):
+   html_page = urllib2.urlopen(rootUrl)
+   soup = BeautifulSoup(html_page)
+   result = []
+   for link in soup.findAll('a'):
+   hrefs = link.get('href')
+   if hrefs.endswith('.xml'):
+   result.append(rootUrl + "/" + hrefs)
+   
+   return result
+
+def take(iterable, n=TOP_K):
+return list(itertools.islice(iterable, 0, n))
+
+def plot_testsuite_time(json_data, top_k=TOP_K, ascii_graph=False, 
report_file=None):
+   suite_time = {}
+   
+   overall_time = 0.0
+   for suite in json_data:
+   name = suite['testsuite']['@name'].rsplit(".",1)[-1]
+   time = float(suite['testsuite']['@time'].replace(',',''))
+   overall_time += time
+   if name in suite_time:
+   total_time = suite_time[name]
+   suite_time[name] = total_time + time
+   else:
+   suite_time[name] = time
+
+   d_descending = OrderedDict(sorted(suite_time.items(), 
+  key=lambda kv: kv[1], reverse=True))
+
+   gdata = []
+   for k,v in take(d_descending.iteritems(), top_k):
+   gdata.append((k, v))
+
+   print '\nTop ' + str(top_k) + ' testsuite in terms of execution time 
(in seconds).. [Total time: ' + str(overall_time) + ' seconds]'
+   if ascii_graph:
+   graph = Pyasciigraph()
+   for line in  graph.graph('', gdata):
+   print line
+   else:
+   for line in gdata:
+   print line[0] + "\t" + str(line[1])
+
+   if report_file != None:
+   with open(report_file, "w") as f:
+   f.write('Top ' + str(top_k) + ' tes

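The aggregation at the heart of the script, re-sketched in Java for
reference (suite names and times are made up): sum execution time per test
suite, sort descending, and print the top-K slowest.

import java.util.Comparator;
import java.util.Map;

public class SlowTestSketch {
  public static void main(String[] args) {
    // suite name -> total execution time in seconds (made-up numbers)
    Map<String, Double> suiteTime = Map.of(
        "TestCliDriver", 5400.0,
        "TestMiniLlapLocalCliDriver", 7200.0,
        "TestJdbcWithMiniHS2", 300.5);
    int topK = 2;
    suiteTime.entrySet().stream()
        .sorted(Map.Entry.<String, Double>comparingByValue(Comparator.reverseOrder()))
        .limit(topK)
        .forEach(e -> System.out.println(e.getKey() + "\t" + e.getValue()));
  }
}
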
[37/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
--
diff --git a/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q 
b/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
index 2aa4665..e7d031b 100644
--- a/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
+++ b/ql/src/test/queries/clientpositive/enforce_constraint_notnull.q
@@ -1,108 +1,108 @@
 --  SIMPLE TABLE
 -- create table with first and last column with not null
-CREATE TABLE table1 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT NULL 
ENFORCED);
+CREATE TABLE table1_n7 (a STRING NOT NULL ENFORCED, b STRING, c STRING NOT 
NULL ENFORCED);
 
 -- insert value tuples
-explain INSERT INTO table1 values('not', 'null', 'constraint');
-INSERT INTO table1 values('not', 'null', 'constraint');
-SELECT * FROM table1;
+explain INSERT INTO table1_n7 values('not', 'null', 'constraint');
+INSERT INTO table1_n7 values('not', 'null', 'constraint');
+SELECT * FROM table1_n7;
 
 -- insert with column specified
-explain insert into table1(a,c) values('1','2');
-insert into table1(a,c) values('1','2');
+explain insert into table1_n7(a,c) values('1','2');
+insert into table1_n7(a,c) values('1','2');
 
 -- insert from select
-explain INSERT INTO table1 select key, src.value, value from src;
-INSERT INTO table1 select key, src.value, value from src;
-SELECT * FROM table1;
+explain INSERT INTO table1_n7 select key, src.value, value from src;
+INSERT INTO table1_n7 select key, src.value, value from src;
+SELECT * FROM table1_n7;
 
 -- insert overwrite
-explain INSERT OVERWRITE TABLE table1 select src.*, value from src;
-INSERT OVERWRITE TABLE table1 select src.*, value from src;
-SELECT * FROM table1;
+explain INSERT OVERWRITE TABLE table1_n7 select src.*, value from src;
+INSERT OVERWRITE TABLE table1_n7 select src.*, value from src;
+SELECT * FROM table1_n7;
 
 -- insert overwrite with if not exists
-explain INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, 
src.value from src;
-INSERT OVERWRITE TABLE table1 if not exists select src.key, src.key, src.value 
from src;
-SELECT * FROM table1;
+explain INSERT OVERWRITE TABLE table1_n7 if not exists select src.key, 
src.key, src.value from src;
+INSERT OVERWRITE TABLE table1_n7 if not exists select src.key, src.key, 
src.value from src;
+SELECT * FROM table1_n7;
 
-DROP TABLE table1;
+DROP TABLE table1_n7;
 
 -- multi insert
-create table src_multi1 (a STRING NOT NULL ENFORCED, b STRING);
-create table src_multi2 (i STRING, j STRING NOT NULL ENABLE);
+create table src_multi1_n0 (a STRING NOT NULL ENFORCED, b STRING);
+create table src_multi2_n1 (i STRING, j STRING NOT NULL ENABLE);
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n0 select * where key < 10
+insert overwrite table src_multi2_n1 select * where key > 10 and key < 20;
 
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n0 select * where key < 10
+insert overwrite table src_multi2_n1 select * where key > 10 and key < 20;
 
 explain
 from src
-insert into table src_multi1 select * where src.key < 10
-insert into table src_multi2 select src.* where key > 10 and key < 20;
+insert into table src_multi1_n0 select * where src.key < 10
+insert into table src_multi2_n1 select src.* where key > 10 and key < 20;
 
 from src
-insert into table src_multi1 select * where src.key < 10
-insert into table src_multi2 select src.* where key > 10 and key < 20;
+insert into table src_multi1_n0 select * where src.key < 10
+insert into table src_multi2_n1 select src.* where key > 10 and key < 20;
 
 --  ACID TABLE
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
 -- SORT_QUERY_RESULTS
-create table acid_uami(i int,
+create table acid_uami_n1(i int,
  de decimal(5,2) constraint nn1 not null enforced,
  vc varchar(128) constraint nn2 not null enforced) clustered 
by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 -- insert into values
-explain insert into table acid_uami values
+explain insert into table acid_uami_n1 values
 (1, 109.23, 'mary had a little lamb'),
 (6553, 923.19, 'its fleece was white as snow');
-insert into table acid_uami values
+insert into table acid_uami_n1 values
 (1, 109.23, 'mary had a little lamb'),
 (6553, 923.19, 'its fleece was white as snow');
-select * from acid_uami;
+select * from acid_uami_n1;
 
  --insert into select
-explain insert into table acid_uami select cast(key as int), cast (key as 
decimal(5,2)), value from src;
-insert into table acid_uami select cast(

[58/58] [abbrv] hive git commit: HIVE-19697: TestReOptimization#testStatCachingMetaStore is flaky (Jesus Camacho Rodriguez, reviewed by Zoltan Haindrich)

2018-05-24 Thread jcamacho
HIVE-19697: TestReOptimization#testStatCachingMetaStore is flaky (Jesus Camacho 
Rodriguez, reviewed by Zoltan Haindrich)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ffd1b7c0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ffd1b7c0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ffd1b7c0

Branch: refs/heads/branch-3
Commit: ffd1b7c0d7308c721b06c62fb3b3d2d95c852094
Parents: c065d82
Author: Jesus Camacho Rodriguez 
Authored: Thu May 24 11:30:13 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 17:57:05 2018 -0700

--
 .../org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/ffd1b7c0/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java 
b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java
index cd2a46b..b945c60 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java
@@ -42,10 +42,12 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestRule;
 
+@Ignore("Flaky. Will be re-enabled by HIVE-19697")
 public class TestReOptimization {
 
   @ClassRule



[25/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q
--
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q 
b/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q
index 9ffae37..2964c83 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_5.q
@@ -7,291 +7,291 @@ set hive.strict.checks.cartesian.product=false;
 set hive.stats.fetch.column.stats=true;
 set hive.materializedview.rewriting=true;
 
-create table emps (
+create table emps_n2 (
   empid int,
   deptno int,
   name varchar(256),
   salary float,
   commission int)
 stored as orc TBLPROPERTIES ('transactional'='true');
-insert into emps values (100, 10, 'Bill', 1, 1000), (200, 20, 'Eric', 
8000, 500),
+insert into emps_n2 values (100, 10, 'Bill', 1, 1000), (200, 20, 'Eric', 
8000, 500),
   (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 1, 250), (110, 
10, 'Bill', 1, 250);
-analyze table emps compute statistics for columns;
+analyze table emps_n2 compute statistics for columns;
 
-create table depts (
+create table depts_n1 (
   deptno int,
   name varchar(256),
   locationid int)
 stored as orc TBLPROPERTIES ('transactional'='true');
-insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 
'HR', 20);
-analyze table depts compute statistics for columns;
+insert into depts_n1 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 
'HR', 20);
+analyze table depts_n1 compute statistics for columns;
 
-create table dependents (
+create table dependents_n1 (
   empid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
-insert into dependents values (10, 'Michael'), (10, 'Jane');
-analyze table dependents compute statistics for columns;
+insert into dependents_n1 values (10, 'Michael'), (10, 'Jane');
+analyze table dependents_n1 compute statistics for columns;
 
-create table locations (
+create table locations_n1 (
   locationid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
-insert into locations values (10, 'San Francisco'), (10, 'San Diego');
-analyze table locations compute statistics for columns;
+insert into locations_n1 values (10, 'San Francisco'), (10, 'San Diego');
+analyze table locations_n1 compute statistics for columns;
 
-alter table emps add constraint pk1 primary key (empid) disable novalidate 
rely;
-alter table depts add constraint pk2 primary key (deptno) disable novalidate 
rely;
-alter table dependents add constraint pk3 primary key (empid) disable 
novalidate rely;
-alter table locations add constraint pk4 primary key (locationid) disable 
novalidate rely;
+alter table emps_n2 add constraint pk1 primary key (empid) disable novalidate 
rely;
+alter table depts_n1 add constraint pk2 primary key (deptno) disable 
novalidate rely;
+alter table dependents_n1 add constraint pk3 primary key (empid) disable 
novalidate rely;
+alter table locations_n1 add constraint pk4 primary key (locationid) disable 
novalidate rely;
 
-alter table emps add constraint fk1 foreign key (deptno) references 
depts(deptno) disable novalidate rely;
-alter table depts add constraint fk2 foreign key (locationid) references 
locations(locationid) disable novalidate rely;
+alter table emps_n2 add constraint fk1 foreign key (deptno) references 
depts_n1(deptno) disable novalidate rely;
+alter table depts_n1 add constraint fk2 foreign key (locationid) references 
locations_n1(locationid) disable novalidate rely;
 
-alter table emps change column deptno deptno int constraint nn1 not null 
disable novalidate rely;
-alter table depts change column locationid locationid int constraint nn2 not 
null disable novalidate rely;
+alter table emps_n2 change column deptno deptno int constraint nn1 not null 
disable novalidate rely;
+alter table depts_n1 change column locationid locationid int constraint nn2 
not null disable novalidate rely;
 
 
 -- EXAMPLE 8
-create materialized view mv1 enable rewrite as
-select name, deptno, salary from emps where deptno > 15 group by name, deptno, 
salary;
-analyze table mv1 compute statistics for columns;
+create materialized view mv1_n1 enable rewrite as
+select name, deptno, salary from emps_n2 where deptno > 15 group by name, 
deptno, salary;
+analyze table mv1_n1 compute statistics for columns;
 
 explain
-select name from emps where deptno >= 20 group by name;
+select name from emps_n2 where deptno >= 20 group by name;
 
-select name from emps where deptno >= 20 group by name;
+select name from emps_n2 where deptno >= 20 group by name;
 
-drop materialized view mv1;
+drop materialized view mv1_n1;
 
 -- EXAMPLE 12
-create materialized view mv1 enable rewrite as
+create materialized view mv1_n1 enable rewrite as
 select name, deptno, salary, count(*) as c, sum(e

[03/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
--
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
index 3059604..5256597 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_table.q
@@ -18,9 +18,9 @@ set hive.llap.io.enabled=false;
 --  vectorized reading of TEXTFILE format files using the row SERDE methods.
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n24(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n24;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -29,51 +29,51 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n8(insert_num int, a INT, b STRING);
 
-insert into table table_add_int_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n8 SELECT insert_num, int1, 
'original' FROM schema_evolution_data_n24;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n8 add columns(c int);
 
-insert into table table_add_int_permute_select VALUES (111, 8, 'new', 
8);
+insert into table table_add_int_permute_select_n8 VALUES (111, 8, 'new', 
8);
 
 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n8;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n8;
+select insert_num,a,b,c from table_add_int_permute_select_n8;
+select insert_num,c from table_add_int_permute_select_n8;
 
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n8;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b 
STRING);
+CREATE TABLE table_add_int_string_permute_select_n8(insert_num int, a INT, b 
STRING);
 
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n8 SELECT insert_num, 
int1, 'original' FROM schema_evolution_data_n24;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n8 add columns(c int, d 
string);
 
-insert into table table_add_int_string_permute_select VALUES (111, 8, 
'new', 8, 'filler');
+insert into table table_add_int_string_permute_select_n8 VALUES (111, 8, 
'new', 8, 'filler');
 
 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n8;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_string_permute_select;
-select inse

[26/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q
--
diff --git 
a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q 
b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q
index 1d97325..141c92e 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q
@@ -4,121 +4,121 @@ set 
hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.strict.checks.cartesian.product=false;
 set hive.materializedview.rewriting=true;
 
-create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) 
stored as orc TBLPROPERTIES ('transactional'='true');
+create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) 
stored as orc TBLPROPERTIES ('transactional'='true');
 
-insert into cmv_basetable values
+insert into cmv_basetable_n6 values
  (1, 'alfred', 10.30, 2),
  (2, 'bob', 3.14, 3),
  (2, 'bonnie', 172342.2, 3),
  (3, 'calvin', 978.76, 3),
  (3, 'charlie', 9.8, 1);
 
-analyze table cmv_basetable compute statistics for columns;
+analyze table cmv_basetable_n6 compute statistics for columns;
 
-create table cmv_basetable_2 (a int, b varchar(256), c decimal(10,2), d int) 
stored as orc TBLPROPERTIES ('transactional'='true');
+create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d 
int) stored as orc TBLPROPERTIES ('transactional'='true');
 
-insert into cmv_basetable_2 values
+insert into cmv_basetable_2_n3 values
  (1, 'alfred', 10.30, 2),
  (3, 'calvin', 978.76, 3);
 
-analyze table cmv_basetable_2 compute statistics for columns;
+analyze table cmv_basetable_2_n3 compute statistics for columns;
 
-CREATE MATERIALIZED VIEW cmv_mat_view ENABLE REWRITE
+CREATE MATERIALIZED VIEW cmv_mat_view_n6 ENABLE REWRITE
   TBLPROPERTIES ('transactional'='true') AS
-  SELECT cmv_basetable.a, cmv_basetable_2.c
-  FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = 
cmv_basetable_2.a)
-  WHERE cmv_basetable_2.c > 10.0;
-analyze table cmv_mat_view compute statistics for columns;
+  SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c
+  FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = 
cmv_basetable_2_n3.a)
+  WHERE cmv_basetable_2_n3.c > 10.0;
+analyze table cmv_mat_view_n6 compute statistics for columns;
 
-insert into cmv_basetable_2 values
+insert into cmv_basetable_2_n3 values
  (3, 'charlie', 15.8, 1);
 
-analyze table cmv_basetable_2 compute statistics for columns;
+analyze table cmv_basetable_2_n3 compute statistics for columns;
 
 -- CANNOT USE THE VIEW, IT IS OUTDATED
 EXPLAIN
-SELECT cmv_basetable.a
-FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = 
cmv_basetable_2.a)
-WHERE cmv_basetable_2.c > 10.10;
+SELECT cmv_basetable_n6.a
+FROM cmv_basetable_n6 join cmv_basetable_2_n3 ON (cmv_basetable_n6.a = 
cmv_basetable_2_n3.a)
+WHERE cmv_basetable_2_n3.c > 10.10;
 
-SELECT cmv_basetable.a
-FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = 
cmv_basetable_2.a)
-WHERE cmv_basetable_2.c > 10.10;
+SELECT cmv_basetable_n6.a
+FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = 
cmv_basetable_2_n3.a)
+WHERE cmv_basetable_2_n3.c > 10.10;
 
 -- REBUILD
 EXPLAIN
-ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD;
 
-ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD;
 
-DESCRIBE FORMATTED cmv_mat_view;
+DESCRIBE FORMATTED cmv_mat_view_n6;
 
 -- NOW IT CAN BE USED AGAIN
 EXPLAIN
-SELECT cmv_basetable.a
-FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = 
cmv_basetable_2.a)
-WHERE cmv_basetable_2.c > 10.10;
+SELECT cmv_basetable_n6.a
+FROM cmv_basetable_n6 join cmv_basetable_2_n3 ON (cmv_basetable_n6.a = 
cmv_basetable_2_n3.a)
+WHERE cmv_basetable_2_n3.c > 10.10;
 
-SELECT cmv_basetable.a
-FROM cmv_basetable JOIN cmv_basetable_2 ON (cmv_basetable.a = 
cmv_basetable_2.a)
-WHERE cmv_basetable_2.c > 10.10;
+SELECT cmv_basetable_n6.a
+FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = 
cmv_basetable_2_n3.a)
+WHERE cmv_basetable_2_n3.c > 10.10;
 
 -- NOW AN UPDATE
-UPDATE cmv_basetable_2 SET a=2 WHERE a=1;
+UPDATE cmv_basetable_2_n3 SET a=2 WHERE a=1;
 
 -- INCREMENTAL REBUILD CANNOT BE TRIGGERED
 EXPLAIN
-ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD;
 
-ALTER MATERIALIZED VIEW cmv_mat_view REBUILD;
+ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD;
 
 -- MV CAN BE USED
 EXPLAIN
-SELECT cmv_basetable.a
-FROM cmv_basetable join cmv_basetable_2 ON (cmv_basetable.a = 
cmv_basetable_2.a)
-WHERE cmv_basetable_2.c > 10.10;
+SELECT cmv_basetable_n6.a
+FROM cmv_basetable_n6 join cmv_basetable_2_n3 ON (cmv_basetable_n6.a = 
cmv_basetable_2_n3.a)
+WHERE cmv_basetable_2_n3.c > 10.10;
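
Context for the sequence above: a materialized view over ACID tables can be maintained incrementally only while its sources receive insert-only changes, so the UPDATE forces the next REBUILD to recompute the view in full; once rebuilt, the rewrite becomes valid again. A minimal sketch of the same life cycle (mv_demo and base_demo are hypothetical names; base_demo is assumed transactional):

CREATE MATERIALIZED VIEW mv_demo ENABLE REWRITE
  TBLPROPERTIES ('transactional'='true') AS
  SELECT a, c FROM base_demo WHERE c > 10.0;

EXPLAIN SELECT a, c FROM base_demo WHERE c > 10.0;  -- plan should read mv_demo instead
UPDATE base_demo SET a = 2 WHERE a = 1;             -- non-insert change: the view is now outdated
ALTER MATERIALIZED VIEW mv_demo REBUILD;            -- falls back to a full recomputation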

[53/58] [abbrv] hive git commit: HIVE-19626: Change tmp staging mapred directory for CliDriver (Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)

2018-05-24 Thread jcamacho
HIVE-19626: Change tmp staging mapred directory for CliDriver (Jesus Camacho 
Rodriguez, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b89ceeef
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b89ceeef
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b89ceeef

Branch: refs/heads/branch-3
Commit: b89ceeefc516d1fd21ca4b6022919e60b83f9bd5
Parents: fa6bad5
Author: Jesus Camacho Rodriguez 
Authored: Mon May 21 09:43:56 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 17:54:35 2018 -0700

--
 data/conf/hive-site.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b89ceeef/data/conf/hive-site.xml
--
diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml
index b56cbd2..0c3adb4 100644
--- a/data/conf/hive-site.xml
+++ b/data/conf/hive-site.xml
@@ -25,6 +25,12 @@
   Internal marker for test. Used for masking env-dependent 
values
 
 
+
+<property>
+  <name>mapreduce.jobtracker.staging.root.dir</name>
+  <value>${test.tmp.dir}/cli/mapred/staging</value>
+</property>
+
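
The property above points MapReduce's job staging area into the test's own tmp directory, so concurrent CliDriver runs stop colliding in the shared default under /tmp. A sketch of the same override applied per session from a .q file instead of hive-site.xml, assuming the usual test.tmp.dir substitution is in scope:

set mapreduce.jobtracker.staging.root.dir=${system:test.tmp.dir}/cli/mapred/staging;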
 
 
 



[04/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q
index b4a9d66..027e9e6 100644
--- 
a/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q
+++ 
b/ql/src/test/queries/clientpositive/schema_evol_text_vecrow_part_all_complex.q
@@ -23,103 +23,103 @@ set hive.llap.io.enabled=false;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT --> STRUCT
 --
-CREATE TABLE part_change_various_various_struct1(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_various_various_struct1_n4(insert_num int, s1 STRUCT, b STRING) PARTITIONED BY(part INT);
 
-CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_a_txt_n4(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|'
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table 
complex_struct1_a_txt;
+load data local inpath 
'../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table 
complex_struct1_a_txt_n4;
 
-insert into table part_change_various_various_struct1 partition(part=1) select 
* from complex_struct1_a_txt;
+insert into table part_change_various_various_struct1_n4 partition(part=1) 
select * from complex_struct1_a_txt_n4;
 
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n4;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_struct1 replace columns (insert_num 
int, s1 STRUCT, b STRING);
+alter table part_change_various_various_struct1_n4 replace columns (insert_num 
int, s1 STRUCT, b STRING);
 
-CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_b_txt_n4(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|'
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table 
complex_struct1_b_txt;
+load data local inpath 
'../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table 
complex_struct1_b_txt_n4;
 
-insert into table part_change_various_various_struct1 partition(part=2) select 
* from complex_struct1_b_txt;
+insert into table part_change_various_various_struct1_n4 partition(part=2) 
select * from complex_struct1_b_txt_n4;
 
-CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT, b STRING)
+CREATE TABLE complex_struct1_c_txt_n4(insert_num int, s1 STRUCT, b STRING)
 row format delimited fields terminated by '|'
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table 
complex_struct1_c_txt;
+load data local inpath 
'../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table 
complex_struct1_c_txt_n4;
 
-insert into table part_change_various_various_struct1 partition(part=1) select 
* from complex_struct1_c_txt;
+insert into table part_change_various_various_struct1_n4 partition(part=1) 
select * from complex_struct1_c_txt_n4;
 
 explain vectorization detail
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n4;
 
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n4;
 
-drop table part_change_various_various_struct1;
+drop table part_change_various_various_struct1_n4;
 
 
 
 --
 -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT
 --
-CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) 
PARTITIONED BY(part INT);
+CREATE TABLE part_add_various_various_struct2_n4(insert_num int, b STRING) 
PARTITIONED BY(part INT);
 
-insert into table part_add_various_various_struct2 partition(part=1)
+insert into table part_add_various_various_struct2_n4 partition(part=1)
 values(1, 'original'),
   (2, 'original');
 
-select insert_num,part,b from part_add_various_various_struct2;
+select insert_num,part,b from part_add_various_various_struct2_n4;
 
 -- Table-Non-Cascade ADD COLUMN ...
-alter table part_add_various_various_struct2 ADD columns (s2 
STRUCT);
+alter table part_add_various_various_struct2_n4 ADD columns (s2 
STRUCT);
 
-CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 
STRUCT)
+CREATE TABLE complex_struct2_a_txt_n4(insert_num int, b STRING, s2 
STRUCT)
 row format d
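
The "Table-Non-Cascade" steps in these renamed tests exercise the same rule throughout: without CASCADE, an ALTER TABLE only changes the table-level schema, existing partitions keep their original column list, and readers reconcile the two at query time. A compact sketch with hypothetical names:

CREATE TABLE evol_demo (insert_num int, b string) PARTITIONED BY (part int);
INSERT INTO evol_demo PARTITION (part=1) VALUES (1, 'original');
ALTER TABLE evol_demo ADD COLUMNS (c int);        -- no CASCADE: table-level schema only
INSERT INTO evol_demo PARTITION (part=2) VALUES (2, 'new', 99);
SELECT insert_num, part, b, c FROM evol_demo;     -- c reads as NULL for the part=1 rows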

[14/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q
index 65e68a6..22b84d0 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q
@@ -21,92 +21,92 @@ set hive.llap.io.enabled=false;
 -- Also, we don't do EXPLAINs on ACID files because the write id causes Q file 
statistics differences...
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n21(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n21;
 
-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
+CREATE TABLE schema_evolution_data_2_n6(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into 
table schema_evolution_data_2;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into 
table schema_evolution_data_2_n6;
 
 --
 --
 -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
 ---
-CREATE TABLE table5(insert_num int, a INT, b STRING) clustered by (a) into 2 
buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table5_n3(insert_num int, a INT, b STRING) clustered by (a) into 
2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table table5 SELECT insert_num, int1, 'original' FROM 
schema_evolution_data;
+insert into table table5_n3 SELECT insert_num, int1, 'original' FROM 
schema_evolution_data_n21;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table5 add columns(c int, d string);
+alter table table5_n3 add columns(c int, d string);
 
-insert into table table5 SELECT insert_num, int1, 'new', int1, string1 FROM 
schema_evolution_data_2;
+insert into table table5_n3 SELECT insert_num, int1, 'new', int1, string1 FROM 
schema_evolution_data_2_n6;
 
-select a,b,c,d from table5;
+select a,b,c,d from table5_n3;
 
 -- UPDATE New Columns
-update table5 set c=99;
+update table5_n3 set c=99;
 
-select a,b,c,d from table5;
+select a,b,c,d from table5_n3;
 
-alter table table5 compact 'major';
+alter table table5_n3 compact 'major';
 
-select a,b,c,d from table5;
+select a,b,c,d from table5_n3;
 
-DROP TABLE table5;
+DROP TABLE table5_n3;
 
 --
 --
 -- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
 ---
-CREATE TABLE table6(insert_num int, a INT, b STRING) clustered by (a) into 2 
buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+CREATE TABLE table6_n2(insert_num int, a INT, b STRING) clustered by (a) into 
2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
 
-insert into table table6 SEL
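
On the "compact 'major' then re-select" pattern above: compaction is asynchronous, so a query issued right after the request is really checking the reader-side merge of base and delta files rather than the compacted layout. A sketch, with acid_demo as a hypothetical ACID table:

ALTER TABLE acid_demo COMPACT 'major';
SHOW COMPACTIONS;   -- poll until the request reaches 'succeeded' before asserting on file layout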

[09/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q
--
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q
index 6746fae..14d0c8d 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part.q
@@ -17,9 +17,9 @@ set hive.llap.io.enabled=false;
 -- FILE VARIATION: TEXTFILE, Non-Vectorized, MapWork, Partitioned
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n27(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n27;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -28,51 +28,51 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) 
PARTITIONED BY(part INT);
+CREATE TABLE part_add_int_permute_select_n8(insert_num int, a INT, b STRING) 
PARTITIONED BY(part INT);
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 
, 'new');
+insert into table part_add_int_permute_select_n8 partition(part=1) VALUES (1, 
, 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n8 add columns(c int);
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 
, 'new', );
+insert into table part_add_int_permute_select_n8 partition(part=1) VALUES (2, 
, 'new', );
 
 explain vectorization detail
-select insert_num,part,a,b from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n8;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n8;
+select insert_num,part,a,b,c from part_add_int_permute_select_n8;
+select insert_num,part,c from part_add_int_permute_select_n8;
 
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n8;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b 
STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_int_string_permute_select_n8(insert_num int, a INT, b 
STRING) PARTITIONED BY(part INT);
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES 
(1, , 'new');
+insert into table part_add_int_string_permute_select_n8 partition(part=1) 
VALUES (1, , 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n8 add columns(c int, d string);
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES 
(2, , 'new', , '');
+insert into table part_add_int_string_permute_select_n8 partition(part=1) 
VALUES (2, , 'new', , '');
 
 explain vectorization detail
-select insert_num,part,a,b from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n8;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_string_permute_select;
-select insert_num,part,a,b,c from part_add_int_string_permute_select;
-selec
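
The interleaved "explain vectorization detail" statements are the point of these variants: they record whether each operator tree actually runs vectorized under the configured reader, and why it falls back to row mode when it does not. A sketch, with evol_part_demo as a hypothetical table:

set hive.vectorized.execution.enabled=true;
EXPLAIN VECTORIZATION DETAIL
SELECT insert_num, part, a, b FROM evol_part_demo;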

[54/58] [abbrv] hive git commit: HIVE-19654: Change tmp staging mapred directory for TestBlobstoreCliDriver (Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)

2018-05-24 Thread jcamacho
HIVE-19654: Change tmp staging mapred directory for TestBlobstoreCliDriver 
(Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f3f9cc2f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f3f9cc2f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f3f9cc2f

Branch: refs/heads/branch-3
Commit: f3f9cc2fab85849599c19e0429edfdd52a14518a
Parents: b89ceee
Author: Jesus Camacho Rodriguez 
Authored: Tue May 22 08:47:19 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 17:55:05 2018 -0700

--
 itests/hive-blobstore/src/test/resources/hive-site.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f3f9cc2f/itests/hive-blobstore/src/test/resources/hive-site.xml
--
diff --git a/itests/hive-blobstore/src/test/resources/hive-site.xml 
b/itests/hive-blobstore/src/test/resources/hive-site.xml
index 775c559..91bef55 100644
--- a/itests/hive-blobstore/src/test/resources/hive-site.xml
+++ b/itests/hive-blobstore/src/test/resources/hive-site.xml
@@ -24,6 +24,12 @@
 Internal marker for test. Used for masking env-dependent 
values
   
 
+
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>${test.tmp.dir}/blobstore/mapred/staging</value>
+  </property>
+
   
   
   



[41/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/create_like.q
--
diff --git a/ql/src/test/queries/clientpositive/create_like.q 
b/ql/src/test/queries/clientpositive/create_like.q
index 81172f3..a675104 100644
--- a/ql/src/test/queries/clientpositive/create_like.q
+++ b/ql/src/test/queries/clientpositive/create_like.q
@@ -2,43 +2,43 @@
 
 
 
-CREATE TABLE table1 (a STRING, b STRING) STORED AS TEXTFILE;
-DESCRIBE FORMATTED table1;
+CREATE TABLE table1_n17 (a STRING, b STRING) STORED AS TEXTFILE;
+DESCRIBE FORMATTED table1_n17;
 
-CREATE TABLE table2 LIKE table1;
-DESCRIBE FORMATTED table2;
+CREATE TABLE table2_n12 LIKE table1_n17;
+DESCRIBE FORMATTED table2_n12;
 
-CREATE TABLE IF NOT EXISTS table2 LIKE table1;
+CREATE TABLE IF NOT EXISTS table2_n12 LIKE table1_n17;
 
-CREATE EXTERNAL TABLE IF NOT EXISTS table2 LIKE table1;
+CREATE EXTERNAL TABLE IF NOT EXISTS table2_n12 LIKE table1_n17;
 
-CREATE EXTERNAL TABLE IF NOT EXISTS table3 LIKE table1;
-DESCRIBE FORMATTED table3;
+CREATE EXTERNAL TABLE IF NOT EXISTS table3_n3 LIKE table1_n17;
+DESCRIBE FORMATTED table3_n3;
 
-INSERT OVERWRITE TABLE table1 SELECT key, value FROM src WHERE key = 86;
-INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100;
+INSERT OVERWRITE TABLE table1_n17 SELECT key, value FROM src WHERE key = 86;
+INSERT OVERWRITE TABLE table2_n12 SELECT key, value FROM src WHERE key = 100;
 
-SELECT * FROM table1;
-SELECT * FROM table2;
+SELECT * FROM table1_n17;
+SELECT * FROM table2_n12;
 
 dfs -cp ${system:hive.root}/data/files/ext_test 
${system:test.tmp.dir}/ext_test;
 
-CREATE EXTERNAL TABLE table4 (a INT) LOCATION 
'${system:test.tmp.dir}/ext_test';
-CREATE EXTERNAL TABLE table5 LIKE table4 LOCATION 
'${system:test.tmp.dir}/ext_test';
+CREATE EXTERNAL TABLE table4_n1 (a INT) LOCATION 
'${system:test.tmp.dir}/ext_test';
+CREATE EXTERNAL TABLE table5_n5 LIKE table4_n1 LOCATION 
'${system:test.tmp.dir}/ext_test';
 
-SELECT * FROM table4;
-SELECT * FROM table5;
+SELECT * FROM table4_n1;
+SELECT * FROM table5_n5;
 
-DROP TABLE table5;
-SELECT * FROM table4;
-DROP TABLE table4;
+DROP TABLE table5_n5;
+SELECT * FROM table4_n1;
+DROP TABLE table4_n1;
 
-CREATE EXTERNAL TABLE table4 (a INT) LOCATION 
'${system:test.tmp.dir}/ext_test';
-SELECT * FROM table4;
+CREATE EXTERNAL TABLE table4_n1 (a INT) LOCATION 
'${system:test.tmp.dir}/ext_test';
+SELECT * FROM table4_n1;
 
-CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{
+CREATE TABLE doctors_n2 STORED AS AVRO TBLPROPERTIES ('avro.schema.literal'='{
   "namespace": "testing.hive.avro.serde",
-  "name": "doctors",
+  "name": "doctors_n2",
   "type": "record",
   "fields": [
 {
@@ -59,10 +59,10 @@ CREATE TABLE doctors STORED AS AVRO TBLPROPERTIES 
('avro.schema.literal'='{
   ]
 }');
 
-alter table doctors set tblproperties ('k1'='v1', 'k2'='v2');
-DESCRIBE FORMATTED doctors;
+alter table doctors_n2 set tblproperties ('k1'='v1', 'k2'='v2');
+DESCRIBE FORMATTED doctors_n2;
 
-CREATE TABLE doctors2 like doctors;
+CREATE TABLE doctors2 like doctors_n2;
 DESCRIBE FORMATTED doctors2;
 
 CREATE TABLE PropertiedParquetTable(a INT, b STRING) STORED AS PARQUET 
TBLPROPERTIES("parquet.compression"="LZO");
@@ -70,28 +70,28 @@ CREATE TABLE LikePropertiedParquetTable LIKE 
PropertiedParquetTable;
 
 DESCRIBE FORMATTED LikePropertiedParquetTable;
 
-CREATE TABLE table5(col1 int, col2 string) stored as TEXTFILE;
-DESCRIBE FORMATTED table5;
+CREATE TABLE table5_n5(col1 int, col2 string) stored as TEXTFILE;
+DESCRIBE FORMATTED table5_n5;
 
-CREATE TABLE table6 like table5 stored as RCFILE;
-DESCRIBE FORMATTED table6;
+CREATE TABLE table6_n4 like table5_n5 stored as RCFILE;
+DESCRIBE FORMATTED table6_n4;
 
-drop table table6;
+drop table table6_n4;
 
-CREATE  TABLE table6 like table5 ROW FORMAT SERDE 
'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' STORED AS 
INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT   
'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' LOCATION 
'${system:hive.root}/data/files/table6';
-DESCRIBE FORMATTED table6;
+CREATE  TABLE table6_n4 like table5_n5 ROW FORMAT SERDE 
'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' STORED AS 
INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT   
'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' LOCATION 
'${system:hive.root}/data/files/table6';
+DESCRIBE FORMATTED table6_n4;
 
-drop table table5;
+drop table table5_n5;
 
-create table orc_table (
+create table orc_table_n0 (
 `time` string)
 stored as ORC tblproperties ("orc.compress"="SNAPPY");
 
-create table orc_table_using_like like orc_table;
+create table orc_table_using_like like orc_table_n0;
 
 describe formatted orc_table_using_like;
 
 drop table orc_table_using_like;
 
-drop table orc_table;
+drop table orc_table_n0;
 

http://git-wip-us.apache.org/repos/asf/hiv
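
As a reminder of the semantics under test in create_like.q: CREATE TABLE ... LIKE copies the column layout and storage format of the source but none of its data, and storage-related table properties generally carry over (which the PropertiedParquetTable case checks). A sketch with hypothetical names:

CREATE TABLE like_src_demo (a int, b string) STORED AS ORC TBLPROPERTIES ('orc.compress'='SNAPPY');
CREATE TABLE like_copy_demo LIKE like_src_demo;   -- schema and format copied, no rows
DESCRIBE FORMATTED like_copy_demo;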

[20/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/parquet_map_null.q
--
diff --git a/ql/src/test/queries/clientpositive/parquet_map_null.q 
b/ql/src/test/queries/clientpositive/parquet_map_null.q
index 810532d..7f84100 100644
--- a/ql/src/test/queries/clientpositive/parquet_map_null.q
+++ b/ql/src/test/queries/clientpositive/parquet_map_null.q
@@ -2,14 +2,14 @@ set hive.vectorized.execution.enabled=false;
 
 -- This test attempts to write a parquet table from an avro table that 
contains map null values
 
-DROP TABLE IF EXISTS avro_table;
+DROP TABLE IF EXISTS avro_table_n0;
 DROP TABLE IF EXISTS parquet_table;
 
-CREATE TABLE avro_table (avreau_col_1 map) STORED AS AVRO;
-LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO 
TABLE avro_table;
+CREATE TABLE avro_table_n0 (avreau_col_1 map) STORED AS AVRO;
+LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO 
TABLE avro_table_n0;
 
-CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table;
+CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table_n0;
 SELECT * FROM parquet_table;
 
-DROP TABLE avro_table;
+DROP TABLE avro_table_n0;
 DROP TABLE parquet_table;
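
The round trip being exercised here is Avro map columns with null values surviving a CTAS into Parquet. Reduced to its core, with hypothetical names:

CREATE TABLE avro_map_demo (m map<string,string>) STORED AS AVRO;
CREATE TABLE parquet_map_demo STORED AS PARQUET AS SELECT * FROM avro_map_demo;
SELECT * FROM parquet_map_demo;   -- null map values must come back as NULL, not fail the writer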

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/parquet_nested_complex.q
--
diff --git a/ql/src/test/queries/clientpositive/parquet_nested_complex.q 
b/ql/src/test/queries/clientpositive/parquet_nested_complex.q
index 717e16f..9688e16 100644
--- a/ql/src/test/queries/clientpositive/parquet_nested_complex.q
+++ b/ql/src/test/queries/clientpositive/parquet_nested_complex.q
@@ -2,9 +2,9 @@
 set hive.vectorized.execution.enabled=false;
 set hive.test.vectorized.execution.enabled.override=none;
 
--- start with the original nestedcomplex test
+-- start with the original nestedcomplex_n0 test
 
-create table nestedcomplex (
+create table nestedcomplex_n0 (
 simple_int int,
 max_nested_array  
array>>,
 max_nested_map
array>,
@@ -18,16 +18,16 @@ WITH SERDEPROPERTIES (
 )
 ;
 
-describe nestedcomplex;
-describe extended nestedcomplex;
+describe nestedcomplex_n0;
+describe extended nestedcomplex_n0;
 
-load data local inpath '../../data/files/nested_complex.txt' overwrite into 
table nestedcomplex;
+load data local inpath '../../data/files/nested_complex.txt' overwrite into 
table nestedcomplex_n0;
 
 -- and load the table into Parquet
 
-CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM 
nestedcomplex;
+CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM 
nestedcomplex_n0;
 
 SELECT * FROM parquet_nested_complex SORT BY simple_int;
 
-DROP TABLE nestedcomplex;
+DROP TABLE nestedcomplex_n0;
 DROP TABLE parquet_nested_complex;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/parquet_ppd_char.q
--
diff --git a/ql/src/test/queries/clientpositive/parquet_ppd_char.q 
b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
index 6a12787..cbc2f07 100644
--- a/ql/src/test/queries/clientpositive/parquet_ppd_char.q
+++ b/ql/src/test/queries/clientpositive/parquet_ppd_char.q
@@ -4,74 +4,74 @@ SET hive.optimize.ppd=true;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 
-create table newtypestbl(c char(10), v varchar(10), d decimal(5,3), da date) 
stored as parquet;
+create table newtypestbl_n3(c char(10), v varchar(10), d decimal(5,3), da 
date) stored as parquet;
 
-insert overwrite table newtypestbl select * from (select cast("apple" as 
char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from 
src src1 union all select cast("hello" as char(10)), cast("world" as 
varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) 
uniontbl;
+insert overwrite table newtypestbl_n3 select * from (select cast("apple" as 
char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from 
src src1 union all select cast("hello" as char(10)), cast("world" as 
varchar(10)), 11.22, cast("1970-02-27" as date) from src src2 limit 10) 
uniontbl;
 
 set hive.optimize.index.filter=false;
 
 -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN 
tests)
-select * from newtypestbl where c="apple";
+select * from newtypestbl_n3 where c="apple";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c="apple";
+select * from newtypestbl_n3 where c="apple";
 
 set hive.optimize.index.filter=false;
-select * from newtypestbl where c!="apple";
+select * from newtypestbl_n3 where c!="apple";
 
 set hive.optimize.index.filter=true;
-select * from newtypestbl where c!="apple";
+select * from newtypestbl_n3 where c!="apple";
 
 set hive.optimize.index.filter=false
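
The off/on toggling of hive.optimize.index.filter in parquet_ppd_char.q is the test's core device: every predicate runs once without pushdown as the baseline and once with Parquet row-group filtering, and the two result sets must match. The pattern in miniature, with a hypothetical table:

set hive.optimize.index.filter=false;
SELECT * FROM newtypes_demo WHERE c = 'apple';   -- baseline, full scan
set hive.optimize.index.filter=true;
SELECT * FROM newtypes_demo WHERE c = 'apple';   -- row groups pruned by min/max stats; same rows expected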

[38/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
--
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q 
b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
index 6da0376..1ddcfdf 100644
--- a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction.q
@@ -14,67 +14,67 @@ set hive.stats.fetch.column.stats=true;
 set hive.tez.bloom.filter.factor=1.0f; 
 
 -- Create Tables
-create table alltypesorc_int ( cint int, cstring string ) stored as ORC;
-create table srcpart_date (key string, value string) partitioned by (ds string 
) stored as ORC;
-CREATE TABLE srcpart_small(key1 STRING, value1 STRING) partitioned by (ds 
string) STORED as ORC;
+create table alltypesorc_int_n1 ( cint int, cstring string ) stored as ORC;
+create table srcpart_date_n7 (key string, value string) partitioned by (ds 
string ) stored as ORC;
+CREATE TABLE srcpart_small_n3(key1 STRING, value1 STRING) partitioned by (ds 
string) STORED as ORC;
 
 -- Add Partitions
-alter table srcpart_date add partition (ds = "2008-04-08");
-alter table srcpart_date add partition (ds = "2008-04-09");
+alter table srcpart_date_n7 add partition (ds = "2008-04-08");
+alter table srcpart_date_n7 add partition (ds = "2008-04-09");
 
-alter table srcpart_small add partition (ds = "2008-04-08");
-alter table srcpart_small add partition (ds = "2008-04-09");
+alter table srcpart_small_n3 add partition (ds = "2008-04-08");
+alter table srcpart_small_n3 add partition (ds = "2008-04-09");
 
 -- Load
-insert overwrite table alltypesorc_int select cint, cstring1 from alltypesorc;
-insert overwrite table srcpart_date partition (ds = "2008-04-08" ) select key, 
value from srcpart where ds = "2008-04-08";
-insert overwrite table srcpart_date partition (ds = "2008-04-09") select key, 
value from srcpart where ds = "2008-04-09";
-insert overwrite table srcpart_small partition (ds = "2008-04-09") select key, 
value from srcpart where ds = "2008-04-09" limit 20;
+insert overwrite table alltypesorc_int_n1 select cint, cstring1 from 
alltypesorc;
+insert overwrite table srcpart_date_n7 partition (ds = "2008-04-08" ) select 
key, value from srcpart where ds = "2008-04-08";
+insert overwrite table srcpart_date_n7 partition (ds = "2008-04-09") select 
key, value from srcpart where ds = "2008-04-09";
+insert overwrite table srcpart_small_n3 partition (ds = "2008-04-09") select 
key, value from srcpart where ds = "2008-04-09" limit 20;
 
 set hive.tez.dynamic.semijoin.reduction=false;
 
-analyze table alltypesorc_int compute statistics for columns;
-analyze table srcpart_date compute statistics for columns;
-analyze table srcpart_small compute statistics for columns;
+analyze table alltypesorc_int_n1 compute statistics for columns;
+analyze table srcpart_date_n7 compute statistics for columns;
+analyze table srcpart_small_n3 compute statistics for columns;
 
 -- single column, single key
-EXPLAIN select count(*) from srcpart_date join srcpart_small on 
(srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = 
srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on 
(srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on 
(srcpart_date_n7.key = srcpart_small_n3.key1);
 set hive.tez.dynamic.semijoin.reduction=true;
-EXPLAIN select count(*) from srcpart_date join srcpart_small on 
(srcpart_date.key = srcpart_small.key1);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = 
srcpart_small.key1);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on 
(srcpart_date_n7.key = srcpart_small_n3.key1);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on 
(srcpart_date_n7.key = srcpart_small_n3.key1);
 set hive.tez.dynamic.semijoin.reduction=true;
 
 -- Mix dynamic partition pruning(DPP) and min/max bloom filter optimizations. 
Should pick the DPP.
-EXPLAIN select count(*) from srcpart_date join srcpart_small on 
(srcpart_date.key = srcpart_small.ds);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = 
srcpart_small.ds);
+EXPLAIN select count(*) from srcpart_date_n7 join srcpart_small_n3 on 
(srcpart_date_n7.key = srcpart_small_n3.ds);
+select count(*) from srcpart_date_n7 join srcpart_small_n3 on 
(srcpart_date_n7.key = srcpart_small_n3.ds);
 set hive.tez.dynamic.semijoin.reduction=false;
 
 --multiple sources, single key
-EXPLAIN select count(*) from srcpart_date join srcpart_small on 
(srcpart_date.key = srcpart_small.key1) join alltypesorc_int on 
(srcpart_small.key1 = alltypesorc_int.cstring);
-select count(*) from srcpart_date join srcpart_small on (srcpart_date.key = 
srcpart_small.key1) join alltypesorc_int on (s

[46/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q
--
diff --git 
a/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q 
b/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q
index 4b03fff..279d05d 100644
--- a/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q
+++ b/ql/src/test/queries/clientpositive/avro_alter_table_update_columns.q
@@ -1,6 +1,6 @@
 -- verify schema changes introduced in avro.schema.literal/url sync with HMS 
if ALTER TABLE UPDATE COLUMNS is called
 
-CREATE TABLE avro_extschema_literal
+CREATE TABLE avro_extschema_literal_n1
  STORED AS AVRO
  TBLPROPERTIES ('avro.schema.literal'='{
   "namespace": "org.apache.hive",
@@ -11,9 +11,9 @@ CREATE TABLE avro_extschema_literal
 { "name":"first_name", "type":"string" },
 { "name":"last_name", "type":"string" }
   ] }');
-DESCRIBE avro_extschema_literal;
+DESCRIBE avro_extschema_literal_n1;
 
-ALTER TABLE avro_extschema_literal SET
+ALTER TABLE avro_extschema_literal_n1 SET
  TBLPROPERTIES ('avro.schema.literal'='{
   "namespace": "org.apache.hive",
   "name": "ext_schema",
@@ -21,12 +21,12 @@ ALTER TABLE avro_extschema_literal SET
   "fields": [
 { "name":"newCol", "type":"int" }
   ] }');
-DESCRIBE avro_extschema_literal;
+DESCRIBE avro_extschema_literal_n1;
 
-ALTER TABLE avro_extschema_literal UNSET TBLPROPERTIES ('avro.schema.literal');
-DESCRIBE avro_extschema_literal;
+ALTER TABLE avro_extschema_literal_n1 UNSET TBLPROPERTIES 
('avro.schema.literal');
+DESCRIBE avro_extschema_literal_n1;
 
-ALTER TABLE avro_extschema_literal SET
+ALTER TABLE avro_extschema_literal_n1 SET
  TBLPROPERTIES ('avro.schema.literal'='{
   "namespace": "org.apache.hive",
   "name": "ext_schema",
@@ -34,36 +34,36 @@ ALTER TABLE avro_extschema_literal SET
   "fields": [
 { "name":"newCol", "type":"int" }
   ] }');
-ALTER TABLE avro_extschema_literal UPDATE COLUMNS CASCADE;
-DESCRIBE avro_extschema_literal;
+ALTER TABLE avro_extschema_literal_n1 UPDATE COLUMNS CASCADE;
+DESCRIBE avro_extschema_literal_n1;
 
-ALTER TABLE avro_extschema_literal UNSET TBLPROPERTIES ('avro.schema.literal');
-DESCRIBE avro_extschema_literal;
+ALTER TABLE avro_extschema_literal_n1 UNSET TBLPROPERTIES 
('avro.schema.literal');
+DESCRIBE avro_extschema_literal_n1;
 
 dfs -cp ${system:hive.root}data/files/grad.avsc ${system:test.tmp.dir}/;
 dfs -cp ${system:hive.root}data/files/grad2.avsc ${system:test.tmp.dir}/;
 
 
-CREATE TABLE avro_extschema_url
+CREATE TABLE avro_extschema_url_n1
  STORED AS AVRO
  TBLPROPERTIES ('avro.schema.url'='${system:test.tmp.dir}/grad.avsc');
-DESCRIBE avro_extschema_url;
+DESCRIBE avro_extschema_url_n1;
 
-ALTER TABLE avro_extschema_url SET
+ALTER TABLE avro_extschema_url_n1 SET
  TBLPROPERTIES ('avro.schema.url'='${system:test.tmp.dir}/grad2.avsc');
-DESCRIBE avro_extschema_url;
+DESCRIBE avro_extschema_url_n1;
 
-ALTER TABLE avro_extschema_url UNSET TBLPROPERTIES ('avro.schema.url');
-DESCRIBE avro_extschema_url;
+ALTER TABLE avro_extschema_url_n1 UNSET TBLPROPERTIES ('avro.schema.url');
+DESCRIBE avro_extschema_url_n1;
 
 
-ALTER TABLE avro_extschema_url SET
+ALTER TABLE avro_extschema_url_n1 SET
  TBLPROPERTIES ('avro.schema.url'='${system:test.tmp.dir}/grad2.avsc');
-ALTER TABLE avro_extschema_url UPDATE COLUMNS CASCADE;
-DESCRIBE avro_extschema_url;
+ALTER TABLE avro_extschema_url_n1 UPDATE COLUMNS CASCADE;
+DESCRIBE avro_extschema_url_n1;
 
-ALTER TABLE avro_extschema_url UNSET TBLPROPERTIES ('avro.schema.url');
-DESCRIBE avro_extschema_url;
+ALTER TABLE avro_extschema_url_n1 UNSET TBLPROPERTIES ('avro.schema.url');
+DESCRIBE avro_extschema_url_n1;
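
What UPDATE COLUMNS buys in this test: normally the Avro-backed schema lives only in avro.schema.literal/url, so UNSETting the property reverts what DESCRIBE shows; after ALTER TABLE ... UPDATE COLUMNS the columns are copied into the metastore and survive the UNSET. Condensed, with a hypothetical table and the schema JSON elided:

ALTER TABLE avro_demo SET TBLPROPERTIES ('avro.schema.literal'='{ ... }');
ALTER TABLE avro_demo UPDATE COLUMNS CASCADE;   -- sync the Avro-derived columns into HMS
ALTER TABLE avro_demo UNSET TBLPROPERTIES ('avro.schema.literal');
DESCRIBE avro_demo;                             -- columns persist; HMS now owns them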
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/avro_compression_enabled.q
--
diff --git a/ql/src/test/queries/clientpositive/avro_compression_enabled.q 
b/ql/src/test/queries/clientpositive/avro_compression_enabled.q
index 58867ff..cba1326 100644
--- a/ql/src/test/queries/clientpositive/avro_compression_enabled.q
+++ b/ql/src/test/queries/clientpositive/avro_compression_enabled.q
@@ -1,6 +1,6 @@
 -- verify that new joins bring in correct schemas (including evolved schemas)
 
-CREATE TABLE doctors4
+CREATE TABLE doctors4_n0
 ROW FORMAT
 SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
 STORED AS
@@ -35,7 +35,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
   ]
 }');
 
-LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4;
+LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4_n0;
 
 set hive.exec.compress.output=true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/avro_compression_enabled_native.q
--
diff --git 
a/ql/src/test/

[18/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q
--
diff --git a/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q 
b/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q
index f6b067b..52d9a0a 100644
--- a/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q
+++ b/ql/src/test/queries/clientpositive/read_uint_parquet_vectorized.q
@@ -2,205 +2,205 @@
 SET hive.vectorized.execution.enabled=true;
 SET hive.fetch.task.conversion=none;
 
-create table testbasicint (uint_32_col int) stored as parquet;
-load data local inpath '../../data/files/test_uint.parquet' into table 
testbasicint;
-select * from testbasicint;
-drop table testbasicint;
+create table testbasicint_n0 (uint_32_col int) stored as parquet;
+load data local inpath '../../data/files/test_uint.parquet' into table 
testbasicint_n0;
+select * from testbasicint_n0;
+drop table testbasicint_n0;
 
-create table testbigintinv
+create table testbigintinv_n0
 (col_INT32_UINT_8 bigint,
  col_INT32_UINT_16 bigint,
  col_INT32_UINT_32 bigint,
  col_INT64_UINT_64 bigint) stored as parquet;
-load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testbigintinv;
-select * from testbigintinv;
-drop table testbigintinv;
+load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testbigintinv_n0;
+select * from testbigintinv_n0;
+drop table testbigintinv_n0;
 
-create table testintinv
+create table testintinv_n0
 (col_INT32_UINT_8  int,
  col_INT32_UINT_16 int,
  col_INT32_UINT_32 int,
  col_INT64_UINT_64 int) stored as parquet;
-load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table testintinv;
-select * from testintinv;
-drop table testintinv;
+load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testintinv_n0;
+select * from testintinv_n0;
+drop table testintinv_n0;
 
-create table testsmallintinv
+create table testsmallintinv_n0
 (col_INT32_UINT_8  smallint,
  col_INT32_UINT_16 smallint,
  col_INT32_UINT_32 smallint,
  col_INT64_UINT_64 smallint) stored as parquet;
-load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testsmallintinv;
-select * from testsmallintinv;
-drop table testsmallintinv;
+load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testsmallintinv_n0;
+select * from testsmallintinv_n0;
+drop table testsmallintinv_n0;
 
-create table testtinyintinv
+create table testtinyintinv_n0
 (col_INT32_UINT_8  tinyint,
  col_INT32_UINT_16 tinyint,
  col_INT32_UINT_32 tinyint,
  col_INT64_UINT_64 tinyint) stored as parquet;
-load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testtinyintinv;
-select * from testtinyintinv;
-drop table testtinyintinv;
+load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testtinyintinv_n0;
+select * from testtinyintinv_n0;
+drop table testtinyintinv_n0;
 
-create table testfloatinv
+create table testfloatinv_n0
 (col_INT32_UINT_8  float,
  col_INT32_UINT_16 float,
  col_INT32_UINT_32 float,
  col_INT64_UINT_64 float) stored as parquet;
-load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testfloatinv;
-select * from testfloatinv;
-drop table testfloatinv;
+load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testfloatinv_n0;
+select * from testfloatinv_n0;
+drop table testfloatinv_n0;
 
-create table testdoubleinv
+create table testdoubleinv_n0
 (col_INT32_UINT_8  double,
  col_INT32_UINT_16 double,
  col_INT32_UINT_32 double,
  col_INT64_UINT_64 double) stored as parquet;
-load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testdoubleinv;
-select * from testdoubleinv;
-drop table testdoubleinv;
+load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testdoubleinv_n0;
+select * from testdoubleinv_n0;
+drop table testdoubleinv_n0;
 
-create table testdecimal22_2inv
+create table testdecimal22_2inv_n0
 (col_INT32_UINT_8  decimal(22,2),
  col_INT32_UINT_16 decimal(22,2),
  col_INT32_UINT_32 decimal(22,2),
  col_INT64_UINT_64 decimal(22,2)) stored as parquet;
-load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testdecimal22_2inv;
-select * from testdecimal22_2inv;
-drop table testdecimal22_2inv;
+load data local inpath 
'../../data/files/data_including_invalid_values.parquet' into table 
testdecimal22_2inv_n0;
+select * from testdecimal22_2inv_n0;
+drop table testdecimal22_2inv_n0;
 
-create table testdecimal13_2inv
+create table testdecimal13_2inv_n0
 (col_INT32_UINT_8  decimal(13,2),
  col_INT32_UINT_16 decimal(13,2),
  col_INT32_UINT_32 decimal
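
Background on the type matrix above: Parquet's unsigned logical types (UINT_8/16/32/64) have no exact Hive counterpart, so the test loads one file into every numeric width to pin down where values widen cleanly and where they overflow. For instance (hypothetical name), UINT_32 values above 2^31-1 only survive in a wider signed column:

CREATE TABLE uint_demo (uint_32_col bigint) STORED AS PARQUET;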

[27/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/mapjoin46.q
--
diff --git a/ql/src/test/queries/clientpositive/mapjoin46.q 
b/ql/src/test/queries/clientpositive/mapjoin46.q
index b7aa092..9de7113 100644
--- a/ql/src/test/queries/clientpositive/mapjoin46.q
+++ b/ql/src/test/queries/clientpositive/mapjoin46.q
@@ -3,263 +3,263 @@ set hive.auto.convert.join=true;
 set hive.strict.checks.cartesian.product=false;
 set hive.join.emit.interval=2;
 
-CREATE TABLE test1 (key INT, value INT, col_1 STRING);
-INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
+CREATE TABLE test1_n4 (key INT, value INT, col_1 STRING);
+INSERT INTO test1_n4 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
 (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car');
 
-CREATE TABLE test2 (key INT, value INT, col_2 STRING);
-INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
+CREATE TABLE test2_n2 (key INT, value INT, col_2 STRING);
+INSERT INTO test2_n2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
 (104, 3, 'Fli'), (105, NULL, 'None');
 
 
 -- Basic outer join
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value);
 
 -- Conjunction with pred on multiple inputs and single inputs (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  AND test1.key between 100 and 102
-  AND test2.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value
+  AND test1_n4.key between 100 and 102
+  AND test2_n2.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  AND test1.key between 100 and 102
-  AND test2.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value
+  AND test1_n4.key between 100 and 102
+  AND test2_n2.key between 100 and 102);
 
 -- Conjunction with pred on single inputs (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102
-  AND test2.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.key between 100 and 102
+  AND test2_n2.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102
-  AND test2.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.key between 100 and 102
+  AND test2_n2.key between 100 and 102);
 
 -- Conjunction with pred on multiple inputs and none (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value AND true);
+FROM test1_n4 RIGHT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value AND true);
 
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value AND true);
+FROM test1_n4 RIGHT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value AND true);
 
 -- Condition on one input (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.key between 100 and 102);
 
 -- Disjunction with pred on multiple inputs and single inputs (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test1.key between 100 and 102
-  OR test2.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value
+  OR test1_n4.key between 100 and 102
+  OR test2_n2.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test1.key between 100 and 102
-  OR test2.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value
+  OR test1_n4.key between 100 and 102
+  OR test2_n2.key between 100 and 102);
 
 -- Disjunction with pred on multiple inputs and left input (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test1.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value
+  OR test1_n4.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test1.key between 100 and 102);
+FROM test1_n4 LEFT OUTER JOIN test2_n2
+ON (test1_n4.value=test2_n2.value
+  OR test1_n4.key between 100 and 102);
 
 -- Disjunction with pred on multiple inputs and right input (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test2.key betw
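
The shape of mapjoin46.q: each ON clause variant (conjunctions, disjunctions, single-input predicates) probes how much of the condition can serve as the hash-join key and how much must remain a residual filter on the outer join. One representative case, with hypothetical tables:

set hive.auto.convert.join=true;
set hive.join.emit.interval=2;
EXPLAIN
SELECT *
FROM left_demo LEFT OUTER JOIN right_demo
  ON (left_demo.value = right_demo.value OR left_demo.key BETWEEN 100 AND 102);
-- the disjunction cannot be a hash key, so it stays as a residual join predicate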

[32/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/infer_bucket_sort.q
--
diff --git a/ql/src/test/queries/clientpositive/infer_bucket_sort.q 
b/ql/src/test/queries/clientpositive/infer_bucket_sort.q
index 3423299..43506b8 100644
--- a/ql/src/test/queries/clientpositive/infer_bucket_sort.q
+++ b/ql/src/test/queries/clientpositive/infer_bucket_sort.q
@@ -4,156 +4,156 @@ set hive.exec.infer.bucket.sort=true;
 -- This tests inferring how data is bucketed/sorted from the operators in the 
reducer
 -- and populating that information in partitions' metadata
 
-CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part 
STRING);
+CREATE TABLE test_table_n5 (key STRING, value STRING) PARTITIONED BY (part 
STRING);
 
 -- Test group by, should be bucketed and sorted by group by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, count(*) FROM src GROUP BY key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test group by where a key isn't selected, should not be bucketed or sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, count(*) FROM src GROUP BY key, value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join, should be bucketed and sorted by join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join with two keys, should be bucketed and sorted by join keys
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = 
b.value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join with two keys and only one selected, should not be bucketed or 
sorted
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, '1' FROM src a JOIN src b ON a.key = b.key AND a.value = b.value;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join on three tables on same key, should be bucketed and sorted by 
join key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON 
(b.key = c.key);
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test join on three tables on different keys, should be bucketed and sorted 
by latter key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON 
(b.value = c.value);
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test distribute by, should only be bucketed by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src DISTRIBUTE BY key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test sort by, should be sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src SORT BY key ASC;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test sort by desc, should be sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src SORT BY key DESC;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test cluster by, should be bucketed and sorted by key
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+INSERT OVERWRITE TABLE test_table_n5 PARTITION (part = '1') 
 SELECT key, value FROM src CLUSTER BY key;
 
-DESCRIBE FORMATTED test_table PARTITION (part = '1');
+DESCRIBE FORMATTED test_table_n5 PARTITION (part = '1');
 
 -- Test distribute by and sort by different keys, should be bucketed by one 
key sorted by the
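
infer_bucket_sort.q relies on hive.exec.infer.bucket.sort: when the final reducer already partitions and orders rows (GROUP BY, join keys, CLUSTER BY), Hive records that as bucketing and sorting metadata on the written partition. The first case in miniature, with a hypothetical target table:

set hive.exec.infer.bucket.sort=true;
INSERT OVERWRITE TABLE infer_demo PARTITION (part = '1')
SELECT key, count(*) FROM src GROUP BY key;
DESCRIBE FORMATTED infer_demo PARTITION (part = '1');  -- Num Buckets and Sort Columns now reflect key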

[05/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
--
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
index 166b34a..3088a8d 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_vec_table.q
@@ -18,9 +18,9 @@ set hive.llap.io.enabled=false;
 --  vectorized reading of TEXTFILE format files using the vector SERDE methods.
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n15(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n15;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -29,51 +29,51 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n4(insert_num int, a INT, b STRING);
 
-insert into table table_add_int_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n4 SELECT insert_num, int1, 
'original' FROM schema_evolution_data_n15;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n4 add columns(c int);
 
-insert into table table_add_int_permute_select VALUES (111, 8, 'new', 
8);
+insert into table table_add_int_permute_select_n4 VALUES (111, 8, 'new', 
8);
 
 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n4;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n4;
+select insert_num,a,b,c from table_add_int_permute_select_n4;
+select insert_num,c from table_add_int_permute_select_n4;
 
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n4;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b 
STRING);
+CREATE TABLE table_add_int_string_permute_select_n4(insert_num int, a INT, b 
STRING);
 
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n4 SELECT insert_num, 
int1, 'original' FROM schema_evolution_data_n15;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n4 add columns(c int, d 
string);
 
-insert into table table_add_int_string_permute_select VALUES (111, 8, 
'new', 8, 'filler');
+insert into table table_add_int_string_permute_select_n4 VALUES (111, 8, 
'new', 8, 'filler');
 
 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n4;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_string_permute_select;
-select insert_num,a,c,d
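
The pattern these renamed queries verify is NULL defaulting under schema
evolution: a column added by ALTER TABLE does not exist in rows written
earlier, so reads return NULL for it until new data is inserted. A minimal
sketch with a hypothetical table, not from the commit:

-- Hypothetical sketch, not part of the commit.
CREATE TABLE t (insert_num int, a int, b string);
INSERT INTO t VALUES (1, 10, 'original');
ALTER TABLE t ADD COLUMNS (c int);
SELECT insert_num, c FROM t;   -- c is NULL for the pre-ALTER row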

[44/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/bucketmapjoin4.q
--
diff --git a/ql/src/test/queries/clientpositive/bucketmapjoin4.q 
b/ql/src/test/queries/clientpositive/bucketmapjoin4.q
index d882a38..2957d4a 100644
--- a/ql/src/test/queries/clientpositive/bucketmapjoin4.q
+++ b/ql/src/test/queries/clientpositive/bucketmapjoin4.q
@@ -2,82 +2,82 @@ SET hive.vectorized.execution.enabled=false;
 set hive.strict.checks.bucketing=false;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 
2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin;
-load data local inpath '../../data/files/bmj/01_0' INTO TABLE 
srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n17(key int, value string) CLUSTERED BY (key) 
INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin_n17;
+load data local inpath '../../data/files/bmj/01_0' INTO TABLE 
srcbucket_mapjoin_n17;
 
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds 
string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/01_0' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/02_0' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/03_0' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_n18 (key int, value string) partitioned by 
(ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin_part_n18 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/01_0' INTO TABLE 
srcbucket_mapjoin_part_n18 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/02_0' INTO TABLE 
srcbucket_mapjoin_part_n18 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/03_0' INTO TABLE 
srcbucket_mapjoin_part_n18 partition(ds='2008-04-08');
 
-CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by 
(ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/01_0' INTO TABLE 
srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+CREATE TABLE srcbucket_mapjoin_part_2_n15 (key int, value string) partitioned 
by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin_part_2_n15 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/01_0' INTO TABLE 
srcbucket_mapjoin_part_2_n15 partition(ds='2008-04-08');
 
-create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 
bigint);
-create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 
bigint);
+create table bucketmapjoin_hash_result_1_n6 (key bigint , value1 bigint, 
value2 bigint);
+create table bucketmapjoin_hash_result_2_n6 (key bigint , value1 bigint, 
value2 bigint);
 
 set hive.optimize.bucketmapjoin = true;
-create table bucketmapjoin_tmp_result (key string , value1 string, value2 
string);
+create table bucketmapjoin_tmp_result_n8 (key string , value1 string, value2 
string);
 
 explain extended
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin b
+from srcbucket_mapjoin_n17 a join srcbucket_mapjoin_n17 b
 on a.key=b.key;
 
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(b)*/ a.key, a.value, b.value
-from srcbucket_mapjoin a join srcbucket_mapjoin b
+from srcbucket_mapjoin_n17 a join srcbucket_mapjoin_n17 b
 on a.key=b.key;
 
-select count(1) from bucketmapjoin_tmp_result;
-insert overwrite table bucketmapjoin_hash_result_1
-select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from 
bucketmapjoin_tmp_result;
+select count(1) from bucketmapjoin_tmp_result_n8;
+insert overwrite table bucketmapjoin_hash_result_1_n6
+select sum(hash(key)), sum(hash(value1)), sum(hash(value2)) from 
bucketmapjoin_tmp_result_n8;
 
 set hive.optimize.bucketmapjoin = false;
-insert overwrite table bucketmapjoin_tmp_result
+insert overwrite table bucketmapjoin_tmp_result_n8
 select /*+mapjoin(b)*/ a.key, a.value, b.value
-from src
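
The fixture above is the standard bucket map join setup: both join sides are
bucketed on the join key with compatible bucket counts, so each mapper can
hash just the matching bucket of the small side instead of the whole table. A
minimal sketch with hypothetical table names, not from the commit:

-- Hypothetical sketch, not part of the commit.
set hive.optimize.bucketmapjoin = true;
SELECT /*+ MAPJOIN(b) */ a.key, a.value, b.value
FROM bucketed_a a JOIN bucketed_b b ON a.key = b.key;
-- With the flag off, the same query falls back to a regular map join.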

[48/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/autoColumnStats_2.q
--
diff --git a/ql/src/test/queries/clientpositive/autoColumnStats_2.q 
b/ql/src/test/queries/clientpositive/autoColumnStats_2.q
index fadac76..8321863 100644
--- a/ql/src/test/queries/clientpositive/autoColumnStats_2.q
+++ b/ql/src/test/queries/clientpositive/autoColumnStats_2.q
@@ -21,37 +21,37 @@ explain extended select * from src_multi1;
 
 describe formatted src_multi1;
 
-drop table a;
-drop table b;
-create table a like src;
-create table b like src;
+drop table a_n3;
+drop table b_n3;
+create table a_n3 like src;
+create table b_n3 like src;
 
 from src
-insert into table a select *
-insert into table b select *;
+insert into table a_n3 select *
+insert into table b_n3 select *;
 
-describe formatted a key;
-describe formatted b key;
+describe formatted a_n3 key;
+describe formatted b_n3 key;
 
 from src
-insert overwrite table a select *
-insert into table b select *;
+insert overwrite table a_n3 select *
+insert into table b_n3 select *;
 
-describe formatted a;
-describe formatted b;
+describe formatted a_n3;
+describe formatted b_n3;
 
-describe formatted b key;
-describe formatted b value;
+describe formatted b_n3 key;
+describe formatted b_n3 value;
 
-insert into table b select NULL, NULL from src limit 10;
+insert into table b_n3 select NULL, NULL from src limit 10;
 
-describe formatted b key;
-describe formatted b value;
+describe formatted b_n3 key;
+describe formatted b_n3 value;
 
-insert into table b(value) select key+10 from src limit 10;
+insert into table b_n3(value) select key+10 from src limit 10;
 
-describe formatted b key;
-describe formatted b value;
+describe formatted b_n3 key;
+describe formatted b_n3 value;
 
 drop table src_multi2;
 
@@ -69,11 +69,11 @@ create table if not exists nzhang_part14 (key string)
 
 insert into table nzhang_part14 partition(value) 
 select key, value from (
-  select * from (select 'k1' as key, cast(null as string) as value from src 
limit 2)a 
+  select * from (select 'k1' as key, cast(null as string) as value from src 
limit 2)a_n3 
   union all
-  select * from (select 'k2' as key, '' as value from src limit 2)b
+  select * from (select 'k2' as key, '' as value from src limit 2)b_n3
   union all 
-  select * from (select 'k3' as key, ' ' as value from src limit 2)c
+  select * from (select 'k3' as key, ' ' as value from src limit 2)c_n1
 ) T;
 
 explain select key from nzhang_part14;
@@ -130,31 +130,31 @@ select * from src1;
 
 describe formatted src_stat_part PARTITION(partitionId=2);
 
-drop table srcbucket_mapjoin;
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-drop table tab_part;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) 
CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-drop table srcbucket_mapjoin_part;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds 
string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+drop table srcbucket_mapjoin_n2;
+CREATE TABLE srcbucket_mapjoin_n2(key int, value string) partitioned by (ds 
string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+drop table tab_part_n1;
+CREATE TABLE tab_part_n1 (key int, value string) PARTITIONED BY(ds STRING) 
CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+drop table srcbucket_mapjoin_part_n2;
+CREATE TABLE srcbucket_mapjoin_part_n2 (key int, value string) partitioned by 
(ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
 
-load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/01_0' INTO TABLE 
srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin_n2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/01_0' INTO TABLE 
srcbucket_mapjoin_n2 partition(ds='2008-04-08');
 
-load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/01_0' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/02_0' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/03_0' INTO TABLE 
srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/00_0' INTO TABLE 
srcbucket_mapjoin_part_n2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/01_0' INTO TABLE 
srcbucket_mapjoin_part_n2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/02_0' INTO TABLE 
srcbucket_map
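
autoColumnStats_2.q depends on automatic statistics gathering: with autogather
on, each INSERT collects table and column statistics as a side effect, which
the DESCRIBE FORMATTED calls then check. A minimal sketch with a hypothetical
table, not from the commit:

-- Hypothetical sketch, not part of the commit.
set hive.stats.autogather=true;
set hive.stats.column.autogather=true;
CREATE TABLE t (key string, value string);
INSERT INTO t SELECT key, value FROM src;
DESCRIBE FORMATTED t key;   -- min/max/ndv populated by the insert itself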

[31/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/input_testxpath2.q
--
diff --git a/ql/src/test/queries/clientpositive/input_testxpath2.q 
b/ql/src/test/queries/clientpositive/input_testxpath2.q
index 131105b..bdb1a8c 100644
--- a/ql/src/test/queries/clientpositive/input_testxpath2.q
+++ b/ql/src/test/queries/clientpositive/input_testxpath2.q
@@ -1,10 +1,11 @@
-CREATE TABLE dest1(lint_size INT, lintstring_size INT, mstringstring_size INT) 
STORED AS TEXTFILE;
+--! qt:dataset:src_thrift
+CREATE TABLE dest1_n32(lint_size INT, lintstring_size INT, mstringstring_size 
INT) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), 
size(src_thrift.lintstring), size(src_thrift.mstringstring) where 
src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
+INSERT OVERWRITE TABLE dest1_n32 SELECT size(src_thrift.lint), 
size(src_thrift.lintstring), size(src_thrift.mstringstring) where 
src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
 
 FROM src_thrift
-INSERT OVERWRITE TABLE dest1 SELECT size(src_thrift.lint), 
size(src_thrift.lintstring), size(src_thrift.mstringstring) where 
src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
+INSERT OVERWRITE TABLE dest1_n32 SELECT size(src_thrift.lint), 
size(src_thrift.lintstring), size(src_thrift.mstringstring) where 
src_thrift.lint IS NOT NULL AND NOT (src_thrift.mstringstring IS NULL);
 
-SELECT dest1.* FROM dest1;
+SELECT dest1_n32.* FROM dest1_n32;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/inputddl7.q
--
diff --git a/ql/src/test/queries/clientpositive/inputddl7.q 
b/ql/src/test/queries/clientpositive/inputddl7.q
index 27e587a..6f775c1 100644
--- a/ql/src/test/queries/clientpositive/inputddl7.q
+++ b/ql/src/test/queries/clientpositive/inputddl7.q
@@ -2,29 +2,29 @@
 -- test for loading into partitions with the correct file format
 
 
-CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
-SELECT COUNT(1) FROM T1;
+CREATE TABLE T1_n117(name STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1_n117;
+SELECT COUNT(1) FROM T1_n117;
 
 
-CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2;
-SELECT COUNT(1) FROM T2;
+CREATE TABLE T2_n69(name STRING) STORED AS SEQUENCEFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2_n69;
+SELECT COUNT(1) FROM T2_n69;
 
 
-CREATE TABLE T3(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3 PARTITION 
(ds='2008-04-09');
-SELECT COUNT(1) FROM T3 where T3.ds='2008-04-09';
+CREATE TABLE T3_n25(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3_n25 PARTITION 
(ds='2008-04-09');
+SELECT COUNT(1) FROM T3_n25 where T3_n25.ds='2008-04-09';
 
 
-CREATE TABLE T4(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4 PARTITION 
(ds='2008-04-09');
-SELECT COUNT(1) FROM T4 where T4.ds='2008-04-09';
+CREATE TABLE T4_n14(name STRING) PARTITIONED BY(ds STRING) STORED AS 
SEQUENCEFILE;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4_n14 PARTITION 
(ds='2008-04-09');
+SELECT COUNT(1) FROM T4_n14 where T4_n14.ds='2008-04-09';
 
-DESCRIBE EXTENDED T1;
-DESCRIBE EXTENDED T2;
-DESCRIBE EXTENDED T3 PARTITION (ds='2008-04-09');
-DESCRIBE EXTENDED T4 PARTITION (ds='2008-04-09');
+DESCRIBE EXTENDED T1_n117;
+DESCRIBE EXTENDED T2_n69;
+DESCRIBE EXTENDED T3_n25 PARTITION (ds='2008-04-09');
+DESCRIBE EXTENDED T4_n14 PARTITION (ds='2008-04-09');
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/insert0.q
--
diff --git a/ql/src/test/queries/clientpositive/insert0.q 
b/ql/src/test/queries/clientpositive/insert0.q
index 7e687ac..a3e99ee 100644
--- a/ql/src/test/queries/clientpositive/insert0.q
+++ b/ql/src/test/queries/clientpositive/insert0.q
@@ -1,19 +1,19 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
 
-DROP TABLE insert_into1;
+DROP TABLE insert_into1_n1;
 DROP TABLE ctas_table;
 DROP TABLE ctas_part;
 
-CREATE TABLE insert_into1 (key int, value string);
+CREATE TABLE insert_into1_n1 (key int, value string);
 
-INSERT OVERWRITE TABLE insert_into1 SELECT * from src ORDER BY key LIMIT 10;
+INSERT OVERWRITE TABLE insert_into1_n1 SELECT * from src ORDER BY key LIMIT 10;
 
-select * from insert_into1 order by key;
+select * from insert_into1_n1 order by key;
 
-INSERT INTO TABLE insert_into1 SELECT

[55/58] [abbrv] hive git commit: HIVE-19655: Mask stats for TestMiniLlapLocalCliDriver#smb_mapjoin_15 (Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)

2018-05-24 Thread jcamacho
HIVE-19655: Mask stats for TestMiniLlapLocalCliDriver#smb_mapjoin_15 (Jesus 
Camacho Rodriguez, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/85c33593
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/85c33593
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/85c33593

Branch: refs/heads/branch-3
Commit: 85c33593345a35f23b089123fc41e2d4680e8c9f
Parents: f3f9cc2
Author: Jesus Camacho Rodriguez 
Authored: Tue May 22 09:07:38 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 17:55:30 2018 -0700

--
 .../queries/clientpositive/smb_mapjoin_15.q    |   4 +
 .../clientpositive/llap/smb_mapjoin_15.q.out   | 104 +--
 .../clientpositive/spark/smb_mapjoin_15.q.out  | 104 +--
 3 files changed, 108 insertions(+), 104 deletions(-)
--
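
The fix is the MASK_STATS directive added to the .q file below: with it, the
test driver masks absolute statistics in the golden output, so the plan
comparison no longer depends on exact row counts and data sizes. Sketch of the
effect (the masked form is taken from the q.out diff below):

-- MASK_STATS
-- turns golden-file lines such as
--   Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE ...
-- into
--   Statistics: Num rows: ###Masked### Data size: ###Masked### Basic stats: COMPLETE ...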


http://git-wip-us.apache.org/repos/asf/hive/blob/85c33593/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
--
diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_15.q 
b/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
index 0e76446..dea684d 100644
--- a/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
+++ b/ql/src/test/queries/clientpositive/smb_mapjoin_15.q
@@ -1,3 +1,7 @@
+--! qt:dataset:src
+
+-- MASK_STATS
+
 set hive.optimize.bucketmapjoin = true;
 set hive.optimize.bucketmapjoin.sortedmerge = true;
 set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;

http://git-wip-us.apache.org/repos/asf/hive/blob/85c33593/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out 
b/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out
index c228fed..21aac45 100644
--- a/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_15.q.out
@@ -55,22 +55,22 @@ STAGE PLANS:
 Map Operator Tree:
 TableScan
   alias: a
-  Statistics: Num rows: 500 Data size: 47500 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: ###Masked### Data size: ###Masked### 
Basic stats: COMPLETE Column stats: COMPLETE
   GatherStats: false
   Filter Operator
 isSamplingPred: false
 predicate: key is not null (type: boolean)
-Statistics: Num rows: 500 Data size: 47500 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: ###Masked### Data size: ###Masked### 
Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
   expressions: key (type: int), value (type: string)
   outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 47500 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: ###Masked### Data size: 
###Masked### Basic stats: COMPLETE Column stats: COMPLETE
   Reduce Output Operator
 key expressions: _col0 (type: int)
 null sort order: a
 sort order: +
 Map-reduce partition columns: _col0 (type: int)
-Statistics: Num rows: 500 Data size: 47500 Basic 
stats: COMPLETE Column stats: COMPLETE
+Statistics: Num rows: ###Masked### Data size: 
###Masked### Basic stats: COMPLETE Column stats: COMPLETE
 tag: 0
 value expressions: _col1 (type: string)
 auto parallelism: true
@@ -137,22 +137,22 @@ STAGE PLANS:
 Map Operator Tree:
 TableScan
   alias: b
-  Statistics: Num rows: 500 Data size: 47500 Basic stats: 
COMPLETE Column stats: COMPLETE
+  Statistics: Num rows: ###Masked### Data size: ###Masked### 
Basic stats: COMPLETE Column stats: COMPLETE
   GatherStats: false
   Filter Operator
 isSamplingPred: false
 predicate: key is not null (type: boolean)
-Statistics: Num rows: 500 Data size: 47500 Basic stats: 
COMPLETE Column stats: COMPLETE
+Statistics: Num rows: ###Masked### Data size: ###Masked### 
Basic stats: COMPLETE Column stats: COMPLETE
 Select Operator
   expressions: key (type: int), value (type: string)
   outputColumnNames: _col0, _col1
-  Stati

[40/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/default_constraint.q
--
diff --git a/ql/src/test/queries/clientpositive/default_constraint.q 
b/ql/src/test/queries/clientpositive/default_constraint.q
index a86622b..981da63 100644
--- a/ql/src/test/queries/clientpositive/default_constraint.q
+++ b/ql/src/test/queries/clientpositive/default_constraint.q
@@ -7,51 +7,51 @@
  set hive.support.concurrency=true;
  set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-CREATE TABLE numericDataType(a TINYINT CONSTRAINT tinyint_constraint DEFAULT 
127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647,
+CREATE TABLE numericDataType_n1(a TINYINT CONSTRAINT tinyint_constraint 
DEFAULT 127Y ENABLE, b SMALLINT DEFAULT 32767S, c INT DEFAULT 2147483647,
 d BIGINT DEFAULT  9223372036854775807L, e DOUBLE DEFAULT 3.4E38, f 
DECIMAL(9,2) DEFAULT 1234567.89)
 clustered by (a) into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true');
-DESC FORMATTED numericDataType;
+DESC FORMATTED numericDataType_n1;
 
-EXPLAIN INSERT INTO numericDataType(a) values(3Y);
-INSERT INTO numericDataType(a) values(3Y);
-SELECT * FROM numericDataType;
+EXPLAIN INSERT INTO numericDataType_n1(a) values(3Y);
+INSERT INTO numericDataType_n1(a) values(3Y);
+SELECT * FROM numericDataType_n1;
 
-EXPLAIN INSERT INTO numericDataType(e,f) values(4.5, 678.4);
-INSERT INTO numericDataType(e,f) values(4.5, 678.4);
-SELECT * FROM numericDataType;
+EXPLAIN INSERT INTO numericDataType_n1(e,f) values(4.5, 678.4);
+INSERT INTO numericDataType_n1(e,f) values(4.5, 678.4);
+SELECT * FROM numericDataType_n1;
 
-DROP TABLE numericDataType;
+DROP TABLE numericDataType_n1;
 
   -- Date/time
-CREATE TABLE table1(d DATE DEFAULT DATE'2018-02-14', t TIMESTAMP DEFAULT 
TIMESTAMP'2016-02-22 12:45:07.0',
+CREATE TABLE table1_n16(d DATE DEFAULT DATE'2018-02-14', t TIMESTAMP DEFAULT 
TIMESTAMP'2016-02-22 12:45:07.0',
 tz timestamp with local time zone DEFAULT TIMESTAMPLOCALTZ'2016-01-03 
12:26:34 America/Los_Angeles',
 d1 DATE DEFAULT current_date() ENABLE, t1 TIMESTAMP DEFAULT 
current_timestamp() DISABLE);
-DESC FORMATTED table1;
+DESC FORMATTED table1_n16;
 
-EXPLAIN INSERT INTO table1(t) values ("1985-12-31 12:45:07");
-INSERT INTO table1(t) values ("1985-12-31 12:45:07");
-SELECT d, t, tz,d1=current_date(), t1 from table1;
+EXPLAIN INSERT INTO table1_n16(t) values ("1985-12-31 12:45:07");
+INSERT INTO table1_n16(t) values ("1985-12-31 12:45:07");
+SELECT d, t, tz,d1=current_date(), t1 from table1_n16;
 
-EXPLAIN INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 
17:32:14.259');
-INSERT INTO table1(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259');
-SELECT d, t, tz,d1=current_date(), t1=current_timestamp() from table1;
+EXPLAIN INSERT INTO table1_n16(d, t1) values ("1985-12-31", '2018-02-27 
17:32:14.259');
+INSERT INTO table1_n16(d, t1) values ("1985-12-31", '2018-02-27 17:32:14.259');
+SELECT d, t, tz,d1=current_date(), t1=current_timestamp() from table1_n16;
 
-DROP TABLE table1;
+DROP TABLE table1_n16;
 
 -- string type
-CREATE TABLE table2(i STRING DEFAULT 'current_database()', j STRING DEFAULT 
current_user(),
+CREATE TABLE table2_n11(i STRING DEFAULT 'current_database()', j STRING 
DEFAULT current_user(),
 k STRING DEFAULT 'Current_User()', v varchar(350) DEFAULT 
cast('varchar_default_value' as varchar(350)),
 c char(20) DEFAULT cast('char_value' as char(20)))
 clustered by (i) into 2 buckets stored as orc TBLPROPERTIES 
('transactional'='true');
-DESC FORMATTED table2;
-EXPLAIN INSERT INTO table2(i) values('default');
-INSERT INTO table2(i) values('default');
-SELECT i,j=current_user(),k,v,c FROM table2;
+DESC FORMATTED table2_n11;
+EXPLAIN INSERT INTO table2_n11(i) values('default');
+INSERT INTO table2_n11(i) values('default');
+SELECT i,j=current_user(),k,v,c FROM table2_n11;
 
-EXPLAIN INSERT INTO table2(v, c) values('varchar_default2', 'char');
-INSERT INTO table2(v, c) values('varchar_default2', 'char');
-SELECT i,j=current_user(),k,v,c FROM table2;
-DROP TABLE table2;
+EXPLAIN INSERT INTO table2_n11(v, c) values('varchar_default2', 'char');
+INSERT INTO table2_n11(v, c) values('varchar_default2', 'char');
+SELECT i,j=current_user(),k,v,c FROM table2_n11;
+DROP TABLE table2_n11;
 
 
 -- misc type
@@ -67,7 +67,7 @@ SELECT b, b1 from misc;
 DROP TABLE misc;
 
 -- CAST
-CREATE table t11(i int default cast(cast(4 as double) as int),
+CREATE table t11_n2(i int default cast(cast(4 as double) as int),
 b1 boolean default cast ('true' as boolean), b2 int default cast (5.67 as 
int),
 b3 tinyint default cast (45 as tinyint), b4 float default cast (45.4 as 
float),
 b5 bigint default cast (567 as bigint), b6 smallint default cast (88 as 
smallint),
@@ -77,55 +77,55 @@ CREATE table t11(i int default cast(cast(4 as double) as 
int),
  ts timestamp defau
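
default_constraint.q exercises DEFAULT constraint evaluation: a column omitted
from the INSERT column list is filled from its default expression, including
CAST-based defaults like the ones above. A minimal sketch with a hypothetical
table, not from the commit:

-- Hypothetical sketch, not part of the commit.
CREATE TABLE t (a INT, i INT DEFAULT CAST(5.67 AS INT));
INSERT INTO t(a) VALUES (1);
SELECT a, i FROM t;   -- returns 1, 5: i was filled from its DEFAULT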

[39/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/druid_timestamptz.q
--
diff --git a/ql/src/test/queries/clientpositive/druid_timestamptz.q 
b/ql/src/test/queries/clientpositive/druid_timestamptz.q
index 63c6e4e..4830044 100644
--- a/ql/src/test/queries/clientpositive/druid_timestamptz.q
+++ b/ql/src/test/queries/clientpositive/druid_timestamptz.q
@@ -1,51 +1,51 @@
 set hive.fetch.task.conversion=more;
 
 
-drop table tstz1;
+drop table tstz1_n0;
 
-create table tstz1(`__time` timestamp with local time zone, n string, v 
integer)
+create table tstz1_n0(`__time` timestamp with local time zone, n string, v 
integer)
 STORED BY 'org.apache.hadoop.hive.druid.DruidStorageHandler'
 TBLPROPERTIES ("druid.segment.granularity" = "HOUR");
 
-insert into table tstz1
+insert into table tstz1_n0
 values(cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local 
time zone), 'Bill', 10);
 
-EXPLAIN select `__time` from tstz1;
-select `__time` from tstz1;
+EXPLAIN select `__time` from tstz1_n0;
+select `__time` from tstz1_n0;
 
-EXPLAIN select cast(`__time` as timestamp) from tstz1;
-select cast(`__time` as timestamp) from tstz1;
+EXPLAIN select cast(`__time` as timestamp) from tstz1_n0;
+select cast(`__time` as timestamp) from tstz1_n0;
 
-EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
-select cast(`__time` as timestamp) from tstz1 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
+EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
+select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
 
-EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1;
-SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1;
+EXPLAIN SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0;
+SELECT EXTRACT(HOUR FROM CAST(`__time` AS timestamp)) FROM tstz1_n0;
 
-EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1;
-SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1;
+EXPLAIN SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0;
+SELECT FLOOR(CAST(`__time` AS timestamp) to HOUR) FROM tstz1_n0;
 
 
 set time zone UTC;
-EXPLAIN select `__time` from tstz1;
-select `__time` from tstz1;
-EXPLAIN select cast(`__time` as timestamp) from tstz1;
-select cast(`__time` as timestamp) from tstz1;
-EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
-select cast(`__time` as timestamp) from tstz1 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
+EXPLAIN select `__time` from tstz1_n0;
+select `__time` from tstz1_n0;
+EXPLAIN select cast(`__time` as timestamp) from tstz1_n0;
+select cast(`__time` as timestamp) from tstz1_n0;
+EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
+select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
 
 -- THIS is failing explore why
---EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` = 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
---select cast(`__time` as timestamp) from tstz1 where `__time` = 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
+--EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` = 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
+--select cast(`__time` as timestamp) from tstz1_n0 where `__time` = 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone);
 
-EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` >= 
cast('2016-01-03 20:26:34' as timestamp);
-select cast(`__time` as timestamp) from tstz1 where `__time` >= 
cast('2016-01-03 20:26:34' as timestamp);
+EXPLAIN select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= 
cast('2016-01-03 20:26:34' as timestamp);
+select cast(`__time` as timestamp) from tstz1_n0 where `__time` >= 
cast('2016-01-03 20:26:34' as timestamp);
 
-EXPLAIN select cast(`__time` as timestamp) from tstz1 where `__time` >= 
cast('2016-01-03 12:26:34 America/Los_Angeles' as timestamp with local time 
zone) AND `__time` <= cast('2016-01-03 12:26:34 America/Los_Angeles' as 
timestamp with local time zone);
-select cast(`__time` as timestamp) from tstz1 where `__time` >= 
c
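
druid_timestamptz.q hinges on TIMESTAMP WITH LOCAL TIME ZONE semantics: the
stored instant is fixed, while the displayed wall-clock value follows the
session time zone, which is why the expected values shift after `set time zone
UTC`. A minimal sketch with a hypothetical table, not from the commit:

-- Hypothetical sketch, not part of the commit.
set time zone UTC;
SELECT CAST(`__time` AS timestamp) FROM t
WHERE `__time` >= CAST('2016-01-03 12:26:34 America/Los_Angeles' AS timestamp with local time zone);
-- 12:26:34 America/Los_Angeles is the same instant as 2016-01-03 20:26:34 UTC.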

[15/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q
index 7509d3d..61cc09b 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q
@@ -24,9 +24,9 @@ set hive.llap.io.encode.enabled=true;
 -- Instead explain vectorization only detail
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n23(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n23;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -35,51 +35,51 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) 
PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_permute_select_n6(insert_num int, a INT, b STRING) 
PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true');
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 
, 'new');
+insert into table part_add_int_permute_select_n6 partition(part=1) VALUES (1, 
, 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n6 add columns(c int);
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 
, 'new', );
+insert into table part_add_int_permute_select_n6 partition(part=1) VALUES (2, 
, 'new', );
 
 explain vectorization only detail
-select insert_num,part,a,b,c from part_add_int_permute_select;
+select insert_num,part,a,b,c from part_add_int_permute_select_n6;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n6;
+select insert_num,part,a,b,c from part_add_int_permute_select_n6;
+select insert_num,part,c from part_add_int_permute_select_n6;
 
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n6;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b 
STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_string_permute_select_n6(insert_num int, a INT, b 
STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true');
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES 
(1, , 'new');
+insert into table part_add_int_string_permute_select_n6 partition(part=1) 
VALUES (1, , 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n6 add columns(c int, d string);
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES 
(2, , 'new', , '');
+insert into table part_add_int_string_permute_select_n6 partition(part=1) 
VALUES (2, , 'new', , '');
 
 explain vec

[07/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q
index 5d5ea38..9cc3a89 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_table_llap_io.q
@@ -17,9 +17,9 @@ set hive.llap.io.encode.enabled=true;
 -- FILE VARIATION: TEXTFILE, Non-Vectorized, MapWork, Table
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n10(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n10;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -28,51 +28,51 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n2(insert_num int, a INT, b STRING);
 
-insert into table table_add_int_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n2 SELECT insert_num, int1, 
'original' FROM schema_evolution_data_n10;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n2 add columns(c int);
 
-insert into table table_add_int_permute_select VALUES (111, 8, 'new', 
8);
+insert into table table_add_int_permute_select_n2 VALUES (111, 8, 'new', 
8);
 
 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n2;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n2;
+select insert_num,a,b,c from table_add_int_permute_select_n2;
+select insert_num,c from table_add_int_permute_select_n2;
 
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n2;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b 
STRING);
+CREATE TABLE table_add_int_string_permute_select_n2(insert_num int, a INT, b 
STRING);
 
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n2 SELECT insert_num, 
int1, 'original' FROM schema_evolution_data_n10;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n2 add columns(c int, d 
string);
 
-insert into table table_add_int_string_permute_select VALUES (111, 8, 
'new', 8, 'filler');
+insert into table table_add_int_string_permute_select_n2 VALUES (111, 8, 
'new', 8, 'filler');
 
 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n2;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_strin

[13/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
 
b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
index c2d8308..bf60f1e 100644
--- 
a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
+++ 
b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_part_all_primitive.q
@@ -21,13 +21,13 @@ set hive.llap.io.enabled=false;
 --
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n41(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n41;
 
-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
+CREATE TABLE schema_evolution_data_2_n14(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into 
table schema_evolution_data_2;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into 
table schema_evolution_data_2_n14;
 
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various:
@@ -37,7 +37,7 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_
 --(BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, 
STRING, CHAR, VARCHAR, TIMESTAMP) --> INT–2147483648 to 2147483647 and
 --(BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, 
STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT   -9223372036854775808 to 
9223372036854775807
 --
-CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int,
+CREATE TABLE part_change_various_various_boolean_to_bigint_n6(insert_num int,
   c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, 
c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP,
   c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 
DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 
TIMESTAMP,
   c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 
DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 
TIMESTAMP,
@@ -45,18 +45,18 @@ CREATE TABLE 
part_change_various_various_boolean_to_bigint(insert_num int,
   c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 
DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 
TIMESTAMP,
   b STRING) PARTITIONED BY(part INT);
 
-insert into table part_change_various_various_boolean_to_bigint 
partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_boolean_to_bigint_n6 
partition(part=1) SELECT insert_num,
  tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, 
boolean_str, timestamp1,
  boolean1, smallint1, int1, bigint1, flo
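
This subsection covers type-widening schema evolution: the declared type of a
column is changed to a wider one (for example TINYINT to INT or BIGINT), and
rows written under the old schema must still read back correctly, converted at
scan time. A minimal sketch with a hypothetical table, not from the commit:

-- Hypothetical sketch, not part of the commit.
CREATE TABLE t (insert_num int, c1 TINYINT) PARTITIONED BY (part INT);
INSERT INTO t PARTITION (part=1) VALUES (1, 127);
ALTER TABLE t CHANGE COLUMN c1 c1 BIGINT;
SELECT insert_num, c1 FROM t;   -- the old row reads back as BIGINT 127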

[02/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/setop_no_distinct.q
--
diff --git a/ql/src/test/queries/clientpositive/setop_no_distinct.q 
b/ql/src/test/queries/clientpositive/setop_no_distinct.q
index 207954a..798f9c7 100644
--- a/ql/src/test/queries/clientpositive/setop_no_distinct.q
+++ b/ql/src/test/queries/clientpositive/setop_no_distinct.q
@@ -1,51 +1,51 @@
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
 
-create table a(key int, value int);
+create table a_n1(key int, value int);
 
-insert into table a values (1,2),(1,2),(1,2),(1,3),(2,3);
+insert into table a_n1 values (1,2),(1,2),(1,2),(1,3),(2,3);
 
-create table b(key int, value int);
+create table b_n1(key int, value int);
 
-insert into table b values (1,2),(1,2),(2,3);
+insert into table b_n1 values (1,2),(1,2),(2,3);
 
-select * from a intersect select * from b;
+select * from a_n1 intersect select * from b_n1;
 
-(select * from b intersect (select * from a)) intersect select * from b;
+(select * from b_n1 intersect (select * from a_n1)) intersect select * from 
b_n1;
 
-select * from b intersect all select * from a intersect select * from b;
+select * from b_n1 intersect all select * from a_n1 intersect select * from 
b_n1;
 
-(select * from b) intersect all ((select * from a) intersect select * from b);
+(select * from b_n1) intersect all ((select * from a_n1) intersect select * 
from b_n1);
 
-select * from (select a.key, b.value from a join b on a.key=b.key)sub1 
+select * from (select a_n1.key, b_n1.value from a_n1 join b_n1 on 
a_n1.key=b_n1.key)sub1 
 intersect 
-select * from (select a.key, b.value from a join b on a.key=b.key)sub2; 
+select * from (select a_n1.key, b_n1.value from a_n1 join b_n1 on 
a_n1.key=b_n1.key)sub2; 
 
-drop table a;
+drop table a_n1;
 
-drop table b;
+drop table b_n1;
 
-create table a(key int);
+create table a_n1(key int);
 
-insert into table a values 
(0),(1),(2),(2),(2),(2),(3),(NULL),(NULL),(NULL),(NULL),(NULL);
+insert into table a_n1 values 
(0),(1),(2),(2),(2),(2),(3),(NULL),(NULL),(NULL),(NULL),(NULL);
 
-create table b(key bigint);
+create table b_n1(key bigint);
 
-insert into table b values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL);
+insert into table b_n1 values (1),(2),(2),(3),(5),(5),(NULL),(NULL),(NULL);
 
-select * from a except select * from b;
+select * from a_n1 except select * from b_n1;
 
-(select * from a) minus select * from b union (select * from a) minus select * 
from b;
+(select * from a_n1) minus select * from b_n1 union (select * from a_n1) minus 
select * from b_n1;
 
-(select * from a) minus select * from b union all ((select * from a) minus 
select * from b);
+(select * from a_n1) minus select * from b_n1 union all ((select * from a_n1) 
minus select * from b_n1);
 
-(select * from a) minus select * from b union all (select * from a) minus all 
select * from b;
+(select * from a_n1) minus select * from b_n1 union all (select * from a_n1) 
minus all select * from b_n1;
 
-select * from a minus select * from b minus (select * from a minus select * 
from b);
+select * from a_n1 minus select * from b_n1 minus (select * from a_n1 minus 
select * from b_n1);
 
-(select * from a) minus (select * from b minus (select * from a minus select * 
from b));
+(select * from a_n1) minus (select * from b_n1 minus (select * from a_n1 minus 
select * from b_n1));
 
-drop table a;
+drop table a_n1;
 
-drop table b;
+drop table b_n1;
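
setop_no_distinct.q pins down the deduplication rules for set operations:
INTERSECT and EXCEPT without ALL return distinct rows, the ALL variants keep
duplicate multiplicity, and MINUS is Hive's alias for EXCEPT. A minimal sketch
with hypothetical tables, not from the commit:

-- Hypothetical sketch, not part of the commit.
SELECT * FROM a INTERSECT SELECT * FROM b;       -- distinct rows present in both
SELECT * FROM a INTERSECT ALL SELECT * FROM b;   -- keeps duplicate multiplicity
SELECT * FROM a MINUS SELECT * FROM b;           -- same as EXCEPT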
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/show_columns.q
--
diff --git a/ql/src/test/queries/clientpositive/show_columns.q 
b/ql/src/test/queries/clientpositive/show_columns.q
index fdd1ea8..aa45bae 100644
--- a/ql/src/test/queries/clientpositive/show_columns.q
+++ b/ql/src/test/queries/clientpositive/show_columns.q
@@ -8,32 +8,32 @@ SHOW COLUMNS from shcol_test;
 -- SHOW COLUMNS
 CREATE DATABASE test_db;
 USE test_db;
-CREATE TABLE foo(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a 
INT, b INT, c INT);
+CREATE TABLE foo_n7(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc 
INT, a INT, b INT, c INT);
 
 -- SHOW COLUMNS basic syntax tests
 USE test_db;
-SHOW COLUMNS from foo;
-SHOW COLUMNS in foo;
-SHOW COLUMNS in foo 'col*';
-SHOW COLUMNS in foo "col*";
-SHOW COLUMNS from foo 'col*';
-SHOW COLUMNS from foo "col*";
-SHOW COLUMNS from foo "col1|cola";
+SHOW COLUMNS from foo_n7;
+SHOW COLUMNS in foo_n7;
+SHOW COLUMNS in foo_n7 'col*';
+SHOW COLUMNS in foo_n7 "col*";
+SHOW COLUMNS from foo_n7 'col*';
+SHOW COLUMNS from foo_n7 "col*";
+SHOW COLUMNS from foo_n7 "col1|cola";
 
 -- SHOW COLUMNS from a database with a name that requires escaping
 CREATE DATABASE `database`;
 USE `database`;
-CREATE TABLE foo(col1 INT, col2 INT, col3 INT, cola INT, colb INT, colc INT, a 
INT, b INT, c INT);
-SHOW COLUMNS from 

[11/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q
index bff7cca..77476f2 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_all_complex.q
@@ -20,103 +20,103 @@ set hive.llap.io.enabled=false;
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various: STRUCT<...> --> STRUCT<...>
-CREATE TABLE part_change_various_various_struct1(insert_num int, s1 STRUCT<...>, b STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_change_various_various_struct1_n8(insert_num int, s1 STRUCT<...>, b STRING) PARTITIONED BY(part INT);
 
-CREATE TABLE complex_struct1_a_txt(insert_num int, s1 STRUCT<...>, b STRING)
+CREATE TABLE complex_struct1_a_txt_n8(insert_num int, s1 STRUCT<...>, b STRING)
 row format delimited fields terminated by '|'
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table 
complex_struct1_a_txt;
+load data local inpath 
'../../data/files/schema_evolution/complex_struct1_a.txt' overwrite into table 
complex_struct1_a_txt_n8;
 
-insert into table part_change_various_various_struct1 partition(part=1) select 
* from complex_struct1_a_txt;
+insert into table part_change_various_various_struct1_n8 partition(part=1) 
select * from complex_struct1_a_txt_n8;
 
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n8;
 
 -- Table-Non-Cascade CHANGE COLUMNS ...
-alter table part_change_various_various_struct1 replace columns (insert_num 
int, s1 STRUCT<...>, b STRING);
+alter table part_change_various_various_struct1_n8 replace columns (insert_num 
int, s1 STRUCT<...>, b STRING);
 
-CREATE TABLE complex_struct1_b_txt(insert_num int, s1 STRUCT<...>, b STRING)
+CREATE TABLE complex_struct1_b_txt_n8(insert_num int, s1 STRUCT<...>, b STRING)
 row format delimited fields terminated by '|'
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table 
complex_struct1_b_txt;
+load data local inpath 
'../../data/files/schema_evolution/complex_struct1_b.txt' overwrite into table 
complex_struct1_b_txt_n8;
 
-insert into table part_change_various_various_struct1 partition(part=2) select 
* from complex_struct1_b_txt;
+insert into table part_change_various_various_struct1_n8 partition(part=2) 
select * from complex_struct1_b_txt_n8;
 
-CREATE TABLE complex_struct1_c_txt(insert_num int, s1 STRUCT<...>, b STRING)
+CREATE TABLE complex_struct1_c_txt_n8(insert_num int, s1 STRUCT<...>, b STRING)
 row format delimited fields terminated by '|'
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table 
complex_struct1_c_txt;
+load data local inpath 
'../../data/files/schema_evolution/complex_struct1_c.txt' overwrite into table 
complex_struct1_c_txt_n8;
 
-insert into table part_change_various_various_struct1 partition(part=1) select 
* from complex_struct1_c_txt;
+insert into table part_change_various_various_struct1_n8 partition(part=1) 
select * from complex_struct1_c_txt_n8;
 
  explain vectorization detail
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n8;
 
-select insert_num,part,s1,b from part_change_various_various_struct1;
+select insert_num,part,s1,b from part_change_various_various_struct1_n8;
 
-drop table part_change_various_various_struct1;
+drop table part_change_various_various_struct1_n8;
 
 
 
 --
 -- SUBSECTION: ALTER TABLE ADD COLUMNS for Various --> Various: STRUCT
 --
-CREATE TABLE part_add_various_various_struct2(insert_num int, b STRING) 
PARTITIONED BY(part INT);
+CREATE TABLE part_add_various_various_struct2_n8(insert_num int, b STRING) 
PARTITIONED BY(part INT);
 
-insert into table part_add_various_various_struct2 partition(part=1)
+insert into table part_add_various_various_struct2_n8 partition(part=1)
 values(1, 'original'),
   (2, 'original');
 
-select insert_num,part,b from part_add_various_various_struct2;
+select insert_num,part,b from part_add_various_various_struct2_n8;
 
 -- Table-Non-Cascade ADD COLUMN ...
-alter table part_add_various_various_struct2 ADD columns (s2 STRUCT<...>);
+alter table part_add_various_various_struct2_n8 ADD columns (s2 STRUCT<...>);
 
-CREATE TABLE complex_struct2_a_txt(insert_num int, b STRING, s2 STRUCT<...>)
+CREATE TABLE complex_struct2_a_txt_n8(insert_num int, b STRING, s2 STRUCT<...>)
 row format delimited fields termi

[22/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/orc_llap_counters.q
--
diff --git a/ql/src/test/queries/clientpositive/orc_llap_counters.q 
b/ql/src/test/queries/clientpositive/orc_llap_counters.q
index 1136b55..9f8e3bb 100644
--- a/ql/src/test/queries/clientpositive/orc_llap_counters.q
+++ b/ql/src/test/queries/clientpositive/orc_llap_counters.q
@@ -8,7 +8,7 @@ SET hive.llap.io.enabled=true;
 SET hive.map.aggr=false;
 -- disabling map side aggregation as that can lead to different intermediate 
record counts
 
-CREATE TABLE staging(t tinyint,
+CREATE TABLE staging_n6(t tinyint,
si smallint,
i int,
b bigint,
@@ -22,10 +22,10 @@ CREATE TABLE staging(t tinyint,
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
-LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE 
staging_n6;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging_n6;
 
-CREATE TABLE orc_ppd_staging(t tinyint,
+CREATE TABLE orc_ppd_staging_n0(t tinyint,
si smallint,
i int,
b bigint,
@@ -40,14 +40,14 @@ CREATE TABLE orc_ppd_staging(t tinyint,
bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", 
"orc.bloom.filter.columns"="*");
 
-insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s 
as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging 
order by t, s;
+insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, 
cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from 
staging_n6 order by t, s;
 
 -- just to introduce a gap in min/max range for bloom filters. The dataset has 
contiguous values
 -- which makes it hard to test bloom filters
-insert into orc_ppd_staging select 
-10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa"
 from staging limit 1;
-insert into orc_ppd_staging select 
127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz"
 from staging limit 1;
+insert into orc_ppd_staging_n0 select 
-10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa"
 from staging_n6 limit 1;
+insert into orc_ppd_staging_n0 select 
127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz"
 from staging_n6 limit 1;
 
-CREATE TABLE orc_ppd(t tinyint,
+CREATE TABLE orc_ppd_n1(t tinyint,
si smallint,
i int,
b bigint,
@@ -62,9 +62,9 @@ CREATE TABLE orc_ppd(t tinyint,
bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", 
"orc.bloom.filter.columns"="*");
 
-insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as 
char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by 
t, s;
+insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as 
char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n0 order 
by t, s;
 
-describe formatted orc_ppd;
+describe formatted orc_ppd_n1;
 
 SET hive.fetch.task.conversion=none;
 SET 
hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
@@ -75,50 +75,50 @@ SET 
hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrint
 -- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 
0,4,119,0,0,244,19
 
 -- INPUT_RECORDS: 2100 (all row groups)
-select count(*) from orc_ppd;
+select count(*) from orc_ppd_n1;
 
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n1 where t > 127;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
-select count(*) from orc_ppd where t <=> 50;
-select count(*) from orc_ppd where t <=> 100;
+select count(*) from orc_ppd_n1 where t = 55;
+select count(*) from orc_ppd_n1 where t <=> 50;
+select count(*) from orc_ppd_n1 where t <=> 100;
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = "54";
+select count(*) from orc_ppd_n1 where t = "54";
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = -10.0;
+select count(*) from orc_ppd_n1 where t = -10.0;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = cast(53 as float);
-select count(*) from orc_ppd where t = cast(53 as double);
+select count(*) from orc_ppd_n1 where t = cast(53 as float);
+select count(*) from orc_ppd_n1 where t = cast(53 as double);
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t < 100;
+select count(*) from orc_ppd_n1 where t < 100;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd wh
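
The INPUT_RECORDS comments above record how many rows survive row-group elimination: with "orc.row.index.stride"="1000", each ORC row group carries min/max statistics (and bloom filters here) for 1000 rows, so a predicate that falls outside a group's [min, max] range skips that group entirely. A hedged sketch of checking the same behavior on one's own table (table name hypothetical):

CREATE TABLE ppd_demo (t TINYINT, s STRING)
STORED AS ORC
TBLPROPERTIES ("orc.row.index.stride"="1000", "orc.bloom.filter.columns"="*");

SET hive.optimize.index.filter=true;

-- t is a TINYINT, so t > 127 can never match: every row group's max is <= 127
-- and predicate pushdown should read 0 records.
SELECT count(*) FROM ppd_demo WHERE t > 127;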

[33/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/groupby_sort_1.q
--
diff --git a/ql/src/test/queries/clientpositive/groupby_sort_1.q 
b/ql/src/test/queries/clientpositive/groupby_sort_1.q
index 2255ede..46ec0be 100644
--- a/ql/src/test/queries/clientpositive/groupby_sort_1.q
+++ b/ql/src/test/queries/clientpositive/groupby_sort_1.q
@@ -6,58 +6,58 @@ set hive.map.groupby.sorted=true;
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20S)
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE T1(key STRING, val STRING)
+CREATE TABLE T1_n4(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/bucket_files/00_0' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/bucket_files/00_0' INTO TABLE 
T1_n4;
 
 -- perform an insert to make sure there are 2 files
-INSERT OVERWRITE TABLE T1 select key, val from T1;
+INSERT OVERWRITE TABLE T1_n4 select key, val from T1_n4;
 
-CREATE TABLE outputTbl1(key int, cnt int);
+CREATE TABLE outputTbl1_n2(key int, cnt int);
 
 -- The plan should be converted to a map-side group by if the group by key
 -- matches the sorted key
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n2
+SELECT key, count(1) FROM T1_n4 GROUP BY key;
 
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM T1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n2
+SELECT key, count(1) FROM T1_n4 GROUP BY key;
 
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n2;
 
-CREATE TABLE outputTbl2(key1 int, key2 string, cnt int);
+CREATE TABLE outputTbl2_n0(key1 int, key2 string, cnt int);
 
 -- no map-side group by even if the group by key is a superset of sorted key
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl2_n0
+SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val;
 
-INSERT OVERWRITE TABLE outputTbl2
-SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+INSERT OVERWRITE TABLE outputTbl2_n0
+SELECT key, val, count(1) FROM T1_n4 GROUP BY key, val;
 
-SELECT * FROM outputTbl2;
+SELECT * FROM outputTbl2_n0;
 
 -- It should work for sub-queries
 EXPLAIN EXTENDED 
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n2
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n4) subq1 GROUP BY key;
 
-INSERT OVERWRITE TABLE outputTbl1
-SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+INSERT OVERWRITE TABLE outputTbl1_n2
+SELECT key, count(1) FROM (SELECT key, val FROM T1_n4) subq1 GROUP BY key;
 
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n2;
 
 -- It should work for sub-queries with column aliases
 EXPLAIN EXTENDED
-INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+INSERT OVERWRITE TABLE outputTbl1_n2
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n4) subq1 GROUP BY 
k;
 
-INSERT OVERWRITE TABLE outputTbl1
-SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+INSERT OVERWRITE TABLE outputTbl1_n2
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1_n4) subq1 GROUP BY 
k;
 
-SELECT * FROM outputTbl1;
+SELECT * FROM outputTbl1_n2;
 
 CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
 
@@ -65,10 +65,10 @@ CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
 -- by a match to the sorted key
 EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key;
 
 INSERT OVERWRITE TABLE outputTbl3
-SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+SELECT 1, key, count(1) FROM T1_n4 GROUP BY 1, key;
 
 SELECT * FROM outputTbl3;
 
@@ -77,20 +77,20 @@ CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, 
cnt int);
 -- no map-side group by if the group by key contains a constant followed by 
another column
 EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+SELECT key, 1, val, count(1) FROM T1_n4 GROUP BY key, 1, val;
 
 INSERT OVERWRITE TABLE outputTbl4
-SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+SELECT key, 1, val, count(1) FROM T1_n4 GROUP BY key, 1, val;
 
 SELECT * FROM outputTbl4;
 
 -- no map-side group by if the group by key contains a function
 EXPLAIN EXTENDED 
 INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1;
 
 INSERT OVERWRITE TABLE outputTbl3
-SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+SELECT key, key + 1, count(1) FROM T1_n4 GROUP BY key, key + 1;
 
 SELECT * FROM outputTbl3;
 
@@ -99,104 +99,104 @@ SELECT * FROM outputT
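
The pattern above tests when hive.map.groupby.sorted=true lets a GROUP BY collapse into a map-side aggregation with no shuffle: the grouping key must match the table's CLUSTERED BY/SORTED BY key exactly, while supersets of the key, constants followed by another column, or function-of-key expressions fall back to a reduce-side plan. A minimal sketch with a hypothetical table:

SET hive.map.groupby.sorted=true;

CREATE TABLE sorted_demo (key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;

-- Key matches the sort key: eligible for a map-side group by.
EXPLAIN SELECT key, count(1) FROM sorted_demo GROUP BY key;

-- Superset of the sort key: still needs a reducer.
EXPLAIN SELECT key, val, count(1) FROM sorted_demo GROUP BY key, val;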

[49/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q
--
diff --git a/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q 
b/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q
index f94994a..72ebb17 100644
--- a/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q
+++ b/ql/src/test/queries/clientpositive/annotate_stats_join_pkfk.q
@@ -1,13 +1,13 @@
 set hive.stats.fetch.column.stats=true;
 
-drop table store_sales;
-drop table store;
+drop table store_sales_n0;
+drop table store_n0;
 drop table customer_address;
 
 -- s_store_sk is PK, ss_store_sk is FK
 -- ca_address_sk is PK, ss_addr_sk is FK
 
-create table store_sales
+create table store_sales_n0
 (
 ss_sold_date_sk   int,
 ss_sold_time_sk   int,
@@ -35,7 +35,7 @@ create table store_sales
 )
 row format delimited fields terminated by '|';
 
-create table store
+create table store_n0
 (
 s_store_skint,
 s_store_idstring,
@@ -121,44 +121,44 @@ create table customer_address
 )
 row format delimited fields terminated by '|';
 
-load data local inpath '../../data/files/store.txt' overwrite into table store;
+load data local inpath '../../data/files/store.txt' overwrite into table 
store_n0;
 load data local inpath '../../data/files/store.txt' overwrite into table 
store_bigint;
-load data local inpath '../../data/files/store_sales.txt' overwrite into table 
store_sales;
+load data local inpath '../../data/files/store_sales.txt' overwrite into table 
store_sales_n0;
 load data local inpath '../../data/files/customer_address.txt' overwrite into 
table customer_address;
 
-analyze table store compute statistics;
-analyze table store compute statistics for columns s_store_sk, s_floor_space;
+analyze table store_n0 compute statistics;
+analyze table store_n0 compute statistics for columns s_store_sk, 
s_floor_space;
 analyze table store_bigint compute statistics;
 analyze table store_bigint compute statistics for columns s_store_sk, 
s_floor_space;
-analyze table store_sales compute statistics;
-analyze table store_sales compute statistics for columns ss_store_sk, 
ss_addr_sk, ss_quantity;
+analyze table store_sales_n0 compute statistics;
+analyze table store_sales_n0 compute statistics for columns ss_store_sk, 
ss_addr_sk, ss_quantity;
 analyze table customer_address compute statistics;
 analyze table customer_address compute statistics for columns ca_address_sk;
 
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk 
= ss.ss_store_sk);
+explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on 
(s.s_store_sk = ss.ss_store_sk);
 
 -- widening cast: inferred PK-FK, thus same row count as previous query
-explain select s.s_store_sk from store_bigint s join store_sales ss on 
(s.s_store_sk = ss.ss_store_sk);
+explain select s.s_store_sk from store_bigint s join store_sales_n0 ss on 
(s.s_store_sk = ss.ss_store_sk);
 
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk 
= ss.ss_store_sk) where s.s_store_sk > 0;
+explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on 
(s.s_store_sk = ss.ss_store_sk) where s.s_store_sk > 0;
 
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk 
= ss.ss_store_sk) where s.s_company_id > 0 and ss.ss_quantity > 10;
+explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on 
(s.s_store_sk = ss.ss_store_sk) where s.s_company_id > 0 and ss.ss_quantity > 
10;
 
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk 
= ss.ss_store_sk) where s.s_floor_space > 0;
+explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on 
(s.s_store_sk = ss.ss_store_sk) where s.s_floor_space > 0;
 
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk 
= ss.ss_store_sk) where ss.ss_quantity > 10;
+explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on 
(s.s_store_sk = ss.ss_store_sk) where ss.ss_quantity > 10;
 
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk 
= ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk);
+explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on 
(s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = 
ss.ss_store_sk);
 
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk 
= ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where 
s.s_store_sk > 1000;
+explain select s.s_store_sk from store_n0 s join store_sales_n0 ss on 
(s.s_store_sk = ss.ss_store_sk) join store_n0 s1 on (s1.s_store_sk = 
ss.ss_store_sk) where s.s_store_sk > 1000;
 
-explain select s.s_store_sk from store s join store_sales ss on (s.s_store_sk 
= ss.ss_store_sk) join store s1 on (s1.s_store_sk = ss.ss_store_sk) where 
s.s_floor_space > 1000
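
The renames leave the test's intent unchanged: when column statistics mark s_store_sk as effectively unique, the optimizer infers a PK-FK relationship with ss_store_sk and estimates the join's output from the FK side rather than multiplying NDVs; the "widening cast" case checks that the inference still fires when the PK is a wider integer type. A sketch of the idea, with hypothetical tables and illustrative row counts:

SET hive.stats.fetch.column.stats=true;

CREATE TABLE store_demo (s_store_sk INT, s_floor_space INT);
CREATE TABLE sales_demo (ss_store_sk INT, ss_quantity INT);

ANALYZE TABLE store_demo COMPUTE STATISTICS FOR COLUMNS s_store_sk;
ANALYZE TABLE sales_demo COMPUTE STATISTICS FOR COLUMNS ss_store_sk;

-- Assuming ~12 store rows (s_store_sk unique) and ~1000 sales rows, PK-FK
-- inference estimates roughly 1000 joined rows (the FK side's cardinality),
-- scaled down by the selectivity of any filter on the PK side.
EXPLAIN
SELECT s.s_store_sk
FROM store_demo s JOIN sales_demo ss ON (s.s_store_sk = ss.ss_store_sk);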

[43/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
--
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q 
b/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
index 99bd780..9c351ea 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_annotate_stats_groupby.q
@@ -23,69 +23,69 @@ set hive.map.aggr.hash.percentmemory=0.0f;
 -- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
 -- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
 
-create table if not exists loc_staging (
+create table if not exists loc_staging_n1 (
   state string,
   locid int,
   zip bigint,
   year int
 ) row format delimited fields terminated by '|' stored as textfile;
 
-create table loc_orc like loc_staging;
-alter table loc_orc set fileformat orc;
+create table loc_orc_n1 like loc_staging_n1;
+alter table loc_orc_n1 set fileformat orc;
 
-load data local inpath '../../data/files/loc.txt' overwrite into table 
loc_staging;
+load data local inpath '../../data/files/loc.txt' overwrite into table 
loc_staging_n1;
 
-insert overwrite table loc_orc select * from loc_staging;
+insert overwrite table loc_orc_n1 select * from loc_staging_n1;
 
 -- numRows: 8 rawDataSize: 796
-explain select * from loc_orc;
+explain select * from loc_orc_n1;
 
 -- partial column stats
-analyze table loc_orc compute statistics for columns state;
+analyze table loc_orc_n1 compute statistics for columns state;
 
 -- inner group by: map - numRows: 8 reduce - numRows: 4
 -- outer group by: map - numRows: 4 reduce numRows: 2
 explain select a, c, min(b)
 from ( select state as a, locid as b, count(*) as c
-   from loc_orc
+   from loc_orc_n1
group by state,locid
  ) sq1
 group by a,c;
 
-analyze table loc_orc compute statistics for columns state,locid,year;
+analyze table loc_orc_n1 compute statistics for columns state,locid,year;
 
 -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 
8
 -- Case 9: column stats, NO grouping sets - cardinality = 2
-explain select year from loc_orc group by year;
+explain select year from loc_orc_n1 group by year;
 
 -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 
8
 -- Case 9: column stats, NO grouping sets - cardinality = 8
-explain select state,locid from loc_orc group by state,locid;
+explain select state,locid from loc_orc_n1 group by state,locid;
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
 -- Case 8: column stats, grouping sets - cardinality = 32
-explain select state,locid from loc_orc group by state,locid with cube;
+explain select state,locid from loc_orc_n1 group by state,locid with cube;
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
 -- Case 8: column stats, grouping sets - cardinality = 24
-explain select state,locid from loc_orc group by state,locid with rollup;
-explain select state,locid from loc_orc group by rollup (state,locid);
+explain select state,locid from loc_orc_n1 group by state,locid with rollup;
+explain select state,locid from loc_orc_n1 group by rollup (state,locid);
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
 -- Case 8: column stats, grouping sets - cardinality = 8
-explain select state,locid from loc_orc group by state,locid grouping 
sets((state));
+explain select state,locid from loc_orc_n1 group by state,locid grouping 
sets((state));
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
 -- Case 8: column stats, grouping sets - cardinality = 16
-explain select state,locid from loc_orc group by state,locid grouping 
sets((state),(locid));
+explain select state,locid from loc_orc_n1 group by state,locid grouping 
sets((state),(locid));
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
 -- Case 8: column stats, grouping sets - cardinality = 24
-explain select state,locid from loc_orc group by state,locid grouping 
sets((state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping 
sets((state),(locid),());
 
 -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
 -- Case 8: column stats, grouping sets - cardinality = 32
-explain select state,locid from loc_orc group by state,locid grouping 
sets((state,locid),(state),(locid),());
+explain select state,locid from loc_orc_n1 group by state,locid grouping 
sets((state,locid),(state),(locid),());
 
 set hive.map.aggr.hash.percentmemory=0.5f;
 set mapred.max.split.size=80;
@@ -93,52 +93,52 @@ set mapred.max.split.size=80;
 
 -- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
 -- Case 9: column stats, NO grouping sets -
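
The Case numbers above refer to the cardinality rules listed in the file's header comment, and the arithmetic is easy to reproduce. With an 8-row table and no hash aggregation, the map side emits numRows * sizeOfGroupingSet rows: CUBE on (state, locid) expands to 4 grouping sets, hence 8 * 4 = 32; ROLLUP expands to 3, hence 24; a single explicit set gives 8. A sketch with a hypothetical table:

CREATE TABLE loc_demo (state STRING, locid INT, zip BIGINT, year INT);

-- 4 grouping sets: (state,locid), (state), (locid), () => 8 * 4 = 32
EXPLAIN SELECT state, locid FROM loc_demo GROUP BY state, locid WITH CUBE;

-- 3 grouping sets: (state,locid), (state), () => 8 * 3 = 24
EXPLAIN SELECT state, locid FROM loc_demo GROUP BY state, locid WITH ROLLUP;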

[47/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q
--
diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q 
b/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q
index d9066a4..3725df3 100644
--- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q
+++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q
@@ -3,26 +3,26 @@ set hive.strict.checks.bucketing=false;
 set hive.mapred.mode=nonstrict;
 -- small 1 part, 2 bucket & big 2 part, 4 bucket
 
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY (KEY) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/small/00_0' 
INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/small/01_0' 
INTO TABLE bucket_small partition(ds='2008-04-08');
+CREATE TABLE bucket_small_n11 (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY (KEY) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/small/00_0' 
INTO TABLE bucket_small_n11 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/small/01_0' 
INTO TABLE bucket_small_n11 partition(ds='2008-04-08');
 
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) 
CLUSTERED BY (key) SORTED BY(KEY) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/00_0' 
INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/01_0' 
INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/02_0' 
INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/03_0' 
INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n11 (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY(KEY) INTO 4 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/00_0' 
INTO TABLE bucket_big_n11 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/01_0' 
INTO TABLE bucket_big_n11 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/02_0' 
INTO TABLE bucket_big_n11 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/03_0' 
INTO TABLE bucket_big_n11 partition(ds='2008-04-08');
 
-load data local inpath '../../data/files/auto_sortmerge_join/big/00_0' 
INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/01_0' 
INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/02_0' 
INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../../data/files/auto_sortmerge_join/big/03_0' 
INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/00_0' 
INTO TABLE bucket_big_n11 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/01_0' 
INTO TABLE bucket_big_n11 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/02_0' 
INTO TABLE bucket_big_n11 partition(ds='2008-04-09');
+load data local inpath '../../data/files/auto_sortmerge_join/big/03_0' 
INTO TABLE bucket_big_n11 partition(ds='2008-04-09');
 
 set hive.auto.convert.join=true;
 -- disable hash joins
 set hive.auto.convert.join.noconditionaltask.size=10;
-explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON 
a.key = b.key;
-select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b 
ON a.key = b.key;
+select count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b ON a.key = b.key;
 
 set hive.auto.convert.sortmerge.join=true;
 set hive.optimize.bucketmapjoin=true;
@@ -31,13 +31,13 @@ set hive.optimize.bucketmapjoin.sortedmerge=true;
 -- The tables are only bucketed and not sorted, so the join should not be converted
 -- Currently, a join is only converted to a sort-merge join without a hint;
 -- automatic conversion to a bucketized mapjoin is not done
-explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON 
a.key = b.key;
-select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
+explain extended select count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b 
ON a.key = b.key;
+select count(*) FROM bucket_small_n11 a JOIN bucket_big_n11 b O
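
The flow above first shrinks hive.auto.convert.join.noconditionaltask.size so a hash join is off the table, then enables the sort-merge-bucket path; as the comment notes, automatic conversion only applies when both sides are bucketed and sorted on the join key. A sketch of the knobs involved, with hypothetical table names:

SET hive.auto.convert.join=true;
SET hive.auto.convert.join.noconditionaltask.size=10;  -- too small for a hash join

SET hive.auto.convert.sortmerge.join=true;
SET hive.optimize.bucketmapjoin=true;
SET hive.optimize.bucketmapjoin.sortedmerge=true;

-- Converts to an SMB join only if small_demo and big_demo are both
-- CLUSTERED BY and SORTED BY the join key.
EXPLAIN SELECT count(*) FROM small_demo a JOIN big_demo b ON a.key = b.key;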

[50/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 17d6a95..4590b7a 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -34,7 +34,9 @@ disabled.query.files=ql_rewrite_gbtoidx.q,\
   stats_filemetadata.q,\
   cbo_rp_insert.q,\
   cbo_rp_lineage2.q,\
-  union_stats.q
+  union_stats.q,\
+  sample2.q,\
+  sample4.q
 
 
 # NOTE: Add tests to minitez only if it is very

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/acid_nullscan.q
--
diff --git a/ql/src/test/queries/clientpositive/acid_nullscan.q 
b/ql/src/test/queries/clientpositive/acid_nullscan.q
index 3c71242..0b36708 100644
--- a/ql/src/test/queries/clientpositive/acid_nullscan.q
+++ b/ql/src/test/queries/clientpositive/acid_nullscan.q
@@ -6,13 +6,13 @@ set 
hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 
-CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS 
STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table acid_vectorized select cint, cstring1 from alltypesorc where 
cint is not null order by cint limit 10;
-insert into table acid_vectorized values (1, 'bar');
+CREATE TABLE acid_vectorized_n1(a INT, b STRING) CLUSTERED BY(a) INTO 2 
BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
+insert into table acid_vectorized_n1 select cint, cstring1 from alltypesorc 
where cint is not null order by cint limit 10;
+insert into table acid_vectorized_n1 values (1, 'bar');
 
 
 explain extended
-select sum(a) from acid_vectorized where false;
+select sum(a) from acid_vectorized_n1 where false;
 
-select sum(a) from acid_vectorized where false;
+select sum(a) from acid_vectorized_n1 where false;
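
acid_nullscan.q checks the null-scan optimization against a transactional table: a provably false WHERE clause lets the planner replace the ORC delta files with a one-null-row input, so the aggregate runs without touching table data. A sketch with a hypothetical table; like the test, it assumes concurrency support and DbTxnManager are enabled:

-- Assumes, as in the test setup:
--   set hive.support.concurrency=true;
--   set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
CREATE TABLE acid_demo (a INT, b STRING)
CLUSTERED BY (a) INTO 2 BUCKETS
STORED AS ORC TBLPROPERTIES ('transactional'='true');

-- WHERE false is folded at plan time; the explain output should show a
-- metadata-only input (no ORC splits) feeding the aggregation.
EXPLAIN EXTENDED SELECT sum(a) FROM acid_demo WHERE false;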
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/acid_vectorization_original.q
--
diff --git a/ql/src/test/queries/clientpositive/acid_vectorization_original.q 
b/ql/src/test/queries/clientpositive/acid_vectorization_original.q
index 0b91f69..5082aed 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_original.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_original.q
@@ -26,7 +26,7 @@ CREATE TEMPORARY FUNCTION runWorker AS 
'org.apache.hadoop.hive.ql.udf.UDFRunWork
 create table mydual(a int);
 insert into mydual values(1);
 
-CREATE TABLE over10k(t tinyint,
+CREATE TABLE over10k_n2(t tinyint,
si smallint,
i int,
b bigint,
@@ -41,7 +41,7 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
 --oddly this has  rows not > 10K
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over10k;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE 
over10k_n2;
 
 CREATE TABLE over10k_orc_bucketed(t tinyint,
si smallint,
@@ -56,14 +56,14 @@ CREATE TABLE over10k_orc_bucketed(t tinyint,
bin binary) CLUSTERED BY(si) INTO 4 BUCKETS STORED AS ORC;
 
 -- this produces about 250 distinct values across all 4 equivalence classes
-select distinct si, si%4 from over10k order by si;
+select distinct si, si%4 from over10k_n2 order by si;
 
--- explain insert into over10k_orc_bucketed select * from over10k;
-insert into over10k_orc_bucketed select * from over10k;
+-- explain insert into over10k_orc_bucketed select * from over10k_n2;
+insert into over10k_orc_bucketed select * from over10k_n2;
 
 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;
 -- create copy_N files
-insert into over10k_orc_bucketed select * from over10k;
+insert into over10k_orc_bucketed select * from over10k_n2;
 
 -- the output of this is masked in .out - it is visible in .orig
 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/over10k_orc_bucketed;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
--
diff --git 
a/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q 
b/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
index 00a3ab2..50fb3e9 100644
--- a/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
+++ b/ql/src/test/queries/clientpositive/acid_vectorization_original_tez.q
@@ -23,10 +23,10 @@ set hive.explain.user=false;
 
 
 CREATE TEMPORARY FUNCTION runWorker AS 
'org.apache.hadoop.hive.ql.udf.UDFRunWorker';
-create table mydual(a i

[21/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/orc_ppd_date.q
--
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_date.q 
b/ql/src/test/queries/clientpositive/orc_ppd_date.q
index 32767d8..2069fbf 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_date.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_date.q
@@ -3,99 +3,99 @@ SET 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 
-create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) 
stored as orc tblproperties("orc.stripe.size"="16777216"); 
+create table newtypesorc_n3(c char(10), v varchar(10), d decimal(5,3), da 
date) stored as orc tblproperties("orc.stripe.size"="16777216"); 
 
-insert overwrite table newtypesorc select * from (select cast("apple" as 
char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from 
src src1 union all select cast("hello" as char(10)), cast("world" as 
varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+insert overwrite table newtypesorc_n3 select * from (select cast("apple" as 
char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from 
src src1 union all select cast("hello" as char(10)), cast("world" as 
varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
 
 -- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN 
tests)
-select sum(hash(*)) from newtypesorc where da='1970-02-20';
+select sum(hash(*)) from newtypesorc_n3 where da='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da='1970-02-20';
+select sum(hash(*)) from newtypesorc_n3 where da='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da= date '1970-02-20';
+select sum(hash(*)) from newtypesorc_n3 where da= date '1970-02-20';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da=cast('1970-02-20' as date);
+select sum(hash(*)) from newtypesorc_n3 where da=cast('1970-02-20' as date);
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da=cast('1970-02-20' as date);
+select sum(hash(*)) from newtypesorc_n3 where da=cast('1970-02-20' as date);
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da=cast('1970-02-20' as 
varchar(20));
+select sum(hash(*)) from newtypesorc_n3 where da=cast('1970-02-20' as 
varchar(20));
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da=cast('1970-02-20' as 
varchar(20));
+select sum(hash(*)) from newtypesorc_n3 where da=cast('1970-02-20' as 
varchar(20));
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da!='1970-02-20';
+select sum(hash(*)) from newtypesorc_n3 where da!='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da!='1970-02-20';
+select sum(hash(*)) from newtypesorc_n3 where da!='1970-02-20';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da<'1970-02-27';
+select sum(hash(*)) from newtypesorc_n3 where da<'1970-02-27';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da<'1970-02-27';
+select sum(hash(*)) from newtypesorc_n3 where da<'1970-02-27';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da<'1970-02-29';
+select sum(hash(*)) from newtypesorc_n3 where da<'1970-02-29';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da<'1970-02-29';
+select sum(hash(*)) from newtypesorc_n3 where da<'1970-02-29';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da<'1970-02-15';
+select sum(hash(*)) from newtypesorc_n3 where da<'1970-02-15';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da<'1970-02-15';
+select sum(hash(*)) from newtypesorc_n3 where da<'1970-02-15';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da<='1970-02-20';
+select sum(hash(*)) from newtypesorc_n3 where da<='1970-02-20';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da<='1970-02-20';
+select sum(hash(*)) from newtypesorc_n3 where da<='1970-02-20';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da<='1970-02-27';
+select sum(hash(*)) from newtypesorc_n3 where da<='1970-02-27';
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where da<='1970-02-27';
+select sum(hash(*)) from newtypesorc_n3 where da<='1970-02-27';
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where da in (cast('1970-02-21' as date), 
cast('1970-02-27' as date));
+select sum(hash(*
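
The point of the toggling above is that a date predicate should prune ORC stripes the same way no matter how the literal is spelled: a bare string, a DATE literal, or an explicit cast; the hive.optimize.index.filter=false runs serve as the unpruned baseline. A compact sketch (table name hypothetical):

CREATE TABLE dates_demo (da DATE) STORED AS ORC;

SET hive.optimize.index.filter=true;

-- All three forms normalize to the same DATE comparison and should be
-- pushed down into the ORC reader identically.
SELECT sum(hash(*)) FROM dates_demo WHERE da = '1970-02-20';
SELECT sum(hash(*)) FROM dates_demo WHERE da = DATE '1970-02-20';
SELECT sum(hash(*)) FROM dates_demo WHERE da = CAST('1970-02-20' AS DATE);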

[06/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
index 2448268..7688f2b 100644
--- 
a/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
+++ 
b/ql/src/test/queries/clientpositive/schema_evol_text_vec_part_all_primitive.q
@@ -23,13 +23,13 @@ set hive.llap.io.enabled=false;
 --
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n44(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n44;
 
-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
+CREATE TABLE schema_evolution_data_2_n16(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into 
table schema_evolution_data_2;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into 
table schema_evolution_data_2_n16;
 
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various:
@@ -39,7 +39,7 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_
 --(BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, 
STRING, CHAR, VARCHAR, TIMESTAMP) --> INT–2147483648 to 2147483647 and
 --(BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, 
STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT   -9223372036854775808 to 
9223372036854775807
 --
-CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int,
+CREATE TABLE part_change_various_various_boolean_to_bigint_n8(insert_num int,
   c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, 
c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP,
   c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 
DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 
TIMESTAMP,
   c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 
DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 
TIMESTAMP,
@@ -47,18 +47,18 @@ CREATE TABLE 
part_change_various_various_boolean_to_bigint(insert_num int,
   c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 
DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 
TIMESTAMP,
   b STRING) PARTITIONED BY(part INT);
 
-insert into table part_change_various_various_boolean_to_bigint 
partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_boolean_to_bigint_n8 
partition(part=1) SELECT insert_num,
  tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, 
boolean_str, timestamp1,
  boolean1, smallint1, int1, bigint1, float1, double
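
The wide column list above is a conversion matrix: each partition keeps data written under the old physical types, and after the ALTER every one of them must coerce to the new declared INT/BIGINT columns at read time. The same mechanism in miniature, with a hypothetical one-column table:

CREATE TABLE prim_demo (insert_num INT, c1 SMALLINT, b STRING)
PARTITIONED BY (part INT)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE;

-- 100S is a SMALLINT literal.
INSERT INTO prim_demo PARTITION (part=1) VALUES (1, 100S, 'original');

-- Widen c1 without CASCADE: partition 1 still stores SMALLINT text but
-- must read back as INT.
ALTER TABLE prim_demo CHANGE COLUMN c1 c1 INT;

SELECT insert_num, part, c1, b FROM prim_demo;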

[24/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
--
diff --git 
a/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q 
b/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
index 692c414..ed75c57 100644
--- a/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
+++ b/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
@@ -2,7 +2,7 @@ set hive.stats.column.autogather=false;
 set hive.stats.dbclass=fs;
 set hive.compute.query.using.stats=true;
 set hive.explain.user=false;
-create table over10k(
+create table over10k_n23(
t tinyint,
si smallint,
i int,
@@ -17,9 +17,9 @@ create table over10k(
row format delimited
fields terminated by '|';
 
-load data local inpath '../../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k_n23;
 
-create table stats_tbl_part(
+create table stats_tbl_part_n0(
t tinyint,
si smallint,
i int,
@@ -33,22 +33,22 @@ create table stats_tbl_part(
bin binary) partitioned by (dt int);
 
 
-from over10k 
-insert overwrite table stats_tbl_part partition (dt=2010) select 
t,si,i,b,f,d,bo,s,ts,`dec`,bin where t>0 and t<30 
-insert overwrite table stats_tbl_part partition (dt=2014) select 
t,si,i,b,f,d,bo,s,ts,`dec`,bin where t > 30 and t<60;
+from over10k_n23 
+insert overwrite table stats_tbl_part_n0 partition (dt=2010) select 
t,si,i,b,f,d,bo,s,ts,`dec`,bin where t>0 and t<30 
+insert overwrite table stats_tbl_part_n0 partition (dt=2014) select 
t,si,i,b,f,d,bo,s,ts,`dec`,bin where t > 30 and t<60;
 
-analyze table stats_tbl_part partition(dt) compute statistics;
-analyze table stats_tbl_part partition(dt=2010) compute statistics for columns 
t,si,i,b,f,d,bo,s,bin;
-analyze table stats_tbl_part partition(dt=2014) compute statistics for columns 
t,si,i,b,f,d,bo,s,bin;
+analyze table stats_tbl_part_n0 partition(dt) compute statistics;
+analyze table stats_tbl_part_n0 partition(dt=2010) compute statistics for 
columns t,si,i,b,f,d,bo,s,bin;
+analyze table stats_tbl_part_n0 partition(dt=2014) compute statistics for 
columns t,si,i,b,f,d,bo,s,bin;
 
 explain 
-select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), 
max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010;
-select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), 
max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010;
+select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), 
max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt = 2010;
+select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), 
max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt = 2010;
 explain 
-select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), 
count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
-select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), 
count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
+select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), 
count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt > 
2010;
+select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), 
count(si), max(i), min(b), max(f), min(d) from stats_tbl_part_n0 where dt > 
2010;
 
-select count(*) from stats_tbl_part;
-select count(*)/2 from stats_tbl_part;
-drop table stats_tbl_part;
+select count(*) from stats_tbl_part_n0;
+select count(*)/2 from stats_tbl_part_n0;
+drop table stats_tbl_part_n0;
 set hive.compute.query.using.stats=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/metadataonly1.q
--
diff --git a/ql/src/test/queries/clientpositive/metadataonly1.q 
b/ql/src/test/queries/clientpositive/metadataonly1.q
index 409109a..2c88dcd 100644
--- a/ql/src/test/queries/clientpositive/metadataonly1.q
+++ b/ql/src/test/queries/clientpositive/metadataonly1.q
@@ -1,47 +1,47 @@
 set hive.mapred.mode=nonstrict;
 set hive.optimize.metadataonly=true;
-CREATE TABLE TEST1(A INT, B DOUBLE) partitioned by (ds string);
-explain extended select max(ds) from TEST1;
-select max(ds) from TEST1;
+CREATE TABLE TEST1_n12(A INT, B DOUBLE) partitioned by (ds string);
+explain extended select max(ds) from TEST1_n12;
+select max(ds) from TEST1_n12;
 
-alter table TEST1 add partition (ds='1');
-explain extended select max(ds) from TEST1;
-select max(ds) from TEST1;
+alter table TEST1_n12 add partition (ds='1');
+explain extended select max(ds) from TEST1_n12;
+select max(ds) from TEST1_n12;
 
-explain extended select c
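
metadataonly1.q covers the related hive.optimize.metadataonly rewrite: an aggregate that references only partition columns can be computed from partition names alone, which is why the test re-runs max(ds) after adding an empty partition. In sketch form, with hypothetical names:

SET hive.optimize.metadataonly=true;

CREATE TABLE meta_demo (a INT, b DOUBLE) PARTITIONED BY (ds STRING);

SELECT max(ds) FROM meta_demo;   -- no partitions yet: returns NULL

ALTER TABLE meta_demo ADD PARTITION (ds='1');

-- The new partition holds no data, yet max(ds) now returns '1',
-- computed purely from partition metadata.
SELECT max(ds) FROM meta_demo;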

[16/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
index 71ab2e5..5e617e3 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
@@ -23,9 +23,9 @@ set hive.llap.io.encode.enabled=true;
 -- Instead just one explain vectorization only detail
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n4(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n4;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -34,48 +34,48 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING) 
clustered by (a) into 2 buckets STORED AS ORC  TBLPROPERTIES 
('transactional'='true');
+CREATE TABLE table_add_int_permute_select_n0(insert_num int, a INT, b STRING) 
clustered by (a) into 2 buckets STORED AS ORC  TBLPROPERTIES 
('transactional'='true');
 
-insert into table table_add_int_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n0 SELECT insert_num, int1, 
'original' FROM schema_evolution_data_n4;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n0 add columns(c int);
 
-insert into table table_add_int_permute_select VALUES (111, 8, 'new', 
8);
+insert into table table_add_int_permute_select_n0 VALUES (111, 8, 'new', 
8);
 
 explain vectorization only detail
-select insert_num,a,b,c from table_add_int_permute_select;
+select insert_num,a,b,c from table_add_int_permute_select_n0;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n0;
+select insert_num,a,b,c from table_add_int_permute_select_n0;
+select insert_num,c from table_add_int_permute_select_n0;
 
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n0;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b 
STRING)  clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES 
('transactional'='true');
+CREATE TABLE table_add_int_string_permute_select_n0(insert_num int, a INT, b 
STRING)  clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES 
('transactional'='true');
 
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n0 SELECT insert_num, 
int1, 'original' FROM schema_evolution_data_n4;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n0 add columns(c int, d 
string);
 
-insert into table table_add_int_string_permute_select VALUES (111, 8, 
'new', 8, 'filler');
+insert into table table_add_int_string_permute_select_n0 VALUES (111, 8, 
'new', 8, 'filler');
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert
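
The SELECT permutations above verify NULL defaulting on a transactional ORC table: rows written before ALTER TABLE ... ADD COLUMNS have no physical data for the new column, and the vectorized, LLAP-encoded reader must synthesize NULLs for them. The core of the check, with a hypothetical table (the ACID settings from the test header are assumed):

CREATE TABLE add_col_demo (insert_num INT, a INT, b STRING)
CLUSTERED BY (a) INTO 2 BUCKETS
STORED AS ORC TBLPROPERTIES ('transactional'='true');

INSERT INTO add_col_demo VALUES (101, 1, 'original');

ALTER TABLE add_col_demo ADD COLUMNS (c INT);

INSERT INTO add_col_demo VALUES (111, 8, 'new', 8);

-- The pre-ALTER row comes back with c = NULL; the post-ALTER row with c = 8.
SELECT insert_num, a, b, c FROM add_col_demo;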

[34/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/groupby8.q
--
diff --git a/ql/src/test/queries/clientpositive/groupby8.q 
b/ql/src/test/queries/clientpositive/groupby8.q
index e73607f..579b6cb 100644
--- a/ql/src/test/queries/clientpositive/groupby8.q
+++ b/ql/src/test/queries/clientpositive/groupby8.q
@@ -3,31 +3,31 @@ set hive.groupby.skewindata=true;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n71(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n15(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n71 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n15 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n71 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n15 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n71.* FROM DEST1_n71;
+SELECT DEST2_n15.* FROM DEST2_n15;
 
 set hive.multigroupby.singlereducer=false;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n71 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n15 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n71 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n15 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n71.* FROM DEST1_n71;
+SELECT DEST2_n15.* FROM DEST2_n15;
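
groupby8.q pairs two features: a multi-insert FROM block that feeds two COUNT(DISTINCT) aggregations from a single read of the source, and hive.groupby.skewindata=true, which splits each group-by into two stages to spread skewed keys; the second half of the test then flips hive.multigroupby.singlereducer to force separate reducers per branch. The multi-insert shape, sketched with hypothetical tables:

SET hive.groupby.skewindata=true;

CREATE TABLE src_demo (key STRING, value STRING);
CREATE TABLE dest_a (key INT, value STRING);
CREATE TABLE dest_b (key INT, value STRING);

-- One scan of src_demo feeds both inserts; each branch aggregates independently.
FROM src_demo
INSERT OVERWRITE TABLE dest_a
  SELECT key, count(DISTINCT substr(value, 5)) GROUP BY key
INSERT OVERWRITE TABLE dest_b
  SELECT key, count(DISTINCT substr(value, 5)) GROUP BY key;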

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/groupby8_map.q
--
diff --git a/ql/src/test/queries/clientpositive/groupby8_map.q 
b/ql/src/test/queries/clientpositive/groupby8_map.q
index ad537e1..8481020 100644
--- a/ql/src/test/queries/clientpositive/groupby8_map.q
+++ b/ql/src/test/queries/clientpositive/groupby8_map.q
@@ -4,18 +4,18 @@ set mapred.reduce.tasks=31;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n136(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n35(key INT, value STRING) STORED AS TEXTFILE;
 
 EXPLAIN
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n136 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n35 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
 FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+INSERT OVERWRITE TABLE DEST1_n136 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2_n35 SELECT SRC.key, COUNT(DISTINCT 
SUBSTR(SRC.value,5)) GROUP BY SRC.key;
 
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1_n136.* FROM DEST1_n136;
+SELECT DEST2_n35.* FROM DEST2_n35;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/groupby8_map_skew.q
--
diff --git a/ql/src/test/queries/clientpositive/groupby8_map_skew.q 
b/ql/src/test/queries/clientpositive/groupby8_map_skew.q
index e89ad95..607d58a 100644
--- a/ql/src/test/queries/clientpositive/groupby8_map_skew.q
+++ b/ql/src/test/queries/clientpositive/groupby8_map_skew.q
@@ -4,1

[30/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/join46.q
--
diff --git a/ql/src/test/queries/clientpositive/join46.q 
b/ql/src/test/queries/clientpositive/join46.q
index a661c0f..f40acd4 100644
--- a/ql/src/test/queries/clientpositive/join46.q
+++ b/ql/src/test/queries/clientpositive/join46.q
@@ -1,275 +1,275 @@
 set hive.strict.checks.cartesian.product=false;
 set hive.join.emit.interval=2;
 
-CREATE TABLE test1 (key INT, value INT, col_1 STRING);
-INSERT INTO test1 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
+CREATE TABLE test1_n2 (key INT, value INT, col_1 STRING);
+INSERT INTO test1_n2 VALUES (NULL, NULL, 'None'), (98, NULL, 'None'),
 (99, 0, 'Alice'), (99, 2, 'Mat'), (100, 1, 'Bob'), (101, 2, 'Car');
 
-CREATE TABLE test2 (key INT, value INT, col_2 STRING);
-INSERT INTO test2 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
+CREATE TABLE test2_n0 (key INT, value INT, col_2 STRING);
+INSERT INTO test2_n0 VALUES (102, 2, 'Del'), (103, 2, 'Ema'),
 (104, 3, 'Fli'), (105, NULL, 'None');
 
 
 -- Basic outer join
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value);
 
 -- Conjunction with pred on multiple inputs and single inputs (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  AND test1.key between 100 and 102
-  AND test2.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+  AND test1_n2.key between 100 and 102
+  AND test2_n0.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  AND test1.key between 100 and 102
-  AND test2.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+  AND test1_n2.key between 100 and 102
+  AND test2_n0.key between 100 and 102);
 
 -- Conjunction with pred on single inputs (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102
-  AND test2.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102
+  AND test2_n0.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102
-  AND test2.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102
+  AND test2_n0.key between 100 and 102);
 
 -- Conjunction with pred on multiple inputs and none (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value AND true);
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value AND true);
 
 SELECT *
-FROM test1 RIGHT OUTER JOIN test2
-ON (test1.value=test2.value AND true);
+FROM test1_n2 RIGHT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value AND true);
 
 -- Condition on one input (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.key between 100 and 102);
 
 -- Disjunction with pred on multiple inputs and single inputs (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test1.key between 100 and 102
-  OR test2.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+  OR test1_n2.key between 100 and 102
+  OR test2_n0.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test1.key between 100 and 102
-  OR test2.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+  OR test1_n2.key between 100 and 102
+  OR test2_n0.key between 100 and 102);
 
 -- Disjunction with pred on multiple inputs and left input (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test1.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+  OR test1_n2.key between 100 and 102);
 
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test1.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
+ON (test1_n2.value=test2_n0.value
+  OR test1_n2.key between 100 and 102);
 
 -- Disjunction with pred on multiple inputs and right input (left outer join)
 EXPLAIN
 SELECT *
-FROM test1 LEFT OUTER JOIN test2
-ON (test1.value=test2.value
-  OR test2.key between 100 and 102);
+FROM test1_n2 LEFT OUTER JOIN test2_n0
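
For context, the rename in this patch is purely mechanical: every table a
q-file creates gets a per-file numeric suffix (_n0, _n1, ...) so that q-files
executed in the same batch, sharing one metastore and the default database, no
longer collide on CREATE TABLE. A minimal sketch of the pattern, with
illustrative names (t, t_n0 and t_n1 are not taken from the patch):

    -- Before: two q-files in the same batch both run this and collide.
    CREATE TABLE t (key INT, value STRING);

    -- After: each file owns a uniquely suffixed copy...
    CREATE TABLE t_n0 (key INT, value STRING);   -- file A
    CREATE TABLE t_n1 (key INT, value STRING);   -- file B
    -- ...and every later reference in that file is rewritten to match.
    SELECT t_n0.key FROM t_n0;
    DROP TABLE t_n0;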

[56/58] [abbrv] hive git commit: HIVE-19677: Disable sample6.q (Jesus Camacho Rodriguez, reviewed by Zoltan Haindrich)

2018-05-24 Thread jcamacho
HIVE-19677: Disable sample6.q (Jesus Camacho Rodriguez, reviewed by Zoltan 
Haindrich)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4c80bae7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4c80bae7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4c80bae7

Branch: refs/heads/branch-3
Commit: 4c80bae783146c4349957220724511ddbacb3a5c
Parents: 85c3359
Author: Jesus Camacho Rodriguez 
Authored: Wed May 23 10:21:10 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 17:56:03 2018 -0700

--
 itests/src/test/resources/testconfiguration.properties | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4c80bae7/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 4590b7a..67b2441 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -36,7 +36,8 @@ disabled.query.files=ql_rewrite_gbtoidx.q,\
   cbo_rp_lineage2.q,\
   union_stats.q,\
   sample2.q,\
-  sample4.q
+  sample4.q,\
+  sample6.q
 
 
 # NOTE: Add tests to minitez only if it is very



[12/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q
--
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q
index 592b400..c9e27be 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_table.q
@@ -17,9 +17,9 @@ set hive.llap.io.enabled=false;
 -- FILE VARIATION: ORC, Non-Vectorized, MapWork, Table
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n36(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n36;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -28,51 +28,51 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_permute_select(insert_num int, a INT, b STRING);
+CREATE TABLE table_add_int_permute_select_n10(insert_num int, a INT, b STRING);
 
-insert into table table_add_int_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_permute_select_n10 SELECT insert_num, int1, 
'original' FROM schema_evolution_data_n36;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_permute_select add columns(c int);
+alter table table_add_int_permute_select_n10 add columns(c int);
 
-insert into table table_add_int_permute_select VALUES (111, 8, 'new', 
8);
+insert into table table_add_int_permute_select_n10 VALUES (111, 8, 'new', 
8);
 
 explain vectorization detail
-select insert_num,a,b from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n10;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_permute_select;
-select insert_num,a,b,c from table_add_int_permute_select;
-select insert_num,c from table_add_int_permute_select;
+select insert_num,a,b from table_add_int_permute_select_n10;
+select insert_num,a,b,c from table_add_int_permute_select_n10;
+select insert_num,c from table_add_int_permute_select_n10;
 
-drop table table_add_int_permute_select;
+drop table table_add_int_permute_select_n10;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE table_add_int_string_permute_select(insert_num int, a INT, b 
STRING);
+CREATE TABLE table_add_int_string_permute_select_n10(insert_num int, a INT, b 
STRING);
 
-insert into table table_add_int_string_permute_select SELECT insert_num, int1, 
'original' FROM schema_evolution_data;
+insert into table table_add_int_string_permute_select_n10 SELECT insert_num, 
int1, 'original' FROM schema_evolution_data_n36;
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table table_add_int_string_permute_select add columns(c int, d string);
+alter table table_add_int_string_permute_select_n10 add columns(c int, d 
string);
 
-insert into table table_add_int_string_permute_select VALUES (111, 8, 
'new', 8, 'filler');
+insert into table table_add_int_string_permute_select_n10 VALUES (111, 8, 
'new', 8, 'filler');
 
 explain vectorization detail
-select insert_num,a,b from table_add_int_string_permute_select;
+select insert_num,a,b from table_add_int_string_permute_select_n10;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,a,b from table_add_int_string_permute_select;
-select insert_num,a,b,c from table_add_int_string_permute_select;
-select insert_num,a,b,c,d from table_add_int_string_permute_select;
-select insert_num,a,c,d from table_add_int_string_permute_select;
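
The "NULL defaulting" these SELECT permutations check is the core
schema-evolution behavior: rows written before an ALTER TABLE ... ADD COLUMNS
carry no data for the new column, so readers must synthesize NULL for them. A
minimal sketch, independent of the renamed tables above (the table name
demo_evol is illustrative):

    CREATE TABLE demo_evol (insert_num INT, a INT) STORED AS ORC;
    INSERT INTO demo_evol VALUES (1, 10);        -- written under the old schema
    ALTER TABLE demo_evol ADD COLUMNS (c INT);
    INSERT INTO demo_evol VALUES (2, 20, 200);   -- written under the new schema
    -- Row 1 has no physical value for c, so the reader fills in NULL:
    SELECT insert_num, a, c FROM demo_evol;      -- expected (1,10,NULL), (2,20,200)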

[08/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q
 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q
index 2a15910..5bb3580 100644
--- 
a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q
+++ 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_part_all_primitive_llap_io.q
@@ -22,13 +22,13 @@ set hive.llap.io.encode.enabled=true;
 --
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n8(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n8;
 
-CREATE TABLE schema_evolution_data_2(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
+CREATE TABLE schema_evolution_data_2_n2(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into 
table schema_evolution_data_2;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_2.txt' overwrite into 
table schema_evolution_data_2_n2;
 
 --
 -- SUBSECTION: ALTER TABLE CHANGE COLUMNS for Various --> Various:
@@ -38,7 +38,7 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data_
 --(BOOLEAN, TINYINT, SMALLINT, LONG, FLOAT, DOUBLE, DECIMAL, 
STRING, CHAR, VARCHAR, TIMESTAMP) --> INT -2147483648 to 2147483647 and
 --(BOOLEAN, TINYINT, SMALLINT, INT, FLOAT, DOUBLE, DECIMAL, 
STRING, CHAR, VARCHAR, TIMESTAMP) --> BIGINT   -9223372036854775808 to 
9223372036854775807
 --
-CREATE TABLE part_change_various_various_boolean_to_bigint(insert_num int,
+CREATE TABLE part_change_various_various_boolean_to_bigint_n1(insert_num int,
   c1 TINYINT, c2 SMALLINT, c3 INT, c4 BIGINT, c5 FLOAT, c6 DOUBLE, 
c7 DECIMAL(38,18), c8 STRING, c9 TIMESTAMP,
   c10 BOOLEAN, c11 SMALLINT, c12 INT, c13 BIGINT, c14 FLOAT, c15 
DOUBLE, c16 DECIMAL(38,18), c17 STRING, c18 CHAR(25), c19 VARCHAR(25), c20 
TIMESTAMP,
   c21 BOOLEAN, c22 TINYINT, c23 INT, c24 BIGINT, c25 FLOAT, c26 
DOUBLE, c27 DECIMAL(38,18), c28 STRING, c29 CHAR(25), c30 VARCHAR(25), c31 
TIMESTAMP,
@@ -46,21 +46,21 @@ CREATE TABLE 
part_change_various_various_boolean_to_bigint(insert_num int,
   c43 BOOLEAN, c44 TINYINT, c45 SMALLINT, c46 INT, c47 FLOAT, c48 
DOUBLE, c49 DECIMAL(38,18), c50 STRING, c51 CHAR(25), c52 VARCHAR(25), c53 
TIMESTAMP,
   b STRING) PARTITIONED BY(part INT);
 
-insert into table part_change_various_various_boolean_to_bigint 
partition(part=1) SELECT insert_num,
+insert into table part_change_various_various_boolean_to_bigint_n1 
partition(part=1) SELECT insert_num,
  tinyint1, smallint1, int1, bigint1, float1, double1, decimal1, 
boolean_str, timestamp1,
  

[42/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/column_access_stats.q
--
diff --git a/ql/src/test/queries/clientpositive/column_access_stats.q 
b/ql/src/test/queries/clientpositive/column_access_stats.q
index b981ee4..516bdb0 100644
--- a/ql/src/test/queries/clientpositive/column_access_stats.q
+++ b/ql/src/test/queries/clientpositive/column_access_stats.q
@@ -5,114 +5,114 @@ SET hive.stats.collect.scancols=true;
 -- SORT_QUERY_RESULTS
 -- This test is used for testing the ColumnAccessAnalyzer
 
-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+CREATE TABLE T1_n127(key STRING, val STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n127;
 
-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T4(key STRING, val STRING) PARTITIONED BY (p STRING);
+CREATE TABLE T2_n75(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T3_n29(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T4_n16(key STRING, val STRING) PARTITIONED BY (p STRING);
 
 -- Simple select queries
-SELECT key FROM T1;
-SELECT key, val FROM T1;
-SELECT 1 FROM T1;
-SELECT key, val from T4 where p=1;
-SELECT val FROM T4 where p=1;
-SELECT p, val FROM T4 where p=1;
+SELECT key FROM T1_n127;
+SELECT key, val FROM T1_n127;
+SELECT 1 FROM T1_n127;
+SELECT key, val from T4_n16 where p=1;
+SELECT val FROM T4_n16 where p=1;
+SELECT p, val FROM T4_n16 where p=1;
 
 -- More complicated select queries
-EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1;
-SELECT key FROM (SELECT key, val FROM T1) subq1;
-EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1) subq1;
-SELECT k FROM (SELECT key as k, val as v FROM T1) subq1;
-SELECT key + 1 as k FROM T1;
-SELECT key + val as k FROM T1;
+EXPLAIN SELECT key FROM (SELECT key, val FROM T1_n127) subq1;
+SELECT key FROM (SELECT key, val FROM T1_n127) subq1;
+EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1_n127) subq1;
+SELECT k FROM (SELECT key as k, val as v FROM T1_n127) subq1;
+SELECT key + 1 as k FROM T1_n127;
+SELECT key + val as k FROM T1_n127;
 
 -- Work with union
 EXPLAIN
 SELECT * FROM (
-SELECT key as c FROM T1
+SELECT key as c FROM T1_n127
  UNION ALL
-SELECT val as c FROM T1
+SELECT val as c FROM T1_n127
 ) subq1;
 
 SELECT * FROM (
-SELECT key as c FROM T1
+SELECT key as c FROM T1_n127
  UNION ALL
-SELECT val as c FROM T1
+SELECT val as c FROM T1_n127
 ) subq1;
 
 EXPLAIN
 SELECT * FROM (
-SELECT key as c FROM T1
+SELECT key as c FROM T1_n127
  UNION ALL
-SELECT key as c FROM T1
+SELECT key as c FROM T1_n127
 ) subq1;
 
 SELECT * FROM (
-SELECT key as c FROM T1
+SELECT key as c FROM T1_n127
  UNION ALL
-SELECT key as c FROM T1
+SELECT key as c FROM T1_n127
 ) subq1;
 
 -- Work with insert overwrite
-FROM T1
-INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key
-INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key;
+FROM T1_n127
+INSERT OVERWRITE TABLE T2_n75 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE T3_n29 SELECT key, sum(val) GROUP BY key;
 
 -- Simple joins
 SELECT *
-FROM T1 JOIN T2
-ON T1.key = T2.key ;
+FROM T1_n127 JOIN T2_n75
+ON T1_n127.key = T2_n75.key ;
 
 EXPLAIN
-SELECT T1.key
-FROM T1 JOIN T2
-ON T1.key = T2.key;
+SELECT T1_n127.key
+FROM T1_n127 JOIN T2_n75
+ON T1_n127.key = T2_n75.key;
 
-SELECT T1.key
-FROM T1 JOIN T2
-ON T1.key = T2.key;
+SELECT T1_n127.key
+FROM T1_n127 JOIN T2_n75
+ON T1_n127.key = T2_n75.key;
 
 SELECT *
-FROM T1 JOIN T2
-ON T1.key = T2.key AND T1.val = T2.val;
+FROM T1_n127 JOIN T2_n75
+ON T1_n127.key = T2_n75.key AND T1_n127.val = T2_n75.val;
 
 -- Map join
 SELECT /*+ MAPJOIN(a) */ * 
-FROM T1 a JOIN T2 b 
+FROM T1_n127 a JOIN T2_n75 b 
 ON a.key = b.key;
 
 -- More joins
 EXPLAIN
 SELECT *
-FROM T1 JOIN T2
-ON T1.key = T2.key AND T1.val = 3 and T2.val = 3;
+FROM T1_n127 JOIN T2_n75
+ON T1_n127.key = T2_n75.key AND T1_n127.val = 3 and T2_n75.val = 3;
 
 SELECT *
-FROM T1 JOIN T2
-ON T1.key = T2.key AND T1.val = 3 and T2.val = 3;
+FROM T1_n127 JOIN T2_n75
+ON T1_n127.key = T2_n75.key AND T1_n127.val = 3 and T2_n75.val = 3;
 
 EXPLAIN
 SELECT subq1.val
 FROM 
 (
-  SELECT val FROM T1 WHERE key = 5  
+  SELECT val FROM T1_n127 WHERE key = 5  
 ) subq1
 JOIN 
 (
-  SELECT val FROM T2 WHERE key = 6
+  SELECT val FROM T2_n75 WHERE key = 6
 ) subq2 
 ON subq1.val = subq2.val;
 
 SELECT subq1.val
 FROM 
 (
-  SELECT val FROM T1 WHERE key = 5  
+  SELECT val FROM T1_n127 WHERE key = 5  
 ) subq1
 JOIN 
 (
-  SELECT val FROM T2 WHERE key = 6
+  SELECT val FROM T2_n75 WHERE key = 6
 ) subq2 
 ON subq1.val = subq2.val;
 
@@ -124,16 +124,16 @@ FROM
   SELECT subq1.key as key
   FROM
   (
-SELECT key, val FROM T1
+SELECT key, val FROM T1_n127
   ) subq1
   JOIN
   (
-SELECT key, 'teststring' as val FRO
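
As the header comments in this file state, it exercises the
ColumnAccessAnalyzer with hive.stats.collect.scancols=true: for each query the
test records which columns of each scanned table were actually needed. A
minimal sketch of the distinctions it probes (the table name t is
illustrative, and the recorded-access comments describe expected behavior
rather than verified output):

    SET hive.stats.collect.scancols=true;
    CREATE TABLE t (key STRING, val STRING);
    SELECT key FROM t;       -- only t.key should be recorded as accessed
    SELECT 1 FROM t;         -- no columns of t should be recorded
    SELECT key + 1 FROM t;   -- columns referenced inside expressions still count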

[10/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q
index 52535dd..2d0015b 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_part_llap_io.q
@@ -19,9 +19,9 @@ set hive.llap.io.encode.enabled=true;
 -- FILE VARIATION: ORC, Vectorized, MapWork, Partitioned
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n42(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n42;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -30,51 +30,51 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) 
PARTITIONED BY(part INT);
+CREATE TABLE part_add_int_permute_select_n12(insert_num int, a INT, b STRING) 
PARTITIONED BY(part INT);
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 
, 'new');
+insert into table part_add_int_permute_select_n12 partition(part=1) VALUES (1, 
, 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n12 add columns(c int);
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 
, 'new', );
+insert into table part_add_int_permute_select_n12 partition(part=1) VALUES (2, 
, 'new', );
 
 explain vectorization detail
-select insert_num,part,a,b from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n12;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n12;
+select insert_num,part,a,b,c from part_add_int_permute_select_n12;
+select insert_num,part,c from part_add_int_permute_select_n12;
 
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n12;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b 
STRING) PARTITIONED BY(part INT);
+CREATE TABLE part_add_int_string_permute_select_n12(insert_num int, a INT, b 
STRING) PARTITIONED BY(part INT);
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES 
(1, , 'new');
+insert into table part_add_int_string_permute_select_n12 partition(part=1) 
VALUES (1, , 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n12 add columns(c int, d 
string);
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES 
(2, , 'new', , '');
+insert into table part_add_int_string_permute_select_n12 partition(part=1) 
VALUES (2, , 'new', , '');
 
 explain vectorization detail
-select insert_num,part,a,b from part_add_int_string_permute_select;
+select insert_num,part,a,b from part_add_int_string_permute_select_n12;
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_string_permute_select;
-select insert_num,part,a,b,c from part_add_

[19/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q
--
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q 
b/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q
index b85fec5..1869bc1 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat3.q
@@ -1,18 +1,18 @@
 
 
-create table partition_test_partitioned(key string, value string) partitioned 
by (dt string);
+create table partition_test_partitioned_n8(key string, value string) 
partitioned by (dt string);
 
-alter table partition_test_partitioned set fileformat rcfile;
-insert overwrite table partition_test_partitioned partition(dt=101) select * 
from src1;
-show table extended like partition_test_partitioned partition(dt=101);
+alter table partition_test_partitioned_n8 set fileformat rcfile;
+insert overwrite table partition_test_partitioned_n8 partition(dt=101) select 
* from src1;
+show table extended like partition_test_partitioned_n8 partition(dt=101);
 
-alter table partition_test_partitioned set fileformat Sequencefile;
-insert overwrite table partition_test_partitioned partition(dt=102) select * 
from src1;
-show table extended like partition_test_partitioned partition(dt=102);
-select key from partition_test_partitioned where dt=102;
+alter table partition_test_partitioned_n8 set fileformat Sequencefile;
+insert overwrite table partition_test_partitioned_n8 partition(dt=102) select 
* from src1;
+show table extended like partition_test_partitioned_n8 partition(dt=102);
+select key from partition_test_partitioned_n8 where dt=102;
 
-insert overwrite table partition_test_partitioned partition(dt=101) select * 
from src1;
-show table extended like partition_test_partitioned partition(dt=101);
-select key from partition_test_partitioned where dt=101;
+insert overwrite table partition_test_partitioned_n8 partition(dt=101) select 
* from src1;
+show table extended like partition_test_partitioned_n8 partition(dt=101);
+select key from partition_test_partitioned_n8 where dt=101;
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q
--
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q 
b/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q
index 33decec..f64b54e 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat4.q
@@ -1,8 +1,9 @@
-create table partition_test_partitioned(key string, value string) partitioned 
by (dt string);
-alter table partition_test_partitioned set fileformat sequencefile;
-insert overwrite table partition_test_partitioned partition(dt='1') select * 
from src1;
-alter table partition_test_partitioned partition (dt='1') set fileformat 
sequencefile;
+--! qt:dataset:src1
+create table partition_test_partitioned_n5(key string, value string) 
partitioned by (dt string);
+alter table partition_test_partitioned_n5 set fileformat sequencefile;
+insert overwrite table partition_test_partitioned_n5 partition(dt='1') select 
* from src1;
+alter table partition_test_partitioned_n5 partition (dt='1') set fileformat 
sequencefile;
 
-alter table partition_test_partitioned add partition (dt='2');
-alter table partition_test_partitioned drop partition (dt='2');
+alter table partition_test_partitioned_n5 add partition (dt='2');
+alter table partition_test_partitioned_n5 drop partition (dt='2');
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q
--
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q 
b/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q
index fc3bb84..88dba6d 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat5.q
@@ -1,14 +1,14 @@
 set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 
-create table partition_test_partitioned(key string, value string) partitioned 
by (dt string);
+create table partition_test_partitioned_n3(key string, value string) 
partitioned by (dt string);
 
-alter table partition_test_partitioned set fileformat rcfile;
-insert overwrite table partition_test_partitioned partition(dt=101) select * 
from src1;
-alter table partition_test_partitioned set fileformat Sequencefile;
-insert overwrite table partition_test_partitioned partition(dt=102) select * 
from src1;
+alter table partition_test_partitioned_n3 set fileformat rcfile;
+insert overwrite table partition_test_partitioned_n3 partition(dt=101) select * from src1;
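
One detail worth noting in the partition_wise_fileformat4.q hunk above:
besides the rename, the file gains a "--! qt:dataset:src1" header. These
directives belong to the qtest dataset mechanism and declare which shared
source tables a test depends on, so the harness can provision them for the
test instead of assuming a pre-populated metastore. A sketch of the shape of
such a file (contents illustrative):

    --! qt:dataset:src1
    create table fileformat_demo (key string, value string) partitioned by (dt string);
    insert overwrite table fileformat_demo partition (dt='1') select * from src1;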

[17/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q
--
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q
index 1e5f69b..b5025e7 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q
@@ -24,9 +24,9 @@ set hive.llap.io.encode.enabled=true;
 -- Instead just one explain vectorization only detail
 --
 
-CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 
tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), 
float1 float, double1 double, string1 string, string2 string, date1 date, 
timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str 
string, int_str string, bigint_str string, decimal_str string, float_str 
string, double_str string, date_str string, timestamp_str string, filler string)
+CREATE TABLE schema_evolution_data_n22(insert_num int, boolean1 boolean, 
tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 
decimal(38,18), float1 float, double1 double, string1 string, string2 string, 
date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, 
smallint_str string, int_str string, bigint_str string, decimal_str string, 
float_str string, double_str string, date_str string, timestamp_str string, 
filler string)
 row format delimited fields terminated by '|' stored as textfile;
-load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data;
+load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.txt' overwrite into 
table schema_evolution_data_n22;
 
 
--
 -- SECTION: ALTER TABLE ADD COLUMNS
@@ -35,48 +35,48 @@ load data local inpath 
'../../data/files/schema_evolution/schema_evolution_data.
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_permute_select(insert_num int, a INT, b STRING) 
PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_permute_select_n5(insert_num int, a INT, b STRING) 
PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true');
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (1, 
, 'new');
+insert into table part_add_int_permute_select_n5 partition(part=1) VALUES (1, 
, 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_permute_select add columns(c int);
+alter table part_add_int_permute_select_n5 add columns(c int);
 
-insert into table part_add_int_permute_select partition(part=1) VALUES (2, 
, 'new', );
+insert into table part_add_int_permute_select_n5 partition(part=1) VALUES (2, 
, 'new', );
 
 -- SELECT permutation columns to make sure NULL defaulting works right
-select insert_num,part,a,b from part_add_int_permute_select;
-select insert_num,part,a,b,c from part_add_int_permute_select;
-select insert_num,part,c from part_add_int_permute_select;
+select insert_num,part,a,b from part_add_int_permute_select_n5;
+select insert_num,part,a,b,c from part_add_int_permute_select_n5;
+select insert_num,part,c from part_add_int_permute_select_n5;
 
-drop table part_add_int_permute_select;
+drop table part_add_int_permute_select_n5;
 
 
 -- SUBSECTION: ALTER TABLE ADD COLUMNS: INT, STRING, PERMUTE SELECT
 --
 --
-CREATE TABLE part_add_int_string_permute_select(insert_num int, a INT, b 
STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true');
+CREATE TABLE part_add_int_string_permute_select_n5(insert_num int, a INT, b 
STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC 
TBLPROPERTIES ('transactional'='true');
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES 
(1, , 'new');
+insert into table part_add_int_string_permute_select_n5 partition(part=1) 
VALUES (1, , 'new');
 
 -- Table-Non-Cascade ADD COLUMNS ...
-alter table part_add_int_string_permute_select add columns(c int, d string);
+alter table part_add_int_string_permute_select_n5 add columns(c int, d string);
 
-insert into table part_add_int_string_permute_select partition(part=1) VALUES 
(2, , 'new', , '');
+insert into table part_add_int_string_permute_select_n5 partition(part=1) 
VALUES (2, , 'new', , '');
 
 explain vectorization only detail
-select insert_num,part,a,b,c,d from part_add_int_string_permute_select;
+select insert_num,part,a,b,c,d from part_add_int_string_permute_select_n5;
 

[57/58] [abbrv] hive git commit: HIVE-19698: TestAMReporter#testMultipleAM is flaky (Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)

2018-05-24 Thread jcamacho
HIVE-19698: TestAMReporter#testMultipleAM is flaky (Jesus Camacho Rodriguez, 
reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c065d823
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c065d823
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c065d823

Branch: refs/heads/branch-3
Commit: c065d8234ad948c64d8cd35b3fcaff6c0747f2b9
Parents: 4c80bae
Author: Jesus Camacho Rodriguez 
Authored: Thu May 24 10:53:47 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 17:57:04 2018 -0700

--
 llap-server/pom.xml   | 7 +++
 .../hive/llap/daemon/impl/comparator/TestAMReporter.java  | 3 +++
 2 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c065d823/llap-server/pom.xml
--
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index 6928f77..4405700 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -286,6 +286,13 @@
 
 
   org.apache.hive
+  hive-common
+  ${project.version}
+  test-jar
+  test
+
+
+  org.apache.hive
   hive-standalone-metastore
   ${project.version}
   test-jar

http://git-wip-us.apache.org/repos/asf/hive/blob/c065d823/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
--
diff --git 
a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
 
b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
index 19f8048..068aad4 100644
--- 
a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
+++ 
b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
@@ -25,8 +25,10 @@ import org.apache.hadoop.hive.llap.daemon.impl.AMReporter;
 import org.apache.hadoop.hive.llap.daemon.impl.QueryIdentifier;
 import org.apache.hadoop.hive.llap.protocol.LlapTaskUmbilicalProtocol;
 import org.apache.hadoop.io.Text;
+import org.apache.hive.common.util.RetryTestRunner;
 import org.apache.tez.dag.records.TezTaskAttemptID;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -44,6 +46,7 @@ import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+@RunWith(RetryTestRunner.class)
 public class TestAMReporter {
   @Test(timeout = 5000)
   public void testMultipleAM() throws InterruptedException {



[28/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q 
b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
index 6d73896..7dd2523 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
@@ -50,7 +50,7 @@ set hive.merge.mapredfiles=false;
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 -- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
+create table list_bucketing_dynamic_part_n2 (key String, value String) 
 partitioned by (ds String, hr String) 
 skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
 stored as DIRECTORIES
@@ -58,32 +58,32 @@ create table list_bucketing_dynamic_part (key String, value 
String)
 
 -- list bucketing DML without merge. use bucketize to generate a few small 
files.
 explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = 
'2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08';
 
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = 
'2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08';
 
 -- check DML result
-show partitions list_bucketing_dynamic_part;
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr='a1');   
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr='b1');
+show partitions list_bucketing_dynamic_part_n2;
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', 
hr='a1');
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', 
hr='b1');
 
 -- concatenate the partition and it will merge files
-alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') 
concatenate;
+alter table list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', 
hr='b1') concatenate;
 
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr='b1');
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', 
hr='b1');
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 select count(1) from srcpart where ds = '2008-04-08';
-select count(*) from list_bucketing_dynamic_part;
+select count(*) from list_bucketing_dynamic_part_n2;
 explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 
'val_484';
-select * from list_bucketing_dynamic_part where key = '484' and value = 
'val_484';
+select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 
'val_484';
+select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 
'val_484';
 select * from srcpart where ds = '2008-04-08' and key = '484' and value = 
'val_484' order by hr;
 
 -- clean up
-drop table list_bucketing_dynamic_part;
+drop table list_bucketing_dynamic_part_n2;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
--
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q 
b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
index d2e24af..5cdc144 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
@@ -26,7 +26,7 @@ set hive.merge.mapredfiles=false;
 -- 118 01_0
 
 -- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
+create table list_bucketing_static_part_n0 (key String, value String) 
 partitioned by (ds String, hr String) 
 skewed by (key) on ('484','103')
 stored as DIRECTORIES
@@ -34,39 +34,39 @@ create table list_bucketing_static_part (key String, value 
String)
 
 -- list bucketing DML without merge. use bucketize to generate a few small 
files.
 explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+insert overwrite table list_bucketing_static_part_n0 partition (ds = 
'2008-04-08',  hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n0 partition (ds = 
'2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 
 -- check DML result
-show partitions list_bucketing_static_part;
-desc formatted list_bucketing_static_part partition (ds='2008-04-08', 
hr='11');
+show partitions list_bucketing_static_part_n0;
+desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', hr='11');
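
For context on the DDL in these hunks: "skewed by ... on (...) stored as
DIRECTORIES" enables list bucketing, where rows matching one of the listed
skewed values are written into their own subdirectory, so a point predicate on
a skewed value can skip all other files. A minimal sketch (names illustrative;
it assumes the list-bucketing settings these tests configure, such as
hive.mapred.supports.subdirectories=true):

    CREATE TABLE skew_demo (key STRING, value STRING)
    PARTITIONED BY (ds STRING)
    SKEWED BY (key) ON ('484','103') STORED AS DIRECTORIES
    STORED AS RCFILE;
    -- A lookup on a skewed value can then read only the key=484 subdirectory:
    SELECT * FROM skew_demo WHERE key = '484';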

[35/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/explainuser_3.q
--
diff --git a/ql/src/test/queries/clientpositive/explainuser_3.q 
b/ql/src/test/queries/clientpositive/explainuser_3.q
index 1e40329..776381e 100644
--- a/ql/src/test/queries/clientpositive/explainuser_3.q
+++ b/ql/src/test/queries/clientpositive/explainuser_3.q
@@ -11,10 +11,10 @@ set 
hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
 
-CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS 
STORED AS ORC TBLPROPERTIES ('transactional'='true');
-insert into table acid_vectorized select cint, cstring1 from alltypesorc where 
cint is not null order by cint limit 10;
-analyze table acid_vectorized compute statistics for columns;
-explain select a, b from acid_vectorized order by a, b;
+CREATE TABLE acid_vectorized_n0(a INT, b STRING) CLUSTERED BY(a) INTO 2 
BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true');
+insert into table acid_vectorized_n0 select cint, cstring1 from alltypesorc 
where cint is not null order by cint limit 10;
+analyze table acid_vectorized_n0 compute statistics for columns;
+explain select a, b from acid_vectorized_n0 order by a, b;
 
 explain select key, value
 FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol;
@@ -33,13 +33,13 @@ explain use newDB;
 
 use newDB;
 
-create table tab (name string);
+create table tab_n1 (name string);
 
-explain alter table tab rename to newName;
+explain alter table tab_n1 rename to newName;
 
-explain drop table tab;
+explain drop table tab_n1;
 
-drop table tab;
+drop table tab_n1;
 
 explain use default;
 
@@ -60,24 +60,24 @@ EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1;
 explain DROP TEMPORARY MACRO SIGMOID;
 DROP TEMPORARY MACRO SIGMOID;
 
-explain create table src_autho_test as select * from src;
-create table src_autho_test as select * from src;
+explain create table src_autho_test_n3 as select * from src;
+create table src_autho_test_n3 as select * from src;
 
 set hive.security.authorization.enabled=true;
 
-explain grant select on table src_autho_test to user hive_test_user;
-grant select on table src_autho_test to user hive_test_user;
+explain grant select on table src_autho_test_n3 to user hive_test_user;
+grant select on table src_autho_test_n3 to user hive_test_user;
 
-explain show grant user hive_test_user on table src_autho_test;
-explain show grant user hive_test_user on table src_autho_test(key);
+explain show grant user hive_test_user on table src_autho_test_n3;
+explain show grant user hive_test_user on table src_autho_test_n3(key);
 
-select key from src_autho_test order by key limit 20;
+select key from src_autho_test_n3 order by key limit 20;
 
-explain revoke select on table src_autho_test from user hive_test_user;
+explain revoke select on table src_autho_test_n3 from user hive_test_user;
 
-explain grant select(key) on table src_autho_test to user hive_test_user;
+explain grant select(key) on table src_autho_test_n3 to user hive_test_user;
 
-explain revoke select(key) on table src_autho_test from user hive_test_user;
+explain revoke select(key) on table src_autho_test_n3 from user hive_test_user;
 
 explain 
 create role sRc_roLE;
@@ -95,19 +95,19 @@ explain drop role sRc_roLE;
 drop role sRc_roLE;
 
 set hive.security.authorization.enabled=false;
-drop table src_autho_test;
+drop table src_autho_test_n3;
 
-explain drop view v;
+explain drop view v_n1;
 
-explain create view v as with cte as (select * from src  order by key limit 5)
+explain create view v_n1 as with cte as (select * from src  order by key limit 
5)
 select * from cte;
 
 explain with cte as (select * from src  order by key limit 5)
 select * from cte;
 
-create table orc_merge5 (userid bigint, string1 string, subtype double, 
decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n0 (userid bigint, string1 string, subtype double, 
decimal1 decimal, ts timestamp) stored as orc;
 
-load data local inpath '../../data/files/orc_split_elim.orc' into table 
orc_merge5;
+load data local inpath '../../data/files/orc_split_elim.orc' into table 
orc_merge5_n0;
 
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
@@ -126,40 +126,40 @@ set hive.merge.tezfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 
-explain insert overwrite table orc_merge5 select 
userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5_n0 select 
userid,string1,subtype,decimal1,ts from orc_merge5_n0 where userid<=13;
 
-drop table orc_merge5;
+drop table orc_merge5_n0;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=1;
 
-CREAT
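
For readers puzzled by the SIGMOID references in this hunk: they point at a
temporary macro defined earlier in the same file. Hive macros are
session-scoped, SQL-level functions. A representative definition follows (the
exact body used in explainuser_3.q may differ; src is the standard qtest
table):

    CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x));
    SELECT SIGMOID(2) FROM src LIMIT 1;
    DROP TEMPORARY MACRO SIGMOID;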

[29/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q
--
diff --git a/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q 
b/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q
index ebe832d..aec7dea 100644
--- a/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q
+++ b/ql/src/test/queries/clientpositive/join_is_not_distinct_from.q
@@ -1,71 +1,71 @@
 set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1;
+CREATE TABLE myinput1_n10(key int, value int);
+LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1_n10;
 
 -- merging
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from 
b.value;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not 
distinct from b.value;
 -- SORT_QUERY_RESULTS
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from 
b.value;
 
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from 
b.value join myinput1 c on a.key=c.key;
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value 
join myinput1 c on a.key=c.key;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not 
distinct from b.value join myinput1_n10 c on a.key=c.key;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from 
b.value join myinput1_n10 c on a.key=c.key;
 
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from 
b.value join myinput1 c on a.key is not distinct from c.key;
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value 
join myinput1 c on a.key is not distinct from c.key;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not 
distinct from b.value join myinput1_n10 c on a.key is not distinct from c.key;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from 
b.value join myinput1_n10 c on a.key is not distinct from c.key;
 
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from 
b.value AND a.value=b.key join myinput1 c on a.key is not distinct from c.key 
AND a.value=c.value;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not 
distinct from b.value AND a.value=b.key join myinput1_n10 c on a.key is not 
distinct from c.key AND a.value=c.value;
 
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value 
AND a.value=b.key join myinput1 c on a.key is not distinct from c.key AND 
a.value=c.value;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from 
b.value AND a.value=b.key join myinput1_n10 c on a.key is not distinct from 
c.key AND a.value=c.value;
 
-explain select * from myinput1 a join myinput1 b on a.key is not distinct from 
b.value AND a.value is not distinct from b.key join myinput1 c on a.key is not 
distinct from c.key AND a.value is not distinct from c.value;
-select * from myinput1 a join myinput1 b on a.key is not distinct from b.value 
AND a.value is not distinct from b.key join myinput1 c on a.key is not distinct 
from c.key AND a.value is not distinct from c.value;
+explain select * from myinput1_n10 a join myinput1_n10 b on a.key is not 
distinct from b.value AND a.value is not distinct from b.key join myinput1_n10 
c on a.key is not distinct from c.key AND a.value is not distinct from c.value;
+select * from myinput1_n10 a join myinput1_n10 b on a.key is not distinct from 
b.value AND a.value is not distinct from b.key join myinput1_n10 c on a.key is 
not distinct from c.key AND a.value is not distinct from c.value;
 
 -- outer joins
-SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key is not distinct 
from b.value;
-SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key is not distinct 
from b.value;
-SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key is not distinct 
from b.value;
+SELECT * FROM myinput1_n10 a LEFT OUTER JOIN myinput1_n10 b ON a.key is not 
distinct from b.value;
+SELECT * FROM myinput1_n10 a RIGHT OUTER JOIN myinput1_n10 b ON a.key is not 
distinct from b.value;
+SELECT * FROM myinput1_n10 a FULL OUTER JOIN myinput1_n10 b ON a.key is not 
distinct from b.value;
 
 -- map joins
-SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key is not 
distinct from b.value;
-SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key is not 
distinct from b.value;
+SELECT /*+ MAPJOIN(a) */ * FROM myinput1_n10 a JOIN myinput1_n10 b ON a.key is 
not distinct from b.value;
+SELECT /*+ MAPJOIN(b) */ * FROM myinput1_n10 a JOIN myinput1_n10 b ON a.key is 
not distinct from b.value;
 
-CREATE TABLE smb_input(key int, value int);
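
The operator under test here, IS NOT DISTINCT FROM, is null-safe equality:
unlike =, it treats two NULLs as equal and never evaluates to NULL itself,
which is why these joins can match rows whose keys are NULL. A minimal
illustration (myinput1_demo and its two rows are invented for the example, not
taken from in8.txt):

    CREATE TABLE myinput1_demo (key INT, value INT);
    INSERT INTO myinput1_demo VALUES (1, 1), (NULL, NULL);
    -- Plain equality drops the NULL row, since NULL = NULL evaluates to NULL:
    SELECT COUNT(*) FROM myinput1_demo a
    JOIN myinput1_demo b ON a.key = b.value;                     -- expected: 1
    -- Null-safe equality also pairs the NULL key with the NULL value:
    SELECT COUNT(*) FROM myinput1_demo a
    JOIN myinput1_demo b ON a.key IS NOT DISTINCT FROM b.value;  -- expected: 2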

[36/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
--
diff --git a/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q 
b/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
index 5b29ebb..644a0ce 100644
--- a/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
+++ b/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
@@ -1,25 +1,25 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n1;
 
-create table exim_employee ( emp_id int comment "employee id") 
+create table exim_employee_n1 ( emp_id int comment "employee id")  
comment "employee table"
partitioned by (emp_country string comment "two char iso code", 
emp_state string comment "free text")
stored as textfile  
tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat" 
-   into table exim_employee partition (emp_country="in", emp_state="tn");  
+   into table exim_employee_n1 partition (emp_country="in", 
emp_state="tn");   
 load data local inpath "../../data/files/test.dat" 
-   into table exim_employee partition (emp_country="in", emp_state="ka");  
+   into table exim_employee_n1 partition (emp_country="in", 
emp_state="ka");   
 load data local inpath "../../data/files/test.dat" 
-   into table exim_employee partition (emp_country="us", emp_state="tn");  
+   into table exim_employee_n1 partition (emp_country="us", 
emp_state="tn");   
 load data local inpath "../../data/files/test.dat" 
-   into table exim_employee partition (emp_country="us", emp_state="ka");  

+   into table exim_employee_n1 partition (emp_country="us", 
emp_state="ka");   
 dfs ${system:test.dfs.mkdir} 
target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n1 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n1;
 
 create database importer;
 use importer;
@@ -27,16 +27,16 @@ use importer;
 dfs ${system:test.dfs.mkdir} 
target/tmp/ql/test/data/tablestore/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
 
-import table exim_employee partition (emp_country="us", emp_state="tn") 
+import table exim_employee_n1 partition (emp_country="us", emp_state="tn") 
from 'ql/test/data/exports/exim_employee'
location 'ql/test/data/tablestore/exim_employee';
-describe extended exim_employee;   
-show table extended like exim_employee;
-show table extended like exim_employee partition (emp_country="us", 
emp_state="tn");
+describe extended exim_employee_n1;
+show table extended like exim_employee_n1;
+show table extended like exim_employee_n1 partition (emp_country="us", 
emp_state="tn");
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-select * from exim_employee;
+select * from exim_employee_n1;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+select * from exim_employee_n1;
+drop table exim_employee_n1;
 
 drop database importer;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
--
diff --git a/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q 
b/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
index 1e3eaee..9d4cd04 100644
--- a/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
+++ b/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
@@ -2,15 +2,15 @@ set 
hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
 
-create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../../data/files/test.dat" into table exim_department;
+create table exim_department_n3 ( dep_id int) stored as textfile;
+load data local inpath "../../data/files/test.dat" into table 
exim_department_n3;
 
 set hive.security.authorization.enabled=true;
 
-grant Select on table exim_department to user hive_test_user;
+grant Select on table exim_department_n3 to user hive_test_user;
 dfs ${system:test.dfs.mkdir} 
target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
+export table exim_department_n3 to 'ql/test/data/exports/exim_department';
 
 set hive.security.authorizat

[45/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q
--
diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q 
b/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q
index cc43b5b..359560a 100644
--- a/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q
+++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez_empty.q
@@ -4,15 +4,15 @@ set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=1;
 
-CREATE TABLE tab1(key1 int, value string) CLUSTERED BY (key1) INTO 10 BUCKETS 
STORED AS TEXTFILE;
-CREATE TABLE tab2 (key1 int, value string) CLUSTERED BY (key1) INTO 10 BUCKETS 
STORED AS TEXTFILE;
+CREATE TABLE tab1_n0(key1 int, value string) CLUSTERED BY (key1) INTO 10 
BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab2_n0 (key1 int, value string) CLUSTERED BY (key1) INTO 10 
BUCKETS STORED AS TEXTFILE;
 
 
 -- HIVE-18721 : Make sure only certain buckets have data.
-insert into tab1 VALUES (1,"abc"),(4,"def"),(8, "ghi");
-insert into tab2 VALUES (1, "abc"), (5, "aa");
+insert into tab1_n0 VALUES (1,"abc"),(4,"def"),(8, "ghi");
+insert into tab2_n0 VALUES (1, "abc"), (5, "aa");
 
 set hive.convert.join.bucket.mapjoin.tez = true;
 
-explain select * from tab1, tab2 where tab1.key1 = tab2.key1;
-select * from tab1, tab2 where tab1.key1 = tab2.key1;
+explain select * from tab1_n0, tab2_n0 where tab1_n0.key1 = tab2_n0.key1;
+select * from tab1_n0, tab2_n0 where tab1_n0.key1 = tab2_n0.key1;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
--
diff --git a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q 
b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
index 6428f09..6e5bc39 100644
--- a/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
+++ b/ql/src/test/queries/clientpositive/bucket_num_reducers2.q
@@ -4,11 +4,11 @@ set hive.exec.reducers.max = 2;
 -- This test sets the maximum number of reduce tasks to 2 for overwriting a
 -- table with 3 buckets, and uses a post-hook to confirm that 1 reducer was 
used
 
-CREATE TABLE test_table(key int, value string) CLUSTERED BY (key) INTO 3 
BUCKETS;
+CREATE TABLE test_table_n4(key int, value string) CLUSTERED BY (key) INTO 3 
BUCKETS;
 
-explain extended insert overwrite table test_table
+explain extended insert overwrite table test_table_n4
   select * from src;
-insert overwrite table test_table
+insert overwrite table test_table_n4
 select * from src;
 
-drop table test_table;
+drop table test_table_n4;

http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/bucketcontext_1.q
--
diff --git a/ql/src/test/queries/clientpositive/bucketcontext_1.q 
b/ql/src/test/queries/clientpositive/bucketcontext_1.q
index 876bc0f..4658a65 100644
--- a/ql/src/test/queries/clientpositive/bucketcontext_1.q
+++ b/ql/src/test/queries/clientpositive/bucketcontext_1.q
@@ -2,25 +2,25 @@ set hive.strict.checks.bucketing=false;
 
 set hive.mapred.mode=nonstrict;
 -- small 1 part, 2 bucket & big 2 part, 4 bucket
-CREATE TABLE bucket_small (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/00_0' 
INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/01_0' 
INTO TABLE bucket_small partition(ds='2008-04-08');
+CREATE TABLE bucket_small_n14 (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+load data local inpath '../../data/files/auto_sortmerge_join/big/00_0' 
INTO TABLE bucket_small_n14 partition(ds='2008-04-08');
+load data local inpath '../../data/files/auto_sortmerge_join/big/01_0' 
INTO TABLE bucket_small_n14 partition(ds='2008-04-08');
 
-CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) 
CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../../data/files/auto_sortmerge_join/big/00_0' 
INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/01_0' 
INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/02_0' 
INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../../data/files/auto_sortmerge_join/big/03_0' 
INTO TABLE bucket_big partition(ds='2008-04-08');
+CREATE TABLE bucket_big_n14 (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORT

[23/58] [abbrv] [partial] hive git commit: HIVE-19617: Rename test tables to avoid collisions during execution in batches (Jesus Camacho Rodriguez, reviewed by Gunther Hagleitner)

2018-05-24 Thread jcamacho
http://git-wip-us.apache.org/repos/asf/hive/blob/9bf28a3c/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
--
diff --git 
a/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
 
b/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
index 3ddaa47..cd6073b 100644
--- 
a/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
+++ 
b/ql/src/test/queries/clientpositive/multi_insert_move_tasks_share_dependencies.q
@@ -2,23 +2,23 @@ set hive.multi.insert.move.tasks.share.dependencies=true;
 set hive.stats.dbclass=fs;
 -- SORT_QUERY_RESULTS
 
-create table src_multi1 like src;
-create table src_multi2 like src;
+create table src_multi1_n4 like src;
+create table src_multi2_n5 like src;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 set hive.merge.mapfiles=true;
@@ -26,45 +26,45 @@ set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
 from src
-insert overwrite table src_multi1 select * where key < 10
-insert overwrite table src_multi2 select * where key > 10 and key < 20;
+insert overwrite table src_multi1_n4 select * where key < 10
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 
@@ -73,15 +73,15 @@ set hive.merge.mapredfiles=false;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group 
by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, 
value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 
group by key, value;
 
 from src
-insert overwrite table src_multi1 select * where key < 10 group by key, value
-insert overwrite table src_multi2 select * where key > 10 and key < 20 group 
by key, value;
+insert overwrite table src_multi1_n4 select * where key < 10 group by key, 
value
+insert overwrite table src_multi2_n5 select * where key > 10 and key < 20 
group by key, value;
 
-select * from src_multi1;
-select * from src_multi2;
+select * from src_multi1_n4;
+select * from src_multi2_n5;
 
 
 set hive.merge.mapfiles=false;
@@ -89,30 +89,30 @@ set hive.merge.mapredfiles=true;
 
 explain
 from src
-insert overwrite table src_multi1 select * where key < 10 

[2/2] hive git commit: Revert "HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt McCline, reviewed by Teddy Choi)"

2018-05-24 Thread jcamacho
Revert "HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt 
McCline, reviewed by Teddy Choi)"

This reverts commit 25aaf7db0d62d6007c79213a33dae0fb8ac9a7be.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fc040d52
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fc040d52
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fc040d52

Branch: refs/heads/master
Commit: fc040d52c051e4655b2cb80d047a7a18a7f08e7a
Parents: 25aaf7d
Author: Jesus Camacho Rodriguez 
Authored: Thu May 24 15:48:04 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 15:48:04 2018 -0700

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   4 -
 .../UDAFTemplates/VectorUDAFAvgDecimal.txt  |   2 +-
 .../UDAFTemplates/VectorUDAFAvgDecimalMerge.txt |   2 +-
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |   2 +-
 .../exec/vector/VectorExpressionDescriptor.java |  72 ++-
 .../exec/vector/VectorHashKeyWrapperBatch.java  |   2 +-
 .../ql/exec/vector/VectorizationContext.java|  26 +-
 .../ql/exec/vector/VectorizedBatchUtil.java |   4 -
 .../vector/expressions/CastDateToBoolean.java   |  61 ---
 .../expressions/CastDecimalToDecimal.java   |   2 +-
 .../vector/expressions/CastDoubleToDecimal.java |  15 +-
 .../vector/expressions/CastFloatToDecimal.java  |  65 ---
 .../vector/expressions/CastLongToDecimal.java   |   2 +-
 .../vector/expressions/CastStringToDecimal.java |   2 +-
 .../vector/expressions/CastTimestampToLong.java |  60 +--
 .../expressions/NullVectorExpression.java   |  56 ---
 .../aggregates/VectorUDAFSumDecimal.java|   2 +-
 .../VectorUDAFSumDecimal64ToDecimal.java|   2 +-
 .../VectorPTFEvaluatorDecimalFirstValue.java|   2 +-
 .../exec/vector/ptf/VectorPTFGroupBatches.java  |   2 +-
 .../hive/ql/plan/ExprNodeGenericFuncDesc.java   |   5 +-
 .../apache/hadoop/hive/ql/udf/UDFToBoolean.java |   4 +-
 .../apache/hadoop/hive/ql/udf/UDFToByte.java|   7 +-
 .../apache/hadoop/hive/ql/udf/UDFToInteger.java |   7 +-
 .../apache/hadoop/hive/ql/udf/UDFToShort.java   |   7 +-
 .../ql/exec/vector/VectorRandomRowSource.java   |  68 +--
 .../expressions/TestVectorCastStatement.java| 502 ---
 .../vector/expressions/TestVectorTypeCasts.java |   4 -
 .../llap/vector_decimal_aggregate.q.out |   4 +-
 .../clientpositive/spark/timestamp_1.q.out  |  24 +-
 .../clientpositive/spark/timestamp_2.q.out  |  24 +-
 .../clientpositive/spark/timestamp_3.q.out  |   4 +-
 .../spark/vector_decimal_aggregate.q.out|  36 +-
 .../results/clientpositive/timestamp_1.q.out|  24 +-
 .../results/clientpositive/timestamp_2.q.out|  24 +-
 .../results/clientpositive/timestamp_3.q.out|   4 +-
 .../vector_decimal_aggregate.q.out  |  32 +-
 .../apache/hadoop/hive/tools/GenVectorCode.java |   2 +
 38 files changed, 197 insertions(+), 970 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/fc040d52/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 7ed3a9c..931533a 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3554,10 +3554,6 @@ public class HiveConf extends Configuration {
 "1. chosen : use VectorUDFAdaptor for a small set of UDFs that were 
chosen for good performance\n" +
 "2. all: use VectorUDFAdaptor for all UDFs"
 ),
-HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE("hive.test.vectorized.adaptor.override", 
false,
-"internal use only, used to force always using the 
VectorUDFAdaptor.\n" +
-"The default is false, of course",
-true),
 HIVE_VECTORIZATION_PTF_ENABLED("hive.vectorized.execution.ptf.enabled", 
true,
 "This flag should be set to true to enable vectorized mode of the PTF 
of query execution.\n" +
 "The default value is true."),

http://git-wip-us.apache.org/repos/asf/hive/blob/fc040d52/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
--
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt 
b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
index f512639..fa72171 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
@@ -522,7 +522,7 @@ public class <ClassName> extends VectorAggregateExpression {
 fields[AVERAGE_COUNT_FIELD_INDEX].isNull[batchIndex] = false;
 ((LongColumnVector) fields[AVERAGE_COUNT_FIELD_INDEX]).vector[ba

[1/2] hive git commit: Revert "HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt McCline, reviewed by Teddy Choi)"

2018-05-24 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 25aaf7db0 -> fc040d52c


http://git-wip-us.apache.org/repos/asf/hive/blob/fc040d52/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out 
b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index c5d0214..d37a27e 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -457,7 +457,7 @@ STAGE PLANS:
 Map Operator Tree:
 TableScan
   alias: decimal_vgby_small
-  Statistics: Num rows: 12289 Data size: 346462 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 12289 Data size: 346472 Basic stats: 
COMPLETE Column stats: NONE
   TableScan Vectorization:
   native: true
   vectorizationSchemaColumns: [0:cdouble:double, 
1:cdecimal1:decimal(11,5)/DECIMAL_64, 2:cdecimal2:decimal(16,0)/DECIMAL_64, 
3:cint:int, 4:ROW__ID:struct]
@@ -468,7 +468,7 @@ STAGE PLANS:
 className: VectorSelectOperator
 native: true
 projectedOutputColumnNums: [1, 2, 3]
-Statistics: Num rows: 12289 Data size: 346462 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 12289 Data size: 346472 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   aggregations: count(cdecimal1), max(cdecimal1), 
min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), 
min(cdecimal2), sum(cdecimal2), count()
   Group By Vectorization:
@@ -482,7 +482,7 @@ STAGE PLANS:
   keys: cint (type: int)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9
-  Statistics: Num rows: 12289 Data size: 346462 Basic 
stats: COMPLETE Column stats: NONE
+  Statistics: Num rows: 12289 Data size: 346472 Basic 
stats: COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: int)
 sort order: +
@@ -493,7 +493,7 @@ STAGE PLANS:
 native: true
 nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS 
true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
 valueColumnNums: [1, 2, 3, 4, 5, 6, 7, 8, 9]
-Statistics: Num rows: 12289 Data size: 346462 Basic 
stats: COMPLETE Column stats: NONE
+Statistics: Num rows: 12289 Data size: 346472 Basic 
stats: COMPLETE Column stats: NONE
 value expressions: _col1 (type: bigint), _col2 (type: 
decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 
(type: bigint), _col6 (type: decimal(16,0)), _col7 (type: decimal(16,0)), _col8 
(type: decimal(26,0)), _col9 (type: bigint)
 Execution mode: vectorized
 Map Vectorization:
@@ -540,14 +540,14 @@ STAGE PLANS:
 keys: KEY._col0 (type: int)
 mode: mergepartial
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col9
-Statistics: Num rows: 6144 Data size: 173216 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 6144 Data size: 173221 Basic stats: 
COMPLETE Column stats: NONE
 Filter Operator
   Filter Vectorization:
   className: VectorFilterOperator
   native: true
   predicateExpression: FilterLongColGreaterLongScalar(col 
9:bigint, val 1)
   predicate: (_col9 > 1L) (type: boolean)
-  Statistics: Num rows: 2048 Data size: 57738 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 2048 Data size: 57740 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), 
_col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: 
decimal(21,5)), _col5 (type: bigint), _col6 (type: decimal(16,0)), _col7 (type: 
decimal(16,0)), _col8 (type: decimal(26,0))
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
@@ -555,13 +555,13 @@ STAGE PLANS:
 className: VectorSelectOperator
 

[2/2] hive git commit: HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt McCline, reviewed by Teddy Choi)

2018-05-24 Thread mmccline
HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt 
McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/678e9fee
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/678e9fee
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/678e9fee

Branch: refs/heads/branch-3
Commit: 678e9fee1c016adf93601837c0d9521440291f83
Parents: fa30fe4
Author: Matt McCline 
Authored: Thu May 24 16:41:42 2018 -0500
Committer: Matt McCline 
Committed: Thu May 24 16:46:02 2018 -0500

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   4 +
 .../UDAFTemplates/VectorUDAFAvgDecimal.txt  |   2 +-
 .../UDAFTemplates/VectorUDAFAvgDecimalMerge.txt |   2 +-
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |   2 +-
 .../exec/vector/VectorExpressionDescriptor.java |  72 +--
 .../exec/vector/VectorHashKeyWrapperBatch.java  |   2 +-
 .../ql/exec/vector/VectorizationContext.java|  26 +-
 .../ql/exec/vector/VectorizedBatchUtil.java |   4 +
 .../vector/expressions/CastDateToBoolean.java   |  61 +++
 .../expressions/CastDecimalToDecimal.java   |   2 +-
 .../vector/expressions/CastDoubleToDecimal.java |  15 +-
 .../vector/expressions/CastFloatToDecimal.java  |  65 +++
 .../vector/expressions/CastLongToDecimal.java   |   2 +-
 .../vector/expressions/CastStringToDecimal.java |   2 +-
 .../vector/expressions/CastTimestampToLong.java |  60 ++-
 .../expressions/NullVectorExpression.java   |  56 +++
 .../aggregates/VectorUDAFSumDecimal.java|   2 +-
 .../VectorUDAFSumDecimal64ToDecimal.java|   2 +-
 .../VectorPTFEvaluatorDecimalFirstValue.java|   2 +-
 .../exec/vector/ptf/VectorPTFGroupBatches.java  |   2 +-
 .../hive/ql/plan/ExprNodeGenericFuncDesc.java   |   5 +-
 .../apache/hadoop/hive/ql/udf/UDFToBoolean.java |   4 +-
 .../apache/hadoop/hive/ql/udf/UDFToByte.java|   7 +-
 .../apache/hadoop/hive/ql/udf/UDFToInteger.java |   7 +-
 .../apache/hadoop/hive/ql/udf/UDFToShort.java   |   7 +-
 .../ql/exec/vector/VectorRandomRowSource.java   |  68 ++-
 .../expressions/TestVectorCastStatement.java| 502 +++
 .../vector/expressions/TestVectorTypeCasts.java |   4 +
 .../llap/vector_decimal_aggregate.q.out |   4 +-
 .../clientpositive/spark/timestamp_1.q.out  |  24 +-
 .../clientpositive/spark/timestamp_2.q.out  |  24 +-
 .../clientpositive/spark/timestamp_3.q.out  |   4 +-
 .../spark/vector_decimal_aggregate.q.out|  36 +-
 .../results/clientpositive/timestamp_1.q.out|  24 +-
 .../results/clientpositive/timestamp_2.q.out|  24 +-
 .../results/clientpositive/timestamp_3.q.out|   4 +-
 .../vector_decimal_aggregate.q.out  |  32 +-
 .../apache/hadoop/hive/tools/GenVectorCode.java |   2 -
 38 files changed, 970 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/678e9fee/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b81c47d..0a99fbf 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3538,6 +3538,10 @@ public class HiveConf extends Configuration {
 "1. chosen : use VectorUDFAdaptor for a small set of UDFs that were 
chosen for good performance\n" +
 "2. all: use VectorUDFAdaptor for all UDFs"
 ),
+HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE("hive.test.vectorized.adaptor.override", 
false,
+"internal use only, used to force always using the 
VectorUDFAdaptor.\n" +
+"The default is false, of course",
+true),
 HIVE_VECTORIZATION_PTF_ENABLED("hive.vectorized.execution.ptf.enabled", 
true,
 "This flag should be set to true to enable vectorized mode of the PTF 
of query execution.\n" +
 "The default value is true."),

http://git-wip-us.apache.org/repos/asf/hive/blob/678e9fee/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
--
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt 
b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
index fa72171..f512639 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
@@ -522,7 +522,7 @@ public class <ClassName> extends VectorAggregateExpression {
 fields[AVERAGE_COUNT_FIELD_INDEX].isNull[batchIndex] = false;
 ((LongColumnVector) fields[AVERAGE_COUNT_FIELD_INDEX]).vector[batchIndex] 
= myagg.count;
 fields[AVERAGE_SUM_FIELD_INDEX].isNull[batchIndex] = false;
-
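For context on the HiveConf hunk above: it registers hive.test.vectorized.adaptor.override as an internal, test-only boolean ConfVars entry. Such a flag is normally read back through HiveConf's typed getters; a minimal sketch of that read path follows (illustrative only; the patch's actual call sites are likely in VectorizationContext, which this diffstat also touches, and are not shown in this excerpt):

import org.apache.hadoop.hive.conf.HiveConf;

public class AdaptorOverrideCheck {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // The ConfVars entry added by this patch; defaults to false and is
    // only meant to be flipped by tests.
    boolean forceAdaptor =
        conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE);
    System.out.println("force VectorUDFAdaptor: " + forceAdaptor);
  }
}

Setting it to true via conf.setBoolVar(...) forces every UDF through VectorUDFAdaptor, which is presumably what the new TestVectorCastStatement suite uses it for.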

[1/2] hive git commit: HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt McCline, reviewed by Teddy Choi)

2018-05-24 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-3 fa30fe4b1 -> 678e9fee1


http://git-wip-us.apache.org/repos/asf/hive/blob/678e9fee/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out 
b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index d37a27e..c5d0214 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -457,7 +457,7 @@ STAGE PLANS:
 Map Operator Tree:
 TableScan
   alias: decimal_vgby_small
-  Statistics: Num rows: 12289 Data size: 346472 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 12289 Data size: 346462 Basic stats: 
COMPLETE Column stats: NONE
   TableScan Vectorization:
   native: true
   vectorizationSchemaColumns: [0:cdouble:double, 
1:cdecimal1:decimal(11,5)/DECIMAL_64, 2:cdecimal2:decimal(16,0)/DECIMAL_64, 
3:cint:int, 4:ROW__ID:struct]
@@ -468,7 +468,7 @@ STAGE PLANS:
 className: VectorSelectOperator
 native: true
 projectedOutputColumnNums: [1, 2, 3]
-Statistics: Num rows: 12289 Data size: 346472 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 12289 Data size: 346462 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   aggregations: count(cdecimal1), max(cdecimal1), 
min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), 
min(cdecimal2), sum(cdecimal2), count()
   Group By Vectorization:
@@ -482,7 +482,7 @@ STAGE PLANS:
   keys: cint (type: int)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9
-  Statistics: Num rows: 12289 Data size: 346472 Basic 
stats: COMPLETE Column stats: NONE
+  Statistics: Num rows: 12289 Data size: 346462 Basic 
stats: COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: int)
 sort order: +
@@ -493,7 +493,7 @@ STAGE PLANS:
 native: true
 nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS 
true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
 valueColumnNums: [1, 2, 3, 4, 5, 6, 7, 8, 9]
-Statistics: Num rows: 12289 Data size: 346472 Basic 
stats: COMPLETE Column stats: NONE
+Statistics: Num rows: 12289 Data size: 346462 Basic 
stats: COMPLETE Column stats: NONE
 value expressions: _col1 (type: bigint), _col2 (type: 
decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 
(type: bigint), _col6 (type: decimal(16,0)), _col7 (type: decimal(16,0)), _col8 
(type: decimal(26,0)), _col9 (type: bigint)
 Execution mode: vectorized
 Map Vectorization:
@@ -540,14 +540,14 @@ STAGE PLANS:
 keys: KEY._col0 (type: int)
 mode: mergepartial
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col9
-Statistics: Num rows: 6144 Data size: 173221 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 6144 Data size: 173216 Basic stats: 
COMPLETE Column stats: NONE
 Filter Operator
   Filter Vectorization:
   className: VectorFilterOperator
   native: true
   predicateExpression: FilterLongColGreaterLongScalar(col 
9:bigint, val 1)
   predicate: (_col9 > 1L) (type: boolean)
-  Statistics: Num rows: 2048 Data size: 57740 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 2048 Data size: 57738 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), 
_col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: 
decimal(21,5)), _col5 (type: bigint), _col6 (type: decimal(16,0)), _col7 (type: 
decimal(16,0)), _col8 (type: decimal(26,0))
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
@@ -555,13 +555,13 @@ STAGE PLANS:
 className: VectorSelectOperator
   

[2/2] hive git commit: HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt McCline, reviewed by Teddy Choi)

2018-05-24 Thread mmccline
HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt 
McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/25aaf7db
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/25aaf7db
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/25aaf7db

Branch: refs/heads/master
Commit: 25aaf7db0d62d6007c79213a33dae0fb8ac9a7be
Parents: 2c848ef
Author: Matt McCline 
Authored: Thu May 24 16:41:42 2018 -0500
Committer: Matt McCline 
Committed: Thu May 24 16:41:42 2018 -0500

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   4 +
 .../UDAFTemplates/VectorUDAFAvgDecimal.txt  |   2 +-
 .../UDAFTemplates/VectorUDAFAvgDecimalMerge.txt |   2 +-
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |   2 +-
 .../exec/vector/VectorExpressionDescriptor.java |  72 +--
 .../exec/vector/VectorHashKeyWrapperBatch.java  |   2 +-
 .../ql/exec/vector/VectorizationContext.java|  26 +-
 .../ql/exec/vector/VectorizedBatchUtil.java |   4 +
 .../vector/expressions/CastDateToBoolean.java   |  61 +++
 .../expressions/CastDecimalToDecimal.java   |   2 +-
 .../vector/expressions/CastDoubleToDecimal.java |  15 +-
 .../vector/expressions/CastFloatToDecimal.java  |  65 +++
 .../vector/expressions/CastLongToDecimal.java   |   2 +-
 .../vector/expressions/CastStringToDecimal.java |   2 +-
 .../vector/expressions/CastTimestampToLong.java |  60 ++-
 .../expressions/NullVectorExpression.java   |  56 +++
 .../aggregates/VectorUDAFSumDecimal.java|   2 +-
 .../VectorUDAFSumDecimal64ToDecimal.java|   2 +-
 .../VectorPTFEvaluatorDecimalFirstValue.java|   2 +-
 .../exec/vector/ptf/VectorPTFGroupBatches.java  |   2 +-
 .../hive/ql/plan/ExprNodeGenericFuncDesc.java   |   5 +-
 .../apache/hadoop/hive/ql/udf/UDFToBoolean.java |   4 +-
 .../apache/hadoop/hive/ql/udf/UDFToByte.java|   7 +-
 .../apache/hadoop/hive/ql/udf/UDFToInteger.java |   7 +-
 .../apache/hadoop/hive/ql/udf/UDFToShort.java   |   7 +-
 .../ql/exec/vector/VectorRandomRowSource.java   |  68 ++-
 .../expressions/TestVectorCastStatement.java| 502 +++
 .../vector/expressions/TestVectorTypeCasts.java |   4 +
 .../llap/vector_decimal_aggregate.q.out |   4 +-
 .../clientpositive/spark/timestamp_1.q.out  |  24 +-
 .../clientpositive/spark/timestamp_2.q.out  |  24 +-
 .../clientpositive/spark/timestamp_3.q.out  |   4 +-
 .../spark/vector_decimal_aggregate.q.out|  36 +-
 .../results/clientpositive/timestamp_1.q.out|  24 +-
 .../results/clientpositive/timestamp_2.q.out|  24 +-
 .../results/clientpositive/timestamp_3.q.out|   4 +-
 .../vector_decimal_aggregate.q.out  |  32 +-
 .../apache/hadoop/hive/tools/GenVectorCode.java |   2 -
 38 files changed, 970 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/25aaf7db/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 931533a..7ed3a9c 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3554,6 +3554,10 @@ public class HiveConf extends Configuration {
 "1. chosen : use VectorUDFAdaptor for a small set of UDFs that were 
chosen for good performance\n" +
 "2. all: use VectorUDFAdaptor for all UDFs"
 ),
+HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE("hive.test.vectorized.adaptor.override", 
false,
+"internal use only, used to force always using the 
VectorUDFAdaptor.\n" +
+"The default is false, of course",
+true),
 HIVE_VECTORIZATION_PTF_ENABLED("hive.vectorized.execution.ptf.enabled", 
true,
 "This flag should be set to true to enable vectorized mode of the PTF 
of query execution.\n" +
 "The default value is true."),

http://git-wip-us.apache.org/repos/asf/hive/blob/25aaf7db/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
--
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt 
b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
index fa72171..f512639 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
@@ -522,7 +522,7 @@ public class <ClassName> extends VectorAggregateExpression {
 fields[AVERAGE_COUNT_FIELD_INDEX].isNull[batchIndex] = false;
 ((LongColumnVector) fields[AVERAGE_COUNT_FIELD_INDEX]).vector[batchIndex] 
= myagg.count;
 fields[AVERAGE_SUM_FIELD_INDEX].isNull[batchIndex] = false;
-  

[1/2] hive git commit: HIVE-19498: Vectorization: CAST expressions produce wrong results (Matt McCline, reviewed by Teddy Choi)

2018-05-24 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/master 2c848ef5d -> 25aaf7db0


http://git-wip-us.apache.org/repos/asf/hive/blob/25aaf7db/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out 
b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index d37a27e..c5d0214 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -457,7 +457,7 @@ STAGE PLANS:
 Map Operator Tree:
 TableScan
   alias: decimal_vgby_small
-  Statistics: Num rows: 12289 Data size: 346472 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 12289 Data size: 346462 Basic stats: 
COMPLETE Column stats: NONE
   TableScan Vectorization:
   native: true
   vectorizationSchemaColumns: [0:cdouble:double, 
1:cdecimal1:decimal(11,5)/DECIMAL_64, 2:cdecimal2:decimal(16,0)/DECIMAL_64, 
3:cint:int, 4:ROW__ID:struct]
@@ -468,7 +468,7 @@ STAGE PLANS:
 className: VectorSelectOperator
 native: true
 projectedOutputColumnNums: [1, 2, 3]
-Statistics: Num rows: 12289 Data size: 346472 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 12289 Data size: 346462 Basic stats: 
COMPLETE Column stats: NONE
 Group By Operator
   aggregations: count(cdecimal1), max(cdecimal1), 
min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), 
min(cdecimal2), sum(cdecimal2), count()
   Group By Vectorization:
@@ -482,7 +482,7 @@ STAGE PLANS:
   keys: cint (type: int)
   mode: hash
   outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8, _col9
-  Statistics: Num rows: 12289 Data size: 346472 Basic 
stats: COMPLETE Column stats: NONE
+  Statistics: Num rows: 12289 Data size: 346462 Basic 
stats: COMPLETE Column stats: NONE
   Reduce Output Operator
 key expressions: _col0 (type: int)
 sort order: +
@@ -493,7 +493,7 @@ STAGE PLANS:
 native: true
 nativeConditionsMet: 
hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine 
spark IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS 
true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
 valueColumnNums: [1, 2, 3, 4, 5, 6, 7, 8, 9]
-Statistics: Num rows: 12289 Data size: 346472 Basic 
stats: COMPLETE Column stats: NONE
+Statistics: Num rows: 12289 Data size: 346462 Basic 
stats: COMPLETE Column stats: NONE
 value expressions: _col1 (type: bigint), _col2 (type: 
decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: decimal(21,5)), _col5 
(type: bigint), _col6 (type: decimal(16,0)), _col7 (type: decimal(16,0)), _col8 
(type: decimal(26,0)), _col9 (type: bigint)
 Execution mode: vectorized
 Map Vectorization:
@@ -540,14 +540,14 @@ STAGE PLANS:
 keys: KEY._col0 (type: int)
 mode: mergepartial
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, _col7, _col8, _col9
-Statistics: Num rows: 6144 Data size: 173221 Basic stats: 
COMPLETE Column stats: NONE
+Statistics: Num rows: 6144 Data size: 173216 Basic stats: 
COMPLETE Column stats: NONE
 Filter Operator
   Filter Vectorization:
   className: VectorFilterOperator
   native: true
   predicateExpression: FilterLongColGreaterLongScalar(col 
9:bigint, val 1)
   predicate: (_col9 > 1L) (type: boolean)
-  Statistics: Num rows: 2048 Data size: 57740 Basic stats: 
COMPLETE Column stats: NONE
+  Statistics: Num rows: 2048 Data size: 57738 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
 expressions: _col0 (type: int), _col1 (type: bigint), 
_col2 (type: decimal(11,5)), _col3 (type: decimal(11,5)), _col4 (type: 
decimal(21,5)), _col5 (type: bigint), _col6 (type: decimal(16,0)), _col7 (type: 
decimal(16,0)), _col8 (type: decimal(26,0))
 outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6, _col7, _col8
@@ -555,13 +555,13 @@ STAGE PLANS:
 className: VectorSelectOperator
 

[1/2] hive git commit: HIVE-19631 : reduce epic locking in AbstractService (Sergey Shelukhin, reviewed by Thejas M Nair)

2018-05-24 Thread sershe
Repository: hive
Updated Branches:
  refs/heads/branch-3 eaeb20079 -> fa30fe4b1
  refs/heads/master 68a2beabc -> 2c848ef5d


HIVE-19631 : reduce epic locking in AbstractService (Sergey Shelukhin, reviewed 
by Thejas M Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2c848ef5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2c848ef5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2c848ef5

Branch: refs/heads/master
Commit: 2c848ef5d6bccabcd9ded777dc1f0f11605ec7e1
Parents: 68a2bea
Author: sergey 
Authored: Thu May 24 14:32:00 2018 -0700
Committer: sergey 
Committed: Thu May 24 14:32:00 2018 -0700

--
 .../apache/hive/service/AbstractService.java| 23 ++--
 .../org/apache/hive/service/cli/CLIService.java |  5 -
 .../apache/hive/service/server/HiveServer2.java | 12 +-
 3 files changed, 27 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2c848ef5/service/src/java/org/apache/hive/service/AbstractService.java
--
diff --git a/service/src/java/org/apache/hive/service/AbstractService.java 
b/service/src/java/org/apache/hive/service/AbstractService.java
index 37a9f4c..2ddb069 100644
--- a/service/src/java/org/apache/hive/service/AbstractService.java
+++ b/service/src/java/org/apache/hive/service/AbstractService.java
@@ -50,7 +50,7 @@ public abstract class AbstractService implements Service {
   /**
* The configuration. Will be null until the service is initialized.
*/
-  protected HiveConf hiveConf;
+  private HiveConf hiveConf;
 
   /**
* List of state change listeners; it is final to ensure
@@ -69,6 +69,7 @@ public abstract class AbstractService implements Service {
 this.name = name;
   }
 
+  // This probably doesn't need to be sync, but nobody calls this, so it 
doesn't matter.
   @Override
   public synchronized STATE getServiceState() {
 return state;
@@ -84,11 +85,15 @@ public abstract class AbstractService implements Service {
   @Override
   public synchronized void init(HiveConf hiveConf) {
 ensureCurrentState(STATE.NOTINITED);
-this.hiveConf = hiveConf;
+setHiveConf(hiveConf);
 changeState(STATE.INITED);
 LOG.info("Service:" + getName() + " is inited.");
   }
 
+  protected final void setHiveConf(HiveConf hiveConf) {
+this.hiveConf = hiveConf;
+  }
+
   /**
* {@inheritDoc}
*
@@ -126,13 +131,17 @@ public abstract class AbstractService implements Service {
   }
 
   @Override
-  public synchronized void register(ServiceStateChangeListener l) {
-listeners.add(l);
+  public void register(ServiceStateChangeListener l) {
+synchronized (listeners) {
+  listeners.add(l);
+}
   }
 
   @Override
-  public synchronized void unregister(ServiceStateChangeListener l) {
-listeners.remove(l);
+  public void unregister(ServiceStateChangeListener l) {
+synchronized (listeners) {
+  listeners.remove(l);
+}
   }
 
   @Override
@@ -141,7 +150,7 @@ public abstract class AbstractService implements Service {
   }
 
   @Override
-  public synchronized HiveConf getHiveConf() {
+  public HiveConf getHiveConf() {
 return hiveConf;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2c848ef5/service/src/java/org/apache/hive/service/cli/CLIService.java
--
diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java 
b/service/src/java/org/apache/hive/service/cli/CLIService.java
index c9914ba..3e26197 100644
--- a/service/src/java/org/apache/hive/service/cli/CLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/CLIService.java
@@ -80,7 +80,7 @@ public class CLIService extends CompositeService implements 
ICLIService {
 
   @Override
   public synchronized void init(HiveConf hiveConf) {
-this.hiveConf = hiveConf;
+setHiveConf(hiveConf);
 sessionManager = new SessionManager(hiveServer2);
 defaultFetchRows = 
hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE);
 addService(sessionManager);
@@ -132,6 +132,7 @@ public class CLIService extends CompositeService implements 
ICLIService {
   }
 
   private void setupBlockedUdfs() {
+HiveConf hiveConf = getHiveConf();
 FunctionRegistry.setupPermissionsForBuiltinUDFs(
 hiveConf.getVar(ConfVars.HIVE_SERVER2_BUILTIN_UDF_WHITELIST),
 hiveConf.getVar(ConfVars.HIVE_SERVER2_BUILTIN_UDF_BLACKLIST));
@@ -563,8 +564,10 @@ public class CLIService extends CompositeService 
implements ICLIService {
   }
 
   // obtain delegation token for the give user from metastore
+  // TODO: why is this synchronized?
   public synchronized String getDelegationTokenFromMetaStore(String owner)
   throws HiveSQLExc
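The shape of this change: state transitions (init/start/stop) keep the service monitor, the listeners list gets its own narrower lock, and hiveConf reads stop synchronizing at all, on the theory that the field is written once during init and only read afterwards. A self-contained sketch of the same pattern (illustrative, not Hive's exact code; note the sketch marks the field volatile, which the real patch does not):

import java.util.ArrayList;
import java.util.List;

abstract class SketchService {
  private final List<Runnable> listeners = new ArrayList<>();
  // Written once in init(), read-only afterwards. volatile makes the
  // unsynchronized getter safe even if init and a read race.
  private volatile String conf;

  // State transitions still serialize on the service monitor.
  public synchronized void init(String conf) {
    this.conf = conf;
  }

  // Registration now contends only with other listener changes,
  // not with init/start/stop.
  public void register(Runnable l) {
    synchronized (listeners) {
      listeners.add(l);
    }
  }

  public void unregister(Runnable l) {
    synchronized (listeners) {
      listeners.remove(l);
    }
  }

  // No lock needed to read an effectively-final field.
  public String getConf() {
    return conf;
  }
}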

[2/2] hive git commit: HIVE-19631 : reduce epic locking in AbstractService (Sergey Shelukhin, reviewed by Thejas M Nair)

2018-05-24 Thread sershe
HIVE-19631 : reduce epic locking in AbstractService (Sergey Shelukhin, reviewed 
by Thejas M Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fa30fe4b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fa30fe4b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fa30fe4b

Branch: refs/heads/branch-3
Commit: fa30fe4b1ca63c7f0579b9d33e0a00588c53b38d
Parents: eaeb200
Author: sergey 
Authored: Thu May 24 14:32:00 2018 -0700
Committer: sergey 
Committed: Thu May 24 14:32:18 2018 -0700

--
 .../apache/hive/service/AbstractService.java| 23 ++--
 .../org/apache/hive/service/cli/CLIService.java |  5 -
 .../apache/hive/service/server/HiveServer2.java | 12 +-
 3 files changed, 27 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/fa30fe4b/service/src/java/org/apache/hive/service/AbstractService.java
--
diff --git a/service/src/java/org/apache/hive/service/AbstractService.java 
b/service/src/java/org/apache/hive/service/AbstractService.java
index 37a9f4c..2ddb069 100644
--- a/service/src/java/org/apache/hive/service/AbstractService.java
+++ b/service/src/java/org/apache/hive/service/AbstractService.java
@@ -50,7 +50,7 @@ public abstract class AbstractService implements Service {
   /**
* The configuration. Will be null until the service is initialized.
*/
-  protected HiveConf hiveConf;
+  private HiveConf hiveConf;
 
   /**
* List of state change listeners; it is final to ensure
@@ -69,6 +69,7 @@ public abstract class AbstractService implements Service {
 this.name = name;
   }
 
+  // This probably doesn't need to be sync, but nobody calls this, so it 
doesn't matter.
   @Override
   public synchronized STATE getServiceState() {
 return state;
@@ -84,11 +85,15 @@ public abstract class AbstractService implements Service {
   @Override
   public synchronized void init(HiveConf hiveConf) {
 ensureCurrentState(STATE.NOTINITED);
-this.hiveConf = hiveConf;
+setHiveConf(hiveConf);
 changeState(STATE.INITED);
 LOG.info("Service:" + getName() + " is inited.");
   }
 
+  protected final void setHiveConf(HiveConf hiveConf) {
+this.hiveConf = hiveConf;
+  }
+
   /**
* {@inheritDoc}
*
@@ -126,13 +131,17 @@ public abstract class AbstractService implements Service {
   }
 
   @Override
-  public synchronized void register(ServiceStateChangeListener l) {
-listeners.add(l);
+  public void register(ServiceStateChangeListener l) {
+synchronized (listeners) {
+  listeners.add(l);
+}
   }
 
   @Override
-  public synchronized void unregister(ServiceStateChangeListener l) {
-listeners.remove(l);
+  public void unregister(ServiceStateChangeListener l) {
+synchronized (listeners) {
+  listeners.remove(l);
+}
   }
 
   @Override
@@ -141,7 +150,7 @@ public abstract class AbstractService implements Service {
   }
 
   @Override
-  public synchronized HiveConf getHiveConf() {
+  public HiveConf getHiveConf() {
 return hiveConf;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/fa30fe4b/service/src/java/org/apache/hive/service/cli/CLIService.java
--
diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java 
b/service/src/java/org/apache/hive/service/cli/CLIService.java
index c9914ba..3e26197 100644
--- a/service/src/java/org/apache/hive/service/cli/CLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/CLIService.java
@@ -80,7 +80,7 @@ public class CLIService extends CompositeService implements 
ICLIService {
 
   @Override
   public synchronized void init(HiveConf hiveConf) {
-this.hiveConf = hiveConf;
+setHiveConf(hiveConf);
 sessionManager = new SessionManager(hiveServer2);
 defaultFetchRows = 
hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE);
 addService(sessionManager);
@@ -132,6 +132,7 @@ public class CLIService extends CompositeService implements 
ICLIService {
   }
 
   private void setupBlockedUdfs() {
+HiveConf hiveConf = getHiveConf();
 FunctionRegistry.setupPermissionsForBuiltinUDFs(
 hiveConf.getVar(ConfVars.HIVE_SERVER2_BUILTIN_UDF_WHITELIST),
 hiveConf.getVar(ConfVars.HIVE_SERVER2_BUILTIN_UDF_BLACKLIST));
@@ -563,8 +564,10 @@ public class CLIService extends CompositeService 
implements ICLIService {
   }
 
   // obtain delegation token for the give user from metastore
+  // TODO: why is this synchronized?
   public synchronized String getDelegationTokenFromMetaStore(String owner)
   throws HiveSQLException, UnsupportedOperationException, LoginException, 
IOException {
+HiveConf hiveConf = getHiveConf();
 if (!hiv

hive git commit: HIVE-19559: SparkClientImpl shouldn't name redirector thread RemoteDriver (Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)

2018-05-24 Thread stakiar
Repository: hive
Updated Branches:
  refs/heads/master 92dd70739 -> 68a2beabc


HIVE-19559: SparkClientImpl shouldn't name redirector thread RemoteDriver 
(Bharathkrishna Guruvayoor Murali, reviewed by Sahil Takiar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/68a2beab
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/68a2beab
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/68a2beab

Branch: refs/heads/master
Commit: 68a2beabc38944c3e5238b454b1a0fe30dbf0034
Parents: 92dd707
Author: Bharathkrishna Guruvayoor Murali 
Authored: Thu May 24 13:40:36 2018 -0700
Committer: Sahil Takiar 
Committed: Thu May 24 13:40:36 2018 -0700

--
 .../main/java/org/apache/hive/spark/client/SparkClientImpl.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/68a2beab/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
--
diff --git 
a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java 
b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
index f8b5d19..847c82b 100644
--- 
a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
+++ 
b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
@@ -473,9 +473,9 @@ class SparkClientImpl implements SparkClient {
final List<String> childErrorLog = Collections.synchronizedList(new 
ArrayList<String>());
 final LogRedirector.LogSourceCallback callback = () -> {return isAlive;};
 
-LogRedirector.redirect("RemoteDriver-stdout-redir-" + threadName,
+LogRedirector.redirect("spark-submit-stdout-redir-" + threadName,
 new LogRedirector(child.getInputStream(), LOG, callback));
-LogRedirector.redirect("RemoteDriver-stderr-redir-" + threadName,
+LogRedirector.redirect("spark-submit-stderr-redir-" + threadName,
 new LogRedirector(child.getErrorStream(), LOG, childErrorLog, 
callback));
 
 runnable = new Runnable() {
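LogRedirector, used above, pumps a child process's stdout/stderr into the logger on dedicated, named threads; the rename just makes those names reflect reality, since the threads watch the spark-submit process rather than the RemoteDriver. A simplified stand-in for the same pattern in plain Java (a sketch, not Hive's LogRedirector):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

final class StreamRedirectorSketch {
  // Pump one stream of a child process to stdout on a named daemon
  // thread until the stream closes.
  static void redirect(String threadName, InputStream in) {
    Thread t = new Thread(() -> {
      try (BufferedReader r = new BufferedReader(new InputStreamReader(in))) {
        String line;
        while ((line = r.readLine()) != null) {
          System.out.println("[" + threadName + "] " + line);
        }
      } catch (IOException ignored) {
        // child exited; nothing left to drain
      }
    }, threadName);
    t.setDaemon(true);
    t.start();
  }

  public static void main(String[] args) throws IOException {
    // Assumes spark-submit is on PATH; any long-running child works.
    Process child = new ProcessBuilder("spark-submit", "--version").start();
    // The fix above is purely about these names: a thread dump should
    // say what the thread actually watches.
    redirect("spark-submit-stdout-redir-main", child.getInputStream());
    redirect("spark-submit-stderr-redir-main", child.getErrorStream());
  }
}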



[1/3] hive git commit: HIVE-19698: TestAMReporter#testMultipleAM is flaky (Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)

2018-05-24 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 29e4f3be4 -> 92dd70739


HIVE-19698: TestAMReporter#testMultipleAM is flaky (Jesus Camacho Rodriguez, 
reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8d24b86d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8d24b86d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8d24b86d

Branch: refs/heads/master
Commit: 8d24b86de6d661817d00b1de0a91be382afa7c2f
Parents: 29e4f3b
Author: Jesus Camacho Rodriguez 
Authored: Thu May 24 10:53:47 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 10:53:47 2018 -0700

--
 .../hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java   | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/8d24b86d/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
--
diff --git 
a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
 
b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
index 19f8048..068aad4 100644
--- 
a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
+++ 
b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java
@@ -25,8 +25,10 @@ import org.apache.hadoop.hive.llap.daemon.impl.AMReporter;
 import org.apache.hadoop.hive.llap.daemon.impl.QueryIdentifier;
 import org.apache.hadoop.hive.llap.protocol.LlapTaskUmbilicalProtocol;
 import org.apache.hadoop.io.Text;
+import org.apache.hive.common.util.RetryTestRunner;
 import org.apache.tez.dag.records.TezTaskAttemptID;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -44,6 +46,7 @@ import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+@RunWith(RetryTestRunner.class)
 public class TestAMReporter {
   @Test(timeout = 5000)
   public void testMultipleAM() throws InterruptedException {
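RetryTestRunner comes from hive-common's test jar (hence the pom change in the next message) and reruns a failing test a few times before reporting failure, which papers over timing-sensitive flakiness like this one. Its source is not part of this diff; a roughly equivalent, self-contained JUnit 4 retry rule would look like this (a sketch, not Hive's actual runner):

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;

public class RetryRuleExample {
  // Retry a failing test up to 'attempts' times; rethrow the last failure.
  static TestRule retry(int attempts) {
    return (Statement base, Description desc) -> new Statement() {
      @Override
      public void evaluate() throws Throwable {
        Throwable last = null;
        for (int i = 0; i < attempts; i++) {
          try {
            base.evaluate(); // run the test body (plus inner rules)
            return;
          } catch (Throwable t) {
            last = t;
          }
        }
        throw last;
      }
    };
  }

  @Rule
  public TestRule flakyRetry = retry(3);

  @Test
  public void sometimesFlaky() {
    // timing-sensitive assertions would go here
  }
}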



[2/3] hive git commit: HIVE-19698: TestAMReporter#testMultipleAM is flaky (Jesus Camacho Rodriguez, reviewed by Prasanth Jayachandran)

2018-05-24 Thread jcamacho
HIVE-19698: TestAMReporter#testMultipleAM is flaky (Jesus Camacho Rodriguez, 
reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b019909a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b019909a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b019909a

Branch: refs/heads/master
Commit: b019909a94760f5f8cead8ce996fa725fbd3c3db
Parents: 8d24b86
Author: Jesus Camacho Rodriguez 
Authored: Thu May 24 11:03:44 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 11:03:44 2018 -0700

--
 llap-server/pom.xml | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b019909a/llap-server/pom.xml
--
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index 8b43066..b7a5b3f 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -235,6 +235,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
+      <artifactId>hive-common</artifactId>
+      <version>${project.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
       <artifactId>hive-standalone-metastore</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>



[3/3] hive git commit: HIVE-19697: TestReOptimization#testStatCachingMetaStore is flaky (Jesus Camacho Rodriguez, reviewed by Zoltan Haindrich)

2018-05-24 Thread jcamacho
HIVE-19697: TestReOptimization#testStatCachingMetaStore is flaky (Jesus Camacho 
Rodriguez, reviewed by Zoltan Haindrich)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/92dd7073
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/92dd7073
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/92dd7073

Branch: refs/heads/master
Commit: 92dd7073922e5bf772a71a77a104a49bf0928893
Parents: b019909
Author: Jesus Camacho Rodriguez 
Authored: Thu May 24 11:30:13 2018 -0700
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 24 11:30:13 2018 -0700

--
 .../org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/92dd7073/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java 
b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java
index cd2a46b..b945c60 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java
@@ -42,10 +42,12 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestRule;
 
+@Ignore("Flaky. Will be re-enabled by HIVE-19697")
 public class TestReOptimization {
 
   @ClassRule
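Class-level @Ignore skips the whole suite until HIVE-19697 is resolved. Where finer control is wanted, the stock JUnit alternative is a runtime assumption, which marks tests as skipped rather than removing them from the report entirely; a small sketch, not from this patch:

import static org.junit.Assume.assumeTrue;

import org.junit.Before;
import org.junit.Test;

public class AssumeExample {
  @Before
  public void skipUnlessOptedIn() {
    // Unlike @Ignore, a failed assumption skips at run time, so the
    // suite can be re-enabled with a system property and no code change.
    assumeTrue(Boolean.getBoolean("run.flaky.tests"));
  }

  @Test
  public void statCachingMetaStore() {
    // flaky assertions would go here
  }
}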



[2/4] hive git commit: HIVE-19527: Preparing for 2.4 development (Sergio Pena, reviewed by Peter Vary)

2018-05-24 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/977ea455/metastore/scripts/upgrade/oracle/hive-schema-2.4.0.oracle.sql
--
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.4.0.oracle.sql 
b/metastore/scripts/upgrade/oracle/hive-schema-2.4.0.oracle.sql
new file mode 100644
index 000..c07ca79
--- /dev/null
+++ b/metastore/scripts/upgrade/oracle/hive-schema-2.4.0.oracle.sql
@@ -0,0 +1,811 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY 
(SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY 
(CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes 
[org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+"COLUMN_NAME" VARCHAR2(767) NULL,
+CREATE_TIME NUMBER (10) NOT NULL,
+GRANT_OPTION NUMBER (5) NOT NULL,
+GRANTOR VARCHAR2(128) NULL,
+GRANTOR_TYPE VARCHAR2(128) NULL,
+PART_ID NUMBER NULL,
+PRINCIPAL_NAME VARCHAR2(128) NULL,
+PRINCIPAL_TYPE VARCHAR2(128) NULL,
+PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY 
(PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+CD_ID NUMBER NOT NULL,
+"COMMENT" VARCHAR2(256) NULL,
+"COLUMN_NAME" VARCHAR2(767) NOT NULL,
+TYPE_NAME CLOB NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+PART_ID NUMBER NOT NULL,
+PART_KEY_VAL VARCHAR2(256) NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+DB_ID NUMBER NOT NULL,
+"DESC" VARCHAR2(4000) NULL,
+DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+OWNER_NAME VARCHAR2(128) NULL,
+OWNER_TYPE VARCHAR2(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+PART_ID NUMBER NOT NULL,
+PARAM_KEY VARCHAR2(256) NOT NULL,
+PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+SERDE_ID NUMBER NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+TYPES_ID NUMBER NOT NULL,
+TYPE_NAME VARCHAR2(128) NULL,
+TYPE1 VARCHAR2(767) NULL,
+TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+TBL_ID NUMBER NOT NULL,
+PKEY_COMMENT VARCHAR2(4000) NULL,
+PKEY_NAME VARCHAR2(128) NOT NULL,
+PKEY_TYPE VARCHAR2(767) NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+ROLE_ID NUMBER NOT NULL,
+CREATE_TIME NUMBER (10) NOT NULL,
+OWNER_NAME VARCHAR2(128) NULL,
+ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+PART_ID NUMBER NOT NULL,
+CREATE_TI

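One detail worth calling out in the Oracle script above: SEQUENCE_TABLE is not a database sequence but a plain table of named counters that DataNucleus reads and bumps to allocate ids. A rough JDBC sketch of that fetch-and-increment pattern, assuming an open connection to a schema created by this script; the class name, counter name, and block size are illustrative, and the actual DataNucleus implementation differs:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public final class SequenceTableSketch {
  // Reads NEXT_VAL for one named counter, reserves 'block' values, and
  // returns the first reserved value.
  public static long nextBlock(Connection conn, String name, int block) throws SQLException {
    conn.setAutoCommit(false);
    try (PreparedStatement sel = conn.prepareStatement(
             "SELECT NEXT_VAL FROM SEQUENCE_TABLE WHERE SEQUENCE_NAME = ? FOR UPDATE");
         PreparedStatement upd = conn.prepareStatement(
             "UPDATE SEQUENCE_TABLE SET NEXT_VAL = ? WHERE SEQUENCE_NAME = ?")) {
      sel.setString(1, name);
      try (ResultSet rs = sel.executeQuery()) {
        if (!rs.next()) {
          throw new SQLException("No counter named " + name);
        }
        long next = rs.getLong(1);
        upd.setLong(1, next + block);
        upd.setString(2, name);
        upd.executeUpdate();
        conn.commit();
        return next;
      }
    }
  }
}

The SELECT ... FOR UPDATE locks the counter row so two clients cannot reserve the same range; the commit releases the lock.
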
[4/4] hive git commit: HIVE-19527: Preparing for 2.4 development (Sergio Pena, reviewed by Peter Vary)

2018-05-24 Thread spena
HIVE-19527: Preparing for 2.4 development (Sergio Pena, reviewed by Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/977ea455
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/977ea455
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/977ea455

Branch: refs/heads/branch-2
Commit: 977ea45599cc0948b74825fe086a768bc133e3ff
Parents: 3baae5f
Author: Sergio Pena 
Authored: Thu May 24 09:52:24 2018 -0500
Committer: Sergio Pena 
Committed: Thu May 24 09:52:24 2018 -0500

--
 accumulo-handler/pom.xml|2 +-
 beeline/pom.xml |2 +-
 cli/pom.xml |2 +-
 common/pom.xml  |2 +-
 contrib/pom.xml |2 +-
 druid-handler/pom.xml   |2 +-
 hbase-handler/pom.xml   |2 +-
 hcatalog/core/pom.xml   |2 +-
 hcatalog/hcatalog-pig-adapter/pom.xml   |2 +-
 hcatalog/pom.xml|2 +-
 hcatalog/server-extensions/pom.xml  |2 +-
 hcatalog/streaming/pom.xml  |2 +-
 hcatalog/webhcat/java-client/pom.xml|2 +-
 hcatalog/webhcat/svr/pom.xml|2 +-
 hplsql/pom.xml  |2 +-
 itests/custom-serde/pom.xml |2 +-
 itests/custom-udfs/pom.xml  |2 +-
 itests/custom-udfs/udf-classloader-udf1/pom.xml |2 +-
 itests/custom-udfs/udf-classloader-udf2/pom.xml |2 +-
 itests/custom-udfs/udf-classloader-util/pom.xml |2 +-
 .../udf-vectorized-badexample/pom.xml   |2 +-
 itests/hcatalog-unit/pom.xml|2 +-
 itests/hive-blobstore/pom.xml   |2 +-
 itests/hive-jmh/pom.xml |2 +-
 itests/hive-minikdc/pom.xml |2 +-
 itests/hive-unit-hadoop2/pom.xml|2 +-
 itests/hive-unit/pom.xml|2 +-
 itests/pom.xml  |2 +-
 itests/qtest-accumulo/pom.xml   |2 +-
 itests/qtest-spark/pom.xml  |2 +-
 itests/qtest/pom.xml|2 +-
 itests/test-serde/pom.xml   |2 +-
 itests/util/pom.xml |2 +-
 jdbc-handler/pom.xml|2 +-
 jdbc/pom.xml|2 +-
 llap-client/pom.xml |2 +-
 llap-common/pom.xml |2 +-
 llap-ext-client/pom.xml |2 +-
 llap-server/pom.xml |2 +-
 llap-tez/pom.xml|2 +-
 metastore/pom.xml   |2 +-
 .../upgrade/derby/hive-schema-2.4.0.derby.sql   |  340 
 .../derby/hive-txn-schema-2.4.0.derby.sql   |  134 ++
 .../derby/upgrade-2.3.0-to-2.4.0.derby.sql  |3 +
 .../scripts/upgrade/derby/upgrade.order.derby   |1 +
 .../upgrade/mssql/hive-schema-2.4.0.mssql.sql   | 1023 
 .../mssql/upgrade-2.3.0-to-2.4.0.mssql.sql  |4 +
 .../scripts/upgrade/mssql/upgrade.order.mssql   |1 +
 .../upgrade/mysql/hive-schema-2.4.0.mysql.sql   |  853 ++
 .../mysql/hive-txn-schema-2.4.0.mysql.sql   |  135 ++
 .../mysql/upgrade-2.3.0-to-2.4.0.mysql.sql  |5 +
 .../scripts/upgrade/mysql/upgrade.order.mysql   |1 +
 .../upgrade/oracle/hive-schema-2.4.0.oracle.sql |  811 ++
 .../oracle/hive-txn-schema-2.4.0.oracle.sql |  133 ++
 .../oracle/upgrade-2.3.0-to-2.4.0.oracle.sql|6 +
 .../scripts/upgrade/oracle/upgrade.order.oracle |1 +
 .../postgres/hive-schema-2.4.0.postgres.sql | 1478 ++
 .../postgres/hive-txn-schema-2.4.0.postgres.sql |  133 ++
 .../upgrade-2.3.0-to-2.4.0.postgres.sql |7 +
 .../upgrade/postgres/upgrade.order.postgres |1 +
 packaging/pom.xml   |2 +-
 pom.xml |4 +-
 ql/pom.xml  |2 +-
 serde/pom.xml   |2 +-
 service-rpc/pom.xml |2 +-
 service/pom.xml |2 +-
 shims/0.23/pom.xml  |2 +-
 shims/aggregator/pom.xml|2 +-
 shims/common/pom.xml|2 +-
 shims/pom.xml   |2 +-
 shims/scheduler/pom.xml |2 +-
 spark-client/pom.xml|4 +-
 testutils/pom.xml   |2 +-
 vector-code-gen/pom.xml   

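Since [4/4] shows only the diffstat, a note on how the files listed above are consumed: for a fresh install the metastore schema is created from hive-schema-2.4.0.<db>.sql, while an existing 2.3.0 metastore is brought forward by the upgrade-2.3.0-to-2.4.0.<db>.sql script, chained in the order recorded in upgrade.order.<db>. A deliberately simplistic sketch of running such a script over JDBC, assuming it contains only '--' comments and plain ';'-terminated statements (the class name is hypothetical, and real scripts can need vendor-specific handling):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public final class SchemaScriptRunner {
  public static void run(Connection conn, String scriptPath) throws IOException, SQLException {
    // Strip blank lines and '--' comment lines first, so a statement that
    // follows a comment block is still executed.
    StringBuilder sql = new StringBuilder();
    for (String line : Files.readAllLines(Paths.get(scriptPath))) {
      String t = line.trim();
      if (!t.isEmpty() && !t.startsWith("--")) {
        sql.append(line).append('\n');
      }
    }
    try (Statement stmt = conn.createStatement()) {
      // Naive split: would break on ';' inside string literals or PL/SQL blocks.
      for (String piece : sql.toString().split(";")) {
        String statement = piece.trim();
        if (!statement.isEmpty()) {
          stmt.execute(statement);
        }
      }
    }
  }
}

In practice Hive ships a schematool that wraps this step and validates versions; the sketch only illustrates why each script is written as independent ';'-terminated DDL statements.
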
[1/4] hive git commit: HIVE-19527: Preparing for 2.4 development (Sergio Pena, reviewed by Peter Vary)

2018-05-24 Thread spena
Repository: hive
Updated Branches:
  refs/heads/branch-2 3baae5f4c -> 977ea4559


http://git-wip-us.apache.org/repos/asf/hive/blob/977ea455/spark-client/pom.xml
--
diff --git a/spark-client/pom.xml b/spark-client/pom.xml
index e3bdb47..5de45b8 100644
--- a/spark-client/pom.xml
+++ b/spark-client/pom.xml
@@ -22,14 +22,14 @@
   
 org.apache.hive
 hive
-2.3.0-SNAPSHOT
+2.4.0-SNAPSHOT
   
 
   org.apache.hive
   spark-client
   jar
   Spark Remote Client
-  2.3.0-SNAPSHOT
+  2.4.0-SNAPSHOT
 
   
 ..

http://git-wip-us.apache.org/repos/asf/hive/blob/977ea455/testutils/pom.xml
--
diff --git a/testutils/pom.xml b/testutils/pom.xml
index 0ab295a..b547602 100644
--- a/testutils/pom.xml
+++ b/testutils/pom.xml
@@ -19,7 +19,7 @@
   
 org.apache.hive
 hive
-2.3.0-SNAPSHOT
+2.4.0-SNAPSHOT
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/hive/blob/977ea455/vector-code-gen/pom.xml
--
diff --git a/vector-code-gen/pom.xml b/vector-code-gen/pom.xml
index 81fcdb8..a066a1d 100644
--- a/vector-code-gen/pom.xml
+++ b/vector-code-gen/pom.xml
@@ -19,7 +19,7 @@
   
 org.apache.hive
 hive
-2.3.0-SNAPSHOT
+2.4.0-SNAPSHOT
 ../pom.xml
   
 



[3/4] hive git commit: HIVE-19527: Preparing for 2.4 development (Sergio Pena, reviewed by Peter Vary)

2018-05-24 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/977ea455/metastore/scripts/upgrade/mssql/hive-schema-2.4.0.mssql.sql
--
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.4.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.4.0.mssql.sql
new file mode 100644
index 000..20f6be0
--- /dev/null
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.4.0.mssql.sql
@@ -0,0 +1,1023 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
+--
+-- Complete schema required for the following classes:-
+-- org.apache.hadoop.hive.metastore.model.MColumnDescriptor
+-- org.apache.hadoop.hive.metastore.model.MDBPrivilege
+-- org.apache.hadoop.hive.metastore.model.MDatabase
+-- org.apache.hadoop.hive.metastore.model.MDelegationToken
+-- org.apache.hadoop.hive.metastore.model.MFieldSchema
+-- org.apache.hadoop.hive.metastore.model.MFunction
+-- org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
+-- org.apache.hadoop.hive.metastore.model.MIndex
+-- org.apache.hadoop.hive.metastore.model.MMasterKey
+-- org.apache.hadoop.hive.metastore.model.MOrder
+-- org.apache.hadoop.hive.metastore.model.MPartition
+-- org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
+-- org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
+-- org.apache.hadoop.hive.metastore.model.MPartitionEvent
+-- org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
+-- org.apache.hadoop.hive.metastore.model.MResourceUri
+-- org.apache.hadoop.hive.metastore.model.MRole
+-- org.apache.hadoop.hive.metastore.model.MRoleMap
+-- org.apache.hadoop.hive.metastore.model.MSerDeInfo
+-- org.apache.hadoop.hive.metastore.model.MStorageDescriptor
+-- org.apache.hadoop.hive.metastore.model.MStringList
+-- org.apache.hadoop.hive.metastore.model.MTable
+-- org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
+-- org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
+-- org.apache.hadoop.hive.metastore.model.MTablePrivilege
+-- org.apache.hadoop.hive.metastore.model.MType
+-- org.apache.hadoop.hive.metastore.model.MVersionTable
+--
+-- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE MASTER_KEYS
+(
+KEY_ID int NOT NULL,
+MASTER_KEY nvarchar(767) NULL
+);
+
+ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+INDEX_ID bigint NOT NULL,
+CREATE_TIME int NOT NULL,
+DEFERRED_REBUILD bit NOT NULL,
+INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+INDEX_NAME nvarchar(128) NULL,
+INDEX_TBL_ID bigint NULL,
+LAST_ACCESS_TIME int NOT NULL,
+ORIG_TBL_ID bigint NULL,
+SD_ID bigint NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+CREATE TABLE PART_COL_STATS
+(
+CS_ID bigint NOT NULL,
+AVG_COL_LEN float NULL,
+"COLUMN_NAME" nvarchar(767) NOT NULL,
+COLUMN_TYPE nvarchar(128) NOT NULL,
+DB_NAME nvarchar(128) NOT NULL,
+BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+DOUBLE_HIGH_VALUE float NULL,
+DOUBLE_LOW_VALUE float NULL,
+LAST_ANALYZED bigint NOT NULL,
+LONG_HIGH_VALUE bigint NULL,
+LONG_LOW_VALUE bigint NULL,
+MAX_COL_LEN bigint NULL,
+NUM_DISTINCTS bigint NULL,
+NUM_FALSES bigint NULL,
+NUM_NULLS bigint NOT NULL,
+NUM_TRUES bigint NULL,
+PART_ID bigint NULL,
+PARTITION_NAME nvarchar(767) NOT NULL,
+"TABLE_NAME" nvarchar(256) NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+-- Table PART_PRIVS for classes [org.apache.hadoo