[2/6] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/0a24c885/ql/src/test/results/clientpositive/vector_multi_or_projection.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vector_multi_or_projection.q.out 
b/ql/src/test/results/clientpositive/vector_multi_or_projection.q.out
new file mode 100644
index 0000000..17f2d8b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_multi_or_projection.q.out
@@ -0,0 +1,800 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM vectortab2k
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: database:default
+PREHOOK: Output: default@scratch
+POSTHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM 
vectortab2k
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: scratch.bo SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: scratch.i SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: scratch.si SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: scratch.t SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+t  si  i   b   bo
+PREHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@scratch
+POSTHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, 
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.bo EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.i EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.si EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.t EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+_col0  _col1   _col2   _col3   _col4
+PREHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@scratch
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k_orc
+POSTHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@scratch
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k_orc
+POSTHOOK: Lineage: vectortab2k_orc.b SIMPLE 
[(scratch)scratch.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.bo SIMPLE 
[(scratch)scratch.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.i SIMPLE 

[4/6] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/0a24c885/ql/src/test/results/clientpositive/tez/vector_multi_or_projection.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/tez/vector_multi_or_projection.q.out 
b/ql/src/test/results/clientpositive/tez/vector_multi_or_projection.q.out
new file mode 100644
index 0000000..0174ca1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_multi_or_projection.q.out
@@ -0,0 +1,821 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM vectortab2k
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: database:default
+PREHOOK: Output: default@scratch
+POSTHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM 
vectortab2k
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: scratch.bo SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: scratch.i SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: scratch.si SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: scratch.t SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+t  si  i   b   bo
+PREHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@scratch
+POSTHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, 
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.bo EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.i EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.si EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.t EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+_col0  _col1   _col2   _col3   _col4
+PREHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@scratch
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k_orc
+POSTHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@scratch
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k_orc
+POSTHOOK: Lineage: vectortab2k_orc.b SIMPLE 
[(scratch)scratch.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.bo SIMPLE 
[(scratch)scratch.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: 

[1/6] hive git commit: HIVE-13338: Differences in vectorized_casts.q output for vectorized and non-vectorized runs (Matt McCline, reviewed by Prasanth Jayachandran)

2016-05-27 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/master f38a42e52 -> 0a24c8859


HIVE-13338: Differences in vectorized_casts.q output for vectorized and 
non-vectorized runs (Matt McCline, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a6626cc6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a6626cc6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a6626cc6

Branch: refs/heads/master
Commit: a6626cc6fe071b3bee491fe3a0dbf31c58116712
Parents: f38a42e
Author: Matt McCline 
Authored: Fri May 27 18:13:46 2016 -0700
Committer: Matt McCline 
Committed: Fri May 27 20:36:22 2016 -0700

--
 .../apache/hadoop/hive/ant/GenVectorCode.java   |  1 +
 .../ql/exec/vector/VectorizationContext.java|  7 ++-
 .../apache/hadoop/hive/ql/udf/UDFToFloat.java   |  4 ++--
 .../clientpositive/tez/vectorized_casts.q.out   | 20 ++--
 .../clientpositive/vectorized_casts.q.out   | 20 ++--
 5 files changed, 29 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/a6626cc6/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
--
diff --git a/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java 
b/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
index 6c6cc63..e9fe8fa 100644
--- a/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
+++ b/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
@@ -933,6 +933,7 @@ public class GenVectorCode extends Task {
   // Casts
   {"ColumnUnaryFunc", "Cast", "long", "double", "", "", "(long)", "", ""},
   {"ColumnUnaryFunc", "Cast", "double", "long", "", "", "(double)", "", 
""},
+  {"ColumnUnaryFunc", "CastLongToFloatVia", "double", "long", "", "", 
"(float)", "", ""},
   {"ColumnUnaryFunc", "CastDoubleToBooleanVia", "long", "double", 
"MathExpr.toBool", "",
 "", "", ""},
   {"ColumnUnaryFunc", "CastLongToBooleanVia", "long", "long", 
"MathExpr.toBool", "",

http://git-wip-us.apache.org/repos/asf/hive/blob/a6626cc6/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index a76e31d..7f55b31 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -1803,7 +1803,12 @@ public class VectorizationContext {
 return getConstantVectorExpression(doubleValue, returnType, 
Mode.PROJECTION);
 }
 if (isIntFamily(inputType)) {
-  return createVectorExpression(CastLongToDouble.class, childExpr, 
Mode.PROJECTION, returnType);
+  if (udf.equals(UDFToFloat.class)) {
+// In order to convert from integer to float correctly, we need to 
apply the float cast not the double cast (HIVE-13338).
+return createVectorExpression(CastLongToFloatViaLongToDouble.class, 
childExpr, Mode.PROJECTION, returnType);
+  } else {
+return createVectorExpression(CastLongToDouble.class, childExpr, 
Mode.PROJECTION, returnType);
+  }
 } else if (inputType.equals("timestamp")) {
   return createVectorExpression(CastTimestampToDouble.class, childExpr, 
Mode.PROJECTION,
   returnType);
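
The committed comment above is the crux of HIVE-13338: the old vectorized path cast integer values straight to double (CastLongToDouble), while the row-mode UDFToFloat applies a float cast first, so large integers could round differently and the two execution modes disagreed. A minimal, standalone Java sketch (not part of the patch; the class name is invented) that shows the discrepancy for a long that is not exactly representable as a float:

    public class FloatCastCheck {
      public static void main(String[] args) {
        long v = 16777217L;                 // 2^24 + 1: exactly representable as double, not as float
        double viaDouble = (double) v;      // straight double cast: 1.6777217E7
        double viaFloat  = (float) v;       // float cast, then widened: rounds to 1.6777216E7
        System.out.println(viaDouble == viaFloat);   // prints false, so the two paths diverge
      }
    }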

http://git-wip-us.apache.org/repos/asf/hive/blob/a6626cc6/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
index c612307..5808c90 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hive.ql.udf;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.CastDecimalToDouble;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastLongToDouble;
+import 
org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastLongToFloatViaLongToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.CastTimestampToDouble;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -40,7 +40,7 @@ import org.apache.hadoop.io.Text;
  * UDFToFloat.
  *
  */
-@VectorizedExpressions({CastTimestampToDouble.class, CastLongToDouble.class,

[3/6] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/0a24c885/ql/src/test/results/clientpositive/vector_multi_and_projection.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vector_multi_and_projection.q.out 
b/ql/src/test/results/clientpositive/vector_multi_and_projection.q.out
new file mode 100644
index 0000000..15edf54
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_multi_and_projection.q.out
@@ -0,0 +1,800 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM vectortab2k
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: database:default
+PREHOOK: Output: default@scratch
+POSTHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM 
vectortab2k
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: scratch.bo SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: scratch.i SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: scratch.si SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: scratch.t SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+t  si  i   b   bo
+PREHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@scratch
+POSTHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, 
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.bo EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.i EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.si EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.t EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+_col0  _col1   _col2   _col3   _col4
+PREHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@scratch
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k_orc
+POSTHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@scratch
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k_orc
+POSTHOOK: Lineage: vectortab2k_orc.b SIMPLE 
[(scratch)scratch.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.bo SIMPLE 
[(scratch)scratch.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.i SIMPLE 

[6/6] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt 
McCline, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0a24c885
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0a24c885
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0a24c885

Branch: refs/heads/master
Commit: 0a24c8859f8a0cd6dc4dec99acbaf7f7aa93aa32
Parents: a6626cc
Author: Matt McCline 
Authored: Fri May 27 20:00:17 2016 -0700
Committer: Matt McCline 
Committed: Fri May 27 20:36:49 2016 -0700

--
 .../vectorization/VectorizedLogicBench.java |  12 +-
 .../test/resources/testconfiguration.properties |   2 +
 .../ql/exec/vector/VectorizationContext.java| 125 ++-
 .../ql/exec/vector/expressions/ColAndCol.java   | 687 +++-
 .../ql/exec/vector/expressions/ColOrCol.java| 694 +++-
 .../exec/vector/TestVectorizationContext.java   |   4 -
 .../TestVectorLogicalExpressions.java   |   4 +-
 .../vector_multi_and_projection.q   | 196 +
 .../clientpositive/vector_multi_or_projection.q | 198 +
 .../tez/vector_multi_and_projection.q.out   | 821 +++
 .../tez/vector_multi_or_projection.q.out| 821 +++
 .../vector_multi_and_projection.q.out   | 800 ++
 .../vector_multi_or_projection.q.out| 800 ++
 13 files changed, 4667 insertions(+), 497 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0a24c885/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
--
diff --git 
a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
 
b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
index 50dadb2..7ff6158 100644
--- 
a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
+++ 
b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
@@ -55,7 +55,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanLongColumnVector(),
   getBooleanLongColumnVector());
-  expression = new ColAndCol(0, 1, 2);
+  expression = new ColAndCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -64,7 +64,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanLongColumnVector(),
   getBooleanRepeatingLongColumnVector());
-  expression = new ColAndCol(0, 1, 2);
+  expression = new ColAndCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -73,7 +73,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanRepeatingLongColumnVector(),
   getBooleanLongColumnVector());
-  expression = new ColAndCol(0, 1, 2);
+  expression = new ColAndCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -82,7 +82,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanLongColumnVector(),
   getBooleanLongColumnVector());
-  expression = new ColOrCol(0, 1, 2);
+  expression = new ColOrCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -91,7 +91,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanLongColumnVector(),
   getBooleanRepeatingLongColumnVector());
-  expression = new ColOrCol(0, 1, 2);
+  expression = new ColOrCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -100,7 +100,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanRepeatingLongColumnVector(),
   getBooleanLongColumnVector());
-  expression = new ColOrCol(0, 1, 2);
+  expression = new ColOrCol(new int[] {0, 1}, 2);
 }
   }
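
The benchmark changes above reflect the API change in HIVE-13084: ColAndCol and ColOrCol now take an array of input column numbers plus an output column instead of exactly two inputs, so a whole AND/OR chain can be evaluated by a single vectorized projection expression. As an illustration only (the class and method names below are invented, and the real expressions additionally handle nulls, repeating vectors and the selected-rows array), a minimal Java sketch of the multi-AND projection semantics over boolean columns encoded as 0/1 longs:

    public class MultiColAndSketch {
      // Output is 1 for a row only when every input column is 1 for that row.
      static void multiAnd(long[][] inputCols, long[] outputCol, int n) {
        for (int row = 0; row < n; row++) {
          long result = 1L;
          for (long[] col : inputCols) {
            result &= col[row];
          }
          outputCol[row] = result;
        }
      }

      public static void main(String[] args) {
        long[][] cols = { {1, 1, 0}, {1, 0, 1} };   // two boolean input columns
        long[] out = new long[3];
        multiAnd(cols, out, 3);
        System.out.println(java.util.Arrays.toString(out));  // [1, 0, 0]
      }
    }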
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0a24c885/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 1ab914d..fd6901c 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -311,7 +311,9 @@ minitez.query.files.shared=acid_globallimit.q,\
   vector_leftsemi_mapjoin.q,\
   vector_mapjoin_reduce.q,\
   vector_mr_diff_schema_alias.q,\
+  

[5/5] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt 
McCline, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9dd70f0d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9dd70f0d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9dd70f0d

Branch: refs/heads/branch-2.1
Commit: 9dd70f0d38674370a557465f0acec5e1cfa5e8d1
Parents: 2bd2e9a
Author: Matt McCline 
Authored: Fri May 27 20:00:17 2016 -0700
Committer: Matt McCline 
Committed: Fri May 27 20:29:43 2016 -0700

--
 .../vectorization/VectorizedLogicBench.java |  12 +-
 .../test/resources/testconfiguration.properties |   2 +
 .../ql/exec/vector/VectorizationContext.java| 125 ++-
 .../ql/exec/vector/expressions/ColAndCol.java   | 687 +++-
 .../ql/exec/vector/expressions/ColOrCol.java| 694 +++-
 .../exec/vector/TestVectorizationContext.java   |   4 -
 .../TestVectorLogicalExpressions.java   |   4 +-
 .../vector_multi_and_projection.q   | 196 +
 .../clientpositive/vector_multi_or_projection.q | 198 +
 .../tez/vector_multi_and_projection.q.out   | 821 +++
 .../tez/vector_multi_or_projection.q.out| 821 +++
 .../vector_multi_and_projection.q.out   | 800 ++
 .../vector_multi_or_projection.q.out| 800 ++
 13 files changed, 4667 insertions(+), 497 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/9dd70f0d/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
--
diff --git 
a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
 
b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
index 50dadb2..7ff6158 100644
--- 
a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
+++ 
b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/VectorizedLogicBench.java
@@ -55,7 +55,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanLongColumnVector(),
   getBooleanLongColumnVector());
-  expression = new ColAndCol(0, 1, 2);
+  expression = new ColAndCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -64,7 +64,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanLongColumnVector(),
   getBooleanRepeatingLongColumnVector());
-  expression = new ColAndCol(0, 1, 2);
+  expression = new ColAndCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -73,7 +73,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanRepeatingLongColumnVector(),
   getBooleanLongColumnVector());
-  expression = new ColAndCol(0, 1, 2);
+  expression = new ColAndCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -82,7 +82,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanLongColumnVector(),
   getBooleanLongColumnVector());
-  expression = new ColOrCol(0, 1, 2);
+  expression = new ColOrCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -91,7 +91,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanLongColumnVector(),
   getBooleanRepeatingLongColumnVector());
-  expression = new ColOrCol(0, 1, 2);
+  expression = new ColOrCol(new int[] {0, 1}, 2);
 }
   }
 
@@ -100,7 +100,7 @@ public class VectorizedLogicBench {
 public void setup() {
   rowBatch = buildRowBatch(new LongColumnVector(), 2, 
getBooleanRepeatingLongColumnVector(),
   getBooleanLongColumnVector());
-  expression = new ColOrCol(0, 1, 2);
+  expression = new ColOrCol(new int[] {0, 1}, 2);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9dd70f0d/itests/src/test/resources/testconfiguration.properties
--
diff --git a/itests/src/test/resources/testconfiguration.properties 
b/itests/src/test/resources/testconfiguration.properties
index 1ab914d..fd6901c 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -311,7 +311,9 @@ minitez.query.files.shared=acid_globallimit.q,\
   vector_leftsemi_mapjoin.q,\
   vector_mapjoin_reduce.q,\
   vector_mr_diff_schema_alias.q,\
+  

[3/5] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/9dd70f0d/ql/src/test/results/clientpositive/tez/vector_multi_or_projection.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/tez/vector_multi_or_projection.q.out 
b/ql/src/test/results/clientpositive/tez/vector_multi_or_projection.q.out
new file mode 100644
index 0000000..0174ca1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_multi_or_projection.q.out
@@ -0,0 +1,821 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM vectortab2k
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: database:default
+PREHOOK: Output: default@scratch
+POSTHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM 
vectortab2k
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: scratch.bo SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: scratch.i SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: scratch.si SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: scratch.t SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+t  si  i   b   bo
+PREHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@scratch
+POSTHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, 
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.bo EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.i EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.si EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.t EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+_col0  _col1   _col2   _col3   _col4
+PREHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@scratch
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k_orc
+POSTHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@scratch
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k_orc
+POSTHOOK: Lineage: vectortab2k_orc.b SIMPLE 
[(scratch)scratch.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.bo SIMPLE 
[(scratch)scratch.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: 

[2/5] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/9dd70f0d/ql/src/test/results/clientpositive/vector_multi_and_projection.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vector_multi_and_projection.q.out 
b/ql/src/test/results/clientpositive/vector_multi_and_projection.q.out
new file mode 100644
index 0000000..15edf54
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_multi_and_projection.q.out
@@ -0,0 +1,800 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM vectortab2k
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: database:default
+PREHOOK: Output: default@scratch
+POSTHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM 
vectortab2k
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: scratch.bo SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: scratch.i SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: scratch.si SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: scratch.t SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+t  si  i   b   bo
+PREHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@scratch
+POSTHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, 
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.bo EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.i EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.si EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.t EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+_col0  _col1   _col2   _col3   _col4
+PREHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@scratch
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k_orc
+POSTHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@scratch
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k_orc
+POSTHOOK: Lineage: vectortab2k_orc.b SIMPLE 
[(scratch)scratch.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.bo SIMPLE 
[(scratch)scratch.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.i SIMPLE 

[4/5] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
http://git-wip-us.apache.org/repos/asf/hive/blob/9dd70f0d/ql/src/test/queries/clientpositive/vector_multi_or_projection.q
--
diff --git a/ql/src/test/queries/clientpositive/vector_multi_or_projection.q 
b/ql/src/test/queries/clientpositive/vector_multi_or_projection.q
new file mode 100644
index 0000000..0b680b3
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_multi_or_projection.q
@@ -0,0 +1,198 @@
+set hive.cli.print.header=true;
+set hive.explain.user=false;
+SET hive.auto.convert.join=true;
+set hive.fetch.task.conversion=none;
+set hive.mapred.mode=nonstrict;
+
+-- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE 
vectortab2k;
+
+CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM vectortab2k;
+INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, NULL);
+
+CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM scratch;
+
+SET hive.vectorized.execution.enabled=true;
+
+EXPLAIN
+SELECT sum(hash(*)) FROM
+(SELECT t, si, i, (t < 0) as child1, (si > 0) as child2, (i < 0) as 
child3, (t < 0 OR si > 0 OR i < 0) as multi_or_col from vectortab2k_orc
+order by t, si, i) as q;
+
+SELECT sum(hash(*)) FROM
+(SELECT t, si, i, (t < 0) as child1, (si > 0) as child2, (i < 0) as 
child3, (t < 0 OR si > 0 OR i < 0) as multi_or_col from vectortab2k_orc
+order by t, si, i) as q;
+
+EXPLAIN
+SELECT sum(hash(*)) FROM
+(SELECT t, si, i, b, (t < 0) as child1, (si > 0) as child2, (i < 0) as 
child3, (b > 0) as child4, (t < 0 OR si > 0 OR i < 0 OR b > 0) as multi_or_col 
from vectortab2k_orc
+order by t, si, i, b) as q;
+
+SELECT sum(hash(*)) FROM
+(SELECT t, si, i, b, (t < 0) as child1, (si > 0) as child2, (i < 0) as 
child3, (b > 0) as child4, (t < 0 OR si > 0 OR i < 0 OR b > 0) as multi_or_col 
from vectortab2k_orc
+order by t, si, i, b) as q;
+
+-- Use a boolean column rather than a column comparison expression.
+EXPLAIN
+SELECT sum(hash(*)) FROM
+(SELECT t, si, bo, b, (t < 0) as child1, (si > 0) as child2, bo as child3, 
(b > 0) as child4, (t < 0 OR si > 0 OR bo OR b > 0) as multi_or_col from 
vectortab2k_orc
+order by t, si, bo, b) as q;
+
+SELECT sum(hash(*)) FROM
+(SELECT t, si, bo, b, (t < 0) as child1, (si > 0) as child2, bo as child3, 
(b > 0) as child4, (t < 0 OR si > 0 OR bo OR b > 0) as multi_or_col from 
vectortab2k_orc
+order by t, si, bo, b) as q;
+
+-- With some filtering
+SELECT sum(hash(*)) FROM
+(SELECT t, si, i, (t < 0) as child1, (si > 0) as child2, (i < 0) as 
child3, (t < 0 OR si > 0 OR i < 0) as multi_or_col from vectortab2k_orc
+where pmod(i,4) = 2
+order by t, si, i) as q;
+SELECT sum(hash(*)) FROM
+(SELECT t, si, i, b, (t < 0) as child1, (si > 0) as child2, (i < 0) as 
child3, (b > 0) as child4, (t < 0 OR si > 0 OR i < 0 OR b > 0) as multi_or_col 
from vectortab2k_orc
+where pmod(si,4) < 2
+order by t, si, i, b) as q;
+SELECT sum(hash(*)) FROM
+(SELECT t, si, bo, b, (t < 0) as child1, (si > 0) as child2, bo as child3, 
(b > 0) as child4, (t < 0 OR si > 0 OR bo OR b > 0) as multi_or_col from 
vectortab2k_orc
+where pmod(i,4) = 2
+order by t, si, bo, b) as q;
+
+
+SET hive.vectorized.execution.enabled=false;
+
+CREATE TABLE scratch_repeat AS SELECT t, si, i, b, bo,
+20 as t_repeat, 9000 as si_repeat, 9233320 as i_repeat, -823823999339992 
as b_repeat, false as bo_repeat_false, true as bo_repeat_true FROM vectortab2k;
+
+-- The repeated columns ought to create repeated VectorizedRowBatch for those 
columns.
+-- And then when we do a comparison, we should generate a repeated boolean 
result.
+CREATE TABLE vectortab2k_orc_repeat STORED AS ORC AS SELECT * FROM 
scratch_repeat;
+
+SET hive.vectorized.execution.enabled=true;
+
+-- t_repeat < 0 should generate all false.
+SELECT sum(hash(*)) FROM
+(SELECT t_repeat, si, i, (t_repeat < 0) as child1, (si > 0) as child2, (i 
< 0) as child3, (t_repeat < 0 OR si > 0 OR i < 0) as multi_or_col from 
vectortab2k_orc_repeat
+order by t_repeat, si, i) as q;
+
+-- t_repeat > 0 should generate all true.
+SELECT sum(hash(*)) FROM
+(SELECT t_repeat, si, i, (t_repeat > 0) as child1, (si > 0) as child2, (i 
< 0) as child3, (t_repeat > 0 OR si > 0 OR i < 0) as multi_or_col from 
vectortab2k_orc_repeat
+order by t_repeat, si, i) as q;
+
+-- Two repeated false columns at beginning...
+SELECT sum(hash(*)) FROM
+(SELECT t_repeat, si_repeat, i, (t_repeat < 0) as child1, 

[1/5] hive git commit: HIVE-13084: Vectorization add support for PROJECTION Multi-AND/OR (Matt McCline, reviewed by Sergey Shelukhin)

2016-05-27 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 2bd2e9a09 -> 9dd70f0d3


http://git-wip-us.apache.org/repos/asf/hive/blob/9dd70f0d/ql/src/test/results/clientpositive/vector_multi_or_projection.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/vector_multi_or_projection.q.out 
b/ql/src/test/results/clientpositive/vector_multi_or_projection.q.out
new file mode 100644
index 0000000..17f2d8b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_multi_or_projection.q.out
@@ -0,0 +1,800 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table vectortab2k(
+t tinyint,
+si smallint,
+i int,
+b bigint,
+f float,
+d double,
+dc decimal(38,18),
+bo boolean,
+s string,
+s2 string,
+ts timestamp,
+ts2 timestamp,
+dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' 
OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM vectortab2k
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: database:default
+PREHOOK: Output: default@scratch
+POSTHOOK: query: CREATE TABLE scratch AS SELECT t, si, i, b, bo FROM 
vectortab2k
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: scratch.bo SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: scratch.i SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: scratch.si SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: scratch.t SIMPLE 
[(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+t  si  i   b   bo
+PREHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, NULL)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@scratch
+POSTHOOK: query: INSERT INTO TABLE scratch VALUES (NULL, NULL, NULL, NULL, 
NULL)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@scratch
+POSTHOOK: Lineage: scratch.b EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.bo EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col5, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.i EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.si EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, 
type:string, comment:), ]
+POSTHOOK: Lineage: scratch.t EXPRESSION 
[(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, 
type:string, comment:), ]
+_col0  _col1   _col2   _col3   _col4
+PREHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@scratch
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k_orc
+POSTHOOK: query: CREATE TABLE vectortab2k_orc STORED AS ORC AS SELECT * FROM 
scratch
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@scratch
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k_orc
+POSTHOOK: Lineage: vectortab2k_orc.b SIMPLE 
[(scratch)scratch.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2k_orc.bo SIMPLE 

[15/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
deleted file mode 100644
index 9947c1a..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
+++ /dev/null
@@ -1,712 +0,0 @@
-PREHOOK: query: -- list bucketing alter table ... concatenate: 
--- Use list bucketing DML to generate mutilple files in partitions by turning 
off merge
--- dynamic partition. multiple skewed columns. merge.
--- The following explains merge example used in this test case
--- DML will generated 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has more files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 00_0
--- 155 01_0
--- with merge
--- 254 00_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 00_0
--- 99 01_0
--- with merge
--- 142 01_0
--- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 00_0
--- 5181 01_0
--- with merge
--- 5181 00_0
--- 5181 01_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 00_0
--- 87 01_0
--- with merge
--- 118 02_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing alter table ... concatenate: 
--- Use list bucketing DML to generate mutilple files in partitions by turning 
off merge
--- dynamic partition. multiple skewed columns. merge.
--- The following explains merge example used in this test case
--- DML will generated 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has more files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 00_0
--- 155 01_0
--- with merge
--- 254 00_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 00_0
--- 99 01_0
--- with merge
--- 142 01_0
--- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 00_0
--- 5181 01_0
--- with merge
--- 5181 00_0
--- 5181 01_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 00_0
--- 87 01_0
--- with merge
--- 118 02_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-srcpart
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   list_bucketing_dynamic_part
-TOK_PARTSPEC
-   TOK_PARTVAL
-  ds
-  '2008-04-08'
-   TOK_PARTVAL
-  hr
-  TOK_SELECT
- 

[04/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
--
diff --git 
a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out 
b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
deleted file mode 100644
index 793b8be..0000000
--- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
+++ /dev/null
@@ -1,766 +0,0 @@
-Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Stage-2:MAPRED' is a cross product
-PREHOOK: query: -- non agg, non corr
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain
-select key, count(*) 
-from src 
-group by key
-having key not in  
-  ( select key  from src s1 
-where s1.key > '12'
-  )
-PREHOOK: type: QUERY
-POSTHOOK: query: -- non agg, non corr
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain
-select key, count(*) 
-from src 
-group by key
-having key not in  
-  ( select key  from src s1 
-where s1.key > '12'
-  )
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Select Operator
-  expressions: key (type: string)
-  outputColumnNames: key
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-aggregations: count()
-keys: key (type: string)
-mode: hash
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  value expressions: _col1 (type: bigint)
-  Reduce Operator Tree:
-Group By Operator
-  aggregations: count(VALUE._col0)
-  keys: KEY._col0 (type: string)
-  mode: mergepartial
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
-  File Output Operator
-compressed: false
-table:
-input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-Map Reduce
-  Map Operator Tree:
-  TableScan
-Reduce Output Operator
-  sort order: 
-  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
-  value expressions: _col0 (type: string), _col1 (type: bigint)
-  TableScan
-Reduce Output Operator
-  sort order: 
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
-  Reduce Operator Tree:
-Join Operator
-  condition map:
-   Inner Join 0 to 1
-  keys:
-0 
-1 
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE 
Column stats: NONE
-  File Output Operator
-compressed: false
-table:
-input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-Map Reduce
-  Map Operator Tree:
-  TableScan
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE 
Column stats: NONE
-  value expressions: _col1 (type: bigint)
-  TableScan
-alias: src
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: (key > '12') (type: boolean)
-  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-expressions: key (type: string)
- 

[10/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out 
b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
deleted file mode 100644
index 12f41eb..0000000
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
+++ /dev/null
@@ -1,280 +0,0 @@
-PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key) on ('484','51','103')
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key) on ('484','51','103')
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-src
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   list_bucketing_static_part
-TOK_PARTSPEC
-   TOK_PARTVAL
-  ds
-  '2008-04-08'
-   TOK_PARTVAL
-  hr
-  '11'
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Spark
- A masked pattern was here 
-  Vertices:
-Map 1 
-Map Operator Tree:
-TableScan
-  alias: src
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  GatherStats: false
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-File Output Operator
-  compressed: false
-  GlobalTableId: 1
- A masked pattern was here 
-  NumFilesPerFileSink: 1
-  Static Partition Specification: ds=2008-04-08/hr=11/
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
- A masked pattern was here 
-  table:
-  input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-  properties:
-bucket_count -1
-columns key,value
-columns.comments 
-columns.types string:string
- A masked pattern was here 
-name default.list_bucketing_static_part
-partition_columns ds/hr
-   
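
The list bucketing pattern exercised above declares skew on specific key values and, with STORED AS DIRECTORIES, routes each declared value into its own subdirectory of the partition, as the directory names in the header comments show. A rough sketch of how such a table is defined and queried (the DDL repeats the test's definition; the SELECT is only an illustration and is not part of the test):

  create table list_bucketing_static_part (key string, value string)
  partitioned by (ds string, hr string)
  skewed by (key) on ('484','51','103')
  stored as DIRECTORIES
  STORED AS RCFILE;

  -- When list bucketing pruning is enabled, an equality filter on a declared
  -- skewed value can be answered from that value's dedicated directory
  -- instead of scanning the whole partition.
  select count(*)
  from list_bucketing_static_part
  where ds = '2008-04-08' and hr = '11' and key = '484';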

[28/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, 
reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/22541610
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/22541610
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/22541610

Branch: refs/heads/master
Commit: 22541610c1db697ad2eca029f08ad8194c8c373f
Parents: 0bd21b5
Author: Mohit Sabharwal 
Authored: Fri May 20 11:14:13 2016 -0500
Committer: Sergio Pena 
Committed: Fri May 27 21:08:31 2016 -0500

--
 .../columnstats_partlvl_invalid_values.q|1 -
 .../clientpositive/authorization_explain.q  |1 -
 ql/src/test/queries/clientpositive/avro_date.q  |1 -
 .../clientpositive/avro_deserialize_map_null.q  |1 -
 .../clientpositive/avro_nullable_fields.q   |1 -
 .../queries/clientpositive/avro_timestamp.q |1 -
 .../clientpositive/cbo_rp_outer_join_ppr.q  |1 -
 ql/src/test/queries/clientpositive/char_udf1.q  |1 -
 ql/src/test/queries/clientpositive/input4.q |1 -
 ql/src/test/queries/clientpositive/join0.q  |1 -
 .../queries/clientpositive/list_bucket_dml_10.q |1 -
 .../queries/clientpositive/list_bucket_dml_11.q |1 -
 .../queries/clientpositive/list_bucket_dml_12.q |1 -
 .../queries/clientpositive/list_bucket_dml_13.q |1 -
 .../queries/clientpositive/list_bucket_dml_2.q  |1 -
 .../queries/clientpositive/list_bucket_dml_4.q  |1 -
 .../queries/clientpositive/list_bucket_dml_5.q  |1 -
 .../queries/clientpositive/list_bucket_dml_6.q  |1 -
 .../queries/clientpositive/list_bucket_dml_8.q  |1 -
 .../queries/clientpositive/list_bucket_dml_9.q  |1 -
 .../queries/clientpositive/outer_join_ppr.q |1 -
 .../queries/clientpositive/parquet_map_null.q   |1 -
 ql/src/test/queries/clientpositive/plan_json.q  |1 -
 .../queries/clientpositive/stats_list_bucket.q  |1 -
 ql/src/test/queries/clientpositive/str_to_map.q |1 -
 .../clientpositive/subquery_multiinsert.q   |1 -
 .../clientpositive/subquery_notin_having.q  |1 -
 .../test/queries/clientpositive/varchar_udf1.q  |1 -
 .../clientpositive/vector_cast_constant.q   |1 -
 ...mnstats_partlvl_invalid_values.q.java1.7.out |   73 --
 ...mnstats_partlvl_invalid_values.q.java1.8.out |   73 --
 .../columnstats_partlvl_invalid_values.q.out|   69 ++
 .../authorization_explain.q.java1.7.out |   44 -
 .../authorization_explain.q.java1.8.out |   47 -
 .../clientpositive/authorization_explain.q.out  |   40 +
 .../clientpositive/avro_date.q.java1.7.out  |  130 --
 .../clientpositive/avro_date.q.java1.8.out  |  130 --
 .../test/results/clientpositive/avro_date.q.out |  126 ++
 .../avro_deserialize_map_null.q.java1.7.out |   57 -
 .../avro_deserialize_map_null.q.java1.8.out |   57 -
 .../avro_deserialize_map_null.q.out |   55 +
 .../avro_nullable_fields.q.java1.7.out  |  179 ---
 .../avro_nullable_fields.q.java1.8.out  |  179 ---
 .../clientpositive/avro_nullable_fields.q.out   |  177 +++
 .../clientpositive/avro_timestamp.q.java1.7.out |  134 ---
 .../clientpositive/avro_timestamp.q.java1.8.out |  134 ---
 .../results/clientpositive/avro_timestamp.q.out |  132 +++
 .../cbo_rp_outer_join_ppr.q.java1.7.out |  693 ---
 .../clientpositive/cbo_rp_outer_join_ppr.q.out  |  691 +++
 .../clientpositive/char_udf1.q.java1.7.out  |  463 
 .../clientpositive/char_udf1.q.java1.8.out  |  457 ---
 .../test/results/clientpositive/char_udf1.q.out |  459 +++
 .../results/clientpositive/input4.q.java1.7.out |  559 -
 .../results/clientpositive/input4.q.java1.8.out |  559 -
 ql/src/test/results/clientpositive/input4.q.out |  555 +
 .../results/clientpositive/join0.q.java1.7.out  |  240 
 .../results/clientpositive/join0.q.java1.8.out  |  240 
 ql/src/test/results/clientpositive/join0.q.out  |  238 
 .../list_bucket_dml_10.q.java1.7.out|  361 --
 .../list_bucket_dml_10.q.java1.8.out|  389 --
 .../clientpositive/list_bucket_dml_10.q.out |  359 ++
 .../list_bucket_dml_11.q.java1.7.out|  329 -
 .../list_bucket_dml_11.q.java1.8.out|  424 ---
 .../clientpositive/list_bucket_dml_11.q.out |  327 +
 .../list_bucket_dml_12.q.java1.7.out|  426 ---
 .../list_bucket_dml_12.q.java1.8.out|  596 --
 .../clientpositive/list_bucket_dml_12.q.out |  424 +++
 .../list_bucket_dml_13.q.java1.7.out|  337 --
 .../list_bucket_dml_13.q.java1.8.out|  439 ---
 .../clientpositive/list_bucket_dml_13.q.out |  335 ++
 .../list_bucket_dml_2.q.java1.7.out |  591 -
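
Every .q file in this diffstat loses exactly one line: the JAVA_VERSION_SPECIFIC_OUTPUT marker that told the test driver to compare against per-JDK golden files (.q.java1.7.out / .q.java1.8.out). A sketch of what such a query file looked like before the change; the query shown is generic, not taken from any one of the listed tests:

  -- SORT_QUERY_RESULTS
  -- JAVA_VERSION_SPECIFIC_OUTPUT
  select key, count(*) from src group by key;

With the marker gone, a single .q.out per test serves all JDK versions, which is why the version-specific outputs below are deleted and replaced by plain .q.out files.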
 

[21/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
deleted file mode 100644
index dcfbec0..000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
+++ /dev/null
@@ -1,591 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: srcpart
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  File Output Operator
-compressed: false
-GlobalTableId: 1
- A masked pattern was here 
-NumFilesPerFileSink: 1
-Static Partition Specification: ds=2008-04-08/hr=11/
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
- A masked pattern was here 
-table:
-input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-properties:
-  bucket_count -1
-  columns key,value
-  columns.comments 
-  columns.types string:string
- A masked pattern was here 
-  name default.list_bucketing_static_part
-  partition_columns ds/hr
-  partition_columns.types string:string
-  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-name: default.list_bucketing_static_part
-TotalFiles: 1
-GatherStats: true
-MultiFileSpray: false
-  Path -> Alias:
- A masked pattern was here 
-  Path -> Partition:
- A 
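
Unlike the single-column case, this table declares skew on (key, value) pairs, so each listed combination gets its own directory, matching the ds=2008-04-08/hr=11/key=484/value=val_484 paths in the header comments. A hedged illustration of a lookup that targets one such combination (not part of the test itself):

  select count(*)
  from list_bucketing_static_part
  where ds = '2008-04-08' and hr = '11'
    and key = '484' and value = 'val_484';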

[07/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.8.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.8.out 
b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.8.out
deleted file mode 100644
index 1bfdba2..000
--- 
a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.8.out
+++ /dev/null
@@ -1,890 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_4
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_4
-RUN: Stage-0:DDL
-PREHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_5
-POSTHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_5
-RUN: Stage-0:DDL
-Warning: Shuffle Join JOIN[31][tables = [sq_2_notin_nullcheck]] in Work 
'Reducer 2' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-from src a 
-where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-from src a 
-where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-2 is a root stage
-  Stage-1 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-2
-Spark
-  Edges:
-Reducer 2 <- Map 10 (PARTITION-LEVEL SORT, 1), Reducer 9 
(PARTITION-LEVEL SORT, 1)
-Reducer 3 <- Map 7 (PARTITION-LEVEL SORT, 2), Reducer 2 
(PARTITION-LEVEL SORT, 2)
-Reducer 5 <- Map 11 (PARTITION-LEVEL SORT, 2), Map 6 (PARTITION-LEVEL 
SORT, 2)
-Reducer 9 <- Map 8 (GROUP, 1)
-Reducer 4 <- Reducer 3 (SORT, 1)
- A masked pattern was here 
-  Vertices:
-Map 10 
-Map Operator Tree:
-TableScan
-  alias: b
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-sort order: 
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-value expressions: key (type: string), value (type: string)
-Map 11 
-Map Operator Tree:
-TableScan
-  alias: b
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string), value (type: string)
-sort order: ++
-Map-reduce partition columns: key (type: string), value 
(type: string)
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Map 6 
-Map Operator Tree:
-TableScan
-  alias: a
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  Filter Operator
-predicate: ((key > '9') and value is not null) (type: 
boolean)
-Statistics: Num rows: 83 Data size: 881 Basic stats: 
COMPLETE Column stats: NONE
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 83 Data size: 881 Basic stats: 
COMPLETE Column stats: NONE
-  Group By Operator
-keys: _col0 (type: string), _col1 (type: string)
-mode: hash
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 83 Data size: 881 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string), _col1 
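
The statement being planned above is Hive's multi-insert form: one FROM clause feeding two INSERT OVERWRITE targets, so src is scanned once and each branch applies its own subquery filter. A stripped-down sketch of the same shape, using hypothetical target tables t_low and t_high rather than the test's src_4/src_5:

  from src b
  insert overwrite table t_low
    select * where b.key < '100'
  insert overwrite table t_high
    select * where b.key >= '100';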

[27/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.7.out
--
diff --git 
a/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.7.out 
b/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.7.out
deleted file mode 100644
index 52b09d4..000
--- a/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.7.out
+++ /dev/null
@@ -1,179 +0,0 @@
-PREHOOK: query: -- Verify that nullable fields properly work
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE test_serializer(string1 STRING,
- int1 INT,
- tinyint1 TINYINT,
- smallint1 SMALLINT,
- bigint1 BIGINT,
- boolean1 BOOLEAN,
- float1 FLOAT,
- double1 DOUBLE,
- list1 ARRAY<STRING>,
- map1 MAP<STRING,INT>,
- struct1 STRUCT<sInt:INT,sBoolean:BOOLEAN,sString:STRING>,
- enum1 STRING,
- nullableint INT,
- bytes1 BINARY,
- fixed1 BINARY)
- ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY 
':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
- STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_serializer
-POSTHOOK: query: -- Verify that nullable fields properly work
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE test_serializer(string1 STRING,
- int1 INT,
- tinyint1 TINYINT,
- smallint1 SMALLINT,
- bigint1 BIGINT,
- boolean1 BOOLEAN,
- float1 FLOAT,
- double1 DOUBLE,
- list1 ARRAY<STRING>,
- map1 MAP<STRING,INT>,
- struct1 STRUCT<sInt:INT,sBoolean:BOOLEAN,sString:STRING>,
- enum1 STRING,
- nullableint INT,
- bytes1 BINARY,
- fixed1 BINARY)
- ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY 
':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
- STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_serializer
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE 
test_serializer
-PREHOOK: type: LOAD
- A masked pattern was here 
-PREHOOK: Output: default@test_serializer
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE 
test_serializer
-POSTHOOK: type: LOAD
- A masked pattern was here 
-POSTHOOK: Output: default@test_serializer
-PREHOOK: query: CREATE TABLE as_avro
-  ROW FORMAT
-  SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
-  STORED AS
-  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
-  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
-  TBLPROPERTIES (
-'avro.schema.literal'='{
-  "namespace": "com.howdy",
-  "name": "some_schema",
-  "type": "record",
-  "fields": [
-{ "name": "string1", "type": ["null", "string"] },
-{ "name": "int1", "type": ["null", "int"] },
-{ "name": "tinyint1", "type": ["null", "int"] },
-{ "name": "smallint1", "type": ["null", "int"] },
-{ "name": "bigint1", "type": ["null", "long"] },
-{ "name": "boolean1", "type": ["null", "boolean"] },
-{ "name": "float1", "type": ["null", "float"] },
-{ "name": "double1", "type": ["null", "double"] },
-{ "name": "list1", "type": ["null", {"type": "array", "items": 
"string"}] },
-{ "name": "map1", "type": ["null", {"type": "map", "values": "int"}] },
-{ "name": "struct1", "type": ["null", {"type": "record", "name": 
"struct1_name", "fields": [
-  { "name": "sInt", "type": "int" },
-  { "name": "sBoolean", "type": "boolean" },
-  { "name": "sString", "type": "string" }
-]}] },
-{ "name": "enum1", "type": ["null", {"type": "enum", "name": 
"enum1_values", "symbols": ["BLUE", "RED", "GREEN"]}] },
-{ "name": "nullableint", "type": ["null", "int"] },
-{ "name": "bytes1", "type": ["null", "bytes"] },
-{ "name": "fixed1", "type": ["null", {"type": "fixed", "name": 
"threebytes", "size": 3}] }
-  ]
-}'
-  )
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@as_avro
-POSTHOOK: query: CREATE TABLE as_avro
-  ROW FORMAT
-  SERDE 
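
Every field in the avro.schema.literal above is a union with "null" (for example ["null", "string"] and ["null", "int"]), which is how Avro represents nullable Hive columns; the enum1 and fixed1 entries show the same pattern for enum and fixed types. A rough sketch of the round trip such a test verifies; the INSERT/SELECT below is illustrative and not quoted from the test output:

  insert overwrite table as_avro
    select * from test_serializer;

  -- rows whose source fields were empty should come back as NULL
  select nullableint, string1
  from as_avro
  where nullableint is null;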

[30/34] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/spark/sample8.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/sample8.q.out 
b/ql/src/test/results/clientpositive/spark/sample8.q.out
index e847fa5..59807de 100644
--- a/ql/src/test/results/clientpositive/spark/sample8.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample8.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
 ds 2008-04-08
 hr 11
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -126,7 +126,7 @@ STAGE PLANS:
 ds 2008-04-08
 hr 11
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -172,7 +172,7 @@ STAGE PLANS:
 ds 2008-04-08
 hr 12
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -218,7 +218,7 @@ STAGE PLANS:
 ds 2008-04-09
 hr 11
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -264,7 +264,7 @@ STAGE PLANS:
 ds 2008-04-09
 hr 12
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/spark/stats0.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/stats0.q.out 
b/ql/src/test/results/clientpositive/spark/stats0.q.out
index 0b14e21..491b4d0 100644
--- a/ql/src/test/results/clientpositive/spark/stats0.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats0.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -94,7 +94,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -1388,7 +1388,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   

[05/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
--
diff --git 
a/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out 
b/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
deleted file mode 100644
index 899723f..000
--- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
+++ /dev/null
@@ -1,999 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_4
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_4
-RUN: Stage-0:DDL
-PREHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_5
-POSTHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_5
-RUN: Stage-0:DDL
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 
'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-from src a 
-where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-from src a 
-where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-10 is a root stage
-  Stage-2 depends on stages: Stage-10
-  Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
-  Stage-6 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-6
-  Stage-7 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-10
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: s1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: ((key > '2') and key is null) (type: boolean)
-  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE 
Column stats: NONE
-Group By Operator
-  aggregations: count()
-  mode: hash
-  outputColumnNames: _col0
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-sort order: 
-Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-value expressions: _col0 (type: bigint)
-  Reduce Operator Tree:
-Group By Operator
-  aggregations: count(VALUE._col0)
-  mode: mergepartial
-  outputColumnNames: _col0
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
-  Filter Operator
-predicate: (_col0 = 0) (type: boolean)
-Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
-Select Operator
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-keys: 0 (type: bigint)
-mode: hash
-outputColumnNames: _col0
-Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
-  table:
-  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: b
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  sort order: 
-  Statistics: Num rows: 500 Data 
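
The cross-product warning above is a direct consequence of the NOT IN rewrite visible in Stage-10: because a NOT IN predicate returns no rows at all if its subquery can produce a NULL, Hive first counts NULL-producing rows of the subquery (the sq_2_notin_nullcheck branch) and then joins that one-row result to every row of src with no join key, which the planner reports as a cross product. Written by hand, the generated null check is roughly equivalent to the following; this is a sketch of the semantics, not the literal internal plan:

  -- one row: how many keys of the NOT IN subquery are NULL
  select count(*) as null_keys
  from src s1
  where s1.key > '2' and s1.key is null;

  -- only when null_keys = 0 can
  --   key not in (select key from src s1 where s1.key > '2')
  -- evaluate to true for any outer row, so the plan gates on that count.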

[34/34] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit 
Sabharwal, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cdb872a1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cdb872a1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cdb872a1

Branch: refs/heads/master
Commit: cdb872a117c95e0203c1cae8ffe8add40cfd4f0f
Parents: 2254161
Author: Mohit Sabharwal 
Authored: Tue May 24 09:30:32 2016 -0500
Committer: Sergio Pena 
Committed: Fri May 27 21:08:31 2016 -0500

--
 .../test/results/clientpositive/bucket1.q.out   |  4 +-
 .../test/results/clientpositive/bucket2.q.out   |  4 +-
 .../test/results/clientpositive/bucket3.q.out   |  4 +-
 .../test/results/clientpositive/bucket4.q.out   |  4 +-
 .../test/results/clientpositive/bucket5.q.out   |  4 +-
 .../results/clientpositive/bucket_many.q.out|  4 +-
 .../columnStatsUpdateForStatsOptimizer_1.q.out  |  8 +--
 .../columnStatsUpdateForStatsOptimizer_2.q.out  |  2 +-
 .../constantPropagateForSubQuery.q.out  |  8 +--
 ql/src/test/results/clientpositive/ctas.q.out   |  4 +-
 .../results/clientpositive/describe_table.q.out |  4 +-
 .../disable_merge_for_bucketing.q.out   |  4 +-
 .../extrapolate_part_stats_full.q.out   | 24 -
 .../extrapolate_part_stats_partial.q.out| 32 +--
 .../extrapolate_part_stats_partial_ndv.q.out| 16 +++---
 .../clientpositive/fouter_join_ppr.q.out| 40 +++---
 .../clientpositive/groupby_map_ppr.q.out|  4 +-
 .../groupby_map_ppr_multi_distinct.q.out|  4 +-
 .../results/clientpositive/groupby_ppr.q.out|  4 +-
 .../groupby_ppr_multi_distinct.q.out|  4 +-
 .../test/results/clientpositive/input23.q.out   |  2 +-
 .../test/results/clientpositive/input42.q.out   | 12 ++---
 .../results/clientpositive/input_part1.q.out|  2 +-
 .../results/clientpositive/input_part2.q.out|  4 +-
 .../results/clientpositive/input_part7.q.out|  4 +-
 .../results/clientpositive/input_part9.q.out|  4 +-
 ql/src/test/results/clientpositive/join17.q.out |  4 +-
 ql/src/test/results/clientpositive/join26.q.out |  2 +-
 ql/src/test/results/clientpositive/join32.q.out | 10 ++--
 .../clientpositive/join32_lessSize.q.out| 46 
 ql/src/test/results/clientpositive/join33.q.out | 10 ++--
 ql/src/test/results/clientpositive/join34.q.out |  8 +--
 ql/src/test/results/clientpositive/join35.q.out | 12 ++---
 ql/src/test/results/clientpositive/join9.q.out  |  6 +--
 .../results/clientpositive/join_map_ppr.q.out   |  4 +-
 .../clientpositive/list_bucket_dml_1.q.out  |  4 +-
 .../clientpositive/list_bucket_dml_14.q.out |  4 +-
 .../clientpositive/list_bucket_dml_3.q.out  |  4 +-
 .../clientpositive/list_bucket_dml_7.q.out  |  8 +--
 .../results/clientpositive/load_dyn_part8.q.out |  8 +--
 .../clientpositive/louter_join_ppr.q.out| 36 ++---
 .../clientpositive/mapjoin_mapjoin.q.out| 16 +++---
 .../offset_limit_global_optimizer.q.out | 52 +-
 .../clientpositive/optimize_nullscan.q.out  | 56 ++--
 .../partition_coltype_literals.q.out|  4 +-
 ql/src/test/results/clientpositive/pcr.q.out| 10 ++--
 ql/src/test/results/clientpositive/pcs.q.out| 38 ++---
 .../clientpositive/ppd_join_filter.q.out| 32 +--
 ql/src/test/results/clientpositive/ppd_vc.q.out | 20 +++
 .../clientpositive/ppr_allchildsarenull.q.out   | 12 ++---
 .../clientpositive/rand_partitionpruner1.q.out  |  4 +-
 .../clientpositive/rand_partitionpruner2.q.out  |  4 +-
 .../clientpositive/rand_partitionpruner3.q.out  |  4 +-
 .../clientpositive/reduce_deduplicate.q.out |  4 +-
 .../results/clientpositive/regexp_extract.q.out |  8 +--
 .../clientpositive/router_join_ppr.q.out| 36 ++---
 .../test/results/clientpositive/sample1.q.out   |  2 +-
 .../test/results/clientpositive/sample2.q.out   |  4 +-
 .../test/results/clientpositive/sample4.q.out   |  4 +-
 .../test/results/clientpositive/sample5.q.out   |  4 +-
 .../test/results/clientpositive/sample6.q.out   | 32 +--
 .../test/results/clientpositive/sample7.q.out   |  4 +-
 .../test/results/clientpositive/sample8.q.out   |  8 +--
 .../test/results/clientpositive/sample9.q.out   |  4 +-
 .../clientpositive/schema_evol_stats.q.out  |  8 +--
 .../clientpositive/serde_user_properties.q.out  | 12 ++---
 .../results/clientpositive/spark/bucket2.q.out  |  4 +-
 .../results/clientpositive/spark/bucket3.q.out  |  4 +-
 .../results/clientpositive/spark/bucket4.q.out  |  4 +-
 .../results/clientpositive/spark/ctas.q.out |  4 +-
 .../spark/disable_merge_for_bucketing.q.out |  4 +-
 .../clientpositive/spark/groupby_map_ppr.q.out  |  4 +-
 

[29/34] hive git commit: HIVE-13860: Fix more json related JDK8 test failures (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
HIVE-13860: Fix more json related JDK8 test failures (Mohit Sabharwal, reviewed 
by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f38a42e5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f38a42e5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f38a42e5

Branch: refs/heads/master
Commit: f38a42e52e28618035c05a15c349594ae814748d
Parents: cdb872a
Author: Mohit Sabharwal 
Authored: Fri May 27 10:35:16 2016 -0500
Committer: Sergio Pena 
Committed: Fri May 27 21:08:31 2016 -0500

--
 .../clientpositive/autoColumnStats_1.q.out  | 28 +++---
 .../clientpositive/autoColumnStats_2.q.out  | 24 ++---
 .../clientpositive/autoColumnStats_3.q.out  | 10 +-
 .../clientpositive/autoColumnStats_4.q.out  |  2 +-
 .../clientpositive/autoColumnStats_5.q.out  |  8 +-
 .../clientpositive/autoColumnStats_8.q.out  |  8 +-
 .../clientpositive/autoColumnStats_9.q.out  |  2 +-
 .../clientpositive/binary_output_format.q.out   |  4 +-
 .../results/clientpositive/json_serde1.q.out|  4 +-
 .../results/clientpositive/orc_create.q.out | 12 +--
 .../clientpositive/orc_int_type_promotion.q.out |  6 +-
 .../results/clientpositive/perf/query85.q.out   |  2 +-
 .../results/clientpositive/perf/query89.q.out   |  2 +-
 .../results/clientpositive/perf/query91.q.out   |  2 +-
 .../results/clientpositive/spark/bucket5.q.out  |  8 +-
 .../results/clientpositive/spark/join0.q.out|  2 +-
 .../clientpositive/spark/outer_join_ppr.q.out   |  4 +-
 .../spark/reduce_deduplicate.q.out  |  4 +-
 .../clientpositive/spark/union_ppr.q.out|  8 +-
 .../clientpositive/stats_list_bucket.q.out  |  2 +-
 .../results/clientpositive/tez/bucket2.q.out|  4 +-
 .../clientpositive/udaf_collect_set_2.q.out | 96 ++--
 .../results/clientpositive/udf_sort_array.q.out |  2 +-
 .../clientpositive/vector_complex_all.q.out |  6 +-
 .../results/clientpositive/vector_udf1.q.out|  2 +-
 25 files changed, 126 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/f38a42e5/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out 
b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
index e290e52..4cf6df1 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
@@ -60,7 +60,7 @@ Retention:0
  A masked pattern was here 
 Table Type:MANAGED_TABLE
 Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles1   
numRows 500 
rawDataSize 5312
@@ -137,7 +137,7 @@ Retention:  0
  A masked pattern was here 
 Table Type:MANAGED_TABLE
 Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles1   
numRows 500 
rawDataSize 5312
@@ -172,7 +172,7 @@ Retention:  0
  A masked pattern was here 
 Table Type:MANAGED_TABLE
 Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles1   
numRows 500 
rawDataSize 5312
@@ -257,7 +257,7 @@ Retention:  0
  A masked pattern was here 
 Table Type:MANAGED_TABLE
 Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles1   
numRows 500 
rawDataSize 5312
@@ -292,7 
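
The COLUMN_STATS_ACCURATE churn in these hunks is only a key-ordering change inside the JSON value: the same flags are present, but the order in which they are serialized differs between JDK7 and JDK8, and HIVE-13409/HIVE-13860 align the golden files with the JDK8 ordering. For orientation, this table parameter appears once column statistics have been gathered; a generic example, not taken from this test:

  analyze table src compute statistics for columns key, value;

  -- Table Parameters in the output then include a line such as
  --   COLUMN_STATS_ACCURATE  {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
  describe formatted src;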

[12/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out 
b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
new file mode 100644
index 000..fc8eb1c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
@@ -0,0 +1,216 @@
+PREHOOK: query: DROP TABLE over1k
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE over1k
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE over1korc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE over1korc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: -- data setup
+CREATE TABLE over1k(t tinyint,
+   si smallint,
+   i int,
+   b bigint,
+   f float,
+   d double,
+   bo boolean,
+   s string,
+   ts timestamp,
+   dec decimal(4,2),
+   bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k
+POSTHOOK: query: -- data setup
+CREATE TABLE over1k(t tinyint,
+   si smallint,
+   i int,
+   b bigint,
+   f float,
+   d double,
+   bo boolean,
+   s string,
+   ts timestamp,
+   dec decimal(4,2),
+   bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE 
INTO TABLE over1k
+PREHOOK: type: LOAD
+ A masked pattern was here 
+PREHOOK: Output: default@over1k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE 
INTO TABLE over1k
+POSTHOOK: type: LOAD
+ A masked pattern was here 
+POSTHOOK: Output: default@over1k
+PREHOOK: query: CREATE TABLE over1korc(t tinyint,
+   si smallint,
+   i int,
+   b bigint,
+   f float,
+   d double,
+   bo boolean,
+   s string,
+   ts timestamp,
+   dec decimal(4,2),
+   bin binary)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1korc
+POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
+   si smallint,
+   i int,
+   b bigint,
+   f float,
+   d double,
+   bo boolean,
+   s string,
+   ts timestamp,
+   dec decimal(4,2),
+   bin binary)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1korc
+PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1korc
+POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1korc
+POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, 
type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, 
type:binary, comment:null), ]
+POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, 
type:boolean, comment:null), ]
+POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, 
type:double, comment:null), ]
+POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, 
type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, 
type:float, comment:null), ]
+POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, 
type:int, comment:null), ]
+POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, 
type:string, comment:null), ]
+POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, 
type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, 
type:tinyint, comment:null), ]
+POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, 
type:timestamp, comment:null), ]
+PREHOOK: query: EXPLAIN SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Tez
+ A masked 

[22/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
new file mode 100644
index 000..0e11f3f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
@@ -0,0 +1,424 @@
+PREHOOK: query: -- Ensure it works if skewed column is not the first column in 
the table columns
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- test where the skewed values are more than 1 say columns no. 2 and 4 in a 
table with 5 columns
+create table list_bucketing_mul_col (col1 String, col2 String, col3 String, 
col4 String, col5 string) 
+partitioned by (ds String, hr String) 
+skewed by (col2, col4) on 
(('466','val_466'),('287','val_287'),('82','val_82'))
+stored as DIRECTORIES
+STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_mul_col
+POSTHOOK: query: -- Ensure it works if skewed column is not the first column 
in the table columns
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- test where the skewed values are more than 1 say columns no. 2 and 4 in a 
table with 5 columns
+create table list_bucketing_mul_col (col1 String, col2 String, col3 String, 
col4 String, col5 string) 
+partitioned by (ds String, hr String) 
+skewed by (col2, col4) on 
(('466','val_466'),('287','val_287'),('82','val_82'))
+stored as DIRECTORIES
+STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_mul_col
+PREHOOK: query: -- list bucketing DML 
+explain extended
+insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  
hr = '11')
+select 1, key, 1, value, 1 from src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML 
+explain extended
+insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  
hr = '11')
+select 1, key, 1, value, 1 from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: src
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: '1' (type: string), key (type: string), '1' (type: 
string), value (type: string), '1' (type: string)
+  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+  File Output Operator
+compressed: false
+GlobalTableId: 1
+ A masked pattern was here 
+NumFilesPerFileSink: 1
+Static Partition Specification: ds=2008-04-08/hr=11/
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+ A masked pattern was here 
+table:
+input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+properties:
+  bucket_count -1
+  columns col1,col2,col3,col4,col5
+  columns.comments 
+  columns.types string:string:string:string:string
+ A masked pattern was here 
+  name default.list_bucketing_mul_col
+  partition_columns ds/hr
+  partition_columns.types string:string
+  serialization.ddl struct list_bucketing_mul_col { string 
col1, string col2, string col3, string col4, string col5}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+name: default.list_bucketing_mul_col
+TotalFiles: 1
+GatherStats: true
+MultiFileSpray: false
+  Path -> Alias:
+ A masked pattern was here 
+  Path -> Partition:
+ A masked pattern was here 
+  Partition
+base file name: src
+input format: org.apache.hadoop.mapred.TextInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+properties:
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+  bucket_count -1
+ 

[03/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/subquery_notin_having.q.out
--
diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.out 
b/ql/src/test/results/clientpositive/subquery_notin_having.q.out
new file mode 100644
index 000..c32bf25
--- /dev/null
+++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.out
@@ -0,0 +1,764 @@
+Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 
'Stage-2:MAPRED' is a cross product
+PREHOOK: query: -- non agg, non corr
+
+explain
+select key, count(*) 
+from src 
+group by key
+having key not in  
+  ( select key  from src s1 
+where s1.key > '12'
+  )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- non agg, non corr
+
+explain
+select key, count(*) 
+from src 
+group by key
+having key not in  
+  ( select key  from src s1 
+where s1.key > '12'
+  )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-4
+  Stage-3 depends on stages: Stage-2
+  Stage-4 is a root stage
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: src
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+Select Operator
+  expressions: key (type: string)
+  outputColumnNames: key
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+  Group By Operator
+aggregations: count()
+keys: key (type: string)
+mode: hash
+outputColumnNames: _col0, _col1
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  key expressions: _col0 (type: string)
+  sort order: +
+  Map-reduce partition columns: _col0 (type: string)
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  value expressions: _col1 (type: bigint)
+  Reduce Operator Tree:
+Group By Operator
+  aggregations: count(VALUE._col0)
+  keys: KEY._col0 (type: string)
+  mode: mergepartial
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+  File Output Operator
+compressed: false
+table:
+input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+Map Reduce
+  Map Operator Tree:
+  TableScan
+Reduce Output Operator
+  sort order: 
+  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE 
Column stats: NONE
+  value expressions: _col0 (type: string), _col1 (type: bigint)
+  TableScan
+Reduce Output Operator
+  sort order: 
+  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: COMPLETE
+  Reduce Operator Tree:
+Join Operator
+  condition map:
+   Inner Join 0 to 1
+  keys:
+0 
+1 
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE 
Column stats: NONE
+  File Output Operator
+compressed: false
+table:
+input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+Map Reduce
+  Map Operator Tree:
+  TableScan
+Reduce Output Operator
+  key expressions: _col0 (type: string)
+  sort order: +
+  Map-reduce partition columns: _col0 (type: string)
+  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE 
Column stats: NONE
+  value expressions: _col1 (type: bigint)
+  TableScan
+alias: src
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+Filter Operator
+  predicate: (key > '12') (type: boolean)
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE 
Column stats: NONE
+  Select Operator
+expressions: key (type: string)
+outputColumnNames: _col0
+Statistics: Num rows: 166 Data size: 1763 Basic stats: 
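
The same NOT IN null-check machinery appears here attached to a HAVING clause over the grouped keys, which is why Stage-2 again carries a keyless (cross-product) join against the single-row check. When the inner key can never be NULL, the anti-join can also be written out explicitly; a sketch of an equivalent formulation, shown only for comparison with the NOT IN form the test exercises:

  select g.key, g.cnt
  from (select key, count(*) as cnt from src group by key) g
  left outer join (select distinct key from src s1 where s1.key > '12') f
    on g.key = f.key
  where f.key is null;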

[13/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
new file mode 100644
index 000..81f3af3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
@@ -0,0 +1,811 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 00_0
+--  5263 01_0
+-- ds=2008-04-08/hr=11/key=103:
+-- 99 00_0
+-- 99 01_0
+-- after merge
+-- 142 00_0
+-- ds=2008-04-08/hr=11/key=484:
+-- 87 00_0
+-- 87 01_0
+-- after merge
+-- 118 01_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key) on ('484','103')
+stored as DIRECTORIES
+STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 00_0
+--  5263 01_0
+-- ds=2008-04-08/hr=11/key=103:
+-- 99 00_0
+-- 99 01_0
+-- after merge
+-- 142 00_0
+-- ds=2008-04-08/hr=11/key=484:
+-- 87 00_0
+-- 87 01_0
+-- after merge
+-- 118 01_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key) on ('484','103')
+stored as DIRECTORIES
+STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: srcpart
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+  File Output Operator
+compressed: false
+GlobalTableId: 1
+ A masked pattern was here 
+NumFilesPerFileSink: 1
+Static Partition Specification: ds=2008-04-08/hr=11/
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+ A masked pattern was here 
+table:
+input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+properties:
+  bucket_count -1
+  columns key,value
+  columns.comments 
+  columns.types string:string
+ A masked pattern was here 
+  name default.list_bucketing_static_part
+  partition_columns ds/hr
+  partition_columns.types string:string
+  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+name: default.list_bucketing_static_part
+TotalFiles: 1
+GatherStats: true
+MultiFileSpray: false
+  Path -> Alias:
+ A masked pattern was here 
+  Path -> Partition:
+ A masked pattern was here 
+  Partition
+base file name: hr=11
+input format: 
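
The header comments for this test spell out the file counts per skewed directory before and after merging: two small files from the insert, collapsed to one by the small-file merge step. Whether that step runs is controlled by Hive's standard merge settings; a hedged sketch of the session setup such a run relies on (the property names are standard Hive options, but the exact values this test sets are not shown in this excerpt):

  set hive.merge.mapfiles=true;
  set hive.merge.mapredfiles=true;

  insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
  select key, value from srcpart where ds = '2008-04-08';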

[25/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/char_udf1.q.java1.8.out
--
diff --git a/ql/src/test/results/clientpositive/char_udf1.q.java1.8.out 
b/ql/src/test/results/clientpositive/char_udf1.q.java1.8.out
deleted file mode 100644
index 5691a06..000
--- a/ql/src/test/results/clientpositive/char_udf1.q.java1.8.out
+++ /dev/null
@@ -1,457 +0,0 @@
-PREHOOK: query: drop table char_udf_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table char_udf_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 
char(20))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), 
c4 char(20))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_udf_1
-PREHOOK: query: insert overwrite table char_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: insert overwrite table char_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@char_udf_1
-POSTHOOK: Lineage: char_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with char support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
- A masked pattern was here 
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with char support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
- A masked pattern was here 
-238val_238 238val_238  true
-PREHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
- A masked pattern was here 
-VAL_238VAL_238 true
-PREHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
- A masked pattern was here 
-val_238val_238 true
-PREHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
- A masked pattern was here 
-POSTHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
- A masked pattern was here 
-118118 true
-PREHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
- A masked pattern was here 
-POSTHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
- A masked pattern was here 
-238|val_238238|val_238 true
-PREHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 
'US-ASCII')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 
'US-ASCII')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
- A masked pattern was here 
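The last queries in this deleted golden file check that decode(encode(col, 'US-ASCII'), 'US-ASCII') returns the original value. A minimal Java sketch of that charset round trip follows; it mirrors the semantics of the encode()/decode() UDFs but is not Hive's implementation, and the class name and the 'val_238' literal are used only for illustration.

    import java.nio.charset.StandardCharsets;

    public class EncodeDecodeRoundTrip {
        public static void main(String[] args) {
            String value = "val_238";

            // encode(str, 'US-ASCII') yields binary; decode(bin, 'US-ASCII') turns it
            // back into a string. The same round trip with java.nio charsets:
            byte[] encoded = value.getBytes(StandardCharsets.US_ASCII);
            String decoded = new String(encoded, StandardCharsets.US_ASCII);

            // Prints true, matching the equality checks in the query output above.
            System.out.println(decoded.equals(value));
        }
    }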

[24/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/join0.q.out
--
diff --git a/ql/src/test/results/clientpositive/join0.q.out 
b/ql/src/test/results/clientpositive/join0.q.out
new file mode 100644
index 000..59122e2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/join0.q.out
@@ -0,0 +1,238 @@
+Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' 
is a cross product
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+   src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+   src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: src
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+Filter Operator
+  predicate: (key < 10) (type: boolean)
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE 
Column stats: NONE
+  Select Operator
+expressions: key (type: string), value (type: string)
+outputColumnNames: _col0, _col1
+Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  sort order: 
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+  value expressions: _col0 (type: string), _col1 (type: string)
+  TableScan
+alias: src
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+Filter Operator
+  predicate: (key < 10) (type: boolean)
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE 
Column stats: NONE
+  Select Operator
+expressions: key (type: string), value (type: string)
+outputColumnNames: _col0, _col1
+Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  sort order: 
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+  value expressions: _col0 (type: string), _col1 (type: string)
+  Reduce Operator Tree:
+Join Operator
+  condition map:
+   Inner Join 0 to 1
+  keys:
+0 
+1 
+  outputColumnNames: _col0, _col1, _col2, _col3
+  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE 
Column stats: NONE
+  File Output Operator
+compressed: false
+table:
+input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+Map Reduce
+  Map Operator Tree:
+  TableScan
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: string), 
_col2 (type: string), _col3 (type: string)
+  sort order: 
+  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE 
Column stats: NONE
+  Reduce Operator Tree:
+Select Operator
+  expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 
(type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: 
string)
+  outputColumnNames: _col0, _col1, _col2, _col3
+  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE 
Column stats: NONE
+  File Output Operator
+compressed: false
+Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE 
Column stats: NONE
+table:
+input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+Fetch Operator
+  limit: -1
+  Processor Tree:
+ListSink
+
+Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' 
is a cross product
+PREHOOK: query: 

[31/34] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/spark/join34.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/join34.q.out 
b/ql/src/test/results/clientpositive/spark/join34.q.out
index 235d36a..ebd9c89 100644
--- a/ql/src/test/results/clientpositive/spark/join34.q.out
+++ b/ql/src/test/results/clientpositive/spark/join34.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -98,7 +98,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -150,7 +150,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -170,7 +170,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -222,7 +222,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -242,7 +242,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/spark/join35.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/join35.q.out 
b/ql/src/test/results/clientpositive/spark/join35.q.out
index 7b873c6..d14dadf 100644
--- a/ql/src/test/results/clientpositive/spark/join35.q.out
+++ b/ql/src/test/results/clientpositive/spark/join35.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
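The HIVE-13409 hunks above, and the similar ones later in this digest, only change the key order inside the COLUMN_STATS_ACCURATE JSON value. Below is a small, hypothetical Java sketch of why that order can flip between JDK 7 and JDK 8 when the properties are rendered from a plain HashMap, and why a sorted view makes the golden-file text stable. It is an illustration only, not the actual HIVE-13409 patch; StatsJsonOrder and toJson are made-up names.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class StatsJsonOrder {

        // Naive JSON rendering: emitted key order is whatever the map's iteration order is.
        static String toJson(Map<String, String> m) {
            StringBuilder sb = new StringBuilder("{");
            for (Map.Entry<String, String> e : m.entrySet()) {
                if (sb.length() > 1) {
                    sb.append(',');
                }
                sb.append('"').append(e.getKey()).append("\":").append(e.getValue());
            }
            return sb.append('}').toString();
        }

        public static void main(String[] args) {
            Map<String, String> stats = new HashMap<>();
            stats.put("COLUMN_STATS", "{\"key\":\"true\",\"value\":\"true\"}");
            stats.put("BASIC_STATS", "\"true\"");

            // HashMap iteration order is unspecified and its internals changed in JDK 8,
            // so the same put() sequence can print keys in a different order on different
            // JDKs, which is exactly the kind of golden-file churn shown in the hunks above.
            System.out.println(toJson(stats));

            // A sorted view pins the order; the updated expected output lists BASIC_STATS
            // before COLUMN_STATS, consistent with sorted keys.
            System.out.println(toJson(new TreeMap<>(stats)));
        }
    }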
 

[01/34] hive git commit: HIVE-13587: Set Hive pom to use Hadoop 2.6.1 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
Repository: hive
Updated Branches:
  refs/heads/master 0d67cb0b7 -> f38a42e52


HIVE-13587: Set Hive pom to use Hadoop 2.6.1 (Mohit Sabharwal, reviewed by 
Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0bd21b59
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0bd21b59
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0bd21b59

Branch: refs/heads/master
Commit: 0bd21b59843668d5bdf426648ca0be6c21291934
Parents: 0d67cb0
Author: Sergio Pena 
Authored: Mon May 2 13:08:54 2016 -0500
Committer: Sergio Pena 
Committed: Fri May 27 21:08:30 2016 -0500

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0bd21b59/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 117aec9..3176caf 100644
--- a/pom.xml
+++ b/pom.xml
@@ -132,7 +132,7 @@
 
0.1.0
 14.0.1
 2.4.4
-2.6.0
+2.6.1
 
${basedir}/${hive.path.to.root}/testutils/hadoop
 1.1.1
 



[16/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
new file mode 100644
index 000..e53fee7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
@@ -0,0 +1,1005 @@
+PREHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed 
columns. merge.
+-- The following explains merge example used in this test case
+-- DML will generate 2 partitions
+-- ds=2008-04-08/hr=a1
+-- ds=2008-04-08/hr=b1
+-- without merge, each partition has more files
+-- ds=2008-04-08/hr=a1 has 2 files
+-- ds=2008-04-08/hr=b1 has 6 files
+-- with merge each partition has fewer files
+-- ds=2008-04-08/hr=a1 has 1 files
+-- ds=2008-04-08/hr=b1 has 4 files
+-- The following shows file size and name in each directory
+-- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 155 00_0
+-- 155 01_0
+-- with merge
+-- 254 00_0
+-- hr=b1/key=103/value=val_103:
+-- without merge
+-- 99 00_0
+-- 99 01_0
+-- with merge
+-- 142 01_0
+-- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 5181 00_0
+-- 5181 01_0
+-- with merge
+-- 5181 00_0
+-- 5181 01_0
+-- hr=b1/key=484/value=val_484
+-- without merge
+-- 87 00_0
+-- 87 01_0
+-- with merge
+-- 118 02_0 
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
+stored as DIRECTORIES
+STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed 
columns. merge.
+-- The following explains merge example used in this test case
+-- DML will generate 2 partitions
+-- ds=2008-04-08/hr=a1
+-- ds=2008-04-08/hr=b1
+-- without merge, each partition has more files
+-- ds=2008-04-08/hr=a1 has 2 files
+-- ds=2008-04-08/hr=b1 has 6 files
+-- with merge each partition has fewer files
+-- ds=2008-04-08/hr=a1 has 1 files
+-- ds=2008-04-08/hr=b1 has 4 files
+-- The following shows file size and name in each directory
+-- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 155 00_0
+-- 155 01_0
+-- with merge
+-- 254 00_0
+-- hr=b1/key=103/value=val_103:
+-- without merge
+-- 99 00_0
+-- 99 01_0
+-- with merge
+-- 142 01_0
+-- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 5181 00_0
+-- 5181 01_0
+-- with merge
+-- 5181 00_0
+-- 5181 01_0
+-- hr=b1/key=484/value=val_484
+-- without merge
+-- 87 00_0
+-- 87 01_0
+-- with merge
+-- 118 02_0 
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
+stored as DIRECTORIES
+STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_dynamic_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: srcpart
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string), 
if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+  
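The comment block at the top of this golden file describes where each row lands when the table is skewed by (key, value) and stored as DIRECTORIES: rows matching one of the listed skew values get their own key=.../value=... subdirectory, and everything else goes to the default list-bucketing directory, one level per skewed column, hence the doubled HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME path. A rough Java sketch of that routing, reusing the skew values from the CREATE TABLE above; the class, method, and constant handling are illustrative and not Hive's internal code.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class ListBucketingPathSketch {
        // Skewed (key, value) pairs from the CREATE TABLE above.
        private static final Set<List<String>> SKEWED = new HashSet<>(Arrays.asList(
                Arrays.asList("484", "val_484"),
                Arrays.asList("51", "val_14"),
                Arrays.asList("103", "val_103")));

        // Directory choice for one row, mimicking "stored as DIRECTORIES" routing.
        static String targetSubdir(String key, String value) {
            if (SKEWED.contains(Arrays.asList(key, value))) {
                return "key=" + key + "/value=" + value;
            }
            return "HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME";
        }

        public static void main(String[] args) {
            System.out.println(targetSubdir("103", "val_103")); // skewed: key=103/value=val_103
            System.out.println(targetSubdir("238", "val_238")); // non-skewed: default directory
        }
    }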

[26/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
--
diff --git 
a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out 
b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
deleted file mode 100644
index 5c40dc4..000
--- a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
+++ /dev/null
@@ -1,693 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: a
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: key, value
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string)
-null sort order: a
-sort order: +
-Map-reduce partition columns: key (type: string)
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-tag: 0
-value expressions: value (type: string)
-auto parallelism: false
-  TableScan
-alias: b
-Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string), ds (type: 
string)
-  outputColumnNames: key, value, ds
-  Statistics: Num rows: 2000 Data size: 21248 Basic stats: 
COMPLETE Column stats: NONE
-  Reduce Output Operator
-key expressions: key (type: string)
-null sort order: a
-sort order: +
-Map-reduce partition columns: key (type: string)
-Statistics: Num rows: 2000 Data size: 21248 Basic stats: 
COMPLETE Column stats: NONE
-tag: 1
-value expressions: value (type: string), ds (type: string)
-auto parallelism: false
-  Path -> Alias:
- A masked pattern was here 
-  Path -> Partition:
- A masked pattern was here 
-  Partition
-base file name: src
-input format: org.apache.hadoop.mapred.TextInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-  bucket_count -1
-  columns key,value
-  columns.comments 'default','default'
-  columns.types string:string
- A masked pattern was here 
-  name default.src
-  numFiles 1
-  numRows 500
-  rawDataSize 5312
-  serialization.ddl struct src { string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  totalSize 5812
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  
-  input format: org.apache.hadoop.mapred.TextInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-  properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-bucket_count -1
-columns key,value
-columns.comments 'default','default'
-columns.types string:string
- A masked pattern was here 
-name default.src
-numFiles 1
-numRows 500
-rawDataSize 5312
-serialization.ddl struct src { string key, string value}
-serialization.format 1
-serialization.lib 

[18/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
new file mode 100644
index 000..09cb847
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
@@ -0,0 +1,504 @@
+PREHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103')) 
+stored as DIRECTORIES
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103')) 
+stored as DIRECTORIES
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_dynamic_part
+PREHOOK: query: -- list bucketing DML
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr) select key, value, hr from srcpart where ds='2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr) select key, value, hr from srcpart where ds='2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: srcpart
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string), hr (type: 
string)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+  File Output Operator
+compressed: false
+GlobalTableId: 1
+ A masked pattern was here 
+NumFilesPerFileSink: 1
+Static Partition Specification: ds=2008-04-08/
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+ A masked pattern was here 
+table:
+input format: org.apache.hadoop.mapred.TextInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+properties:
+  bucket_count -1
+  columns key,value
+  columns.comments 
+  columns.types string:string
+ A masked pattern was here 
+  name default.list_bucketing_dynamic_part
+  partition_columns ds/hr
+  partition_columns.types string:string
+  serialization.ddl struct list_bucketing_dynamic_part { 
string key, string value}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+name: default.list_bucketing_dynamic_part
+TotalFiles: 1
+GatherStats: true
+MultiFileSpray: false
+  Path -> Alias:
+ A masked pattern was here 
+  Path -> Partition:
+ A masked pattern was here 
+  Partition
+base file name: hr=11
+input format: org.apache.hadoop.mapred.TextInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+partition values:
+  ds 2008-04-08
+  hr 11
+properties:
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+  bucket_count -1
+  columns key,value
+  columns.comments 'default','default'
+  columns.types string:string
+ A masked pattern was here 
+  name default.srcpart
+  numFiles 1
+  numRows 500
+  partition_columns ds/hr
+  partition_columns.types 

[23/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
--
diff --git 
a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
deleted file mode 100644
index 00a6235..000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
+++ /dev/null
@@ -1,424 +0,0 @@
-PREHOOK: query: -- Ensure it works if skewed column is not the first column in 
the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (value) on ('val_466','val_287','val_82')
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- Ensure it works if skewed column is not the first column 
in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (value) on ('val_466','val_287','val_82')
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-src
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   list_bucketing_static_part
-TOK_PARTSPEC
-   TOK_PARTVAL
-  ds
-  '2008-04-08'
-   TOK_PARTVAL
-  hr
-  '11'
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: src
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-  File Output Operator
-compressed: false
-GlobalTableId: 1
- A masked pattern was here 
-NumFilesPerFileSink: 1
-Static Partition Specification: ds=2008-04-08/hr=11/
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
- A masked pattern was here 
-table:
-input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-properties:
-  bucket_count -1
-  columns key,value
-  columns.comments 
-  columns.types string:string
- A masked pattern was here 
-  name default.list_bucketing_static_part
-  partition_columns ds/hr
-  partition_columns.types string:string
-  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-name: default.list_bucketing_static_part
-TotalFiles: 1
-GatherStats: true
-MultiFileSpray: false
-  Path -> Alias:
- 

[11/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/outer_join_ppr.q.out
--
diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.out 
b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
new file mode 100644
index 000..cf20851
--- /dev/null
+++ b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
@@ -0,0 +1,683 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: a
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+  Reduce Output Operator
+key expressions: _col0 (type: string)
+null sort order: a
+sort order: +
+Map-reduce partition columns: _col0 (type: string)
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+tag: 0
+value expressions: _col1 (type: string)
+auto parallelism: false
+  TableScan
+alias: b
+Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string), ds (type: 
string)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 2000 Data size: 21248 Basic stats: 
COMPLETE Column stats: NONE
+  Reduce Output Operator
+key expressions: _col0 (type: string)
+null sort order: a
+sort order: +
+Map-reduce partition columns: _col0 (type: string)
+Statistics: Num rows: 2000 Data size: 21248 Basic stats: 
COMPLETE Column stats: NONE
+tag: 1
+value expressions: _col1 (type: string), _col2 (type: string)
+auto parallelism: false
+  Path -> Alias:
+ A masked pattern was here 
+  Path -> Partition:
+ A masked pattern was here 
+  Partition
+base file name: src
+input format: org.apache.hadoop.mapred.TextInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+properties:
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+  bucket_count -1
+  columns key,value
+  columns.comments 'default','default'
+  columns.types string:string
+ A masked pattern was here 
+  name default.src
+  numFiles 1
+  numRows 500
+  rawDataSize 5312
+  serialization.ddl struct src { string key, string value}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+  totalSize 5812
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+  
+  input format: org.apache.hadoop.mapred.TextInputFormat
+  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+  properties:
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+bucket_count -1
+columns key,value
+columns.comments 'default','default'
+columns.types string:string
+ A masked pattern was here 
+name default.src
+numFiles 1
+numRows 500
+rawDataSize 5312
+serialization.ddl struct src { string key, string value}
+serialization.format 1
+serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+totalSize 5812
+ A masked pattern was here 
+  

[20/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
deleted file mode 100644
index c15c6a2..000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
+++ /dev/null
@@ -1,813 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- after merge
--- 142 00_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
--- after merge
--- 118 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- after merge
--- 142 00_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
--- after merge
--- 118 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: srcpart
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  File Output Operator
-compressed: false
-GlobalTableId: 1
- A masked pattern was here 
-NumFilesPerFileSink: 1
-Static Partition Specification: ds=2008-04-08/hr=11/
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
- A masked pattern was here 
-table:
-input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-properties:
-  bucket_count -1
-  columns key,value
-  columns.comments 
-  columns.types string:string
- A masked pattern was here 
-  name default.list_bucketing_static_part
-  partition_columns ds/hr
-  partition_columns.types string:string
-  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-name: default.list_bucketing_static_part
-TotalFiles: 1
-

[33/34] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/join33.q.out
--
diff --git a/ql/src/test/results/clientpositive/join33.q.out 
b/ql/src/test/results/clientpositive/join33.q.out
index 8653c2f..bebb007 100644
--- a/ql/src/test/results/clientpositive/join33.q.out
+++ b/ql/src/test/results/clientpositive/join33.q.out
@@ -159,7 +159,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -179,7 +179,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -203,7 +203,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -223,7 +223,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -250,7 +250,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 11
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/join34.q.out
--
diff --git a/ql/src/test/results/clientpositive/join34.q.out 
b/ql/src/test/results/clientpositive/join34.q.out
index bb23644..365992b 100644
--- a/ql/src/test/results/clientpositive/join34.q.out
+++ b/ql/src/test/results/clientpositive/join34.q.out
@@ -197,7 +197,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -217,7 +217,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -241,7 +241,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 

[32/34] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
--
diff --git a/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out 
b/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
index 9e9e61f..4352914 100644
--- a/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
+++ b/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
@@ -70,7 +70,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 11
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -116,7 +116,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 12
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -259,7 +259,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 11
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -305,7 +305,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 12
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -351,7 +351,7 @@ STAGE PLANS:
   ds 2008-04-09
   hr 11
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -397,7 +397,7 @@ STAGE PLANS:
   ds 2008-04-09
   hr 12
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
--
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out 
b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
index e03c055..f3fd8f8 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -75,7 +75,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb872a1/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
--
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out 

[02/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
--
diff --git a/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out 
b/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
deleted file mode 100644
index 459d93b..000
--- a/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
+++ /dev/null
@@ -1,457 +0,0 @@
-PREHOOK: query: drop table varchar_udf_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table varchar_udf_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 
varchar(10), c4 varchar(20))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 
varchar(10), c4 varchar(20))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_udf_1
-PREHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@varchar_udf_1
-POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with varchar support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with varchar support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-238val_238 238val_238  true
-PREHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-VAL_238VAL_238 true
-PREHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-val_238val_238 true
-PREHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-118118 true
-PREHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-238|val_238238|val_238 true
-PREHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 
'US-ASCII')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 
'US-ASCII')
-from varchar_udf_1 limit 1

[06/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/stats_list_bucket.q.out
--
diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.out 
b/ql/src/test/results/clientpositive/stats_list_bucket.q.out
new file mode 100644
index 000..c34c414
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_list_bucket.q.out
@@ -0,0 +1,189 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+drop table stats_list_bucket
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+drop table stats_list_bucket
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table stats_list_bucket_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table stats_list_bucket_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_list_bucket (
+  c1 string,
+  c2 string
+) partitioned by (ds string, hr string)
+skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_list_bucket
+POSTHOOK: query: create table stats_list_bucket (
+  c1 string,
+  c2 string
+) partitioned by (ds string, hr string)
+skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_list_bucket
+PREHOOK: query: -- Try partitioned table with list bucketing.
+-- The stats should show 500 rows loaded, as many rows as the src table has.
+
+insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = 
'11')
+  select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
+POSTHOOK: query: -- Try partitioned table with list bucketing.
+-- The stats should show 500 rows loaded, as many rows as the src table has.
+
+insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = 
'11')
+  select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c1 SIMPLE 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c2 SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted stats_list_bucket partition (ds = '2008-04-08', 
 hr = '11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_list_bucket
+POSTHOOK: query: desc formatted stats_list_bucket partition (ds = 
'2008-04-08',  hr = '11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_list_bucket
+# col_name data_type   comment 
+
+c1 string  
+c2 string  
+
+# Partition Information 
+# col_name data_type   comment 
+
+ds string  
+hr string  
+
+# Detailed Partition Information
+Partition Value:   [2008-04-08, 11] 
+Database:  default  
+Table: stats_list_bucket
+ A masked pattern was here 
+Partition Parameters:   
+   COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
+   numFiles4   
+   numRows 500 
+   rawDataSize 4812
+   totalSize   5522
+ A masked pattern was here 
+
+# Storage Information   
+SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 
+InputFormat:   org.apache.hadoop.hive.ql.io.RCFileInputFormat   
+OutputFormat:  org.apache.hadoop.hive.ql.io.RCFileOutputFormat  
+Compressed:No   
+Num Buckets:   -1   
+Bucket Columns:[]   
+Sort Columns:  []   
+Stored As SubDirectories:  Yes  
+Skewed Columns:[c1, c2] 
+Skewed Values: [[466, val_466], [287, val_287], [82, val_82]]   
+ A masked pattern was here 
+Skewed Value to Truncated Path:{[466, 
val_466]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=466/c2=val_466, [82, 
val_82]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=82/c2=val_82, [287, 
val_287]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=287/c2=val_287}  
+Storage Desc Params:

[09/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out 
b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
deleted file mode 100644
index 68943e1..000
--- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
+++ /dev/null
@@ -1,709 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Spark
-  Edges:
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL 
SORT, 2)
- A masked pattern was here 
-  Vertices:
-Map 1 
-Map Operator Tree:
-TableScan
-  alias: a
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  GatherStats: false
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  null sort order: a
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  tag: 0
-  value expressions: _col1 (type: string)
-  auto parallelism: false
-Path -> Alias:
- A masked pattern was here 
-Path -> Partition:
- A masked pattern was here 
-Partition
-  base file name: src
-  input format: org.apache.hadoop.mapred.TextInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-  properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-bucket_count -1
-columns key,value
-columns.comments 'default','default'
-columns.types string:string
- A masked pattern was here 
-name default.src
-numFiles 1
-numRows 500
-rawDataSize 5312
-serialization.ddl struct src { string key, string value}
-serialization.format 1
-serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-totalSize 5812
- A masked pattern was here 
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-input format: org.apache.hadoop.mapred.TextInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-  bucket_count -1
-  columns key,value
-  columns.comments 'default','default'
-  columns.types string:string
- A masked pattern was here 
-  name default.src
-  numFiles 1
-  numRows 500
-  rawDataSize 5312
-  serialization.ddl struct src { string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  totalSize 5812
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.src
-  name: default.src
-Truncated Path -> Alias:
-  /src [a]
-Map 3 
-Map Operator Tree:
-TableScan
-  

[17/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
deleted file mode 100644
index 1960d41..000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
+++ /dev/null
@@ -1,1119 +0,0 @@
-PREHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed 
columns. merge.
--- The following explains merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge, each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 00_0
--- 155 01_0
--- with merge
--- 254 00_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 00_0
--- 99 01_0
--- with merge
--- 142 01_0
--- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 00_0
--- 5181 01_0
--- with merge
--- 5181 00_0
--- 5181 01_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 00_0
--- 87 01_0
--- with merge
--- 118 02_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed 
columns. merge.
--- The following explains merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge, each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 00_0
--- 155 01_0
--- with merge
--- 254 00_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 00_0
--- 99 01_0
--- with merge
--- 142 01_0
--- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 00_0
--- 5181 01_0
--- with merge
--- 5181 00_0
--- 5181 01_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 00_0
--- 87 01_0
--- with merge
--- 118 02_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-srcpart
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   list_bucketing_dynamic_part
-TOK_PARTSPEC
-   TOK_PARTVAL
-  ds
-  '2008-04-08'
-   TOK_PARTVAL
-  hr
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
- TOK_SELEXPR
-TOK_FUNCTION
- 

[19/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
new file mode 100644
index 000..5f0406a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
@@ -0,0 +1,811 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 00_0
+--  5263 01_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 00_0
+-- 99 01_0
+-- after merge
+-- 142 00_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 00_0
+-- 87 01_0
+-- after merge
+-- 118 01_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
+stored as DIRECTORIES
+STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 00_0
+--  5263 01_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 00_0
+-- 99 01_0
+-- after merge
+-- 142 00_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 00_0
+-- 87 01_0
+-- after merge
+-- 118 01_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
+stored as DIRECTORIES
+STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: srcpart
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+  File Output Operator
+compressed: false
+GlobalTableId: 1
+ A masked pattern was here 
+NumFilesPerFileSink: 1
+Static Partition Specification: ds=2008-04-08/hr=11/
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+ A masked pattern was here 
+table:
+input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+properties:
+  bucket_count -1
+  columns key,value
+  columns.comments 
+  columns.types string:string
+ A masked pattern was here 
+  name default.list_bucketing_static_part
+  partition_columns ds/hr
+  partition_columns.types string:string
+  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+name: default.list_bucketing_static_part
+TotalFiles: 1
+GatherStats: true
+MultiFileSpray: false
+  Path -> Alias:
+ A masked pattern was here 

[14/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
deleted file mode 100644
index 752ea4e..000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
+++ /dev/null
@@ -1,813 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103:
--- 99 00_0
--- 99 01_0
--- after merge
--- 142 00_0
--- ds=2008-04-08/hr=11/key=484:
--- 87 00_0
--- 87 01_0
--- after merge
--- 118 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key) on ('484','103')
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103:
--- 99 00_0
--- 99 01_0
--- after merge
--- 142 00_0
--- ds=2008-04-08/hr=11/key=484:
--- 87 00_0
--- 87 01_0
--- after merge
--- 118 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key) on ('484','103')
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: srcpart
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  File Output Operator
-compressed: false
-GlobalTableId: 1
- A masked pattern was here 
-NumFilesPerFileSink: 1
-Static Partition Specification: ds=2008-04-08/hr=11/
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
- A masked pattern was here 
-table:
-input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-properties:
-  bucket_count -1
-  columns key,value
-  columns.comments 
-  columns.types string:string
- A masked pattern was here 
-  name default.list_bucketing_static_part
-  partition_columns ds/hr
-  partition_columns.types string:string
-  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-name: default.list_bucketing_static_part
-TotalFiles: 1
-GatherStats: true
-MultiFileSpray: false
-  Path -> Alias:
- A masked pattern was here 
-  Path -> Partition:
- A masked 

[08/34] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/22541610/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out 
b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
index 217fe76..dfa6ea5 100644
--- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
@@ -20,90 +20,6 @@ EXPLAIN EXTENDED
  SELECT a.key, a.value, b.key, b.value
  WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_FULLOUTERJOIN
- TOK_TABREF
-TOK_TABNAME
-   src
-a
- TOK_TABREF
-TOK_TABNAME
-   srcpart
-b
- AND
-=
-   .
-  TOK_TABLE_OR_COL
- a
-  key
-   .
-  TOK_TABLE_OR_COL
- b
-  key
-=
-   .
-  TOK_TABLE_OR_COL
- b
-  ds
-   '2008-04-08'
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_DIR
-TOK_TMP_FILE
-  TOK_SELECT
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  a
-   value
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  b
-   key
- TOK_SELEXPR
-.
-   TOK_TABLE_OR_COL
-  b
-   value
-  TOK_WHERE
- AND
-AND
-   AND
-  >
- .
-TOK_TABLE_OR_COL
-   a
-key
- 10
-  <
- .
-TOK_TABLE_OR_COL
-   a
-key
- 20
-   >
-  .
- TOK_TABLE_OR_COL
-b
- key
-  15
-<
-   .
-  TOK_TABLE_OR_COL
- b
-  key
-   25
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -112,7 +28,7 @@ STAGE PLANS:
   Stage: Stage-1
 Spark
   Edges:
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 3 (PARTITION-LEVEL 
SORT, 1)
+Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Map 3 (PARTITION-LEVEL 
SORT, 4)
  A masked pattern was here 
   Vertices:
 Map 1 
@@ -121,14 +37,19 @@ STAGE PLANS:
   alias: a
   Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
   GatherStats: false
-  Reduce Output Operator
-key expressions: key (type: string)
-sort order: +
-Map-reduce partition columns: key (type: string)
+  Select Operator
+expressions: key (type: string), value (type: string)
+outputColumnNames: _col0, _col1
 Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-tag: 0
-value expressions: value (type: string)
-auto parallelism: false
+Reduce Output Operator
+  key expressions: _col0 (type: string)
+  null sort order: a
+  sort order: +
+  Map-reduce partition columns: _col0 (type: string)
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+  tag: 0
+  value expressions: _col1 (type: string)
+  auto parallelism: false
 Path -> Alias:
  A masked pattern was here 
 Path -> Partition:
@@ -138,7 +59,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE true
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -158,7 +79,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 

hive git commit: HIVE-13338: Differences in vectorized_casts.q output for vectorized and non-vectorized runs (Matt McCline, reviewed by Prasanth Jayachandran)

2016-05-27 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-1 6e0504d9a -> 213f9f185


HIVE-13338: Differences in vectorized_casts.q output for vectorized and 
non-vectorized runs (Matt McCline, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/213f9f18
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/213f9f18
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/213f9f18

Branch: refs/heads/branch-1
Commit: 213f9f185a80d597ce698708131f9c6d49d5b95f
Parents: 6e0504d
Author: Matt McCline 
Authored: Fri May 27 18:58:27 2016 -0700
Committer: Matt McCline 
Committed: Fri May 27 18:58:27 2016 -0700

--
 .../apache/hadoop/hive/ant/GenVectorCode.java   |  1 +
 .../ql/exec/vector/VectorizationContext.java|  7 ++-
 .../apache/hadoop/hive/ql/udf/UDFToFloat.java   |  4 ++--
 .../clientpositive/vectorized_casts.q.out   | 20 ++--
 4 files changed, 19 insertions(+), 13 deletions(-)
--
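
The change is small but the reason is easy to miss: the new CastLongToFloatViaLongToDouble template applies a "(float)" cast before the result is stored in the double-backed output vector, so the vectorized path rounds exactly like the row-mode UDFToFloat. A minimal standalone illustration of why routing an integer through the plain double cast gives a different answer (the value below is chosen only for demonstration):

// Demonstration only; not part of the patch.
public class LongToFloatCastDemo {
  public static void main(String[] args) {
    long v = 1234567890123456789L;

    double castToDouble = (double) v;        // keeps ~15-16 significant decimal digits
    double castToFloatFirst = (float) v;     // rounds to float's ~7 digits first, then widens

    // The two results diverge beyond float precision, which is the discrepancy
    // between vectorized and non-vectorized output that HIVE-13338 fixes.
    System.out.println(castToDouble == castToFloatFirst);  // false
    System.out.println(castToDouble);
    System.out.println(castToFloatFirst);
  }
}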


http://git-wip-us.apache.org/repos/asf/hive/blob/213f9f18/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
--
diff --git a/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java 
b/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
index 08eee0b..5a67a16 100644
--- a/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
+++ b/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
@@ -951,6 +951,7 @@ public class GenVectorCode extends Task {
   // Casts
   {"ColumnUnaryFunc", "Cast", "long", "double", "", "", "(long)", "", ""},
   {"ColumnUnaryFunc", "Cast", "double", "long", "", "", "(double)", "", 
""},
+  {"ColumnUnaryFunc", "CastLongToFloatVia", "double", "long", "", "", 
"(float)", "", ""},
   {"ColumnUnaryFunc", "CastDoubleToBooleanVia", "long", "double", 
"MathExpr.toBool", "",
 "", "", ""},
   {"ColumnUnaryFunc", "CastLongToBooleanVia", "long", "long", 
"MathExpr.toBool", "",

http://git-wip-us.apache.org/repos/asf/hive/blob/213f9f18/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 86fdf1c..9caa771 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -1784,7 +1784,12 @@ public class VectorizationContext {
 return getConstantVectorExpression(doubleValue, returnType, 
Mode.PROJECTION);
 }
 if (isIntFamily(inputType)) {
-  return createVectorExpression(CastLongToDouble.class, childExpr, 
Mode.PROJECTION, returnType);
+  if (udf.equals(UDFToFloat.class)) {
+// In order to convert from integer to float correctly, we need to 
apply the float cast not the double cast (HIVE-13338).
+return createVectorExpression(CastLongToFloatViaLongToDouble.class, 
childExpr, Mode.PROJECTION, returnType);
+  } else {
+return createVectorExpression(CastLongToDouble.class, childExpr, 
Mode.PROJECTION, returnType);
+  }
 } else if (inputType.equals("timestamp")) {
   return createVectorExpression(CastTimestampToDouble.class, childExpr, 
Mode.PROJECTION,
   returnType);

http://git-wip-us.apache.org/repos/asf/hive/blob/213f9f18/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
index e2183f4..6e2c396 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hive.ql.udf;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.CastDecimalToDouble;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastLongToDouble;
+import 
org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastLongToFloatViaLongToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.CastTimestampToDouble;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -39,7 +39,7 @@ import org.apache.hadoop.io.Text;
  * UDFToFloat.
  *
  */
-@VectorizedExpressions({CastTimestampToDouble.class, CastLongToDouble.class,
+@VectorizedExpressions({CastTimestampToDouble.class, 

hive git commit: HIVE-13338: Differences in vectorized_casts.q output for vectorized and non-vectorized runs (Matt McCline, reviewed by Prasanth Jayachandran)

2016-05-27 Thread mmccline
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 7a41e7ccf -> 2bd2e9a09


HIVE-13338: Differences in vectorized_casts.q output for vectorized and 
non-vectorized runs (Matt McCline, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2bd2e9a0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2bd2e9a0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2bd2e9a0

Branch: refs/heads/branch-2.1
Commit: 2bd2e9a091a81751c4c2d5392538a2763fe754a2
Parents: 7a41e7c
Author: Matt McCline 
Authored: Fri May 27 18:13:46 2016 -0700
Committer: Matt McCline 
Committed: Fri May 27 18:13:46 2016 -0700

--
 .../apache/hadoop/hive/ant/GenVectorCode.java   |  1 +
 .../ql/exec/vector/VectorizationContext.java|  7 ++-
 .../apache/hadoop/hive/ql/udf/UDFToFloat.java   |  4 ++--
 .../clientpositive/tez/vectorized_casts.q.out   | 20 ++--
 .../clientpositive/vectorized_casts.q.out   | 20 ++--
 5 files changed, 29 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/2bd2e9a0/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
--
diff --git a/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java 
b/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
index 6c6cc63..e9fe8fa 100644
--- a/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
+++ b/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
@@ -933,6 +933,7 @@ public class GenVectorCode extends Task {
   // Casts
   {"ColumnUnaryFunc", "Cast", "long", "double", "", "", "(long)", "", ""},
   {"ColumnUnaryFunc", "Cast", "double", "long", "", "", "(double)", "", 
""},
+  {"ColumnUnaryFunc", "CastLongToFloatVia", "double", "long", "", "", 
"(float)", "", ""},
   {"ColumnUnaryFunc", "CastDoubleToBooleanVia", "long", "double", 
"MathExpr.toBool", "",
 "", "", ""},
   {"ColumnUnaryFunc", "CastLongToBooleanVia", "long", "long", 
"MathExpr.toBool", "",

http://git-wip-us.apache.org/repos/asf/hive/blob/2bd2e9a0/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index a76e31d..7f55b31 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -1803,7 +1803,12 @@ public class VectorizationContext {
 return getConstantVectorExpression(doubleValue, returnType, 
Mode.PROJECTION);
 }
 if (isIntFamily(inputType)) {
-  return createVectorExpression(CastLongToDouble.class, childExpr, 
Mode.PROJECTION, returnType);
+  if (udf.equals(UDFToFloat.class)) {
+// In order to convert from integer to float correctly, we need to 
apply the float cast not the double cast (HIVE-13338).
+return createVectorExpression(CastLongToFloatViaLongToDouble.class, 
childExpr, Mode.PROJECTION, returnType);
+  } else {
+return createVectorExpression(CastLongToDouble.class, childExpr, 
Mode.PROJECTION, returnType);
+  }
 } else if (inputType.equals("timestamp")) {
   return createVectorExpression(CastTimestampToDouble.class, childExpr, 
Mode.PROJECTION,
   returnType);

http://git-wip-us.apache.org/repos/asf/hive/blob/2bd2e9a0/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
index c612307..5808c90 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hive.ql.udf;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.CastDecimalToDouble;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastLongToDouble;
+import 
org.apache.hadoop.hive.ql.exec.vector.expressions.gen.CastLongToFloatViaLongToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.CastTimestampToDouble;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -40,7 +40,7 @@ import org.apache.hadoop.io.Text;
  * UDFToFloat.
  *
  */
-@VectorizedExpressions({CastTimestampToDouble.class, CastLongToDouble.class,

hive git commit: HIVE-13857: insert overwrite select from some table fails throwing org.apache.hadoop.security.AccessControlException - II (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-05-27 Thread harisankar
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 a18b0225e -> 7a41e7ccf


HIVE-13857: insert overwrite select from some table fails throwing 
org.apache.hadoop.security.AccessControlException - II (Hari Subramaniyan, 
reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7a41e7cc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7a41e7cc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7a41e7cc

Branch: refs/heads/branch-2.1
Commit: 7a41e7ccf3293ce062e5d86e6032195c6a2dbe7e
Parents: a18b022
Author: Hari Subramaniyan 
Authored: Fri May 27 17:50:07 2016 -0700
Committer: Hari Subramaniyan 
Committed: Fri May 27 17:53:04 2016 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java   |  4 +++-
 ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java  |  6 --
 .../main/java/org/apache/hadoop/hive/io/HdfsUtils.java| 10 +-
 3 files changed, 16 insertions(+), 4 deletions(-)
--
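
The DDLTask and Hive.java hunks below capture the group of the original target (or source file) before the path is deleted, recreated, or renamed, and thread that group through to the extended HdfsUtils.setFullFileStatus overload. A condensed, self-contained sketch of the capture-and-restore idea using only the public Hadoop FileSystem API (the real patch also restores permissions and ACLs via setFullFileStatus; the class and method below are illustrative, not from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PreserveGroupOnTruncate {
  // Sketch: delete and recreate a directory while keeping its original group.
  public static void truncateKeepingGroup(Configuration conf, Path location) throws Exception {
    FileSystem fs = location.getFileSystem(conf);
    FileStatus targetStatus = fs.getFileStatus(location);
    String targetGroup = targetStatus == null ? null : targetStatus.getGroup();

    fs.delete(location, true);   // recursive delete, as in the DDLTask hunk
    fs.mkdirs(location);

    if (targetGroup != null) {
      fs.setOwner(location, null, targetGroup);   // null owner = leave the owner unchanged
    }
  }
}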


http://git-wip-us.apache.org/repos/asf/hive/blob/7a41e7cc/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index bc39994..00bff6b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4232,10 +4232,12 @@ public class DDLTask extends Task implements 
Serializable {
   for (Path location : getLocations(db, table, partSpec)) {
 FileSystem fs = location.getFileSystem(conf);
 HdfsUtils.HadoopFileStatus status = new 
HdfsUtils.HadoopFileStatus(conf, fs, location);
+FileStatus targetStatus = fs.getFileStatus(location);
+String targetGroup = targetStatus == null ? null : 
targetStatus.getGroup();
 fs.delete(location, true);
 fs.mkdirs(location);
 try {
-  HdfsUtils.setFullFileStatus(conf, status, fs, location, false);
+  HdfsUtils.setFullFileStatus(conf, status, targetGroup, fs, location, 
false);
 } catch (Exception e) {
   LOG.warn("Error setting permissions of " + location, e);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/7a41e7cc/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 379eddc..ee6c564 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2688,6 +2688,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 }
 
 Path destPath = new Path(destf, srcP.getName());
+String srcGroup = srcFile.getGroup();
 if (!needToCopy && !isSrcLocal) {
   for (int counter = 1; !destFs.rename(srcP,destPath); counter++) {
 destPath = new Path(destf, name + ("_copy_" + counter) + 
filetype);
@@ -2697,7 +2698,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 }
 
 if (inheritPerms) {
-  HdfsUtils.setFullFileStatus(conf, fullDestStatus, destFs, 
destPath, false);
+  HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, 
destFs, destPath, false);
 }
 if (null != newFiles) {
   newFiles.add(destPath);
@@ -2871,10 +2872,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
 public Void call() throws Exception {
   SessionState.setCurrentSessionState(parentSession);
   Path destPath = new Path(destf, status.getPath().getName());
+  String group = status.getGroup();
   try {
 if(destFs.rename(status.getPath(), destf)) {
   if (inheritPerms) {
-HdfsUtils.setFullFileStatus(conf, desiredStatus, 
destFs, destPath, false);
+HdfsUtils.setFullFileStatus(conf, desiredStatus, 
group, destFs, destPath, false);
   }
 } else {
   throw new IOException("rename for src path: " + 
status.getPath() + " to dest path:"

http://git-wip-us.apache.org/repos/asf/hive/blob/7a41e7cc/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
--
diff --git 
a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java 
b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 

hive git commit: HIVE-13857: insert overwrite select from some table fails throwing org.apache.hadoop.security.AccessControlException - II (Hari Subramaniyan, reviewed by Ashutosh Chauhan)

2016-05-27 Thread harisankar
Repository: hive
Updated Branches:
  refs/heads/master 4e3da98d7 -> 0d67cb0b7


HIVE-13857: insert overwrite select from some table fails throwing 
org.apache.hadoop.security.AccessControlException - II (Hari Subramaniyan, 
reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0d67cb0b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0d67cb0b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0d67cb0b

Branch: refs/heads/master
Commit: 0d67cb0b7d8a08ea7ce7bbe43f0ba37d258cd5be
Parents: 4e3da98
Author: Hari Subramaniyan 
Authored: Fri May 27 17:50:07 2016 -0700
Committer: Hari Subramaniyan 
Committed: Fri May 27 17:50:07 2016 -0700

--
 ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java   |  4 +++-
 ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java  |  6 --
 .../main/java/org/apache/hadoop/hive/io/HdfsUtils.java| 10 +-
 3 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/0d67cb0b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index bc39994..00bff6b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4232,10 +4232,12 @@ public class DDLTask extends Task implements 
Serializable {
   for (Path location : getLocations(db, table, partSpec)) {
 FileSystem fs = location.getFileSystem(conf);
 HdfsUtils.HadoopFileStatus status = new 
HdfsUtils.HadoopFileStatus(conf, fs, location);
+FileStatus targetStatus = fs.getFileStatus(location);
+String targetGroup = targetStatus == null ? null : 
targetStatus.getGroup();
 fs.delete(location, true);
 fs.mkdirs(location);
 try {
-  HdfsUtils.setFullFileStatus(conf, status, fs, location, false);
+  HdfsUtils.setFullFileStatus(conf, status, targetGroup, fs, location, 
false);
 } catch (Exception e) {
   LOG.warn("Error setting permissions of " + location, e);
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/0d67cb0b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 379eddc..ee6c564 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2688,6 +2688,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 }
 
 Path destPath = new Path(destf, srcP.getName());
+String srcGroup = srcFile.getGroup();
 if (!needToCopy && !isSrcLocal) {
   for (int counter = 1; !destFs.rename(srcP,destPath); counter++) {
 destPath = new Path(destf, name + ("_copy_" + counter) + 
filetype);
@@ -2697,7 +2698,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 }
 
 if (inheritPerms) {
-  HdfsUtils.setFullFileStatus(conf, fullDestStatus, destFs, 
destPath, false);
+  HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, 
destFs, destPath, false);
 }
 if (null != newFiles) {
   newFiles.add(destPath);
@@ -2871,10 +2872,11 @@ private void constructOneLBLocationMap(FileStatus fSta,
 public Void call() throws Exception {
   SessionState.setCurrentSessionState(parentSession);
   Path destPath = new Path(destf, status.getPath().getName());
+  String group = status.getGroup();
   try {
 if(destFs.rename(status.getPath(), destf)) {
   if (inheritPerms) {
-HdfsUtils.setFullFileStatus(conf, desiredStatus, 
destFs, destPath, false);
+HdfsUtils.setFullFileStatus(conf, desiredStatus, 
group, destFs, destPath, false);
   }
 } else {
   throw new IOException("rename for src path: " + 
status.getPath() + " to dest path:"

http://git-wip-us.apache.org/repos/asf/hive/blob/0d67cb0b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
--
diff --git 
a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java 
b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 

hive git commit: HIVE-13841: Orc split generation returns different strategies with cache enabled vs disabled (Prasanth Jayachandran reviewed by Sergey Shelukhin)

2016-05-27 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 e276929df -> a18b0225e


HIVE-13841: Orc split generation returns different strategies with cache 
enabled vs disabled (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a18b0225
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a18b0225
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a18b0225

Branch: refs/heads/branch-2.1
Commit: a18b0225ed20c5be2ab898c6ca941e4c1ab1e5f4
Parents: e276929
Author: Prasanth Jayachandran 
Authored: Fri May 27 16:41:50 2016 -0700
Committer: Prasanth Jayachandran 
Committed: Fri May 27 16:42:35 2016 -0700

--
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  7 ---
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 21 
 2 files changed, 25 insertions(+), 3 deletions(-)
--
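
The OrcInputFormat hunks below decouple the HYBRID strategy's file-count threshold from the footer-cache size: previously it was Math.min(cacheStripeDetailsSize, minSplits), so disabling the cache silently changed which split strategy was chosen; now the threshold is minSplits, defaulting to 100 when minSplits <= 0. A small sketch of the decision rule the patched code encodes (names and the BI fallback are paraphrased from the hunk, not the actual classes):

public final class HybridSplitStrategyChoice {
  static final int DEFAULT_ETL_FILE_THRESHOLD = 100;   // constant introduced by the patch

  enum Strategy { ETL, BI }

  // HYBRID mode: read footers eagerly (ETL) for few or large files, otherwise BI.
  static Strategy choose(long avgFileSize, long maxSplitSize, int totalFiles, int etlFileThreshold) {
    return (avgFileSize > maxSplitSize || totalFiles <= etlFileThreshold)
        ? Strategy.ETL
        : Strategy.BI;
  }

  public static void main(String[] args) {
    int threshold = DEFAULT_ETL_FILE_THRESHOLD;  // now used whether or not the footer cache is on
    System.out.println(choose(8L << 20, 256L << 20, 50, threshold));    // ETL: only 50 files
    System.out.println(choose(8L << 20, 256L << 20, 5000, threshold));  // BI: many small files
  }
}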


http://git-wip-us.apache.org/repos/asf/hive/blob/a18b0225/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 33fe3b6..087207b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -155,6 +155,7 @@ public class OrcInputFormat implements 
InputFormat,
 
   private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024;
   private static final long DEFAULT_MAX_SPLIT_SIZE = 256 * 1024 * 1024;
+  private static final int DEFAULT_ETL_FILE_THRESHOLD = 100;
 
   /**
* When picking the hosts for a split that crosses block boundaries,
@@ -510,7 +511,7 @@ public class OrcInputFormat implements 
InputFormat,
 private final int splitStrategyBatchMs;
 private final long maxSize;
 private final long minSize;
-private final int minSplits;
+private final int etlFileThreshold;
 private final boolean footerInSplits;
 private final boolean cacheStripeDetails;
 private final boolean forceThreadpool;
@@ -555,7 +556,7 @@ public class OrcInputFormat implements 
InputFormat,
 
   cacheStripeDetails = (cacheStripeDetailsSize > 0);
 
-  this.minSplits = Math.min(cacheStripeDetailsSize, minSplits);
+  this.etlFileThreshold = minSplits <= 0 ? DEFAULT_ETL_FILE_THRESHOLD : 
minSplits;
 
   synchronized (Context.class) {
 if (threadPool == null) {
@@ -1938,7 +1939,7 @@ public class OrcInputFormat implements 
InputFormat,
   deltas, covered, isOriginal, ugi, allowSyntheticFileIds);
 default:
   // HYBRID strategy
-  if (avgFileSize > context.maxSize || totalFiles <= 
context.minSplits) {
+  if (avgFileSize > context.maxSize || totalFiles <= 
context.etlFileThreshold) {
 return combineOrCreateETLStrategy(combinedCtx, context, fs, dir, 
baseOrOriginalFiles,
 deltas, covered, isOriginal, ugi, allowSyntheticFileIds);
   } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/a18b0225/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java 
b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index c1ef0e7..52098ae 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -525,6 +525,27 @@ public class TestInputOutputFormat {
 }
   }
 }
+
+k = 0;
+conf.set("hive.orc.cache.stripe.details.size", "-1");
+for (int c : counts) {
+  for (int s : sizes) {
+final FileSystem fs = generateMockFiles(c, s);
+for (int n : numSplits) {
+  final OrcInputFormat.Context context = new OrcInputFormat.Context(
+  conf, n);
+  OrcInputFormat.FileGenerator gen = new OrcInputFormat.FileGenerator(
+  context, fs, new MockPath(fs, "mock:/a/b"), false, null);
+  final SplitStrategy splitStrategy = createSplitStrategy(context, 
gen);
+  assertTrue(
+  String.format(
+  "Split strategy for %d files x %d size for %d splits", c, s,
+  n),
+  splitStrategy.getClass().getSimpleName()
+  .equals(strategyResults[k++]));
+}
+  }
+}
   }
 
   @Test



hive git commit: HIVE-13841: Orc split generation returns different strategies with cache enabled vs disabled (Prasanth Jayachandran reviewed by Sergey Shelukhin)

2016-05-27 Thread prasanthj
Repository: hive
Updated Branches:
  refs/heads/master 02b2fb5a9 -> 4e3da98d7


HIVE-13841: Orc split generation returns different strategies with cache 
enabled vs disabled (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4e3da98d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4e3da98d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4e3da98d

Branch: refs/heads/master
Commit: 4e3da98d7f05ae29c71bd379c3f59691588c0209
Parents: 02b2fb5
Author: Prasanth Jayachandran 
Authored: Fri May 27 16:41:50 2016 -0700
Committer: Prasanth Jayachandran 
Committed: Fri May 27 16:41:50 2016 -0700

--
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  7 ---
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 21 
 2 files changed, 25 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4e3da98d/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 33fe3b6..087207b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -155,6 +155,7 @@ public class OrcInputFormat implements 
InputFormat,
 
   private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024;
   private static final long DEFAULT_MAX_SPLIT_SIZE = 256 * 1024 * 1024;
+  private static final int DEFAULT_ETL_FILE_THRESHOLD = 100;
 
   /**
* When picking the hosts for a split that crosses block boundaries,
@@ -510,7 +511,7 @@ public class OrcInputFormat implements 
InputFormat,
 private final int splitStrategyBatchMs;
 private final long maxSize;
 private final long minSize;
-private final int minSplits;
+private final int etlFileThreshold;
 private final boolean footerInSplits;
 private final boolean cacheStripeDetails;
 private final boolean forceThreadpool;
@@ -555,7 +556,7 @@ public class OrcInputFormat implements 
InputFormat,
 
   cacheStripeDetails = (cacheStripeDetailsSize > 0);
 
-  this.minSplits = Math.min(cacheStripeDetailsSize, minSplits);
+  this.etlFileThreshold = minSplits <= 0 ? DEFAULT_ETL_FILE_THRESHOLD : 
minSplits;
 
   synchronized (Context.class) {
 if (threadPool == null) {
@@ -1938,7 +1939,7 @@ public class OrcInputFormat implements 
InputFormat,
   deltas, covered, isOriginal, ugi, allowSyntheticFileIds);
 default:
   // HYBRID strategy
-  if (avgFileSize > context.maxSize || totalFiles <= 
context.minSplits) {
+  if (avgFileSize > context.maxSize || totalFiles <= 
context.etlFileThreshold) {
 return combineOrCreateETLStrategy(combinedCtx, context, fs, dir, 
baseOrOriginalFiles,
 deltas, covered, isOriginal, ugi, allowSyntheticFileIds);
   } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/4e3da98d/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java 
b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index c1ef0e7..52098ae 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -525,6 +525,27 @@ public class TestInputOutputFormat {
 }
   }
 }
+
+k = 0;
+conf.set("hive.orc.cache.stripe.details.size", "-1");
+for (int c : counts) {
+  for (int s : sizes) {
+final FileSystem fs = generateMockFiles(c, s);
+for (int n : numSplits) {
+  final OrcInputFormat.Context context = new OrcInputFormat.Context(
+  conf, n);
+  OrcInputFormat.FileGenerator gen = new OrcInputFormat.FileGenerator(
+  context, fs, new MockPath(fs, "mock:/a/b"), false, null);
+  final SplitStrategy splitStrategy = createSplitStrategy(context, 
gen);
+  assertTrue(
+  String.format(
+  "Split strategy for %d files x %d size for %d splits", c, s,
+  n),
+  splitStrategy.getClass().getSimpleName()
+  .equals(strategyResults[k++]));
+}
+  }
+}
   }
 
   @Test



[7/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 298384c..5a35a50 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size773;
-::apache::thrift::protocol::TType _etype776;
-xfer += iprot->readListBegin(_etype776, _size773);
-this->success.resize(_size773);
-uint32_t _i777;
-for (_i777 = 0; _i777 < _size773; ++_i777)
+uint32_t _size781;
+::apache::thrift::protocol::TType _etype784;
+xfer += iprot->readListBegin(_etype784, _size781);
+this->success.resize(_size781);
+uint32_t _i785;
+for (_i785 = 0; _i785 < _size781; ++_i785)
 {
-  xfer += iprot->readString(this->success[_i777]);
+  xfer += iprot->readString(this->success[_i785]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size()));
-  std::vector ::const_iterator _iter778;
-  for (_iter778 = this->success.begin(); _iter778 != this->success.end(); 
++_iter778)
+  std::vector ::const_iterator _iter786;
+  for (_iter786 = this->success.begin(); _iter786 != this->success.end(); 
++_iter786)
   {
-xfer += oprot->writeString((*_iter778));
+xfer += oprot->writeString((*_iter786));
   }
   xfer += oprot->writeListEnd();
 }
@@ -1334,14 +1334,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size779;
-::apache::thrift::protocol::TType _etype782;
-xfer += iprot->readListBegin(_etype782, _size779);
-(*(this->success)).resize(_size779);
-uint32_t _i783;
-for (_i783 = 0; _i783 < _size779; ++_i783)
+uint32_t _size787;
+::apache::thrift::protocol::TType _etype790;
+xfer += iprot->readListBegin(_etype790, _size787);
+(*(this->success)).resize(_size787);
+uint32_t _i791;
+for (_i791 = 0; _i791 < _size787; ++_i791)
 {
-  xfer += iprot->readString((*(this->success))[_i783]);
+  xfer += iprot->readString((*(this->success))[_i791]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size784;
-::apache::thrift::protocol::TType _etype787;
-xfer += iprot->readListBegin(_etype787, _size784);
-this->success.resize(_size784);
-uint32_t _i788;
-for (_i788 = 0; _i788 < _size784; ++_i788)
+uint32_t _size792;
+::apache::thrift::protocol::TType _etype795;
+xfer += iprot->readListBegin(_etype795, _size792);
+this->success.resize(_size792);
+uint32_t _i796;
+for (_i796 = 0; _i796 < _size792; ++_i796)
 {
-  xfer += iprot->readString(this->success[_i788]);
+  xfer += iprot->readString(this->success[_i796]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size()));
-  std::vector ::const_iterator _iter789;
-  for (_iter789 = this->success.begin(); _iter789 != this->success.end(); 
++_iter789)
+  std::vector ::const_iterator _iter797;
+  for (_iter797 = this->success.begin(); _iter797 != this->success.end(); 
++_iter797)
   {
-xfer += oprot->writeString((*_iter789));
+xfer += oprot->writeString((*_iter797));
   }
   xfer += oprot->writeListEnd();
 }
@@ 

[4/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 13a8b71..cb5dec9 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -28842,13 +28842,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list632 = 
iprot.readListBegin();
-  struct.success = new ArrayList(_list632.size);
-  String _elem633;
-  for (int _i634 = 0; _i634 < _list632.size; ++_i634)
+  org.apache.thrift.protocol.TList _list642 = 
iprot.readListBegin();
+  struct.success = new ArrayList(_list642.size);
+  String _elem643;
+  for (int _i644 = 0; _i644 < _list642.size; ++_i644)
   {
-_elem633 = iprot.readString();
-struct.success.add(_elem633);
+_elem643 = iprot.readString();
+struct.success.add(_elem643);
   }
   iprot.readListEnd();
 }
@@ -28883,9 +28883,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
 oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size()));
-for (String _iter635 : struct.success)
+for (String _iter645 : struct.success)
 {
-  oprot.writeString(_iter635);
+  oprot.writeString(_iter645);
 }
 oprot.writeListEnd();
   }
@@ -28924,9 +28924,9 @@ public class ThriftHiveMetastore {
 if (struct.isSetSuccess()) {
   {
 oprot.writeI32(struct.success.size());
-for (String _iter636 : struct.success)
+for (String _iter646 : struct.success)
 {
-  oprot.writeString(_iter636);
+  oprot.writeString(_iter646);
 }
   }
 }
@@ -28941,13 +28941,13 @@ public class ThriftHiveMetastore {
 BitSet incoming = iprot.readBitSet(2);
 if (incoming.get(0)) {
   {
-org.apache.thrift.protocol.TList _list637 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
-struct.success = new ArrayList(_list637.size);
-String _elem638;
-for (int _i639 = 0; _i639 < _list637.size; ++_i639)
+org.apache.thrift.protocol.TList _list647 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
+struct.success = new ArrayList(_list647.size);
+String _elem648;
+for (int _i649 = 0; _i649 < _list647.size; ++_i649)
 {
-  _elem638 = iprot.readString();
-  struct.success.add(_elem638);
+  _elem648 = iprot.readString();
+  struct.success.add(_elem648);
 }
   }
   struct.setSuccessIsSet(true);
@@ -29601,13 +29601,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list640 = 
iprot.readListBegin();
-  struct.success = new ArrayList(_list640.size);
-  String _elem641;
-  for (int _i642 = 0; _i642 < _list640.size; ++_i642)
+  org.apache.thrift.protocol.TList _list650 = 
iprot.readListBegin();
+  struct.success = new ArrayList(_list650.size);
+  String _elem651;
+  for (int _i652 = 0; _i652 < _list650.size; ++_i652)
   {
-_elem641 = iprot.readString();
-struct.success.add(_elem641);
+_elem651 = iprot.readString();
+struct.success.add(_elem651);
   }
   iprot.readListEnd();
 }
@@ -29642,9 +29642,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
 oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size()));
-for 

[6/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp 
b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index f8ca7cd..79460a8 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -14413,6 +14413,11 @@ void CompactionRequest::__set_runas(const std::string& 
val) {
 __isset.runas = true;
 }
 
+void CompactionRequest::__set_properties(const std::map & val) {
+  this->properties = val;
+__isset.properties = true;
+}
+
 uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) 
{
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -14479,6 +14484,29 @@ uint32_t 
CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
   xfer += iprot->skip(ftype);
 }
 break;
+  case 6:
+if (ftype == ::apache::thrift::protocol::T_MAP) {
+  {
+this->properties.clear();
+uint32_t _size603;
+::apache::thrift::protocol::TType _ktype604;
+::apache::thrift::protocol::TType _vtype605;
+xfer += iprot->readMapBegin(_ktype604, _vtype605, _size603);
+uint32_t _i607;
+for (_i607 = 0; _i607 < _size603; ++_i607)
+{
+  std::string _key608;
+  xfer += iprot->readString(_key608);
+  std::string& _val609 = this->properties[_key608];
+  xfer += iprot->readString(_val609);
+}
+xfer += iprot->readMapEnd();
+  }
+  this->__isset.properties = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
   default:
 xfer += iprot->skip(ftype);
 break;
@@ -14524,6 +14552,20 @@ uint32_t 
CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot)
 xfer += oprot->writeString(this->runas);
 xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.properties) {
+xfer += oprot->writeFieldBegin("properties", 
::apache::thrift::protocol::T_MAP, 6);
+{
+  xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, 
::apache::thrift::protocol::T_STRING, 
static_cast(this->properties.size()));
+  std::map ::const_iterator _iter610;
+  for (_iter610 = this->properties.begin(); _iter610 != 
this->properties.end(); ++_iter610)
+  {
+xfer += oprot->writeString(_iter610->first);
+xfer += oprot->writeString(_iter610->second);
+  }
+  xfer += oprot->writeMapEnd();
+}
+xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -14536,24 +14578,27 @@ void swap(CompactionRequest , CompactionRequest ) 
{
   swap(a.partitionname, b.partitionname);
   swap(a.type, b.type);
   swap(a.runas, b.runas);
+  swap(a.properties, b.properties);
   swap(a.__isset, b.__isset);
 }
 
-CompactionRequest::CompactionRequest(const CompactionRequest& other603) {
-  dbname = other603.dbname;
-  tablename = other603.tablename;
-  partitionname = other603.partitionname;
-  type = other603.type;
-  runas = other603.runas;
-  __isset = other603.__isset;
-}
-CompactionRequest& CompactionRequest::operator=(const CompactionRequest& 
other604) {
-  dbname = other604.dbname;
-  tablename = other604.tablename;
-  partitionname = other604.partitionname;
-  type = other604.type;
-  runas = other604.runas;
-  __isset = other604.__isset;
+CompactionRequest::CompactionRequest(const CompactionRequest& other611) {
+  dbname = other611.dbname;
+  tablename = other611.tablename;
+  partitionname = other611.partitionname;
+  type = other611.type;
+  runas = other611.runas;
+  properties = other611.properties;
+  __isset = other611.__isset;
+}
+CompactionRequest& CompactionRequest::operator=(const CompactionRequest& 
other612) {
+  dbname = other612.dbname;
+  tablename = other612.tablename;
+  partitionname = other612.partitionname;
+  type = other612.type;
+  runas = other612.runas;
+  properties = other612.properties;
+  __isset = other612.__isset;
   return *this;
 }
 void CompactionRequest::printTo(std::ostream& out) const {
@@ -14564,6 +14609,7 @@ void CompactionRequest::printTo(std::ostream& out) 
const {
   out << ", " << "partitionname="; (__isset.partitionname ? (out << 
to_string(partitionname)) : (out << ""));
   out << ", " << "type=" << to_string(type);
   out << ", " << "runas="; (__isset.runas ? (out << to_string(runas)) : (out 
<< ""));
+  out << ", " << "properties="; (__isset.properties ? (out << 
to_string(properties)) : (out << ""));
   out << ")";
 }
 
@@ -14616,11 +14662,11 @@ void swap(ShowCompactRequest , ShowCompactRequest 
) {
   (void) b;
 }
 

[8/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
HIVE-13354 : Add ability to specify Compaction options per table and per 
request (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e276929d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e276929d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e276929d

Branch: refs/heads/branch-2.1
Commit: e276929df46cca5acb3b392fed4d72313748c8f7
Parents: 4b55335
Author: Wei Zheng 
Authored: Fri May 27 15:16:01 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 27 15:16:01 2016 -0700

--
 .../hive/ql/txn/compactor/TestCompactor.java|  161 ++
 metastore/if/hive_metastore.thrift  |1 +
 .../upgrade/derby/036-HIVE-13354.derby.sql  |2 +
 .../derby/hive-txn-schema-1.3.0.derby.sql   |2 +
 .../derby/hive-txn-schema-2.1.0.derby.sql   |2 +
 .../derby/upgrade-1.2.0-to-1.3.0.derby.sql  |1 +
 .../derby/upgrade-2.0.0-to-2.1.0.derby.sql  |1 +
 .../upgrade/mssql/021-HIVE-13354.mssql.sql  |2 +
 .../upgrade/mssql/hive-schema-1.3.0.mssql.sql   |2 +
 .../upgrade/mssql/hive-schema-2.1.0.mssql.sql   |2 +
 .../mssql/upgrade-1.2.0-to-1.3.0.mssql.sql  |1 +
 .../mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |1 +
 .../upgrade/mysql/036-HIVE-13354.mysql.sql  |2 +
 .../mysql/hive-txn-schema-1.3.0.mysql.sql   |2 +
 .../mysql/hive-txn-schema-2.1.0.mysql.sql   |2 +
 .../mysql/upgrade-1.2.0-to-1.3.0.mysql.sql  |1 +
 .../mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |1 +
 .../upgrade/oracle/036-HIVE-13354.oracle.sql|2 +
 .../oracle/hive-txn-schema-1.3.0.oracle.sql |2 +
 .../oracle/hive-txn-schema-2.1.0.oracle.sql |2 +
 .../oracle/upgrade-1.2.0-to-1.3.0.oracle.sql|1 +
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql|1 +
 .../postgres/035-HIVE-13354.postgres.sql|2 +
 .../postgres/hive-txn-schema-1.3.0.postgres.sql |2 +
 .../postgres/hive-txn-schema-2.1.0.postgres.sql |2 +
 .../upgrade-1.2.0-to-1.3.0.postgres.sql |1 +
 .../upgrade-2.0.0-to-2.1.0.postgres.sql |1 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2020 
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  980 
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   10 +-
 .../metastore/api/AddDynamicPartitions.java |   32 +-
 .../metastore/api/ClearFileMetadataRequest.java |   32 +-
 .../hive/metastore/api/CompactionRequest.java   |  166 +-
 .../hive/metastore/api/FireEventRequest.java|   32 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../api/GetFileMetadataByExprRequest.java   |   32 +-
 .../api/GetFileMetadataByExprResult.java|   48 +-
 .../metastore/api/GetFileMetadataRequest.java   |   32 +-
 .../metastore/api/GetFileMetadataResult.java|   44 +-
 .../metastore/api/InsertEventRequestData.java   |   32 +-
 .../api/NotificationEventResponse.java  |   36 +-
 .../metastore/api/PutFileMetadataRequest.java   |   64 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 2188 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1232 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  315 +--
 .../hive_metastore/ThriftHiveMetastore.py   |  830 +++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  197 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../hive/metastore/HiveMetaStoreClient.java |   14 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   18 +
 .../hive/metastore/txn/CompactionInfo.java  |   30 +-
 .../metastore/txn/CompactionTxnHandler.java |   11 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|2 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |8 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   56 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |2 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|6 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |5 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|4 +-
 .../hive/ql/plan/AlterTableSimpleDesc.java  |8 +
 .../hive/ql/txn/compactor/CompactorMR.java  |  121 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |   22 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java|9 +
 .../hive/ql/txn/compactor/TestWorker.java   |9 +-
 65 files changed, 4780 insertions(+), 4145 deletions(-)
--
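
The core of this change is the new optional field 6 ("properties", a map<string,string>) on CompactionRequest, which lets a caller attach per-request compaction settings instead of relying only on table-level defaults. A minimal Java sketch of building such a request with the generated Thrift bean follows; it is not taken from the patch, and the property keys shown are illustrative assumptions only.

    // Hedged sketch: exercise the new optional 'properties' map on CompactionRequest.
    // Property keys below are illustrative, not taken from the patch.
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.CompactionRequest;
    import org.apache.hadoop.hive.metastore.api.CompactionType;

    public class CompactionRequestExample {
      public static CompactionRequest buildRequest() {
        CompactionRequest req = new CompactionRequest();
        req.setDbname("default");
        req.setTablename("scratch");
        req.setType(CompactionType.MAJOR);
        // Field 6: optional map<string,string> of per-request compaction options.
        Map<String, String> props = new HashMap<String, String>();
        props.put("compactor.mapreduce.map.memory.mb", "2048");            // illustrative key
        req.setProperties(props);
        // The generated bean also offers putToProperties(key, value) for single entries.
        req.putToProperties("compactor.hive.tez.container.size", "2048");  // illustrative key
        return req;
      }
    }
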


http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
--
diff --git 

[2/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php 
b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 5aef35c..f505208 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -14373,6 +14373,10 @@ class CompactionRequest {
* @var string
*/
   public $runas = null;
+  /**
+   * @var array
+   */
+  public $properties = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -14397,6 +14401,18 @@ class CompactionRequest {
   'var' => 'runas',
   'type' => TType::STRING,
   ),
+6 => array(
+  'var' => 'properties',
+  'type' => TType::MAP,
+  'ktype' => TType::STRING,
+  'vtype' => TType::STRING,
+  'key' => array(
+'type' => TType::STRING,
+  ),
+  'val' => array(
+'type' => TType::STRING,
+),
+  ),
 );
 }
 if (is_array($vals)) {
@@ -14415,6 +14431,9 @@ class CompactionRequest {
   if (isset($vals['runas'])) {
 $this->runas = $vals['runas'];
   }
+  if (isset($vals['properties'])) {
+$this->properties = $vals['properties'];
+  }
 }
   }
 
@@ -14472,6 +14491,26 @@ class CompactionRequest {
 $xfer += $input->skip($ftype);
   }
   break;
+case 6:
+  if ($ftype == TType::MAP) {
+$this->properties = array();
+$_size465 = 0;
+$_ktype466 = 0;
+$_vtype467 = 0;
+$xfer += $input->readMapBegin($_ktype466, $_vtype467, $_size465);
+for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
+{
+  $key470 = '';
+  $val471 = '';
+  $xfer += $input->readString($key470);
+  $xfer += $input->readString($val471);
+  $this->properties[$key470] = $val471;
+}
+$xfer += $input->readMapEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
 default:
   $xfer += $input->skip($ftype);
   break;
@@ -14510,6 +14549,24 @@ class CompactionRequest {
   $xfer += $output->writeString($this->runas);
   $xfer += $output->writeFieldEnd();
 }
+if ($this->properties !== null) {
+  if (!is_array($this->properties)) {
+throw new TProtocolException('Bad type in structure.', 
TProtocolException::INVALID_DATA);
+  }
+  $xfer += $output->writeFieldBegin('properties', TType::MAP, 6);
+  {
+$output->writeMapBegin(TType::STRING, TType::STRING, 
count($this->properties));
+{
+  foreach ($this->properties as $kiter472 => $viter473)
+  {
+$xfer += $output->writeString($kiter472);
+$xfer += $output->writeString($viter473);
+  }
+}
+$output->writeMapEnd();
+  }
+  $xfer += $output->writeFieldEnd();
+}
 $xfer += $output->writeFieldStop();
 $xfer += $output->writeStructEnd();
 return $xfer;
@@ -14946,15 +15003,15 @@ class ShowCompactResponse {
 case 1:
   if ($ftype == TType::LST) {
 $this->compacts = array();
-$_size465 = 0;
-$_etype468 = 0;
-$xfer += $input->readListBegin($_etype468, $_size465);
-for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
+$_size474 = 0;
+$_etype477 = 0;
+$xfer += $input->readListBegin($_etype477, $_size474);
+for ($_i478 = 0; $_i478 < $_size474; ++$_i478)
 {
-  $elem470 = null;
-  $elem470 = new \metastore\ShowCompactResponseElement();
-  $xfer += $elem470->read($input);
-  $this->compacts []= $elem470;
+  $elem479 = null;
+  $elem479 = new \metastore\ShowCompactResponseElement();
+  $xfer += $elem479->read($input);
+  $this->compacts []= $elem479;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -14982,9 +15039,9 @@ class ShowCompactResponse {
   {
 $output->writeListBegin(TType::STRUCT, count($this->compacts));
 {
-  foreach ($this->compacts as $iter471)
+  foreach ($this->compacts as $iter480)
   {
-$xfer += $iter471->write($output);
+$xfer += $iter480->write($output);
   }
 }
 $output->writeListEnd();
@@ -15113,14 +15170,14 @@ class AddDynamicPartitions {
 case 4:
   if ($ftype == TType::LST) {
 $this->partitionnames = array();
-$_size472 = 0;
-$_etype475 = 0;
-$xfer += $input->readListBegin($_etype475, $_size472);
- 

[3/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 3c9e038..2d82c92 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -10792,14 +10792,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size560 = 0;
-$_etype563 = 0;
-$xfer += $input->readListBegin($_etype563, $_size560);
-for ($_i564 = 0; $_i564 < $_size560; ++$_i564)
+$_size569 = 0;
+$_etype572 = 0;
+$xfer += $input->readListBegin($_etype572, $_size569);
+for ($_i573 = 0; $_i573 < $_size569; ++$_i573)
 {
-  $elem565 = null;
-  $xfer += $input->readString($elem565);
-  $this->success []= $elem565;
+  $elem574 = null;
+  $xfer += $input->readString($elem574);
+  $this->success []= $elem574;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -10835,9 +10835,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter566)
+  foreach ($this->success as $iter575)
   {
-$xfer += $output->writeString($iter566);
+$xfer += $output->writeString($iter575);
   }
 }
 $output->writeListEnd();
@@ -10968,14 +10968,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size567 = 0;
-$_etype570 = 0;
-$xfer += $input->readListBegin($_etype570, $_size567);
-for ($_i571 = 0; $_i571 < $_size567; ++$_i571)
+$_size576 = 0;
+$_etype579 = 0;
+$xfer += $input->readListBegin($_etype579, $_size576);
+for ($_i580 = 0; $_i580 < $_size576; ++$_i580)
 {
-  $elem572 = null;
-  $xfer += $input->readString($elem572);
-  $this->success []= $elem572;
+  $elem581 = null;
+  $xfer += $input->readString($elem581);
+  $this->success []= $elem581;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -11011,9 +11011,9 @@ class ThriftHiveMetastore_get_all_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter573)
+  foreach ($this->success as $iter582)
   {
-$xfer += $output->writeString($iter573);
+$xfer += $output->writeString($iter582);
   }
 }
 $output->writeListEnd();
@@ -12014,18 +12014,18 @@ class ThriftHiveMetastore_get_type_all_result {
 case 0:
   if ($ftype == TType::MAP) {
 $this->success = array();
-$_size574 = 0;
-$_ktype575 = 0;
-$_vtype576 = 0;
-$xfer += $input->readMapBegin($_ktype575, $_vtype576, $_size574);
-for ($_i578 = 0; $_i578 < $_size574; ++$_i578)
+$_size583 = 0;
+$_ktype584 = 0;
+$_vtype585 = 0;
+$xfer += $input->readMapBegin($_ktype584, $_vtype585, $_size583);
+for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
 {
-  $key579 = '';
-  $val580 = new \metastore\Type();
-  $xfer += $input->readString($key579);
-  $val580 = new \metastore\Type();
-  $xfer += $val580->read($input);
-  $this->success[$key579] = $val580;
+  $key588 = '';
+  $val589 = new \metastore\Type();
+  $xfer += $input->readString($key588);
+  $val589 = new \metastore\Type();
+  $xfer += $val589->read($input);
+  $this->success[$key588] = $val589;
 }
 $xfer += $input->readMapEnd();
   } else {
@@ -12061,10 +12061,10 @@ class ThriftHiveMetastore_get_type_all_result {
   {
 $output->writeMapBegin(TType::STRING, TType::STRUCT, 
count($this->success));
 {
-  foreach ($this->success as $kiter581 => $viter582)
+  foreach ($this->success as $kiter590 => $viter591)
   {
-$xfer += $output->writeString($kiter581);
-$xfer += $viter582->write($output);
+$xfer += $output->writeString($kiter590);
+$xfer += $viter591->write($output);
  

[5/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
index e028ecb..d3fc92a 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
@@ -43,6 +43,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase, SchemeFactory> schemes = 
new HashMap();
   static {
@@ -55,6 +56,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase properties; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -66,7 +68,8 @@ public class CompactionRequest implements 
org.apache.thrift.TBase byName = new HashMap();
 
@@ -91,6 +94,8 @@ public class CompactionRequest implements 
org.apache.thrift.TBase 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -145,6 +150,10 @@ public class CompactionRequest implements 
org.apache.thrift.TBase __this__properties = new 
HashMap<String,String>(other.properties);
+  this.properties = __this__properties;
+}
   }
 
   public CompactionRequest deepCopy() {
@@ -195,6 +208,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase();
+}
+this.properties.put(key, val);
+  }
+
+  public Map<String,String> getProperties() {
+return this.properties;
+  }
+
+  public void setProperties(Map<String,String> properties) {
+this.properties = properties;
+  }
+
+  public void unsetProperties() {
+this.properties = null;
+  }
+
+  /** Returns true if field properties is set (has been assigned a value) and 
false otherwise */
+  public boolean isSetProperties() {
+return this.properties != null;
+  }
+
+  public void setPropertiesIsSet(boolean value) {
+if (!value) {
+  this.properties = null;
+}
+  }
+
   public void setFieldValue(_Fields field, Object value) {
 switch (field) {
 case DBNAME:
@@ -362,6 +410,14 @@ public class CompactionRequest implements 
org.apache.thrift.TBase)value);
+  }
+  break;
+
 }
   }
 
@@ -382,6 +438,9 @@ public class CompactionRequest implements 
org.apache.thrift.TBase(2*_map524.size);
+String _key525;
+String _val526;
+for (int _i527 = 0; _i527 < _map524.size; ++_i527)
+{
+  _key525 = iprot.readString();
+  _val526 = iprot.readString();
+  struct.properties.put(_key525, _val526);
+}
+iprot.readMapEnd();
+  }
+  struct.setPropertiesIsSet(true);
+} else { 
+  org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
+}
+break;
   default:
 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
 }
@@ -758,6 +873,21 @@ public class CompactionRequest implements 
org.apache.thrift.TBase _iter528 : 
struct.properties.entrySet())
+{
+  oprot.writeString(_iter528.getKey());
+  oprot.writeString(_iter528.getValue());
+}
+oprot.writeMapEnd();
+  }
+  oprot.writeFieldEnd();
+}
+  }
   oprot.writeFieldStop();
   oprot.writeStructEnd();
 }
@@ -785,13 +915,26 @@ public class CompactionRequest implements 
org.apache.thrift.TBase _iter529 : 
struct.properties.entrySet())
+  {
+oprot.writeString(_iter529.getKey());
+oprot.writeString(_iter529.getValue());
+  }
+}
+  }
 }
 
 @Override
@@ -803,7 +946,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase(2*_map530.size);
+  String _key531;
+  String _val532;
+  for (int _i533 = 0; _i533 < _map530.size; ++_i533)
+  {
+_key531 = iprot.readString();
+_val532 = iprot.readString();
+struct.properties.put(_key531, _val532);
+  }
+}
+struct.setPropertiesIsSet(true);
+  }
 }
   }
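
The generated accessors above (isSetProperties, getProperties, setProperties, unsetProperties) follow the usual Thrift optional-field pattern. A minimal consumer-side sketch that guards on the optional field, assuming only the methods shown in this diff:

    import java.util.Collections;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.CompactionRequest;

    public class CompactionRequestReader {
      // Return the optional properties map, or an empty map when field 6 was not sent.
      public static Map<String, String> propertiesOrEmpty(CompactionRequest req) {
        if (req.isSetProperties()) {
          return req.getProperties();
        }
        return Collections.<String, String>emptyMap();
      }
    }
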
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
--
diff --git 

[1/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 4b553358b -> e276929df


http://git-wip-us.apache.org/repos/asf/hive/blob/e276929d/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
--
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py 
b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 4db9680..8d88cd7 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -9984,6 +9984,7 @@ class CompactionRequest:
- partitionname
- type
- runas
+   - properties
   """
 
   thrift_spec = (
@@ -9993,14 +9994,16 @@ class CompactionRequest:
 (3, TType.STRING, 'partitionname', None, None, ), # 3
 (4, TType.I32, 'type', None, None, ), # 4
 (5, TType.STRING, 'runas', None, None, ), # 5
+(6, TType.MAP, 'properties', (TType.STRING,None,TType.STRING,None), None, 
), # 6
   )
 
-  def __init__(self, dbname=None, tablename=None, partitionname=None, 
type=None, runas=None,):
+  def __init__(self, dbname=None, tablename=None, partitionname=None, 
type=None, runas=None, properties=None,):
 self.dbname = dbname
 self.tablename = tablename
 self.partitionname = partitionname
 self.type = type
 self.runas = runas
+self.properties = properties
 
   def read(self, iprot):
 if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None:
@@ -10036,6 +10039,17 @@ class CompactionRequest:
   self.runas = iprot.readString()
 else:
   iprot.skip(ftype)
+  elif fid == 6:
+if ftype == TType.MAP:
+  self.properties = {}
+  (_ktype463, _vtype464, _size462 ) = iprot.readMapBegin()
+  for _i466 in xrange(_size462):
+_key467 = iprot.readString()
+_val468 = iprot.readString()
+self.properties[_key467] = _val468
+  iprot.readMapEnd()
+else:
+  iprot.skip(ftype)
   else:
 iprot.skip(ftype)
   iprot.readFieldEnd()
@@ -10066,6 +10080,14 @@ class CompactionRequest:
   oprot.writeFieldBegin('runas', TType.STRING, 5)
   oprot.writeString(self.runas)
   oprot.writeFieldEnd()
+if self.properties is not None:
+  oprot.writeFieldBegin('properties', TType.MAP, 6)
+  oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
+  for kiter469,viter470 in self.properties.items():
+oprot.writeString(kiter469)
+oprot.writeString(viter470)
+  oprot.writeMapEnd()
+  oprot.writeFieldEnd()
 oprot.writeFieldStop()
 oprot.writeStructEnd()
 
@@ -10086,6 +10108,7 @@ class CompactionRequest:
 value = (value * 31) ^ hash(self.partitionname)
 value = (value * 31) ^ hash(self.type)
 value = (value * 31) ^ hash(self.runas)
+value = (value * 31) ^ hash(self.properties)
 return value
 
   def __repr__(self):
@@ -10387,11 +10410,11 @@ class ShowCompactResponse:
   if fid == 1:
 if ftype == TType.LIST:
   self.compacts = []
-  (_etype465, _size462) = iprot.readListBegin()
-  for _i466 in xrange(_size462):
-_elem467 = ShowCompactResponseElement()
-_elem467.read(iprot)
-self.compacts.append(_elem467)
+  (_etype474, _size471) = iprot.readListBegin()
+  for _i475 in xrange(_size471):
+_elem476 = ShowCompactResponseElement()
+_elem476.read(iprot)
+self.compacts.append(_elem476)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -10408,8 +10431,8 @@ class ShowCompactResponse:
 if self.compacts is not None:
   oprot.writeFieldBegin('compacts', TType.LIST, 1)
   oprot.writeListBegin(TType.STRUCT, len(self.compacts))
-  for iter468 in self.compacts:
-iter468.write(oprot)
+  for iter477 in self.compacts:
+iter477.write(oprot)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 oprot.writeFieldStop()
@@ -10490,10 +10513,10 @@ class AddDynamicPartitions:
   elif fid == 4:
 if ftype == TType.LIST:
   self.partitionnames = []
-  (_etype472, _size469) = iprot.readListBegin()
-  for _i473 in xrange(_size469):
-_elem474 = iprot.readString()
-self.partitionnames.append(_elem474)
+  (_etype481, _size478) = iprot.readListBegin()
+  for _i482 in xrange(_size478):
+_elem483 = iprot.readString()
+self.partitionnames.append(_elem483)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -10527,8 +10550,8 @@ class AddDynamicPartitions:
 if self.partitionnames is not None:
   oprot.writeFieldBegin('partitionnames', TType.LIST, 4)
   oprot.writeListBegin(TType.STRING, len(self.partitionnames))

[2/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php 
b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 045864a..925eea4 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -12797,6 +12797,10 @@ class CompactionRequest {
* @var string
*/
   public $runas = null;
+  /**
+   * @var array
+   */
+  public $properties = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -12821,6 +12825,18 @@ class CompactionRequest {
   'var' => 'runas',
   'type' => TType::STRING,
   ),
+6 => array(
+  'var' => 'properties',
+  'type' => TType::MAP,
+  'ktype' => TType::STRING,
+  'vtype' => TType::STRING,
+  'key' => array(
+'type' => TType::STRING,
+  ),
+  'val' => array(
+'type' => TType::STRING,
+),
+  ),
 );
 }
 if (is_array($vals)) {
@@ -12839,6 +12855,9 @@ class CompactionRequest {
   if (isset($vals['runas'])) {
 $this->runas = $vals['runas'];
   }
+  if (isset($vals['properties'])) {
+$this->properties = $vals['properties'];
+  }
 }
   }
 
@@ -12896,6 +12915,26 @@ class CompactionRequest {
 $xfer += $input->skip($ftype);
   }
   break;
+case 6:
+  if ($ftype == TType::MAP) {
+$this->properties = array();
+$_size437 = 0;
+$_ktype438 = 0;
+$_vtype439 = 0;
+$xfer += $input->readMapBegin($_ktype438, $_vtype439, $_size437);
+for ($_i441 = 0; $_i441 < $_size437; ++$_i441)
+{
+  $key442 = '';
+  $val443 = '';
+  $xfer += $input->readString($key442);
+  $xfer += $input->readString($val443);
+  $this->properties[$key442] = $val443;
+}
+$xfer += $input->readMapEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
 default:
   $xfer += $input->skip($ftype);
   break;
@@ -12934,6 +12973,24 @@ class CompactionRequest {
   $xfer += $output->writeString($this->runas);
   $xfer += $output->writeFieldEnd();
 }
+if ($this->properties !== null) {
+  if (!is_array($this->properties)) {
+throw new TProtocolException('Bad type in structure.', 
TProtocolException::INVALID_DATA);
+  }
+  $xfer += $output->writeFieldBegin('properties', TType::MAP, 6);
+  {
+$output->writeMapBegin(TType::STRING, TType::STRING, 
count($this->properties));
+{
+  foreach ($this->properties as $kiter444 => $viter445)
+  {
+$xfer += $output->writeString($kiter444);
+$xfer += $output->writeString($viter445);
+  }
+}
+$output->writeMapEnd();
+  }
+  $xfer += $output->writeFieldEnd();
+}
 $xfer += $output->writeFieldStop();
 $xfer += $output->writeStructEnd();
 return $xfer;
@@ -13370,15 +13427,15 @@ class ShowCompactResponse {
 case 1:
   if ($ftype == TType::LST) {
 $this->compacts = array();
-$_size437 = 0;
-$_etype440 = 0;
-$xfer += $input->readListBegin($_etype440, $_size437);
-for ($_i441 = 0; $_i441 < $_size437; ++$_i441)
+$_size446 = 0;
+$_etype449 = 0;
+$xfer += $input->readListBegin($_etype449, $_size446);
+for ($_i450 = 0; $_i450 < $_size446; ++$_i450)
 {
-  $elem442 = null;
-  $elem442 = new \metastore\ShowCompactResponseElement();
-  $xfer += $elem442->read($input);
-  $this->compacts []= $elem442;
+  $elem451 = null;
+  $elem451 = new \metastore\ShowCompactResponseElement();
+  $xfer += $elem451->read($input);
+  $this->compacts []= $elem451;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -13406,9 +13463,9 @@ class ShowCompactResponse {
   {
 $output->writeListBegin(TType::STRUCT, count($this->compacts));
 {
-  foreach ($this->compacts as $iter443)
+  foreach ($this->compacts as $iter452)
   {
-$xfer += $iter443->write($output);
+$xfer += $iter452->write($output);
   }
 }
 $output->writeListEnd();
@@ -13537,14 +13594,14 @@ class AddDynamicPartitions {
 case 4:
   if ($ftype == TType::LST) {
 $this->partitionnames = array();
-$_size444 = 0;
-$_etype447 = 0;
-$xfer += $input->readListBegin($_etype447, $_size444);
- 

[7/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
HIVE-13354 : Add ability to specify Compaction options per table and per 
request (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6e0504d9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6e0504d9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6e0504d9

Branch: refs/heads/branch-1
Commit: 6e0504d9a8552471f7137a513abe2a0a15c124de
Parents: abaf882
Author: Wei Zheng 
Authored: Fri May 27 15:13:53 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 27 15:13:53 2016 -0700

--
 .../hive/ql/txn/compactor/TestCompactor.java|  164 ++
 metastore/if/hive_metastore.thrift  |1 +
 .../upgrade/derby/036-HIVE-13354.derby.sql  |2 +
 .../derby/hive-txn-schema-1.3.0.derby.sql   |2 +
 .../derby/upgrade-1.2.0-to-1.3.0.derby.sql  |1 +
 .../upgrade/mssql/021-HIVE-13354.mssql.sql  |2 +
 .../upgrade/mssql/hive-schema-1.3.0.mssql.sql   |2 +
 .../mssql/upgrade-1.2.0-to-1.3.0.mssql.sql  |1 +
 .../upgrade/mysql/036-HIVE-13354.mysql.sql  |2 +
 .../mysql/hive-txn-schema-1.3.0.mysql.sql   |2 +
 .../mysql/upgrade-1.2.0-to-1.3.0.mysql.sql  |1 +
 .../upgrade/oracle/036-HIVE-13354.oracle.sql|2 +
 .../oracle/hive-txn-schema-1.3.0.oracle.sql |2 +
 .../oracle/upgrade-1.2.0-to-1.3.0.oracle.sql|1 +
 .../postgres/035-HIVE-13354.postgres.sql|2 +
 .../postgres/hive-txn-schema-1.3.0.postgres.sql |2 +
 .../upgrade-1.2.0-to-1.3.0.postgres.sql |1 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 1814 
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  640 +++---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   10 +-
 .../metastore/api/AddDynamicPartitions.java |   32 +-
 .../hive/metastore/api/CompactionRequest.java   |  166 +-
 .../hive/metastore/api/FireEventRequest.java|   32 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../metastore/api/InsertEventRequestData.java   |   32 +-
 .../api/NotificationEventResponse.java  |   36 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 1948 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1098 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  171 +-
 .../hive_metastore/ThriftHiveMetastore.py   |  738 +++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  103 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../hive/metastore/HiveMetaStoreClient.java |   14 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   18 +
 .../hive/metastore/txn/CompactionInfo.java  |   30 +-
 .../metastore/txn/CompactionTxnHandler.java |   11 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|2 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |8 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   54 +
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |2 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|6 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |5 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|4 +-
 .../hive/ql/plan/AlterTableSimpleDesc.java  |8 +
 .../hive/ql/txn/compactor/CompactorMR.java  |  121 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |   22 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java|9 +
 .../hive/ql/txn/compactor/TestWorker.java   |9 +-
 49 files changed, 4016 insertions(+), 3393 deletions(-)
--
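
The HiveParser.g and DDLSemanticAnalyzer changes listed above wire the new request properties into ALTER TABLE ... COMPACT. A hedged JDBC sketch follows; the WITH OVERWRITE TBLPROPERTIES clause and the property key are assumptions, since the grammar itself is not shown in this excerpt, and the connection URL is illustrative.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class CompactWithProperties {
      public static void main(String[] args) throws SQLException {
        // Assumes the HiveServer2 JDBC driver is on the classpath; URL is illustrative.
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = conn.createStatement()) {
          stmt.execute("ALTER TABLE scratch COMPACT 'major' "
              + "WITH OVERWRITE TBLPROPERTIES ('compactor.mapreduce.map.memory.mb'='2048')");
        }
      }
    }
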


http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index 37bbab8..9c8bcc1 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -14,6 +14,7 @@ import 
org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.CompactionRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
@@ -34,6 +35,7 @@ import 

[4/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index e836154..957a256 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -25539,13 +25539,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list540 = 
iprot.readListBegin();
-  struct.success = new ArrayList<String>(_list540.size);
-  String _elem541;
-  for (int _i542 = 0; _i542 < _list540.size; ++_i542)
+  org.apache.thrift.protocol.TList _list550 = 
iprot.readListBegin();
+  struct.success = new ArrayList<String>(_list550.size);
+  String _elem551;
+  for (int _i552 = 0; _i552 < _list550.size; ++_i552)
   {
-_elem541 = iprot.readString();
-struct.success.add(_elem541);
+_elem551 = iprot.readString();
+struct.success.add(_elem551);
   }
   iprot.readListEnd();
 }
@@ -25580,9 +25580,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
 oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size()));
-for (String _iter543 : struct.success)
+for (String _iter553 : struct.success)
 {
-  oprot.writeString(_iter543);
+  oprot.writeString(_iter553);
 }
 oprot.writeListEnd();
   }
@@ -25621,9 +25621,9 @@ public class ThriftHiveMetastore {
 if (struct.isSetSuccess()) {
   {
 oprot.writeI32(struct.success.size());
-for (String _iter544 : struct.success)
+for (String _iter554 : struct.success)
 {
-  oprot.writeString(_iter544);
+  oprot.writeString(_iter554);
 }
   }
 }
@@ -25638,13 +25638,13 @@ public class ThriftHiveMetastore {
 BitSet incoming = iprot.readBitSet(2);
 if (incoming.get(0)) {
   {
-org.apache.thrift.protocol.TList _list545 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
-struct.success = new ArrayList<String>(_list545.size);
-String _elem546;
-for (int _i547 = 0; _i547 < _list545.size; ++_i547)
+org.apache.thrift.protocol.TList _list555 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
+struct.success = new ArrayList<String>(_list555.size);
+String _elem556;
+for (int _i557 = 0; _i557 < _list555.size; ++_i557)
 {
-  _elem546 = iprot.readString();
-  struct.success.add(_elem546);
+  _elem556 = iprot.readString();
+  struct.success.add(_elem556);
 }
   }
   struct.setSuccessIsSet(true);
@@ -26298,13 +26298,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list548 = 
iprot.readListBegin();
-  struct.success = new ArrayList<String>(_list548.size);
-  String _elem549;
-  for (int _i550 = 0; _i550 < _list548.size; ++_i550)
+  org.apache.thrift.protocol.TList _list558 = 
iprot.readListBegin();
+  struct.success = new ArrayList<String>(_list558.size);
+  String _elem559;
+  for (int _i560 = 0; _i560 < _list558.size; ++_i560)
   {
-_elem549 = iprot.readString();
-struct.success.add(_elem549);
+_elem559 = iprot.readString();
+struct.success.add(_elem559);
   }
   iprot.readListEnd();
 }
@@ -26339,9 +26339,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
 oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size()));
-for 

[1/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/branch-1 abaf88248 -> 6e0504d9a


http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index f8798b7..0601a29 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@ -169,7 +169,7 @@ class CompactionTxnHandler extends TxnHandler {
 dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
 stmt = dbConn.createStatement();
 String s = "select cq_id, cq_database, cq_table, cq_partition, " +
-  "cq_type from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE 
+ "'";
+  "cq_type, cq_tblproperties from COMPACTION_QUEUE where cq_state = '" 
+ INITIATED_STATE + "'";
 LOG.debug("Going to execute query <" + s + ">");
 rs = stmt.executeQuery(s);
 if (!rs.next()) {
@@ -185,6 +185,7 @@ class CompactionTxnHandler extends TxnHandler {
   info.tableName = rs.getString(3);
   info.partName = rs.getString(4);
   info.type = dbCompactionType2ThriftType(rs.getString(5).charAt(0));
+  info.properties = rs.getString(6);
   // Now, update this record as being worked on by this worker.
   long now = getDbTime(dbConn);
   s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', 
" +
@@ -329,7 +330,7 @@ class CompactionTxnHandler extends TxnHandler {
   try {
 dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
 stmt = dbConn.createStatement();
-rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, 
CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, 
CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE 
CQ_ID = " + info.id);
+rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, 
CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, 
CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from 
COMPACTION_QUEUE WHERE CQ_ID = " + info.id);
 if(rs.next()) {
   info = CompactionInfo.loadFullFromCompactionQueue(rs);
 }
@@ -345,7 +346,7 @@ class CompactionTxnHandler extends TxnHandler {
   LOG.debug("Going to rollback");
   dbConn.rollback();
 }
-pStmt = dbConn.prepareStatement("insert into 
COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, 
CC_TYPE, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, 
CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?)");
+pStmt = dbConn.prepareStatement("insert into 
COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, 
CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, 
CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, 
?,?,?,?)");
 info.state = SUCCEEDED_STATE;
 CompactionInfo.insertIntoCompletedCompactions(pStmt, info, 
getDbTime(dbConn));
 updCount = pStmt.executeUpdate();
@@ -838,7 +839,7 @@ class CompactionTxnHandler extends TxnHandler {
   try {
 dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
 stmt = dbConn.createStatement();
-rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, 
CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, 
CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE 
CQ_ID = " + ci.id);
+rs = stmt.executeQuery("select CQ_ID, CQ_DATABASE, CQ_TABLE, 
CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, 
CQ_RUN_AS, CQ_HIGHEST_TXN_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from 
COMPACTION_QUEUE WHERE CQ_ID = " + ci.id);
 if(rs.next()) {
   ci = CompactionInfo.loadFullFromCompactionQueue(rs);
   String s = "delete from COMPACTION_QUEUE where cq_id = " + ci.id;
@@ -866,7 +867,7 @@ class CompactionTxnHandler extends TxnHandler {
 }
 close(rs, stmt, null);
 
-pStmt = dbConn.prepareStatement("insert into 
COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, 
CC_TYPE, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_TXN_ID, 
CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?)");
+pStmt = dbConn.prepareStatement("insert into 
COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, 
CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, 
CC_HIGHEST_TXN_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, 
?,?,?,?)");
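
The queries above now read and write a CQ_TBLPROPERTIES / CC_TBLPROPERTIES column on the compaction tables; the 036-HIVE-13354 upgrade scripts in the diffstat add it. A hedged sketch of that schema change run against the metastore backing database follows; the column width and the ADD COLUMN dialect are assumptions, not taken from those scripts.

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class AddTblPropertiesColumns {
      public static void upgrade(Connection metastoreDb) throws SQLException {
        try (Statement stmt = metastoreDb.createStatement()) {
          // Column names come from the queries above; VARCHAR(2048) is an assumed width.
          stmt.executeUpdate("ALTER TABLE COMPACTION_QUEUE ADD COLUMN CQ_TBLPROPERTIES VARCHAR(2048)");
          stmt.executeUpdate("ALTER TABLE COMPLETED_COMPACTIONS ADD COLUMN CC_TBLPROPERTIES VARCHAR(2048)");
        }
      }
    }
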
 

[5/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp 
b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 9e2e883..f2a715a 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -12834,6 +12834,11 @@ void CompactionRequest::__set_runas(const std::string& 
val) {
 __isset.runas = true;
 }
 
+void CompactionRequest::__set_properties(const std::map<std::string, std::string> & val) {
+  this->properties = val;
+__isset.properties = true;
+}
+
 uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) 
{
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -12900,6 +12905,29 @@ uint32_t 
CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
   xfer += iprot->skip(ftype);
 }
 break;
+  case 6:
+if (ftype == ::apache::thrift::protocol::T_MAP) {
+  {
+this->properties.clear();
+uint32_t _size561;
+::apache::thrift::protocol::TType _ktype562;
+::apache::thrift::protocol::TType _vtype563;
+xfer += iprot->readMapBegin(_ktype562, _vtype563, _size561);
+uint32_t _i565;
+for (_i565 = 0; _i565 < _size561; ++_i565)
+{
+  std::string _key566;
+  xfer += iprot->readString(_key566);
+  std::string& _val567 = this->properties[_key566];
+  xfer += iprot->readString(_val567);
+}
+xfer += iprot->readMapEnd();
+  }
+  this->__isset.properties = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
   default:
 xfer += iprot->skip(ftype);
 break;
@@ -12945,6 +12973,20 @@ uint32_t 
CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot)
 xfer += oprot->writeString(this->runas);
 xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.properties) {
+xfer += oprot->writeFieldBegin("properties", 
::apache::thrift::protocol::T_MAP, 6);
+{
+  xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, 
::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->properties.size()));
+  std::map<std::string, std::string> ::const_iterator _iter568;
+  for (_iter568 = this->properties.begin(); _iter568 != 
this->properties.end(); ++_iter568)
+  {
+xfer += oprot->writeString(_iter568->first);
+xfer += oprot->writeString(_iter568->second);
+  }
+  xfer += oprot->writeMapEnd();
+}
+xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -12957,24 +12999,27 @@ void swap(CompactionRequest , CompactionRequest ) 
{
   swap(a.partitionname, b.partitionname);
   swap(a.type, b.type);
   swap(a.runas, b.runas);
+  swap(a.properties, b.properties);
   swap(a.__isset, b.__isset);
 }
 
-CompactionRequest::CompactionRequest(const CompactionRequest& other561) {
-  dbname = other561.dbname;
-  tablename = other561.tablename;
-  partitionname = other561.partitionname;
-  type = other561.type;
-  runas = other561.runas;
-  __isset = other561.__isset;
-}
-CompactionRequest& CompactionRequest::operator=(const CompactionRequest& 
other562) {
-  dbname = other562.dbname;
-  tablename = other562.tablename;
-  partitionname = other562.partitionname;
-  type = other562.type;
-  runas = other562.runas;
-  __isset = other562.__isset;
+CompactionRequest::CompactionRequest(const CompactionRequest& other569) {
+  dbname = other569.dbname;
+  tablename = other569.tablename;
+  partitionname = other569.partitionname;
+  type = other569.type;
+  runas = other569.runas;
+  properties = other569.properties;
+  __isset = other569.__isset;
+}
+CompactionRequest& CompactionRequest::operator=(const CompactionRequest& 
other570) {
+  dbname = other570.dbname;
+  tablename = other570.tablename;
+  partitionname = other570.partitionname;
+  type = other570.type;
+  runas = other570.runas;
+  properties = other570.properties;
+  __isset = other570.__isset;
   return *this;
 }
 void CompactionRequest::printTo(std::ostream& out) const {
@@ -12985,6 +13030,7 @@ void CompactionRequest::printTo(std::ostream& out) 
const {
   out << ", " << "partitionname="; (__isset.partitionname ? (out << 
to_string(partitionname)) : (out << ""));
   out << ", " << "type=" << to_string(type);
   out << ", " << "runas="; (__isset.runas ? (out << to_string(runas)) : (out 
<< ""));
+  out << ", " << "properties="; (__isset.properties ? (out << 
to_string(properties)) : (out << ""));
   out << ")";
 }
 
@@ -13037,11 +13083,11 @@ void swap(ShowCompactRequest , ShowCompactRequest 
) {
   (void) b;
 }
 

[6/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 028c647..c2a208f 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size659;
-::apache::thrift::protocol::TType _etype662;
-xfer += iprot->readListBegin(_etype662, _size659);
-this->success.resize(_size659);
-uint32_t _i663;
-for (_i663 = 0; _i663 < _size659; ++_i663)
+uint32_t _size667;
+::apache::thrift::protocol::TType _etype670;
+xfer += iprot->readListBegin(_etype670, _size667);
+this->success.resize(_size667);
+uint32_t _i671;
+for (_i671 = 0; _i671 < _size667; ++_i671)
 {
-  xfer += iprot->readString(this->success[_i663]);
+  xfer += iprot->readString(this->success[_i671]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter664;
-  for (_iter664 = this->success.begin(); _iter664 != this->success.end(); 
++_iter664)
+  std::vector<std::string> ::const_iterator _iter672;
+  for (_iter672 = this->success.begin(); _iter672 != this->success.end(); 
++_iter672)
   {
-xfer += oprot->writeString((*_iter664));
+xfer += oprot->writeString((*_iter672));
   }
   xfer += oprot->writeListEnd();
 }
@@ -1334,14 +1334,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size665;
-::apache::thrift::protocol::TType _etype668;
-xfer += iprot->readListBegin(_etype668, _size665);
-(*(this->success)).resize(_size665);
-uint32_t _i669;
-for (_i669 = 0; _i669 < _size665; ++_i669)
+uint32_t _size673;
+::apache::thrift::protocol::TType _etype676;
+xfer += iprot->readListBegin(_etype676, _size673);
+(*(this->success)).resize(_size673);
+uint32_t _i677;
+for (_i677 = 0; _i677 < _size673; ++_i677)
 {
-  xfer += iprot->readString((*(this->success))[_i669]);
+  xfer += iprot->readString((*(this->success))[_i677]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size670;
-::apache::thrift::protocol::TType _etype673;
-xfer += iprot->readListBegin(_etype673, _size670);
-this->success.resize(_size670);
-uint32_t _i674;
-for (_i674 = 0; _i674 < _size670; ++_i674)
+uint32_t _size678;
+::apache::thrift::protocol::TType _etype681;
+xfer += iprot->readListBegin(_etype681, _size678);
+this->success.resize(_size678);
+uint32_t _i682;
+for (_i682 = 0; _i682 < _size678; ++_i682)
 {
-  xfer += iprot->readString(this->success[_i674]);
+  xfer += iprot->readString(this->success[_i682]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast<uint32_t>(this->success.size()));
-  std::vector<std::string> ::const_iterator _iter675;
-  for (_iter675 = this->success.begin(); _iter675 != this->success.end(); 
++_iter675)
+  std::vector<std::string> ::const_iterator _iter683;
+  for (_iter683 = this->success.begin(); _iter683 != this->success.end(); 
++_iter683)
   {
-xfer += oprot->writeString((*_iter675));
+xfer += oprot->writeString((*_iter683));
   }
   xfer += oprot->writeListEnd();
 }
@@ 

[3/7] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/6e0504d9/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 438e368..c85150d 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -9428,14 +9428,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size479 = 0;
-$_etype482 = 0;
-$xfer += $input->readListBegin($_etype482, $_size479);
-for ($_i483 = 0; $_i483 < $_size479; ++$_i483)
+$_size488 = 0;
+$_etype491 = 0;
+$xfer += $input->readListBegin($_etype491, $_size488);
+for ($_i492 = 0; $_i492 < $_size488; ++$_i492)
 {
-  $elem484 = null;
-  $xfer += $input->readString($elem484);
-  $this->success []= $elem484;
+  $elem493 = null;
+  $xfer += $input->readString($elem493);
+  $this->success []= $elem493;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -9471,9 +9471,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter485)
+  foreach ($this->success as $iter494)
   {
-$xfer += $output->writeString($iter485);
+$xfer += $output->writeString($iter494);
   }
 }
 $output->writeListEnd();
@@ -9604,14 +9604,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size486 = 0;
-$_etype489 = 0;
-$xfer += $input->readListBegin($_etype489, $_size486);
-for ($_i490 = 0; $_i490 < $_size486; ++$_i490)
+$_size495 = 0;
+$_etype498 = 0;
+$xfer += $input->readListBegin($_etype498, $_size495);
+for ($_i499 = 0; $_i499 < $_size495; ++$_i499)
 {
-  $elem491 = null;
-  $xfer += $input->readString($elem491);
-  $this->success []= $elem491;
+  $elem500 = null;
+  $xfer += $input->readString($elem500);
+  $this->success []= $elem500;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -9647,9 +9647,9 @@ class ThriftHiveMetastore_get_all_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter492)
+  foreach ($this->success as $iter501)
   {
-$xfer += $output->writeString($iter492);
+$xfer += $output->writeString($iter501);
   }
 }
 $output->writeListEnd();
@@ -10650,18 +10650,18 @@ class ThriftHiveMetastore_get_type_all_result {
 case 0:
   if ($ftype == TType::MAP) {
 $this->success = array();
-$_size493 = 0;
-$_ktype494 = 0;
-$_vtype495 = 0;
-$xfer += $input->readMapBegin($_ktype494, $_vtype495, $_size493);
-for ($_i497 = 0; $_i497 < $_size493; ++$_i497)
+$_size502 = 0;
+$_ktype503 = 0;
+$_vtype504 = 0;
+$xfer += $input->readMapBegin($_ktype503, $_vtype504, $_size502);
+for ($_i506 = 0; $_i506 < $_size502; ++$_i506)
 {
-  $key498 = '';
-  $val499 = new \metastore\Type();
-  $xfer += $input->readString($key498);
-  $val499 = new \metastore\Type();
-  $xfer += $val499->read($input);
-  $this->success[$key498] = $val499;
+  $key507 = '';
+  $val508 = new \metastore\Type();
+  $xfer += $input->readString($key507);
+  $val508 = new \metastore\Type();
+  $xfer += $val508->read($input);
+  $this->success[$key507] = $val508;
 }
 $xfer += $input->readMapEnd();
   } else {
@@ -10697,10 +10697,10 @@ class ThriftHiveMetastore_get_type_all_result {
   {
 $output->writeMapBegin(TType::STRING, TType::STRUCT, 
count($this->success));
 {
-  foreach ($this->success as $kiter500 => $viter501)
+  foreach ($this->success as $kiter509 => $viter510)
   {
-$xfer += $output->writeString($kiter500);
-$xfer += $viter501->write($output);
+$xfer += $output->writeString($kiter509);
+$xfer += $viter510->write($output);
   }
 

hive git commit: HIVE-13511. Run clidriver tests from within the qtest dir for the precommit tests. (Siddharth Seth, reviewed by Ashutosh Chauhan)

2016-05-27 Thread sseth
Repository: hive
Updated Branches:
  refs/heads/master c57a59611 -> 02b2fb5a9


HIVE-13511. Run clidriver tests from within the qtest dir for the precommit 
tests. (Siddharth Seth, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/02b2fb5a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/02b2fb5a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/02b2fb5a

Branch: refs/heads/master
Commit: 02b2fb5a991b3dada90b442dbb013d466c3a50b0
Parents: c57a596
Author: Siddharth Seth 
Authored: Fri May 27 14:04:55 2016 -0700
Committer: Siddharth Seth 
Committed: Fri May 27 14:04:55 2016 -0700

--
 .../hive/ptest/execution/HostExecutor.java  |  4 
 .../org/apache/hive/ptest/execution/PTest.java  |  1 +
 .../ptest/execution/conf/QFileTestBatch.java| 12 ++--
 .../hive/ptest/execution/conf/TestBatch.java|  1 +
 .../hive/ptest/execution/conf/TestParser.java   | 20 +---
 .../ptest/execution/conf/UnitTestBatch.java |  6 ++
 .../ptest/execution/TestExecutionPhase.java |  4 +++-
 .../execution/conf/TestQFileTestBatch.java  | 12 +---
 8 files changed, 51 insertions(+), 9 deletions(-)
--
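
The HostExecutor change below threads an optional "testModule" template variable into the per-batch script so that qfile batches can run from inside their module (for example the qtest dir) rather than from the source root. A hedged sketch of how a runner could consume that variable; the maven invocation and directory handling are assumptions, not the actual batch-exec.vm contents.

    import java.util.Map;

    public class BatchCommandSketch {
      // Build a shell command for one test batch from the template variables set in
      // HostExecutor; "testArguments" is set in the existing code, "testModule" only
      // when the batch carries a module name.
      public static String buildCommand(Map<String, String> templateVariables) {
        String module = templateVariables.get("testModule");
        String cd = (module == null || module.isEmpty()) ? "" : "cd " + module + " && ";
        return cd + "mvn -B test " + templateVariables.get("testArguments");
      }
    }
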


http://git-wip-us.apache.org/repos/asf/hive/blob/02b2fb5a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
--
diff --git 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
index 735b261..2c9100e 100644
--- 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
+++ 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
@@ -40,6 +40,7 @@ import org.apache.hive.ptest.execution.ssh.SSHCommand;
 import org.apache.hive.ptest.execution.ssh.SSHCommandExecutor;
 import org.apache.hive.ptest.execution.ssh.SSHExecutionException;
 import org.apache.hive.ptest.execution.ssh.SSHResult;
+import org.apache.logging.log4j.util.Strings;
 import org.slf4j.Logger;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -243,6 +244,9 @@ class HostExecutor {
 templateVariables.put("testArguments", batch.getTestArguments());
 templateVariables.put("localDir", drone.getLocalDirectory());
 templateVariables.put("logDir", drone.getLocalLogDirectory());
+if (!Strings.isEmpty(batch.getTestModule())) {
+  templateVariables.put("testModule", batch.getTestModule());
+}
 String command = Templates.getTemplateResult("bash 
$localDir/$instanceName/scratch/" + script.getName(),
 templateVariables);
 Templates.writeTemplateResult("batch-exec.vm", script, templateVariables);

http://git-wip-us.apache.org/repos/asf/hive/blob/02b2fb5a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
--
diff --git 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
index 35cc752..de5c322 100644
--- a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
+++ b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
@@ -164,6 +164,7 @@ public class PTest {
 Map elapsedTimes = Maps.newTreeMap();
 try {
   mLogger.info("Running tests with " + mConfiguration);
+  mLogger.info("Running tests with configuration context=[{}]", 
mConfiguration.getContext());
   for(Phase phase : mPhases) {
 String msg = "Executing " + phase.getClass().getName();
 mLogger.info(msg);

http://git-wip-us.apache.org/repos/asf/hive/blob/02b2fb5a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
--
diff --git 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
index 61ecc88..fa213db 100644
--- 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
+++ 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
@@ -29,10 +29,11 @@ public class QFileTestBatch implements TestBatch {
   private final String driver;
   private final String queryFilesProperty;
   private final String name;
+  private final String moduleName;
   private final Set tests;
   private final boolean isParallel;
   public QFileTestBatch(String testCasePropertyName, String driver, 
-  String queryFilesProperty, Set tests, boolean 

hive git commit: HIVE-13511. Run clidriver tests from within the qtest dir for the precommit tests. (Siddharth Seth, reviewed by Ashutosh Chauhan) (cherry picked from commit 02b2fb5a991b3dada90b442dbb

2016-05-27 Thread sseth
Repository: hive
Updated Branches:
  refs/heads/branch-2.1 f547a6420 -> 4b553358b


HIVE-13511. Run clidriver tests from within the qtest dir for the precommit 
tests. (Siddharth Seth, reviewed by Ashutosh Chauhan)
(cherry picked from commit 02b2fb5a991b3dada90b442dbb013d466c3a50b0)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4b553358
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4b553358
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4b553358

Branch: refs/heads/branch-2.1
Commit: 4b553358b0cfa19a1f1eb7eebf1b82f20f9b3e81
Parents: f547a64
Author: Siddharth Seth 
Authored: Fri May 27 14:04:55 2016 -0700
Committer: Siddharth Seth 
Committed: Fri May 27 14:10:56 2016 -0700

--
 .../hive/ptest/execution/HostExecutor.java  |  4 
 .../org/apache/hive/ptest/execution/PTest.java  |  1 +
 .../ptest/execution/conf/QFileTestBatch.java| 12 ++--
 .../hive/ptest/execution/conf/TestBatch.java|  1 +
 .../hive/ptest/execution/conf/TestParser.java   | 20 +---
 .../ptest/execution/conf/UnitTestBatch.java |  6 ++
 .../ptest/execution/TestExecutionPhase.java |  4 +++-
 .../execution/conf/TestQFileTestBatch.java  | 12 +---
 8 files changed, 51 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4b553358/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
--
diff --git 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
index 735b261..2c9100e 100644
--- 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
+++ 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutor.java
@@ -40,6 +40,7 @@ import org.apache.hive.ptest.execution.ssh.SSHCommand;
 import org.apache.hive.ptest.execution.ssh.SSHCommandExecutor;
 import org.apache.hive.ptest.execution.ssh.SSHExecutionException;
 import org.apache.hive.ptest.execution.ssh.SSHResult;
+import org.apache.logging.log4j.util.Strings;
 import org.slf4j.Logger;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -243,6 +244,9 @@ class HostExecutor {
 templateVariables.put("testArguments", batch.getTestArguments());
 templateVariables.put("localDir", drone.getLocalDirectory());
 templateVariables.put("logDir", drone.getLocalLogDirectory());
+if (!Strings.isEmpty(batch.getTestModule())) {
+  templateVariables.put("testModule", batch.getTestModule());
+}
 String command = Templates.getTemplateResult("bash 
$localDir/$instanceName/scratch/" + script.getName(),
 templateVariables);
 Templates.writeTemplateResult("batch-exec.vm", script, templateVariables);

http://git-wip-us.apache.org/repos/asf/hive/blob/4b553358/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
--
diff --git 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
index 35cc752..de5c322 100644
--- a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
+++ b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
@@ -164,6 +164,7 @@ public class PTest {
 Map elapsedTimes = Maps.newTreeMap();
 try {
   mLogger.info("Running tests with " + mConfiguration);
+  mLogger.info("Running tests with configuration context=[{}]", 
mConfiguration.getContext());
   for(Phase phase : mPhases) {
 String msg = "Executing " + phase.getClass().getName();
 mLogger.info(msg);

http://git-wip-us.apache.org/repos/asf/hive/blob/4b553358/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
--
diff --git 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
index 61ecc88..fa213db 100644
--- 
a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
+++ 
b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/QFileTestBatch.java
@@ -29,10 +29,11 @@ public class QFileTestBatch implements TestBatch {
   private final String driver;
   private final String queryFilesProperty;
   private final String name;
+  private final String moduleName;
   private final Set tests;
   private final boolean isParallel;
   public QFileTestBatch(String 

[5/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
index e028ecb..d3fc92a 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
@@ -43,6 +43,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase, SchemeFactory> schemes = 
new HashMap();
   static {
@@ -55,6 +56,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase properties; // optional
 
   /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -66,7 +68,8 @@ public class CompactionRequest implements 
org.apache.thrift.TBase byName = new HashMap();
 
@@ -91,6 +94,8 @@ public class CompactionRequest implements 
org.apache.thrift.TBase 
metaDataMap;
   static {
 Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new 
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -145,6 +150,10 @@ public class CompactionRequest implements 
org.apache.thrift.TBase __this__properties = new 
HashMap(other.properties);
+  this.properties = __this__properties;
+}
   }
 
   public CompactionRequest deepCopy() {
@@ -195,6 +208,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase();
+}
+this.properties.put(key, val);
+  }
+
+  public Map<String,String> getProperties() {
+    return this.properties;
+  }
+
+  public void setProperties(Map<String,String> properties) {
+    this.properties = properties;
+  }
+
+  public void unsetProperties() {
+this.properties = null;
+  }
+
+  /** Returns true if field properties is set (has been assigned a value) and 
false otherwise */
+  public boolean isSetProperties() {
+return this.properties != null;
+  }
+
+  public void setPropertiesIsSet(boolean value) {
+if (!value) {
+  this.properties = null;
+}
+  }
+
   public void setFieldValue(_Fields field, Object value) {
 switch (field) {
 case DBNAME:
@@ -362,6 +410,14 @@ public class CompactionRequest implements 
org.apache.thrift.TBase)value);
+  }
+  break;
+
 }
   }
 
@@ -382,6 +438,9 @@ public class CompactionRequest implements 
org.apache.thrift.TBase(2*_map524.size);
+String _key525;
+String _val526;
+for (int _i527 = 0; _i527 < _map524.size; ++_i527)
+{
+  _key525 = iprot.readString();
+  _val526 = iprot.readString();
+  struct.properties.put(_key525, _val526);
+}
+iprot.readMapEnd();
+  }
+  struct.setPropertiesIsSet(true);
+} else { 
+  org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
+}
+break;
   default:
 org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type);
 }
@@ -758,6 +873,21 @@ public class CompactionRequest implements 
org.apache.thrift.TBase _iter528 : 
struct.properties.entrySet())
+{
+  oprot.writeString(_iter528.getKey());
+  oprot.writeString(_iter528.getValue());
+}
+oprot.writeMapEnd();
+  }
+  oprot.writeFieldEnd();
+}
+  }
   oprot.writeFieldStop();
   oprot.writeStructEnd();
 }
@@ -785,13 +915,26 @@ public class CompactionRequest implements 
org.apache.thrift.TBase _iter529 : 
struct.properties.entrySet())
+  {
+oprot.writeString(_iter529.getKey());
+oprot.writeString(_iter529.getValue());
+  }
+}
+  }
 }
 
 @Override
@@ -803,7 +946,7 @@ public class CompactionRequest implements 
org.apache.thrift.TBase(2*_map530.size);
+  String _key531;
+  String _val532;
+  for (int _i533 = 0; _i533 < _map530.size; ++_i533)
+  {
+_key531 = iprot.readString();
+_val532 = iprot.readString();
+struct.properties.put(_key531, _val532);
+  }
+}
+struct.setPropertiesIsSet(true);
+  }
 }
   }
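
For reference, the new properties member is an optional Thrift field (field id 6), so the generated bean only serializes it once it has been assigned; clients that never touch it stay wire-compatible. Below is a small sketch of that isSet/unset behaviour using the accessors visible in the diff above; the three-argument required-field constructor is assumed from the Thrift IDL and is not shown in this excerpt.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.api.CompactionRequest;
    import org.apache.hadoop.hive.metastore.api.CompactionType;

    public class CompactionRequestOptionalFieldSketch {
      public static void main(String[] args) {
        // Assumed required-field constructor: dbname, tablename, type.
        CompactionRequest rqst = new CompactionRequest("default", "acid_tbl", CompactionType.MINOR);

        System.out.println(rqst.isSetProperties());   // false -> field 6 is skipped when serialized

        Map<String, String> props = new HashMap<String, String>();
        props.put("example.key", "example.value");    // illustrative key only
        rqst.setProperties(props);
        System.out.println(rqst.isSetProperties());   // true -> the map will be written as field 6

        rqst.unsetProperties();                       // back to not-set; nothing goes on the wire
        System.out.println(rqst.isSetProperties());   // false again
      }
    }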
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
--
diff --git 

[7/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 298384c..5a35a50 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size773;
-::apache::thrift::protocol::TType _etype776;
-xfer += iprot->readListBegin(_etype776, _size773);
-this->success.resize(_size773);
-uint32_t _i777;
-for (_i777 = 0; _i777 < _size773; ++_i777)
+uint32_t _size781;
+::apache::thrift::protocol::TType _etype784;
+xfer += iprot->readListBegin(_etype784, _size781);
+this->success.resize(_size781);
+uint32_t _i785;
+for (_i785 = 0; _i785 < _size781; ++_i785)
 {
-  xfer += iprot->readString(this->success[_i777]);
+  xfer += iprot->readString(this->success[_i785]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1286,10 +1286,10 @@ uint32_t 
ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size()));
-  std::vector ::const_iterator _iter778;
-  for (_iter778 = this->success.begin(); _iter778 != this->success.end(); 
++_iter778)
+  std::vector ::const_iterator _iter786;
+  for (_iter786 = this->success.begin(); _iter786 != this->success.end(); 
++_iter786)
   {
-xfer += oprot->writeString((*_iter778));
+xfer += oprot->writeString((*_iter786));
   }
   xfer += oprot->writeListEnd();
 }
@@ -1334,14 +1334,14 @@ uint32_t 
ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 (*(this->success)).clear();
-uint32_t _size779;
-::apache::thrift::protocol::TType _etype782;
-xfer += iprot->readListBegin(_etype782, _size779);
-(*(this->success)).resize(_size779);
-uint32_t _i783;
-for (_i783 = 0; _i783 < _size779; ++_i783)
+uint32_t _size787;
+::apache::thrift::protocol::TType _etype790;
+xfer += iprot->readListBegin(_etype790, _size787);
+(*(this->success)).resize(_size787);
+uint32_t _i791;
+for (_i791 = 0; _i791 < _size787; ++_i791)
 {
-  xfer += iprot->readString((*(this->success))[_i783]);
+  xfer += iprot->readString((*(this->success))[_i791]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1458,14 +1458,14 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
 if (ftype == ::apache::thrift::protocol::T_LIST) {
   {
 this->success.clear();
-uint32_t _size784;
-::apache::thrift::protocol::TType _etype787;
-xfer += iprot->readListBegin(_etype787, _size784);
-this->success.resize(_size784);
-uint32_t _i788;
-for (_i788 = 0; _i788 < _size784; ++_i788)
+uint32_t _size792;
+::apache::thrift::protocol::TType _etype795;
+xfer += iprot->readListBegin(_etype795, _size792);
+this->success.resize(_size792);
+uint32_t _i796;
+for (_i796 = 0; _i796 < _size792; ++_i796)
 {
-  xfer += iprot->readString(this->success[_i788]);
+  xfer += iprot->readString(this->success[_i796]);
 }
 xfer += iprot->readListEnd();
   }
@@ -1504,10 +1504,10 @@ uint32_t 
ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
 xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_LIST, 0);
 {
   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size()));
-  std::vector ::const_iterator _iter789;
-  for (_iter789 = this->success.begin(); _iter789 != this->success.end(); 
++_iter789)
+  std::vector ::const_iterator _iter797;
+  for (_iter797 = this->success.begin(); _iter797 != this->success.end(); 
++_iter797)
   {
-xfer += oprot->writeString((*_iter789));
+xfer += oprot->writeString((*_iter797));
   }
   xfer += oprot->writeListEnd();
 }
@@ 

[2/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-php/metastore/Types.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php 
b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 5aef35c..f505208 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -14373,6 +14373,10 @@ class CompactionRequest {
* @var string
*/
   public $runas = null;
+  /**
+   * @var array
+   */
+  public $properties = null;
 
   public function __construct($vals=null) {
 if (!isset(self::$_TSPEC)) {
@@ -14397,6 +14401,18 @@ class CompactionRequest {
   'var' => 'runas',
   'type' => TType::STRING,
   ),
+6 => array(
+  'var' => 'properties',
+  'type' => TType::MAP,
+  'ktype' => TType::STRING,
+  'vtype' => TType::STRING,
+  'key' => array(
+'type' => TType::STRING,
+  ),
+  'val' => array(
+'type' => TType::STRING,
+),
+  ),
 );
 }
 if (is_array($vals)) {
@@ -14415,6 +14431,9 @@ class CompactionRequest {
   if (isset($vals['runas'])) {
 $this->runas = $vals['runas'];
   }
+  if (isset($vals['properties'])) {
+$this->properties = $vals['properties'];
+  }
 }
   }
 
@@ -14472,6 +14491,26 @@ class CompactionRequest {
 $xfer += $input->skip($ftype);
   }
   break;
+case 6:
+  if ($ftype == TType::MAP) {
+$this->properties = array();
+$_size465 = 0;
+$_ktype466 = 0;
+$_vtype467 = 0;
+$xfer += $input->readMapBegin($_ktype466, $_vtype467, $_size465);
+for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
+{
+  $key470 = '';
+  $val471 = '';
+  $xfer += $input->readString($key470);
+  $xfer += $input->readString($val471);
+  $this->properties[$key470] = $val471;
+}
+$xfer += $input->readMapEnd();
+  } else {
+$xfer += $input->skip($ftype);
+  }
+  break;
 default:
   $xfer += $input->skip($ftype);
   break;
@@ -14510,6 +14549,24 @@ class CompactionRequest {
   $xfer += $output->writeString($this->runas);
   $xfer += $output->writeFieldEnd();
 }
+if ($this->properties !== null) {
+  if (!is_array($this->properties)) {
+throw new TProtocolException('Bad type in structure.', 
TProtocolException::INVALID_DATA);
+  }
+  $xfer += $output->writeFieldBegin('properties', TType::MAP, 6);
+  {
+$output->writeMapBegin(TType::STRING, TType::STRING, 
count($this->properties));
+{
+  foreach ($this->properties as $kiter472 => $viter473)
+  {
+$xfer += $output->writeString($kiter472);
+$xfer += $output->writeString($viter473);
+  }
+}
+$output->writeMapEnd();
+  }
+  $xfer += $output->writeFieldEnd();
+}
 $xfer += $output->writeFieldStop();
 $xfer += $output->writeStructEnd();
 return $xfer;
@@ -14946,15 +15003,15 @@ class ShowCompactResponse {
 case 1:
   if ($ftype == TType::LST) {
 $this->compacts = array();
-$_size465 = 0;
-$_etype468 = 0;
-$xfer += $input->readListBegin($_etype468, $_size465);
-for ($_i469 = 0; $_i469 < $_size465; ++$_i469)
+$_size474 = 0;
+$_etype477 = 0;
+$xfer += $input->readListBegin($_etype477, $_size474);
+for ($_i478 = 0; $_i478 < $_size474; ++$_i478)
 {
-  $elem470 = null;
-  $elem470 = new \metastore\ShowCompactResponseElement();
-  $xfer += $elem470->read($input);
-  $this->compacts []= $elem470;
+  $elem479 = null;
+  $elem479 = new \metastore\ShowCompactResponseElement();
+  $xfer += $elem479->read($input);
+  $this->compacts []= $elem479;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -14982,9 +15039,9 @@ class ShowCompactResponse {
   {
 $output->writeListBegin(TType::STRUCT, count($this->compacts));
 {
-  foreach ($this->compacts as $iter471)
+  foreach ($this->compacts as $iter480)
   {
-$xfer += $iter471->write($output);
+$xfer += $iter480->write($output);
   }
 }
 $output->writeListEnd();
@@ -15113,14 +15170,14 @@ class AddDynamicPartitions {
 case 4:
   if ($ftype == TType::LST) {
 $this->partitionnames = array();
-$_size472 = 0;
-$_etype475 = 0;
-$xfer += $input->readListBegin($_etype475, $_size472);
- 

[6/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
--
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp 
b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index f8ca7cd..79460a8 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -14413,6 +14413,11 @@ void CompactionRequest::__set_runas(const std::string& 
val) {
 __isset.runas = true;
 }
 
+void CompactionRequest::__set_properties(const std::map<std::string, std::string> & val) {
+  this->properties = val;
+__isset.properties = true;
+}
+
 uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) 
{
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -14479,6 +14484,29 @@ uint32_t 
CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
   xfer += iprot->skip(ftype);
 }
 break;
+  case 6:
+if (ftype == ::apache::thrift::protocol::T_MAP) {
+  {
+this->properties.clear();
+uint32_t _size603;
+::apache::thrift::protocol::TType _ktype604;
+::apache::thrift::protocol::TType _vtype605;
+xfer += iprot->readMapBegin(_ktype604, _vtype605, _size603);
+uint32_t _i607;
+for (_i607 = 0; _i607 < _size603; ++_i607)
+{
+  std::string _key608;
+  xfer += iprot->readString(_key608);
+  std::string& _val609 = this->properties[_key608];
+  xfer += iprot->readString(_val609);
+}
+xfer += iprot->readMapEnd();
+  }
+  this->__isset.properties = true;
+} else {
+  xfer += iprot->skip(ftype);
+}
+break;
   default:
 xfer += iprot->skip(ftype);
 break;
@@ -14524,6 +14552,20 @@ uint32_t 
CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot)
 xfer += oprot->writeString(this->runas);
 xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.properties) {
+xfer += oprot->writeFieldBegin("properties", 
::apache::thrift::protocol::T_MAP, 6);
+{
+  xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, 
::apache::thrift::protocol::T_STRING, 
static_cast(this->properties.size()));
+  std::map ::const_iterator _iter610;
+  for (_iter610 = this->properties.begin(); _iter610 != 
this->properties.end(); ++_iter610)
+  {
+xfer += oprot->writeString(_iter610->first);
+xfer += oprot->writeString(_iter610->second);
+  }
+  xfer += oprot->writeMapEnd();
+}
+xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -14536,24 +14578,27 @@ void swap(CompactionRequest &a, CompactionRequest &b) {
   swap(a.partitionname, b.partitionname);
   swap(a.type, b.type);
   swap(a.runas, b.runas);
+  swap(a.properties, b.properties);
   swap(a.__isset, b.__isset);
 }
 
-CompactionRequest::CompactionRequest(const CompactionRequest& other603) {
-  dbname = other603.dbname;
-  tablename = other603.tablename;
-  partitionname = other603.partitionname;
-  type = other603.type;
-  runas = other603.runas;
-  __isset = other603.__isset;
-}
-CompactionRequest& CompactionRequest::operator=(const CompactionRequest& 
other604) {
-  dbname = other604.dbname;
-  tablename = other604.tablename;
-  partitionname = other604.partitionname;
-  type = other604.type;
-  runas = other604.runas;
-  __isset = other604.__isset;
+CompactionRequest::CompactionRequest(const CompactionRequest& other611) {
+  dbname = other611.dbname;
+  tablename = other611.tablename;
+  partitionname = other611.partitionname;
+  type = other611.type;
+  runas = other611.runas;
+  properties = other611.properties;
+  __isset = other611.__isset;
+}
+CompactionRequest& CompactionRequest::operator=(const CompactionRequest& 
other612) {
+  dbname = other612.dbname;
+  tablename = other612.tablename;
+  partitionname = other612.partitionname;
+  type = other612.type;
+  runas = other612.runas;
+  properties = other612.properties;
+  __isset = other612.__isset;
   return *this;
 }
 void CompactionRequest::printTo(std::ostream& out) const {
@@ -14564,6 +14609,7 @@ void CompactionRequest::printTo(std::ostream& out) 
const {
   out << ", " << "partitionname="; (__isset.partitionname ? (out << 
to_string(partitionname)) : (out << ""));
   out << ", " << "type=" << to_string(type);
   out << ", " << "runas="; (__isset.runas ? (out << to_string(runas)) : (out 
<< ""));
+  out << ", " << "properties="; (__isset.properties ? (out << 
to_string(properties)) : (out << ""));
   out << ")";
 }
 
@@ -14616,11 +14662,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) {
   (void) b;
 }
 

[4/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
--
diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 13a8b71..cb5dec9 100644
--- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -28842,13 +28842,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list632 = 
iprot.readListBegin();
-  struct.success = new ArrayList(_list632.size);
-  String _elem633;
-  for (int _i634 = 0; _i634 < _list632.size; ++_i634)
+  org.apache.thrift.protocol.TList _list642 = 
iprot.readListBegin();
+  struct.success = new ArrayList(_list642.size);
+  String _elem643;
+  for (int _i644 = 0; _i644 < _list642.size; ++_i644)
   {
-_elem633 = iprot.readString();
-struct.success.add(_elem633);
+_elem643 = iprot.readString();
+struct.success.add(_elem643);
   }
   iprot.readListEnd();
 }
@@ -28883,9 +28883,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
 oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size()));
-for (String _iter635 : struct.success)
+for (String _iter645 : struct.success)
 {
-  oprot.writeString(_iter635);
+  oprot.writeString(_iter645);
 }
 oprot.writeListEnd();
   }
@@ -28924,9 +28924,9 @@ public class ThriftHiveMetastore {
 if (struct.isSetSuccess()) {
   {
 oprot.writeI32(struct.success.size());
-for (String _iter636 : struct.success)
+for (String _iter646 : struct.success)
 {
-  oprot.writeString(_iter636);
+  oprot.writeString(_iter646);
 }
   }
 }
@@ -28941,13 +28941,13 @@ public class ThriftHiveMetastore {
 BitSet incoming = iprot.readBitSet(2);
 if (incoming.get(0)) {
   {
-org.apache.thrift.protocol.TList _list637 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
-struct.success = new ArrayList(_list637.size);
-String _elem638;
-for (int _i639 = 0; _i639 < _list637.size; ++_i639)
+org.apache.thrift.protocol.TList _list647 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32());
+struct.success = new ArrayList(_list647.size);
+String _elem648;
+for (int _i649 = 0; _i649 < _list647.size; ++_i649)
 {
-  _elem638 = iprot.readString();
-  struct.success.add(_elem638);
+  _elem648 = iprot.readString();
+  struct.success.add(_elem648);
 }
   }
   struct.setSuccessIsSet(true);
@@ -29601,13 +29601,13 @@ public class ThriftHiveMetastore {
 case 0: // SUCCESS
   if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 {
-  org.apache.thrift.protocol.TList _list640 = 
iprot.readListBegin();
-  struct.success = new ArrayList(_list640.size);
-  String _elem641;
-  for (int _i642 = 0; _i642 < _list640.size; ++_i642)
+  org.apache.thrift.protocol.TList _list650 = 
iprot.readListBegin();
+  struct.success = new ArrayList(_list650.size);
+  String _elem651;
+  for (int _i652 = 0; _i652 < _list650.size; ++_i652)
   {
-_elem641 = iprot.readString();
-struct.success.add(_elem641);
+_elem651 = iprot.readString();
+struct.success.add(_elem651);
   }
   iprot.readListEnd();
 }
@@ -29642,9 +29642,9 @@ public class ThriftHiveMetastore {
   oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
   {
 oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size()));
-for 

[3/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
--
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php 
b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 3c9e038..2d82c92 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -10792,14 +10792,14 @@ class ThriftHiveMetastore_get_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size560 = 0;
-$_etype563 = 0;
-$xfer += $input->readListBegin($_etype563, $_size560);
-for ($_i564 = 0; $_i564 < $_size560; ++$_i564)
+$_size569 = 0;
+$_etype572 = 0;
+$xfer += $input->readListBegin($_etype572, $_size569);
+for ($_i573 = 0; $_i573 < $_size569; ++$_i573)
 {
-  $elem565 = null;
-  $xfer += $input->readString($elem565);
-  $this->success []= $elem565;
+  $elem574 = null;
+  $xfer += $input->readString($elem574);
+  $this->success []= $elem574;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -10835,9 +10835,9 @@ class ThriftHiveMetastore_get_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter566)
+  foreach ($this->success as $iter575)
   {
-$xfer += $output->writeString($iter566);
+$xfer += $output->writeString($iter575);
   }
 }
 $output->writeListEnd();
@@ -10968,14 +10968,14 @@ class ThriftHiveMetastore_get_all_databases_result {
 case 0:
   if ($ftype == TType::LST) {
 $this->success = array();
-$_size567 = 0;
-$_etype570 = 0;
-$xfer += $input->readListBegin($_etype570, $_size567);
-for ($_i571 = 0; $_i571 < $_size567; ++$_i571)
+$_size576 = 0;
+$_etype579 = 0;
+$xfer += $input->readListBegin($_etype579, $_size576);
+for ($_i580 = 0; $_i580 < $_size576; ++$_i580)
 {
-  $elem572 = null;
-  $xfer += $input->readString($elem572);
-  $this->success []= $elem572;
+  $elem581 = null;
+  $xfer += $input->readString($elem581);
+  $this->success []= $elem581;
 }
 $xfer += $input->readListEnd();
   } else {
@@ -11011,9 +11011,9 @@ class ThriftHiveMetastore_get_all_databases_result {
   {
 $output->writeListBegin(TType::STRING, count($this->success));
 {
-  foreach ($this->success as $iter573)
+  foreach ($this->success as $iter582)
   {
-$xfer += $output->writeString($iter573);
+$xfer += $output->writeString($iter582);
   }
 }
 $output->writeListEnd();
@@ -12014,18 +12014,18 @@ class ThriftHiveMetastore_get_type_all_result {
 case 0:
   if ($ftype == TType::MAP) {
 $this->success = array();
-$_size574 = 0;
-$_ktype575 = 0;
-$_vtype576 = 0;
-$xfer += $input->readMapBegin($_ktype575, $_vtype576, $_size574);
-for ($_i578 = 0; $_i578 < $_size574; ++$_i578)
+$_size583 = 0;
+$_ktype584 = 0;
+$_vtype585 = 0;
+$xfer += $input->readMapBegin($_ktype584, $_vtype585, $_size583);
+for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
 {
-  $key579 = '';
-  $val580 = new \metastore\Type();
-  $xfer += $input->readString($key579);
-  $val580 = new \metastore\Type();
-  $xfer += $val580->read($input);
-  $this->success[$key579] = $val580;
+  $key588 = '';
+  $val589 = new \metastore\Type();
+  $xfer += $input->readString($key588);
+  $val589 = new \metastore\Type();
+  $xfer += $val589->read($input);
+  $this->success[$key588] = $val589;
 }
 $xfer += $input->readMapEnd();
   } else {
@@ -12061,10 +12061,10 @@ class ThriftHiveMetastore_get_type_all_result {
   {
 $output->writeMapBegin(TType::STRING, TType::STRUCT, 
count($this->success));
 {
-  foreach ($this->success as $kiter581 => $viter582)
+  foreach ($this->success as $kiter590 => $viter591)
   {
-$xfer += $output->writeString($kiter581);
-$xfer += $viter582->write($output);
+$xfer += $output->writeString($kiter590);
+$xfer += $viter591->write($output);
  

[8/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
HIVE-13354 : Add ability to specify Compaction options per table and per 
request (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c57a5961
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c57a5961
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c57a5961

Branch: refs/heads/master
Commit: c57a59611fa168ee38c6ee0ee60b1d6c4994f9f8
Parents: 793681c
Author: Wei Zheng 
Authored: Fri May 27 11:20:14 2016 -0700
Committer: Wei Zheng 
Committed: Fri May 27 11:20:14 2016 -0700

--
 .../hive/ql/txn/compactor/TestCompactor.java|  161 ++
 metastore/if/hive_metastore.thrift  |1 +
 .../upgrade/derby/036-HIVE-13354.derby.sql  |2 +
 .../derby/hive-txn-schema-1.3.0.derby.sql   |2 +
 .../derby/hive-txn-schema-2.1.0.derby.sql   |2 +
 .../derby/upgrade-1.2.0-to-1.3.0.derby.sql  |1 +
 .../derby/upgrade-2.0.0-to-2.1.0.derby.sql  |1 +
 .../upgrade/mssql/021-HIVE-13354.mssql.sql  |2 +
 .../upgrade/mssql/hive-schema-1.3.0.mssql.sql   |2 +
 .../upgrade/mssql/hive-schema-2.1.0.mssql.sql   |2 +
 .../mssql/upgrade-1.2.0-to-1.3.0.mssql.sql  |1 +
 .../mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |1 +
 .../upgrade/mysql/036-HIVE-13354.mysql.sql  |2 +
 .../mysql/hive-txn-schema-1.3.0.mysql.sql   |2 +
 .../mysql/hive-txn-schema-2.1.0.mysql.sql   |2 +
 .../mysql/upgrade-1.2.0-to-1.3.0.mysql.sql  |1 +
 .../mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |1 +
 .../upgrade/oracle/036-HIVE-13354.oracle.sql|2 +
 .../oracle/hive-txn-schema-1.3.0.oracle.sql |2 +
 .../oracle/hive-txn-schema-2.1.0.oracle.sql |2 +
 .../oracle/upgrade-1.2.0-to-1.3.0.oracle.sql|1 +
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql|1 +
 .../postgres/035-HIVE-13354.postgres.sql|2 +
 .../postgres/hive-txn-schema-1.3.0.postgres.sql |2 +
 .../postgres/hive-txn-schema-2.1.0.postgres.sql |2 +
 .../upgrade-1.2.0-to-1.3.0.postgres.sql |1 +
 .../upgrade-2.0.0-to-2.1.0.postgres.sql |1 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2020 
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  980 
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   10 +-
 .../metastore/api/AddDynamicPartitions.java |   32 +-
 .../metastore/api/ClearFileMetadataRequest.java |   32 +-
 .../hive/metastore/api/CompactionRequest.java   |  166 +-
 .../hive/metastore/api/FireEventRequest.java|   32 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   36 +-
 .../api/GetFileMetadataByExprRequest.java   |   32 +-
 .../api/GetFileMetadataByExprResult.java|   48 +-
 .../metastore/api/GetFileMetadataRequest.java   |   32 +-
 .../metastore/api/GetFileMetadataResult.java|   44 +-
 .../metastore/api/InsertEventRequestData.java   |   32 +-
 .../api/NotificationEventResponse.java  |   36 +-
 .../metastore/api/PutFileMetadataRequest.java   |   64 +-
 .../hive/metastore/api/ShowCompactResponse.java |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 2188 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1232 +-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  315 +--
 .../hive_metastore/ThriftHiveMetastore.py   |  830 +++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  197 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |4 +-
 .../hive/metastore/HiveMetaStoreClient.java |   14 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   18 +
 .../hive/metastore/txn/CompactionInfo.java  |   30 +-
 .../metastore/txn/CompactionTxnHandler.java |   11 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java|2 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |8 +
 .../hadoop/hive/metastore/txn/TxnUtils.java |   56 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |2 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java|6 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java  |5 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g|4 +-
 .../hive/ql/plan/AlterTableSimpleDesc.java  |8 +
 .../hive/ql/txn/compactor/CompactorMR.java  |  121 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |   22 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java|9 +
 .../hive/ql/txn/compactor/TestWorker.java   |9 +-
 65 files changed, 4780 insertions(+), 4145 deletions(-)
--
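
The diffstat above shows the feature end to end: a new optional map<string,string> properties field on CompactionRequest in the Thrift IDL and in all generated bindings, schema upgrade scripts to persist it, and parser/DDL changes on the Hive side so options can also be supplied per table or per compaction statement. The sketch below covers only the per-request path, using just the generated setter visible in the diffs; the constructor arguments and the property keys are illustrative assumptions, not values defined by this patch.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.api.CompactionRequest;
    import org.apache.hadoop.hive.metastore.api.CompactionType;

    public class CompactionRequestPropertiesSketch {
      public static void main(String[] args) {
        // Assumed required-field constructor: dbname, tablename, type.
        CompactionRequest rqst = new CompactionRequest("default", "acid_tbl", CompactionType.MAJOR);
        rqst.setPartitionname("ds=2016-05-27");

        // Hypothetical per-request compactor overrides; the real key names are
        // whatever the compactor job honours, not something this sketch defines.
        Map<String, String> props = new HashMap<String, String>();
        props.put("compactor.mapreduce.map.memory.mb", "2048");
        props.put("compactor.job.queue", "compaction");
        rqst.setProperties(props);

        // The request would then be handed to the metastore; here we only show
        // that the properties map rides along with it.
        System.out.println(rqst);
      }
    }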


http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
--
diff --git 

[1/8] hive git commit: HIVE-13354 : Add ability to specify Compaction options per table and per request (Wei Zheng, reviewed by Eugene Koifman)

2016-05-27 Thread weiz
Repository: hive
Updated Branches:
  refs/heads/master 793681c76 -> c57a59611


http://git-wip-us.apache.org/repos/asf/hive/blob/c57a5961/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
--
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py 
b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 4db9680..8d88cd7 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -9984,6 +9984,7 @@ class CompactionRequest:
- partitionname
- type
- runas
+   - properties
   """
 
   thrift_spec = (
@@ -9993,14 +9994,16 @@ class CompactionRequest:
 (3, TType.STRING, 'partitionname', None, None, ), # 3
 (4, TType.I32, 'type', None, None, ), # 4
 (5, TType.STRING, 'runas', None, None, ), # 5
+(6, TType.MAP, 'properties', (TType.STRING,None,TType.STRING,None), None, 
), # 6
   )
 
-  def __init__(self, dbname=None, tablename=None, partitionname=None, 
type=None, runas=None,):
+  def __init__(self, dbname=None, tablename=None, partitionname=None, 
type=None, runas=None, properties=None,):
 self.dbname = dbname
 self.tablename = tablename
 self.partitionname = partitionname
 self.type = type
 self.runas = runas
+self.properties = properties
 
   def read(self, iprot):
 if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is 
not None and fastbinary is not None:
@@ -10036,6 +10039,17 @@ class CompactionRequest:
   self.runas = iprot.readString()
 else:
   iprot.skip(ftype)
+  elif fid == 6:
+if ftype == TType.MAP:
+  self.properties = {}
+  (_ktype463, _vtype464, _size462 ) = iprot.readMapBegin()
+  for _i466 in xrange(_size462):
+_key467 = iprot.readString()
+_val468 = iprot.readString()
+self.properties[_key467] = _val468
+  iprot.readMapEnd()
+else:
+  iprot.skip(ftype)
   else:
 iprot.skip(ftype)
   iprot.readFieldEnd()
@@ -10066,6 +10080,14 @@ class CompactionRequest:
   oprot.writeFieldBegin('runas', TType.STRING, 5)
   oprot.writeString(self.runas)
   oprot.writeFieldEnd()
+if self.properties is not None:
+  oprot.writeFieldBegin('properties', TType.MAP, 6)
+  oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
+  for kiter469,viter470 in self.properties.items():
+oprot.writeString(kiter469)
+oprot.writeString(viter470)
+  oprot.writeMapEnd()
+  oprot.writeFieldEnd()
 oprot.writeFieldStop()
 oprot.writeStructEnd()
 
@@ -10086,6 +10108,7 @@ class CompactionRequest:
 value = (value * 31) ^ hash(self.partitionname)
 value = (value * 31) ^ hash(self.type)
 value = (value * 31) ^ hash(self.runas)
+value = (value * 31) ^ hash(self.properties)
 return value
 
   def __repr__(self):
@@ -10387,11 +10410,11 @@ class ShowCompactResponse:
   if fid == 1:
 if ftype == TType.LIST:
   self.compacts = []
-  (_etype465, _size462) = iprot.readListBegin()
-  for _i466 in xrange(_size462):
-_elem467 = ShowCompactResponseElement()
-_elem467.read(iprot)
-self.compacts.append(_elem467)
+  (_etype474, _size471) = iprot.readListBegin()
+  for _i475 in xrange(_size471):
+_elem476 = ShowCompactResponseElement()
+_elem476.read(iprot)
+self.compacts.append(_elem476)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -10408,8 +10431,8 @@ class ShowCompactResponse:
 if self.compacts is not None:
   oprot.writeFieldBegin('compacts', TType.LIST, 1)
   oprot.writeListBegin(TType.STRUCT, len(self.compacts))
-  for iter468 in self.compacts:
-iter468.write(oprot)
+  for iter477 in self.compacts:
+iter477.write(oprot)
   oprot.writeListEnd()
   oprot.writeFieldEnd()
 oprot.writeFieldStop()
@@ -10490,10 +10513,10 @@ class AddDynamicPartitions:
   elif fid == 4:
 if ftype == TType.LIST:
   self.partitionnames = []
-  (_etype472, _size469) = iprot.readListBegin()
-  for _i473 in xrange(_size469):
-_elem474 = iprot.readString()
-self.partitionnames.append(_elem474)
+  (_etype481, _size478) = iprot.readListBegin()
+  for _i482 in xrange(_size478):
+_elem483 = iprot.readString()
+self.partitionnames.append(_elem483)
   iprot.readListEnd()
 else:
   iprot.skip(ftype)
@@ -10527,8 +10550,8 @@ class AddDynamicPartitions:
 if self.partitionnames is not None:
   oprot.writeFieldBegin('partitionnames', TType.LIST, 4)
   oprot.writeListBegin(TType.STRING, len(self.partitionnames))
-   

[46/48] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/4ca8a63b/ql/src/test/results/clientpositive/join33.q.out
--
diff --git a/ql/src/test/results/clientpositive/join33.q.out 
b/ql/src/test/results/clientpositive/join33.q.out
index 8653c2f..bebb007 100644
--- a/ql/src/test/results/clientpositive/join33.q.out
+++ b/ql/src/test/results/clientpositive/join33.q.out
@@ -159,7 +159,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -179,7 +179,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -203,7 +203,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -223,7 +223,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -250,7 +250,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 11
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/4ca8a63b/ql/src/test/results/clientpositive/join34.q.out
--
diff --git a/ql/src/test/results/clientpositive/join34.q.out 
b/ql/src/test/results/clientpositive/join34.q.out
index bb23644..365992b 100644
--- a/ql/src/test/results/clientpositive/join34.q.out
+++ b/ql/src/test/results/clientpositive/join34.q.out
@@ -197,7 +197,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -217,7 +217,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -241,7 +241,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 

[19/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
--
diff --git 
a/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out 
b/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
deleted file mode 100644
index 899723f..000
--- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
+++ /dev/null
@@ -1,999 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_4
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_4
-RUN: Stage-0:DDL
-PREHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_5
-POSTHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_5
-RUN: Stage-0:DDL
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 
'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-from src a 
-where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-from src a 
-where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-10 is a root stage
-  Stage-2 depends on stages: Stage-10
-  Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
-  Stage-6 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-6
-  Stage-7 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-10
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: s1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Filter Operator
-  predicate: ((key > '2') and key is null) (type: boolean)
-  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE 
Column stats: NONE
-  Select Operator
-Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE 
Column stats: NONE
-Group By Operator
-  aggregations: count()
-  mode: hash
-  outputColumnNames: _col0
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-  Reduce Output Operator
-sort order: 
-Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-value expressions: _col0 (type: bigint)
-  Reduce Operator Tree:
-Group By Operator
-  aggregations: count(VALUE._col0)
-  mode: mergepartial
-  outputColumnNames: _col0
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
-  Filter Operator
-predicate: (_col0 = 0) (type: boolean)
-Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE
-Select Operator
-  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-  Group By Operator
-keys: 0 (type: bigint)
-mode: hash
-outputColumnNames: _col0
-Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
-  table:
-  input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-  serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: b
-Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
-Reduce Output Operator
-  sort order: 
-  Statistics: Num rows: 500 Data 

[48/48] hive git commit: HIVE-13860: Fix more json related JDK8 test failures (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
HIVE-13860: Fix more json related JDK8 test failures (Mohit Sabharwal, reviewed 
by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eaa8ff21
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eaa8ff21
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eaa8ff21

Branch: refs/heads/java8
Commit: eaa8ff214d3acdf6245a5e0f490a55cdc0097a83
Parents: 4ca8a63
Author: Mohit Sabharwal 
Authored: Fri May 27 10:35:16 2016 -0500
Committer: Sergio Pena 
Committed: Fri May 27 10:36:29 2016 -0500

--
 .../clientpositive/autoColumnStats_1.q.out  | 28 +++---
 .../clientpositive/autoColumnStats_2.q.out  | 24 ++---
 .../clientpositive/autoColumnStats_3.q.out  | 10 +-
 .../clientpositive/autoColumnStats_4.q.out  |  2 +-
 .../clientpositive/autoColumnStats_5.q.out  |  8 +-
 .../clientpositive/autoColumnStats_8.q.out  |  8 +-
 .../clientpositive/autoColumnStats_9.q.out  |  2 +-
 .../clientpositive/binary_output_format.q.out   |  4 +-
 .../results/clientpositive/json_serde1.q.out|  4 +-
 .../results/clientpositive/orc_create.q.out | 12 +--
 .../clientpositive/orc_int_type_promotion.q.out |  6 +-
 .../results/clientpositive/perf/query85.q.out   |  2 +-
 .../results/clientpositive/perf/query89.q.out   |  2 +-
 .../results/clientpositive/perf/query91.q.out   |  2 +-
 .../results/clientpositive/spark/bucket5.q.out  |  8 +-
 .../results/clientpositive/spark/join0.q.out|  2 +-
 .../clientpositive/spark/outer_join_ppr.q.out   |  4 +-
 .../spark/reduce_deduplicate.q.out  |  4 +-
 .../clientpositive/spark/union_ppr.q.out|  8 +-
 .../clientpositive/stats_list_bucket.q.out  |  2 +-
 .../results/clientpositive/tez/bucket2.q.out|  4 +-
 .../clientpositive/udaf_collect_set_2.q.out | 96 ++--
 .../results/clientpositive/udf_sort_array.q.out |  2 +-
 .../clientpositive/vector_complex_all.q.out |  6 +-
 .../results/clientpositive/vector_udf1.q.out|  2 +-
 25 files changed, 126 insertions(+), 126 deletions(-)
--
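
The expected-output churn in this change (and in HIVE-13409 above) is about JSON map key order: JDK7 and JDK8 HashMaps iterate in different orders, so the COLUMN_STATS_ACCURATE value in the q.out files now appears in sorted-key order ("BASIC_STATS" before "COLUMN_STATS"). The snippet below only illustrates how emitting the map from a sorted view makes the string deterministic across JDKs; it is not the code path Hive itself uses, and the values are pre-encoded JSON fragments to keep the sketch short.

    import java.util.Map;
    import java.util.TreeMap;

    public class DeterministicStatsJsonSketch {
      static String toJson(Map<String, String> m) {
        StringBuilder sb = new StringBuilder("{");
        boolean first = true;
        for (Map.Entry<String, String> e : new TreeMap<String, String>(m).entrySet()) { // sorted keys
          if (!first) {
            sb.append(',');
          }
          sb.append('"').append(e.getKey()).append("\":").append(e.getValue());
          first = false;
        }
        return sb.append('}').toString();
      }

      public static void main(String[] args) {
        Map<String, String> stats = new java.util.HashMap<String, String>();
        stats.put("BASIC_STATS", "\"true\"");
        stats.put("COLUMN_STATS", "{\"key\":\"true\",\"value\":\"true\"}");
        // Prints {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
        // regardless of the JDK's HashMap iteration order.
        System.out.println(toJson(stats));
      }
    }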


http://git-wip-us.apache.org/repos/asf/hive/blob/eaa8ff21/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
--
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out 
b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
index e290e52..4cf6df1 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_1.q.out
@@ -60,7 +60,7 @@ Retention:0
  A masked pattern was here 
 Table Type:MANAGED_TABLE
 Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles1   
numRows 500 
rawDataSize 5312
@@ -137,7 +137,7 @@ Retention:  0
  A masked pattern was here 
 Table Type:MANAGED_TABLE
 Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles1   
numRows 500 
rawDataSize 5312
@@ -172,7 +172,7 @@ Retention:  0
  A masked pattern was here 
 Table Type:MANAGED_TABLE
 Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles1   
numRows 500 
rawDataSize 5312
@@ -257,7 +257,7 @@ Retention:  0
  A masked pattern was here 
 Table Type:MANAGED_TABLE
 Table Parameters:   
-   COLUMN_STATS_ACCURATE   
{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+   COLUMN_STATS_ACCURATE   
{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles1   
numRows 500 
rawDataSize 5312
@@ -292,7 

[38/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/join0.q.out
--
diff --git a/ql/src/test/results/clientpositive/join0.q.out 
b/ql/src/test/results/clientpositive/join0.q.out
new file mode 100644
index 000..59122e2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/join0.q.out
@@ -0,0 +1,238 @@
+Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' 
is a cross product
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+   src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+   src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: src
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+Filter Operator
+  predicate: (key < 10) (type: boolean)
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE 
Column stats: NONE
+  Select Operator
+expressions: key (type: string), value (type: string)
+outputColumnNames: _col0, _col1
+Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  sort order: 
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+  value expressions: _col0 (type: string), _col1 (type: string)
+  TableScan
+alias: src
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+Filter Operator
+  predicate: (key < 10) (type: boolean)
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE 
Column stats: NONE
+  Select Operator
+expressions: key (type: string), value (type: string)
+outputColumnNames: _col0, _col1
+Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+Reduce Output Operator
+  sort order: 
+  Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE
+  value expressions: _col0 (type: string), _col1 (type: string)
+  Reduce Operator Tree:
+Join Operator
+  condition map:
+   Inner Join 0 to 1
+  keys:
+0 
+1 
+  outputColumnNames: _col0, _col1, _col2, _col3
+  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE 
Column stats: NONE
+  File Output Operator
+compressed: false
+table:
+input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+Map Reduce
+  Map Operator Tree:
+  TableScan
+Reduce Output Operator
+  key expressions: _col0 (type: string), _col1 (type: string), 
_col2 (type: string), _col3 (type: string)
+  sort order: 
+  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE 
Column stats: NONE
+  Reduce Operator Tree:
+Select Operator
+  expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 
(type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: 
string)
+  outputColumnNames: _col0, _col1, _col2, _col3
+  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE 
Column stats: NONE
+  File Output Operator
+compressed: false
+Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE 
Column stats: NONE
+table:
+input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+Fetch Operator
+  limit: -1
+  Processor Tree:
+ListSink
+
+Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' 
is a cross product
+PREHOOK: query: 

[25/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/outer_join_ppr.q.out
--
diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.out 
b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
new file mode 100644
index 000..cf20851
--- /dev/null
+++ b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
@@ -0,0 +1,683 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: a
+Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE 
Column stats: NONE
+  Reduce Output Operator
+key expressions: _col0 (type: string)
+null sort order: a
+sort order: +
+Map-reduce partition columns: _col0 (type: string)
+Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
+tag: 0
+value expressions: _col1 (type: string)
+auto parallelism: false
+  TableScan
+alias: b
+Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string), ds (type: 
string)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 2000 Data size: 21248 Basic stats: 
COMPLETE Column stats: NONE
+  Reduce Output Operator
+key expressions: _col0 (type: string)
+null sort order: a
+sort order: +
+Map-reduce partition columns: _col0 (type: string)
+Statistics: Num rows: 2000 Data size: 21248 Basic stats: 
COMPLETE Column stats: NONE
+tag: 1
+value expressions: _col1 (type: string), _col2 (type: string)
+auto parallelism: false
+  Path -> Alias:
+ A masked pattern was here 
+  Path -> Partition:
+ A masked pattern was here 
+  Partition
+base file name: src
+input format: org.apache.hadoop.mapred.TextInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+properties:
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+  bucket_count -1
+  columns key,value
+  columns.comments 'default','default'
+  columns.types string:string
+ A masked pattern was here 
+  name default.src
+  numFiles 1
+  numRows 500
+  rawDataSize 5312
+  serialization.ddl struct src { string key, string value}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+  totalSize 5812
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+  
+  input format: org.apache.hadoop.mapred.TextInputFormat
+  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+  properties:
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+bucket_count -1
+columns key,value
+columns.comments 'default','default'
+columns.types string:string
+ A masked pattern was here 
+name default.src
+numFiles 1
+numRows 500
+rawDataSize 5312
+serialization.ddl struct src { string key, string value}
+serialization.format 1
+serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+totalSize 5812
+ A masked pattern was here 
+  

[35/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
deleted file mode 100644
index dcfbec0..000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
+++ /dev/null
@@ -1,591 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Map Reduce
-  Map Operator Tree:
-  TableScan
-alias: srcpart
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
-GatherStats: false
-Select Operator
-  expressions: key (type: string), value (type: string)
-  outputColumnNames: _col0, _col1
-  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
-  File Output Operator
-compressed: false
-GlobalTableId: 1
- A masked pattern was here 
-NumFilesPerFileSink: 1
-Static Partition Specification: ds=2008-04-08/hr=11/
-Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
- A masked pattern was here 
-table:
-input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-properties:
-  bucket_count -1
-  columns key,value
-  columns.comments 
-  columns.types string:string
- A masked pattern was here 
-  name default.list_bucketing_static_part
-  partition_columns ds/hr
-  partition_columns.types string:string
-  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-name: default.list_bucketing_static_part
-TotalFiles: 1
-GatherStats: true
-MultiFileSpray: false
-  Path -> Alias:
- A masked pattern was here 
-  Path -> Partition:
- A 

[12/48] hive git commit: HIVE-13868: Include derby.log file in the Hive ptest logs (Sergio Pena)

2016-05-27 Thread spena
HIVE-13868: Include derby.log file in the Hive ptest logs (Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b420e1da
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b420e1da
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b420e1da

Branch: refs/heads/java8
Commit: b420e1da98505f1b446b7c65e2a6cf1f0c4d5e00
Parents: 7172586
Author: Sergio Pena 
Authored: Thu May 26 17:47:04 2016 -0500
Committer: Sergio Pena 
Committed: Thu May 26 17:47:04 2016 -0500

--
 testutils/ptest2/src/main/resources/batch-exec.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b420e1da/testutils/ptest2/src/main/resources/batch-exec.vm
--
diff --git a/testutils/ptest2/src/main/resources/batch-exec.vm 
b/testutils/ptest2/src/main/resources/batch-exec.vm
index 99ddf80..652084d 100644
--- a/testutils/ptest2/src/main/resources/batch-exec.vm
+++ b/testutils/ptest2/src/main/resources/batch-exec.vm
@@ -86,7 +86,7 @@ fi
 echo $pid >> batch.pid
 wait $pid
 ret=$?
-find ./ -type f -name hive.log -o -name spark.log | \
+find ./ -type f -name hive.log -o -name spark.log -o -name derby.log | \
   xargs -I {} sh -c 'f=$(basename {}); test -f ${logDir}/$f && 
f=$f-$(uuidgen); mv {} ${logDir}/$f'
 find ./ -type f -name 'TEST-*.xml' | \
   xargs -I {} sh -c 'f=TEST-${batchName}-$(basename {}); test -f ${logDir}/$f 
&& f=$f-$(uuidgen); mv {} ${logDir}/$f'
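
For reference, a minimal sketch (not part of this commit) of what the extended find/xargs pipeline above does for each matching log file: move it into the collection directory, appending a random suffix when a file with the same name is already there. The class and method names below are illustrative; only java.nio.file and java.util are assumed.

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.UUID;

    final class CollectLogSketch {
      // Move one log file (hive.log, spark.log, or now derby.log) into logDir,
      // uniquifying the name if an earlier batch already produced a file with it.
      static void collect(Path logFile, Path logDir) throws Exception {
        String name = logFile.getFileName().toString();
        Path target = logDir.resolve(name);
        if (Files.exists(target)) {
          target = logDir.resolve(name + "-" + UUID.randomUUID());
        }
        Files.move(logFile, target);
      }
    }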



[23/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out 
b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
deleted file mode 100644
index 68943e1..000
--- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
+++ /dev/null
@@ -1,709 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-Spark
-  Edges:
-Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL 
SORT, 2)
- A masked pattern was here 
-  Vertices:
-Map 1 
-Map Operator Tree:
-TableScan
-  alias: a
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  GatherStats: false
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-Reduce Output Operator
-  key expressions: _col0 (type: string)
-  null sort order: a
-  sort order: +
-  Map-reduce partition columns: _col0 (type: string)
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  tag: 0
-  value expressions: _col1 (type: string)
-  auto parallelism: false
-Path -> Alias:
- A masked pattern was here 
-Path -> Partition:
- A masked pattern was here 
-Partition
-  base file name: src
-  input format: org.apache.hadoop.mapred.TextInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-  properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-bucket_count -1
-columns key,value
-columns.comments 'default','default'
-columns.types string:string
- A masked pattern was here 
-name default.src
-numFiles 1
-numRows 500
-rawDataSize 5312
-serialization.ddl struct src { string key, string value}
-serialization.format 1
-serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-totalSize 5812
- A masked pattern was here 
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-input format: org.apache.hadoop.mapred.TextInputFormat
-output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-  bucket_count -1
-  columns key,value
-  columns.comments 'default','default'
-  columns.types string:string
- A masked pattern was here 
-  name default.src
-  numFiles 1
-  numRows 500
-  rawDataSize 5312
-  serialization.ddl struct src { string key, string value}
-  serialization.format 1
-  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  totalSize 5812
- A masked pattern was here 
-serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-name: default.src
-  name: default.src
-Truncated Path -> Alias:
-  /src [a]
-Map 3 
-Map Operator Tree:
-TableScan
-  

[32/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
new file mode 100644
index 000..09cb847
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
@@ -0,0 +1,504 @@
+PREHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103')) 
+stored as DIRECTORIES
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103')) 
+stored as DIRECTORIES
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_dynamic_part
+PREHOOK: query: -- list bucketing DML
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr) select key, value, hr from srcpart where ds='2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr) select key, value, hr from srcpart where ds='2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: srcpart
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string), hr (type: 
string)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+  File Output Operator
+compressed: false
+GlobalTableId: 1
+ A masked pattern was here 
+NumFilesPerFileSink: 1
+Static Partition Specification: ds=2008-04-08/
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+ A masked pattern was here 
+table:
+input format: org.apache.hadoop.mapred.TextInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+properties:
+  bucket_count -1
+  columns key,value
+  columns.comments 
+  columns.types string:string
+ A masked pattern was here 
+  name default.list_bucketing_dynamic_part
+  partition_columns ds/hr
+  partition_columns.types string:string
+  serialization.ddl struct list_bucketing_dynamic_part { 
string key, string value}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+name: default.list_bucketing_dynamic_part
+TotalFiles: 1
+GatherStats: true
+MultiFileSpray: false
+  Path -> Alias:
+ A masked pattern was here 
+  Path -> Partition:
+ A masked pattern was here 
+  Partition
+base file name: hr=11
+input format: org.apache.hadoop.mapred.TextInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+partition values:
+  ds 2008-04-08
+  hr 11
+properties:
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+  bucket_count -1
+  columns key,value
+  columns.comments 'default','default'
+  columns.types string:string
+ A masked pattern was here 
+  name default.srcpart
+  numFiles 1
+  numRows 500
+  partition_columns ds/hr
+  partition_columns.types 

[01/48] hive git commit: HIVE-13800. Disable LLAP UI auth by default. (Siddharth Seth, reviewed by Sergey Shelukhin) [Forced Update!]

2016-05-27 Thread spena
Repository: hive
Updated Branches:
  refs/heads/java8 ace92dbc5 -> eaa8ff214 (forced update)


HIVE-13800. Disable LLAP UI auth by default. (Siddharth Seth, reviewed by 
Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d52131d7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d52131d7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d52131d7

Branch: refs/heads/java8
Commit: d52131d763b8b8d26b6ee81ebac68f8ee9ffdb47
Parents: 3a2a3e1
Author: Siddharth Seth 
Authored: Wed May 25 14:57:26 2016 -0700
Committer: Siddharth Seth 
Committed: Wed May 25 14:57:26 2016 -0700

--
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d52131d7/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 3e295fe..b1f37ff 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2720,7 +2720,7 @@ public class HiveConf extends Configuration {
 "LLAP delegation token lifetime, in seconds if specified without a 
unit."),
 LLAP_MANAGEMENT_RPC_PORT("hive.llap.management.rpc.port", 15004,
 "RPC port for LLAP daemon management service."),
-LLAP_WEB_AUTO_AUTH("hive.llap.auto.auth", true,
+LLAP_WEB_AUTO_AUTH("hive.llap.auto.auth", false,
 "Whether or not to set Hadoop configs to enable auth in LLAP web 
app."),
 LLAP_CREATE_TOKEN_LOCALLY("hive.llap.create.token.locally", "hs2",
 new StringSet("true", "hs2", "false"),
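
For context, a minimal sketch (not part of this commit) of how a component reads the flag whose default this patch flips from true to false; it assumes only hive-common (org.apache.hadoop.hive.conf.HiveConf) on the classpath, and the class name is made up.

    import org.apache.hadoop.hive.conf.HiveConf;

    final class LlapAuthFlagSketch {
      // Illustrative only: after this change the returned value is false
      // unless hive.llap.auto.auth is set explicitly in the configuration.
      static boolean webAuthEnabled() {
        HiveConf conf = new HiveConf();
        return conf.getBoolVar(HiveConf.ConfVars.LLAP_WEB_AUTO_AUTH);
      }
    }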



[20/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/stats_list_bucket.q.out
--
diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.out 
b/ql/src/test/results/clientpositive/stats_list_bucket.q.out
new file mode 100644
index 000..c34c414
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_list_bucket.q.out
@@ -0,0 +1,189 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+drop table stats_list_bucket
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+drop table stats_list_bucket
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table stats_list_bucket_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table stats_list_bucket_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_list_bucket (
+  c1 string,
+  c2 string
+) partitioned by (ds string, hr string)
+skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_list_bucket
+POSTHOOK: query: create table stats_list_bucket (
+  c1 string,
+  c2 string
+) partitioned by (ds string, hr string)
+skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_list_bucket
+PREHOOK: query: -- Try partitioned table with list bucketing.
+-- The stats should show 500 rows loaded, as many rows as the src table has.
+
+insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = 
'11')
+  select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
+POSTHOOK: query: -- Try partitioned table with list bucketing.
+-- The stats should show 500 rows loaded, as many rows as the src table has.
+
+insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = 
'11')
+  select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c1 SIMPLE 
[(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c2 SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted stats_list_bucket partition (ds = '2008-04-08', 
 hr = '11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_list_bucket
+POSTHOOK: query: desc formatted stats_list_bucket partition (ds = 
'2008-04-08',  hr = '11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_list_bucket
+# col_name data_type   comment 
+
+c1 string  
+c2 string  
+
+# Partition Information 
+# col_name data_type   comment 
+
+ds string  
+hr string  
+
+# Detailed Partition Information
+Partition Value:   [2008-04-08, 11] 
+Database:  default  
+Table: stats_list_bucket
+ A masked pattern was here 
+Partition Parameters:   
+   COLUMN_STATS_ACCURATE   {\"BASIC_STATS\":\"true\"}
+   numFiles4   
+   numRows 500 
+   rawDataSize 4812
+   totalSize   5522
+ A masked pattern was here 
+
+# Storage Information   
+SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 
+InputFormat:   org.apache.hadoop.hive.ql.io.RCFileInputFormat   
+OutputFormat:  org.apache.hadoop.hive.ql.io.RCFileOutputFormat  
+Compressed:No   
+Num Buckets:   -1   
+Bucket Columns:[]   
+Sort Columns:  []   
+Stored As SubDirectories:  Yes  
+Skewed Columns:[c1, c2] 
+Skewed Values: [[466, val_466], [287, val_287], [82, val_82]]   
+ A masked pattern was here 
+Skewed Value to Truncated Path:{[466, 
val_466]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=466/c2=val_466, [82, 
val_82]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=82/c2=val_82, [287, 
val_287]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=287/c2=val_287}  
+Storage Desc Params:

[04/48] hive git commit: HIVE-13513: cleardanglingscratchdir does not work in some version of HDFS (Daniel Dai, reviewed by Thejas Nair)

2016-05-27 Thread spena
HIVE-13513: cleardanglingscratchdir does not work in some version of HDFS 
(Daniel Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/13428845
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/13428845
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/13428845

Branch: refs/heads/java8
Commit: 1342884528453ea710e5a61d67762e35a2be129b
Parents: ae6ad6d
Author: Daniel Dai 
Authored: Wed May 25 15:23:57 2016 -0700
Committer: Daniel Dai 
Committed: Wed May 25 15:23:57 2016 -0700

--
 .../hadoop/hive/ql/session/ClearDanglingScratchDir.java | 6 ++
 .../org/apache/hadoop/hive/ql/session/SessionState.java | 9 ++---
 2 files changed, 12 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/13428845/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java 
b/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java
index ee012c2..725f954 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hive.common.LogUtils;
+import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
@@ -52,6 +54,10 @@ import org.apache.hadoop.ipc.RemoteException;
 public class ClearDanglingScratchDir {
 
   public static void main(String[] args) throws Exception {
+try {
+  LogUtils.initHiveLog4j();
+} catch (LogInitializationException e) {
+}
 Options opts = createOptions();
 CommandLine cli = new GnuParser().parse(opts, args);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/13428845/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java 
b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 37ef165..ce43f7d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -109,6 +109,7 @@ public class SessionState {
   private static final String HDFS_SESSION_PATH_KEY = 
"_hive.hdfs.session.path";
   private static final String TMP_TABLE_SPACE_KEY = "_hive.tmp_table_space";
   static final String LOCK_FILE_NAME = "inuse.lck";
+  static final String INFO_FILE_NAME = "inuse.info";
 
  private final Map<String, Map<String, Table>> tempTables = new HashMap<String, Map<String, Table>>();
  private final Map<String, Map<String, ColumnStatisticsObj>> tempTableColStats =
@@ -643,10 +644,12 @@ public class SessionState {
 // 5. hold a lock file in HDFS session dir to indicate the it is in use
 if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK)) {
   FileSystem fs = hdfsSessionPath.getFileSystem(conf);
+  FSDataOutputStream hdfsSessionPathInfoFile = fs.create(new 
Path(hdfsSessionPath, INFO_FILE_NAME),
+  true);
+  hdfsSessionPathInfoFile.writeUTF("process: " + 
ManagementFactory.getRuntimeMXBean().getName()
+  +"\n");
+  hdfsSessionPathInfoFile.close();
   hdfsSessionPathLockFile = fs.create(new Path(hdfsSessionPath, 
LOCK_FILE_NAME), true);
-  hdfsSessionPathLockFile.writeUTF("hostname: " + 
InetAddress.getLocalHost().getHostName() + "\n");
-  hdfsSessionPathLockFile.writeUTF("process: " + 
ManagementFactory.getRuntimeMXBean().getName() + "\n");
-  hdfsSessionPathLockFile.hsync();
 }
 // 6. Local session path
 localSessionPath = new Path(HiveConf.getVar(conf, 
HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId);
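
For reference, a minimal, self-contained sketch (not part of this commit) of the marker-file pattern the SessionState hunk above introduces: write an inuse.info file holding the owning process name and close it immediately, then create the inuse.lck lock file whose open stream marks the scratch directory as in use. Class and method names are illustrative; the Hadoop FileSystem calls are the same ones used in the hunk.

    import java.lang.management.ManagementFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class ScratchDirMarkerSketch {
      // Writes the process-info file, then creates the lock file.
      static FSDataOutputStream markInUse(Configuration conf, Path sessionDir) throws Exception {
        FileSystem fs = sessionDir.getFileSystem(conf);
        FSDataOutputStream info = fs.create(new Path(sessionDir, "inuse.info"), true);
        info.writeUTF("process: " + ManagementFactory.getRuntimeMXBean().getName() + "\n");
        info.close();                                              // info file is closed right away
        return fs.create(new Path(sessionDir, "inuse.lck"), true); // caller keeps this stream open
      }
    }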



[33/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
new file mode 100644
index 000..5f0406a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
@@ -0,0 +1,811 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 00_0
+--  5263 01_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 00_0
+-- 99 01_0
+-- after merge
+-- 142 00_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 00_0
+-- 87 01_0
+-- after merge
+-- 118 01_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
+stored as DIRECTORIES
+STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 00_0
+--  5263 01_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 00_0
+-- 99 01_0
+-- after merge
+-- 142 00_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 00_0
+-- 87 01_0
+-- after merge
+-- 118 01_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
+stored as DIRECTORIES
+STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: srcpart
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+  File Output Operator
+compressed: false
+GlobalTableId: 1
+ A masked pattern was here 
+NumFilesPerFileSink: 1
+Static Partition Specification: ds=2008-04-08/hr=11/
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+ A masked pattern was here 
+table:
+input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+properties:
+  bucket_count -1
+  columns key,value
+  columns.comments 
+  columns.types string:string
+ A masked pattern was here 
+  name default.list_bucketing_static_part
+  partition_columns ds/hr
+  partition_columns.types string:string
+  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+name: default.list_bucketing_static_part
+TotalFiles: 1
+GatherStats: true
+MultiFileSpray: false
+  Path -> Alias:
+ A masked pattern was here 

[31/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
deleted file mode 100644
index 1960d41..000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
+++ /dev/null
@@ -1,1119 +0,0 @@
-PREHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed 
columns. merge.
--- The following explains merge example used in this test case
--- DML will generated 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has more files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 00_0
--- 155 01_0
--- with merge
--- 254 00_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 00_0
--- 99 01_0
--- with merge
--- 142 01_0
--- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 00_0
--- 5181 01_0
--- with merge
--- 5181 00_0
--- 5181 01_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 00_0
--- 87 01_0
--- with merge
--- 118 02_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed 
columns. merge.
--- The following explains merge example used in this test case
--- DML will generated 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has more files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 00_0
--- 155 01_0
--- with merge
--- 254 00_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 00_0
--- 99 01_0
--- with merge
--- 142 01_0
--- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 00_0
--- 5181 01_0
--- with merge
--- 5181 00_0
--- 5181 01_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 00_0
--- 87 01_0
--- with merge
--- 118 02_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-srcpart
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   list_bucketing_dynamic_part
-TOK_PARTSPEC
-   TOK_PARTVAL
-  ds
-  '2008-04-08'
-   TOK_PARTVAL
-  hr
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
- TOK_SELEXPR
-TOK_FUNCTION
- 

[44/48] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/4ca8a63b/ql/src/test/results/clientpositive/spark/join34.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/join34.q.out 
b/ql/src/test/results/clientpositive/spark/join34.q.out
index 235d36a..ebd9c89 100644
--- a/ql/src/test/results/clientpositive/spark/join34.q.out
+++ b/ql/src/test/results/clientpositive/spark/join34.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -98,7 +98,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -150,7 +150,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -170,7 +170,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -222,7 +222,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'
@@ -242,7 +242,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/4ca8a63b/ql/src/test/results/clientpositive/spark/join35.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/join35.q.out 
b/ql/src/test/results/clientpositive/spark/join35.q.out
index 7b873c6..d14dadf 100644
--- a/ql/src/test/results/clientpositive/spark/join35.q.out
+++ b/ql/src/test/results/clientpositive/spark/join35.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 

[07/48] hive git commit: HIVE-13821: OrcSplit groups all delta files together into a single split (Prasanth Jayachandran reviewed by Eugene Koifman)

2016-05-27 Thread spena
HIVE-13821: OrcSplit groups all delta files together into a single split 
(Prasanth Jayachandran reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/76961d1f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/76961d1f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/76961d1f

Branch: refs/heads/java8
Commit: 76961d1f67a5d5e3614d3d81c417684fab92c6c2
Parents: 51609a0
Author: Prasanth Jayachandran 
Authored: Wed May 25 18:22:34 2016 -0700
Committer: Prasanth Jayachandran 
Committed: Wed May 25 18:22:34 2016 -0700

--
 .../ql/exec/tez/ColumnarSplitSizeEstimator.java |  6 +++--
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 23 
 2 files changed, 27 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/76961d1f/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
index dfc778a..ecd4ddc 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ColumnarSplitSizeEstimator.java
@@ -42,7 +42,6 @@ public class ColumnarSplitSizeEstimator implements 
SplitSizeEstimator {
   if (isDebugEnabled) {
 LOG.debug("Estimated column projection size: " + colProjSize);
   }
-  return colProjSize;
 } else if (inputSplit instanceof HiveInputFormat.HiveInputSplit) {
   InputSplit innerSplit = ((HiveInputFormat.HiveInputSplit) 
inputSplit).getInputSplit();
 
@@ -51,9 +50,12 @@ public class ColumnarSplitSizeEstimator implements 
SplitSizeEstimator {
 if (isDebugEnabled) {
   LOG.debug("Estimated column projection size: " + colProjSize);
 }
-return colProjSize;
   }
 }
+if (colProjSize <= 0) {
+  /* columnar splits of unknown size - estimate worst-case */
+  return Integer.MAX_VALUE;
+}
 return colProjSize;
   }
 }
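
In short, the hunk above makes the estimator fall back to a worst-case size whenever the columnar projection size comes back non-positive (unknown), rather than returning that unusable value; the test added further down in this patch asserts Integer.MAX_VALUE for the ACID delta splits. A minimal, illustrative sketch of the guard, detached from the Hive interfaces (the class name here is made up):

    final class WorstCaseSizeSketch {
      // When the real estimate is unknown (<= 0), assume the worst case so the
      // split is not grouped with others on the basis of a bogus small size.
      static long estimatedSize(long colProjSize) {
        return colProjSize <= 0 ? Integer.MAX_VALUE : colProjSize;
      }
    }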

http://git-wip-us.apache.org/repos/asf/hive/blob/76961d1f/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
--
diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java 
b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 4eb0249..c1ef0e7 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -56,6 +56,7 @@ import 
org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapper;
+import org.apache.hadoop.hive.ql.exec.tez.ColumnarSplitSizeEstimator;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
@@ -556,6 +557,28 @@ public class TestInputOutputFormat {
   }
 
   @Test
+  public void testACIDSplitStrategy() throws Exception {
+conf.set("bucket_count", "2");
+OrcInputFormat.Context context = new OrcInputFormat.Context(conf);
+MockFileSystem fs = new MockFileSystem(conf,
+new MockFile("mock:/a/delta_000_001/part-00", 1000, new byte[1], new 
MockBlock("host1")),
+new MockFile("mock:/a/delta_000_001/part-01", 1000, new byte[1], new 
MockBlock("host1")),
+new MockFile("mock:/a/delta_001_002/part-02", 1000, new byte[1], new 
MockBlock("host1")),
+new MockFile("mock:/a/delta_001_002/part-03", 1000, new byte[1], new 
MockBlock("host1")));
+OrcInputFormat.FileGenerator gen =
+new OrcInputFormat.FileGenerator(context, fs,
+new MockPath(fs, "mock:/a"), false, null);
+OrcInputFormat.SplitStrategy splitStrategy = createSplitStrategy(context, 
gen);
+assertEquals(true, splitStrategy instanceof 
OrcInputFormat.ACIDSplitStrategy);
+List<OrcSplit> splits = splitStrategy.getSplits();
+ColumnarSplitSizeEstimator splitSizeEstimator = new 
ColumnarSplitSizeEstimator();
+for (OrcSplit split: splits) {
+  assertEquals(Integer.MAX_VALUE, 
splitSizeEstimator.getEstimatedSize(split));
+}
+assertEquals(2, splits.size());
+  }
+
+  @Test
   public void testBIStrategySplitBlockBoundary() throws Exception {
 conf.set(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "BI");
 OrcInputFormat.Context context = new 

[27/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
new file mode 100644
index 000..81f3af3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
@@ -0,0 +1,811 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 00_0
+--  5263 01_0
+-- ds=2008-04-08/hr=11/key=103:
+-- 99 00_0
+-- 99 01_0
+-- after merge
+-- 142 00_0
+-- ds=2008-04-08/hr=11/key=484:
+-- 87 00_0
+-- 87 01_0
+-- after merge
+-- 118 01_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key) on ('484','103')
+stored as DIRECTORIES
+STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 00_0
+--  5263 01_0
+-- ds=2008-04-08/hr=11/key=103:
+-- 99 00_0
+-- 99 01_0
+-- after merge
+-- 142 00_0
+-- ds=2008-04-08/hr=11/key=484:
+-- 87 00_0
+-- 87 01_0
+-- after merge
+-- 118 01_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key) on ('484','103')
+stored as DIRECTORIES
+STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+Map Reduce
+  Map Operator Tree:
+  TableScan
+alias: srcpart
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE 
Column stats: NONE
+GatherStats: false
+Select Operator
+  expressions: key (type: string), value (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+  File Output Operator
+compressed: false
+GlobalTableId: 1
+ A masked pattern was here 
+NumFilesPerFileSink: 1
+Static Partition Specification: ds=2008-04-08/hr=11/
+Statistics: Num rows: 1000 Data size: 10624 Basic stats: 
COMPLETE Column stats: NONE
+ A masked pattern was here 
+table:
+input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+properties:
+  bucket_count -1
+  columns key,value
+  columns.comments 
+  columns.types string:string
+ A masked pattern was here 
+  name default.list_bucketing_static_part
+  partition_columns ds/hr
+  partition_columns.types string:string
+  serialization.ddl struct list_bucketing_static_part { 
string key, string value}
+  serialization.format 1
+  serialization.lib 
org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+ A masked pattern was here 
+serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+name: default.list_bucketing_static_part
+TotalFiles: 1
+GatherStats: true
+MultiFileSpray: false
+  Path -> Alias:
+ A masked pattern was here 
+  Path -> Partition:
+ A masked pattern was here 
+  Partition
+base file name: hr=11
+input format: 

[09/48] hive git commit: HIVE-13269: Simplify comparison expressions using column stats (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2016-05-27 Thread spena
HIVE-13269: Simplify comparison expressions using column stats (Jesus Camacho 
Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/76130a9d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/76130a9d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/76130a9d

Branch: refs/heads/java8
Commit: 76130a9d54d773619d3c525789d2e4ae590bfe4f
Parents: ba07055
Author: Jesus Camacho Rodriguez 
Authored: Thu May 26 10:07:37 2016 +0100
Committer: Jesus Camacho Rodriguez 
Committed: Thu May 26 10:07:37 2016 +0100

--
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   3 +
 data/conf/perf-reg/hive-site.xml|   5 +
 .../hive/ql/optimizer/calcite/HiveRexUtil.java  |  24 +
 .../HiveReduceExpressionsWithStatsRule.java | 330 ++
 .../hadoop/hive/ql/parse/CalcitePlanner.java|   4 +
 .../queries/clientpositive/remove_exprs_stats.q |  55 ++
 .../clientpositive/remove_exprs_stats.q.out | 610 +++
 7 files changed, 1031 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/76130a9d/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b1f37ff..6a404bd 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1485,6 +1485,9 @@ public class HiveConf extends Configuration {
 "When hive.optimize.limittranspose is true, this variable specifies 
the minimal reduction in the\n" +
 "number of tuples of the outer input of the join or the input of the 
union that you should get in order to apply the rule."),
 
+HIVE_OPTIMIZE_REDUCE_WITH_STATS("hive.optimize.filter.stats.reduction", 
false, "Whether to simplify comparison\n" +
+"expressions in filter operators using column stats"),
+
 HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", 
false,
 "Whether to create a separate plan for skewed keys for the tables in 
the join.\n" +
 "This is based on the skewed keys stored in the metadata. At compile 
time, the plan is broken\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/76130a9d/data/conf/perf-reg/hive-site.xml
--
diff --git a/data/conf/perf-reg/hive-site.xml b/data/conf/perf-reg/hive-site.xml
index 9e929fc..012369f 100644
--- a/data/conf/perf-reg/hive-site.xml
+++ b/data/conf/perf-reg/hive-site.xml
@@ -277,4 +277,9 @@
   true
 
 
+
+  hive.optimize.filter.stats.reduction
+  true
+
+
 

http://git-wip-us.apache.org/repos/asf/hive/blob/76130a9d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
--
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
index 6933fec..a5dcffb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
@@ -420,6 +420,12 @@ public class HiveRexUtil {
 
   private static RexCall invert(RexBuilder rexBuilder, RexCall call) {
 switch (call.getKind()) {
+  case EQUALS:
+return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
+Lists.reverse(call.getOperands()));
+  case NOT_EQUALS:
+return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.NOT_EQUALS,
+Lists.reverse(call.getOperands()));
   case LESS_THAN:
 return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN,
 Lists.reverse(call.getOperands()));
@@ -469,6 +475,24 @@ public class HiveRexUtil {
 }
   }
 
+  public static SqlKind invert(SqlKind kind) {
+switch (kind) {
+  case EQUALS:
+return SqlKind.EQUALS;
+  case NOT_EQUALS:
+return SqlKind.NOT_EQUALS;
+  case LESS_THAN:
+return SqlKind.GREATER_THAN;
+  case GREATER_THAN:
+return SqlKind.LESS_THAN;
+  case LESS_THAN_OR_EQUAL:
+return SqlKind.GREATER_THAN_OR_EQUAL;
+  case GREATER_THAN_OR_EQUAL:
+return SqlKind.LESS_THAN_OR_EQUAL;
+}
+return null;
+  }
+
   public static class ExprSimplifier extends RexShuttle {
 private final RexBuilder rexBuilder;
 private final boolean unknownAsFalse;
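
HIVE-13269 (above) uses column statistics to simplify comparison predicates at planning time: the new HiveReduceExpressionsWithStatsRule folds a comparison to a constant when the column's min/max range already decides it. A minimal, illustrative sketch of that idea, independent of the Calcite types used in the rule (all names below are made up):

    final class StatsFoldSketch {
      enum Fold { ALWAYS_TRUE, ALWAYS_FALSE, KEEP }

      // Fold "col < constant" given the column's min/max statistics.
      static Fold foldLessThan(long min, long max, long constant) {
        if (max < constant) {
          return Fold.ALWAYS_TRUE;   // every value in [min, max] satisfies the predicate
        }
        if (min >= constant) {
          return Fold.ALWAYS_FALSE;  // no value in [min, max] can satisfy it
        }
        return Fold.KEEP;            // stats are inconclusive; keep the filter as-is
      }
    }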


[24/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
--
diff --git 
a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out 
b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
deleted file mode 100644
index 12f41eb..000
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
+++ /dev/null
@@ -1,280 +0,0 @@
-PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key) on ('484','51','103')
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 00_0
---  5263 01_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 00_0
--- 99 01_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 00_0
--- 87 01_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key) on ('484','51','103')
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-src
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   list_bucketing_static_part
-TOK_PARTSPEC
-   TOK_PARTVAL
-  ds
-  '2008-04-08'
-   TOK_PARTVAL
-  hr
-  '11'
-  TOK_SELECT
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   key
- TOK_SELEXPR
-TOK_TABLE_OR_COL
-   value
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-Spark
- A masked pattern was here 
-  Vertices:
-Map 1 
-Map Operator Tree:
-TableScan
-  alias: src
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-  GatherStats: false
-  Select Operator
-expressions: key (type: string), value (type: string)
-outputColumnNames: _col0, _col1
-Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
-File Output Operator
-  compressed: false
-  GlobalTableId: 1
- A masked pattern was here 
-  NumFilesPerFileSink: 1
-  Static Partition Specification: ds=2008-04-08/hr=11/
-  Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE
- A masked pattern was here 
-  table:
-  input format: 
org.apache.hadoop.hive.ql.io.RCFileInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-  properties:
-bucket_count -1
-columns key,value
-columns.comments 
-columns.types string:string
- A masked pattern was here 
-name default.list_bucketing_static_part
-partition_columns ds/hr
-   

[13/48] hive git commit: HIVE-13561: HiveServer2 is leaking ClassLoaders when add jar / temporary functions are used (Trystan Leftwich reviewed by Vaibhav Gumashta)

2016-05-27 Thread spena
HIVE-13561: HiveServer2 is leaking ClassLoaders when add jar / temporary 
functions are used (Trystan Leftwich reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/68a42108
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/68a42108
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/68a42108

Branch: refs/heads/java8
Commit: 68a4210808ecf965d9d8bb4c934cb548c334fe72
Parents: b420e1d
Author: Vaibhav Gumashta 
Authored: Fri May 27 00:23:25 2016 -0700
Committer: Vaibhav Gumashta 
Committed: Fri May 27 00:23:25 2016 -0700

--
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   | 145 +++
 .../apache/hadoop/hive/ql/exec/Registry.java|  10 +-
 2 files changed, 150 insertions(+), 5 deletions(-)
--
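The new test below exercises Hive's ReflectionUtil constructor cache around ADD JAR and session close. As a rough illustration of the leak being guarded against (a hedged sketch, not the actual ReflectionUtil or Registry code): a cache keyed by Class keeps a strong reference to each class, and therefore to the ClassLoader that loaded it, so entries created for classes from an ADD JAR loader must be invalidated, or held weakly, once the session goes away.

import java.lang.reflect.Constructor;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class ConstructorCacheSketch {
  // weakKeys() lets an entry (and the ClassLoader behind its key) be collected
  // once nothing else references the class, which is the leak-avoidance idea.
  private static final Cache<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE =
      CacheBuilder.newBuilder().weakKeys().build();

  static <T> T newInstance(Class<T> clazz) throws Exception {
    Constructor<?> ctor = CONSTRUCTOR_CACHE.getIfPresent(clazz);
    if (ctor == null) {
      ctor = clazz.getDeclaredConstructor();
      ctor.setAccessible(true);
      CONSTRUCTOR_CACHE.put(clazz, ctor);
    }
    return clazz.cast(ctor.newInstance());
  }
}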


http://git-wip-us.apache.org/repos/asf/hive/blob/68a42108/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
--
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 4aa98ca..a01daa4 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -24,7 +24,9 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.lang.reflect.Constructor;
 import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -46,6 +48,8 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -54,6 +58,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hive.common.util.ReflectionUtil;
 import org.apache.hive.jdbc.miniHS2.MiniHS2;
 import org.datanucleus.ClassLoaderResolver;
 import org.datanucleus.NucleusContext;
@@ -943,4 +948,144 @@ public class TestJdbcWithMiniHS2 {
 }
 return -1;
   }
+
+  /**
+   * Tests that ADD JAR uses Hive's ReflectionUtil.CONSTRUCTOR_CACHE.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testAddJarConstructorUnCaching() throws Exception {
+// This test assumes the hive-contrib JAR has been built as part of the Hive build.
+// Also dependent on the UDFExampleAdd class within that JAR.
+setReflectionUtilCache();
+String udfClassName = "org.apache.hadoop.hive.contrib.udf.example.UDFExampleAdd";
+String mvnRepo = System.getProperty("maven.local.repository");
+String hiveVersion = System.getProperty("hive.version");
+String jarFileName = "hive-contrib-" + hiveVersion + ".jar";
+String[] pathParts = {
+"org", "apache", "hive",
+"hive-contrib", hiveVersion, jarFileName
+};
+
+// Create path to hive-contrib JAR on local filesystem
+Path jarFilePath = new Path(mvnRepo);
+for (String pathPart : pathParts) {
+  jarFilePath = new Path(jarFilePath, pathPart);
+}
+
+Connection conn = getConnection(miniHS2.getJdbcURL(), "foo", "bar");
+String tableName = "testAddJar";
+Statement stmt = conn.createStatement();
+stmt.execute("SET hive.support.concurrency = false");
+// Create table
+stmt.execute("DROP TABLE IF EXISTS " + tableName);
+stmt.execute("CREATE TABLE " + tableName + " (key INT, value STRING)");
+// Load data
+stmt.execute("LOAD DATA LOCAL INPATH '" + kvDataFilePath.toString() + "' 
INTO TABLE "
++ tableName);
+ResultSet res = stmt.executeQuery("SELECT * FROM " + tableName);
+// Ensure table is populated
+assertTrue(res.next());
+
+long cacheBeforeAddJar, cacheAfterAddJar, cacheAfterClose;
+// Force the cache clear so we know it's empty
+invalidateReflectionUtlCache();
+cacheBeforeAddJar = getReflectionUtilCacheSize();
+System.out.println("CONSTRUCTOR_CACHE size before add jar: " + 
cacheBeforeAddJar);
+System.out.println("CONSTRUCTOR_CACHE as map before add jar:" + 
getReflectionUtilCache().asMap());
+Assert.assertTrue("FAILED: CONSTRUCTOR_CACHE size before add jar: " + 
cacheBeforeAddJar,
+cacheBeforeAddJar == 0);
+
+// Add the jar file
+stmt.execute("ADD 

[45/48] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/4ca8a63b/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
--
diff --git a/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out b/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
index 9e9e61f..4352914 100644
--- a/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
+++ b/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
@@ -70,7 +70,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 11
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -116,7 +116,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 12
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -259,7 +259,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 11
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -305,7 +305,7 @@ STAGE PLANS:
   ds 2008-04-08
   hr 12
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -351,7 +351,7 @@ STAGE PLANS:
   ds 2008-04-09
   hr 11
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -397,7 +397,7 @@ STAGE PLANS:
   ds 2008-04-09
   hr 12
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
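All of these golden-file updates are the same mechanical change: the COLUMN_STATS_ACCURATE value is a small JSON map, and its key order previously followed HashMap iteration order, which differs between JDK7 and JDK8. A hedged, generic sketch of the underlying issue and the usual remedy (not the actual Hive serialization code):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;

public class StableKeyOrder {
  public static void main(String[] args) {
    // Whatever order the flags happened to be inserted in...
    Map<String, String> flags = new LinkedHashMap<>();
    flags.put("COLUMN_STATS", "...");
    flags.put("BASIC_STATS", "true");

    // ...copying into a sorted map pins the key order, so the serialized
    // property text comes out the same on every JDK.
    Map<String, String> stable = new TreeMap<>(flags);
    System.out.println(stable.keySet());  // [BASIC_STATS, COLUMN_STATS]
  }
}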

http://git-wip-us.apache.org/repos/asf/hive/blob/4ca8a63b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
--
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
index e03c055..f3fd8f8 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
 input format: org.apache.hadoop.mapred.TextInputFormat
 output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 properties:
-  COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+  COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
   bucket_count -1
   columns key,value
   columns.comments 'default','default'
@@ -75,7 +75,7 @@ STAGE PLANS:
   input format: org.apache.hadoop.mapred.TextInputFormat
   output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
   properties:
-COLUMN_STATS_ACCURATE 
{"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE 
{"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
 bucket_count -1
 columns key,value
 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/4ca8a63b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
--
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out 

[02/48] hive git commit: HIVE-13720. Fix failing test - TestLlapTaskCommunicator. (Siddharth Seth, reviewed by Sergey Shelukhin)

2016-05-27 Thread spena
HIVE-13720. Fix failing test - TestLlapTaskCommunicator. (Siddharth Seth, 
reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b93ce782
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b93ce782
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b93ce782

Branch: refs/heads/java8
Commit: b93ce7825ef095bfcf1bd3474f110773c35ea9ab
Parents: d52131d
Author: Siddharth Seth 
Authored: Wed May 25 14:58:34 2016 -0700
Committer: Siddharth Seth 
Committed: Wed May 25 14:58:34 2016 -0700

--
 .../hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/b93ce782/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
--
diff --git a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
index 8e2d0ac..1901328 100644
--- a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
+++ b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java
@@ -36,6 +36,7 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.llap.LlapNodeId;
 import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos;
 import org.apache.hadoop.hive.llap.tez.LlapProtocolClientProxy;
@@ -273,11 +274,15 @@ public class TestLlapTaskCommunicator {
 final TezVertexID vertexId1 = TezVertexID.getInstance(dagid, 300);
 final TezVertexID vertexId2 = TezVertexID.getInstance(dagid, 301);
 final Configuration conf = new Configuration(false);
-final UserPayload userPayload = TezUtils.createUserPayloadFromConf(conf);
+final UserPayload userPayload;
 
 final LlapTaskCommunicatorForTest taskCommunicator;
 
public LlapTaskCommunicatorWrapperForTest(LlapProtocolClientProxy llapProxy) throws Exception {
+
+  HiveConf.setVar(conf, HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS, "fake-non-zk-cluster");
+  userPayload = TezUtils.createUserPayloadFromConf(conf);
+
   
doReturn(appAttemptId).when(taskCommunicatorContext).getApplicationAttemptId();
  doReturn(new Credentials()).when(taskCommunicatorContext).getAMCredentials();
  doReturn(userPayload).when(taskCommunicatorContext).getInitialUserPayload();
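The ordering is the point of the fix: the LLAP service-hosts value has to be placed in the Configuration before TezUtils serializes it into the UserPayload, otherwise the payload is built from an unconfigured conf. A minimal sketch of that ordering (the demo class is hypothetical; the API calls are the ones used in the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.tez.common.TezUtils;
import org.apache.tez.dag.api.UserPayload;

public class PayloadOrderingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    // Set the fake, non-ZooKeeper LLAP cluster name first...
    HiveConf.setVar(conf, HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS, "fake-non-zk-cluster");
    // ...and only then freeze the configuration into the Tez user payload.
    UserPayload payload = TezUtils.createUserPayloadFromConf(conf);
    System.out.println(payload != null);
  }
}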



[29/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
--
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out 
b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
deleted file mode 100644
index 9947c1a..000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
+++ /dev/null
@@ -1,712 +0,0 @@
-PREHOOK: query: -- list bucketing alter table ... concatenate: 
--- Use list bucketing DML to generate mutilple files in partitions by turning 
off merge
--- dynamic partition. multiple skewed columns. merge.
--- The following explains merge example used in this test case
--- DML will generated 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has more files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 00_0
--- 155 01_0
--- with merge
--- 254 00_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 00_0
--- 99 01_0
--- with merge
--- 142 01_0
--- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 00_0
--- 5181 01_0
--- with merge
--- 5181 00_0
--- 5181 01_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 00_0
--- 87 01_0
--- with merge
--- 118 02_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing alter table ... concatenate: 
--- Use list bucketing DML to generate mutilple files in partitions by turning 
off merge
--- dynamic partition. multiple skewed columns. merge.
--- The following explains merge example used in this test case
--- DML will generated 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has more files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- 
hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 00_0
--- 155 01_0
--- with merge
--- 254 00_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 00_0
--- 99 01_0
--- with merge
--- 142 01_0
--- 
hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 00_0
--- 5181 01_0
--- with merge
--- 5181 00_0
--- 5181 01_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 00_0
--- 87 01_0
--- with merge
--- 118 02_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
-stored as DIRECTORIES
-STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate 
a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to 
generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-  TOK_TABREF
- TOK_TABNAME
-srcpart
-   TOK_INSERT
-  TOK_DESTINATION
- TOK_TAB
-TOK_TABNAME
-   list_bucketing_dynamic_part
-TOK_PARTSPEC
-   TOK_PARTVAL
-  ds
-  '2008-04-08'
-   TOK_PARTVAL
-  hr
-  TOK_SELECT
- 

[16/48] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
http://git-wip-us.apache.org/repos/asf/hive/blob/9349b8e5/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
--
diff --git a/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out 
b/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
deleted file mode 100644
index 459d93b..000
--- a/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
+++ /dev/null
@@ -1,457 +0,0 @@
-PREHOOK: query: drop table varchar_udf_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table varchar_udf_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 
varchar(10), c4 varchar(20))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 
varchar(10), c4 varchar(20))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_udf_1
-PREHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@varchar_udf_1
-POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION 
[(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with varchar support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with varchar support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-238val_238 238val_238  true
-PREHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-VAL_238VAL_238 true
-PREHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-val_238val_238 true
-PREHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-118118 true
-PREHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-238|val_238238|val_238 true
-PREHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 
'US-ASCII')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
- A masked pattern was here 
-POSTHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 
'US-ASCII')
-from varchar_udf_1 limit 1

[47/48] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

2016-05-27 Thread spena
HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit 
Sabharwal, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4ca8a63b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4ca8a63b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4ca8a63b

Branch: refs/heads/java8
Commit: 4ca8a63b2eb3707bbc8529d8039be2ff358ca764
Parents: 9349b8e
Author: Mohit Sabharwal 
Authored: Tue May 24 09:30:32 2016 -0500
Committer: Sergio Pena 
Committed: Fri May 27 10:36:29 2016 -0500

--
 .../test/results/clientpositive/bucket1.q.out   |  4 +-
 .../test/results/clientpositive/bucket2.q.out   |  4 +-
 .../test/results/clientpositive/bucket3.q.out   |  4 +-
 .../test/results/clientpositive/bucket4.q.out   |  4 +-
 .../test/results/clientpositive/bucket5.q.out   |  4 +-
 .../results/clientpositive/bucket_many.q.out|  4 +-
 .../columnStatsUpdateForStatsOptimizer_1.q.out  |  8 +--
 .../columnStatsUpdateForStatsOptimizer_2.q.out  |  2 +-
 .../constantPropagateForSubQuery.q.out  |  8 +--
 ql/src/test/results/clientpositive/ctas.q.out   |  4 +-
 .../results/clientpositive/describe_table.q.out |  4 +-
 .../disable_merge_for_bucketing.q.out   |  4 +-
 .../extrapolate_part_stats_full.q.out   | 24 -
 .../extrapolate_part_stats_partial.q.out| 32 +--
 .../extrapolate_part_stats_partial_ndv.q.out| 16 +++---
 .../clientpositive/fouter_join_ppr.q.out| 40 +++---
 .../clientpositive/groupby_map_ppr.q.out|  4 +-
 .../groupby_map_ppr_multi_distinct.q.out|  4 +-
 .../results/clientpositive/groupby_ppr.q.out|  4 +-
 .../groupby_ppr_multi_distinct.q.out|  4 +-
 .../test/results/clientpositive/input23.q.out   |  2 +-
 .../test/results/clientpositive/input42.q.out   | 12 ++---
 .../results/clientpositive/input_part1.q.out|  2 +-
 .../results/clientpositive/input_part2.q.out|  4 +-
 .../results/clientpositive/input_part7.q.out|  4 +-
 .../results/clientpositive/input_part9.q.out|  4 +-
 ql/src/test/results/clientpositive/join17.q.out |  4 +-
 ql/src/test/results/clientpositive/join26.q.out |  2 +-
 ql/src/test/results/clientpositive/join32.q.out | 10 ++--
 .../clientpositive/join32_lessSize.q.out| 46 
 ql/src/test/results/clientpositive/join33.q.out | 10 ++--
 ql/src/test/results/clientpositive/join34.q.out |  8 +--
 ql/src/test/results/clientpositive/join35.q.out | 12 ++---
 ql/src/test/results/clientpositive/join9.q.out  |  6 +--
 .../results/clientpositive/join_map_ppr.q.out   |  4 +-
 .../clientpositive/list_bucket_dml_1.q.out  |  4 +-
 .../clientpositive/list_bucket_dml_14.q.out |  4 +-
 .../clientpositive/list_bucket_dml_3.q.out  |  4 +-
 .../clientpositive/list_bucket_dml_7.q.out  |  8 +--
 .../results/clientpositive/load_dyn_part8.q.out |  8 +--
 .../clientpositive/louter_join_ppr.q.out| 36 ++---
 .../clientpositive/mapjoin_mapjoin.q.out| 16 +++---
 .../offset_limit_global_optimizer.q.out | 52 +-
 .../clientpositive/optimize_nullscan.q.out  | 56 ++--
 .../partition_coltype_literals.q.out|  4 +-
 ql/src/test/results/clientpositive/pcr.q.out| 10 ++--
 ql/src/test/results/clientpositive/pcs.q.out| 38 ++---
 .../clientpositive/ppd_join_filter.q.out| 32 +--
 ql/src/test/results/clientpositive/ppd_vc.q.out | 20 +++
 .../clientpositive/ppr_allchildsarenull.q.out   | 12 ++---
 .../clientpositive/rand_partitionpruner1.q.out  |  4 +-
 .../clientpositive/rand_partitionpruner2.q.out  |  4 +-
 .../clientpositive/rand_partitionpruner3.q.out  |  4 +-
 .../clientpositive/reduce_deduplicate.q.out |  4 +-
 .../results/clientpositive/regexp_extract.q.out |  8 +--
 .../clientpositive/router_join_ppr.q.out| 36 ++---
 .../test/results/clientpositive/sample1.q.out   |  2 +-
 .../test/results/clientpositive/sample2.q.out   |  4 +-
 .../test/results/clientpositive/sample4.q.out   |  4 +-
 .../test/results/clientpositive/sample5.q.out   |  4 +-
 .../test/results/clientpositive/sample6.q.out   | 32 +--
 .../test/results/clientpositive/sample7.q.out   |  4 +-
 .../test/results/clientpositive/sample8.q.out   |  8 +--
 .../test/results/clientpositive/sample9.q.out   |  4 +-
 .../clientpositive/schema_evol_stats.q.out  |  8 +--
 .../clientpositive/serde_user_properties.q.out  | 12 ++---
 .../results/clientpositive/spark/bucket2.q.out  |  4 +-
 .../results/clientpositive/spark/bucket3.q.out  |  4 +-
 .../results/clientpositive/spark/bucket4.q.out  |  4 +-
 .../results/clientpositive/spark/ctas.q.out |  4 +-
 .../spark/disable_merge_for_bucketing.q.out |  4 +-
 .../clientpositive/spark/groupby_map_ppr.q.out  |  4 +-
 
