[1/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
Repository: hive
Updated Branches:
  refs/heads/master 1a1e8357b -> 23ac04d3b
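
HIVE-16293 teaches the column pruner to keep pruning when a SELECT operator feeds more than one child, the shape that multi-insert queries produce when several INSERT branches share one parent operator. A minimal sketch of that shape (hypothetical, not the regression test added by the patch):

-- Both INSERT branches hang off the same SELECT over src, so the pruner
-- sees a SEL with two children. dest1 and dest2 are assumed here to be
-- plain text tables with string columns.
FROM src
INSERT OVERWRITE TABLE dest1
  SELECT key, count(DISTINCT value) GROUP BY key
INSERT OVERWRITE TABLE dest2
  SELECT key, value, count(DISTINCT value) GROUP BY key, value;

The golden-file churn in the diffs that follow comes from the same fix: the plans now show an explicit cast Select Operator (UDFToString, UDFToInteger) feeding each File Sink where the old plans wrote the untyped columns directly, and lineage for those columns flips from SIMPLE to EXPRESSION.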


http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/spark/union17.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/union17.q.out b/ql/src/test/results/clientpositive/spark/union17.q.out
index 9063e64..bcb95e4 100644
--- a/ql/src/test/results/clientpositive/spark/union17.q.out
+++ b/ql/src/test/results/clientpositive/spark/union17.q.out
@@ -109,14 +109,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest1
+Select Operator
+  expressions: _col0 (type: string), UDFToString(_col1) (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 1 Data size: 272 Basic stats: COMPLETE Column stats: PARTIAL
+  File Output Operator
+    compressed: false
+    Statistics: Num rows: 1 Data size: 272 Basic stats: COMPLETE Column stats: PARTIAL
+    table:
+        input format: org.apache.hadoop.mapred.TextInputFormat
+        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        name: default.dest1
 Reducer 4 
 Reduce Operator Tree:
   Group By Operator
@@ -125,14 +129,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest2
+Select Operator
+  expressions: _col0 (type: string), _col1 (type: string), UDFToString(_col2) (type: string)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 1 Data size: 456 Basic stats: COMPLETE Column stats: PARTIAL
+  File Output Operator
+    compressed: false
+    Statistics: Num rows: 1 Data size: 456 Basic stats: COMPLETE Column stats: PARTIAL
+    table:
+        input format: org.apache.hadoop.mapred.TextInputFormat
+        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        name: default.dest2
 Reducer 8 
 Reduce Operator Tree:
   Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/spark/union19.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/union19.q.out b/ql/src/test/results/clientpositive/spark/union19.q.out
index f7168fe..31795b2 100644
--- a/ql/src/test/results/clientpositive/spark/union19.q.out
+++ b/ql/src/test/results/clientpositive/spark/union19.q.out
@@ -135,14 +135,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest1
+
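
In union17.q both aggregations write bigint counts into string destination columns; after the fix the plan interposes the UDFToString select seen above, and the PARTIAL row-size estimates change accordingly (96 -> 272 and 280 -> 456). Roughly the query behind these plans (a paraphrase from memory of union17.q, not verbatim):

-- dest1(key string, value string) and dest2(key string, val1 string,
-- val2 string): the bigint counts need UDFToString before the text sink.
FROM (
  SELECT 'tst1' AS key, cast(count(1) AS string) AS value FROM src s1
  UNION ALL
  SELECT s2.key, s2.value FROM src s2
) unionsrc
INSERT OVERWRITE TABLE dest1
  SELECT unionsrc.key, count(DISTINCT substr(unionsrc.value, 5))
  GROUP BY unionsrc.key
INSERT OVERWRITE TABLE dest2
  SELECT unionsrc.key, unionsrc.value, count(DISTINCT substr(unionsrc.value, 5))
  GROUP BY unionsrc.key, unionsrc.value;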

[3/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
index e8a3610..2745958 100644
--- a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
@@ -72,7 +72,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col5
 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: string), _col5 (type: double)
+  expressions: _col0 (type: string), UDFToString(_col5) (type: string)
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -94,7 +94,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col5
   Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: _col0 (type: string), _col5 (type: double)
+expressions: _col0 (type: string), UDFToString(_col5) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -115,7 +115,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col5
 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: string), _col5 (type: double)
+  expressions: _col0 (type: string), UDFToString(_col5) (type: string)
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -137,7 +137,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col5
   Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: _col0 (type: string), _col5 (type: double)
+expressions: _col0 (type: string), UDFToString(_col5) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -383,14 +383,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
-File Output Operator
-  compressed: false
+Select Operator
+  expressions: _col0 (type: string), UDFToString(_col1) (type: string)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.src_lv1
+  File Output Operator
+    compressed: false
+    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+    table:
+        input format: org.apache.hadoop.mapred.TextInputFormat
+        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        name: default.src_lv1
 Reducer 3 
 Execution mode: llap
 Reduce Operator Tree:
@@ -400,14 +404,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
-File Output Operator
-  compressed: false
+Select Operator
+  expressions: _col0 (type: 
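
The multi_insert_lateral_view changes are the same pattern under a lateral view: each INSERT branch explodes an array and aggregates it, the sum comes out as a double, and the fixed plans cast it to string for the text-table sink. A hedged paraphrase of the query shape (not the verbatim test; src_lv1/src_lv2 are assumed to be two-column string tables):

FROM src_10
INSERT OVERWRITE TABLE src_lv1
  -- sum(C) is double, so the fixed plan wraps it in UDFToString
  SELECT key, sum(C) LATERAL VIEW explode(array(key + 1, key + 2)) A AS C GROUP BY key
INSERT OVERWRITE TABLE src_lv2
  SELECT key, sum(C) LATERAL VIEW explode(array(key + 3, key + 4)) A AS C GROUP BY key;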

[5/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/23ac04d3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/23ac04d3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/23ac04d3

Branch: refs/heads/master
Commit: 23ac04d3b511cfc2239b39a30dcc72a173e3ee99
Parents: 1a1e835
Author: Pengcheng Xiong 
Authored: Tue Apr 4 14:39:44 2017 -0700
Committer: Pengcheng Xiong 
Committed: Tue Apr 4 14:39:53 2017 -0700

--
 .../hadoop/hive/ql/optimizer/ColumnPruner.java  |  13 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   8 +-
 .../column_pruner_multiple_children.q   |  19 ++
 .../clientnegative/udf_assert_true.q.out|  72 +++--
 .../clientpositive/add_part_multiple.q.out  |  16 +-
 .../alter_partition_coltype.q.out   |   2 +-
 .../clientpositive/annotate_stats_select.q.out  |   4 +-
 .../clientpositive/autoColumnStats_7.q.out  |   2 +-
 .../clientpositive/autoColumnStats_8.q.out  |   4 +-
 .../results/clientpositive/ba_table_udfs.q.out  |   2 +-
 .../clientpositive/bucket_map_join_spark1.q.out |   8 +-
 .../clientpositive/bucket_map_join_spark2.q.out |   8 +-
 .../clientpositive/bucket_map_join_spark3.q.out |   8 +-
 .../results/clientpositive/bucketmapjoin5.q.out |  12 +-
 .../clientpositive/bucketmapjoin_negative.q.out |   2 +-
 .../bucketmapjoin_negative2.q.out   |   2 +-
 .../bucketsortoptimize_insert_3.q.out   |   4 +-
 ql/src/test/results/clientpositive/cast1.q.out  |   6 +-
 ql/src/test/results/clientpositive/char_1.q.out |   8 +-
 .../column_pruner_multiple_children.q.out   | 189 
 .../clientpositive/dynamic_rdd_cache.q.out  |   4 +-
 .../clientpositive/filter_join_breaktask2.q.out |  46 +--
 .../test/results/clientpositive/fold_case.q.out |   6 +-
 .../test/results/clientpositive/groupby12.q.out |   2 +-
 .../test/results/clientpositive/groupby5.q.out  |   2 +-
 .../clientpositive/groupby5_noskew.q.out|   2 +-
 .../results/clientpositive/groupby7_map.q.out   |   4 +-
 .../groupby7_map_multi_single_reducer.q.out |   4 +-
 .../clientpositive/groupby7_map_skew.q.out  |   4 +-
 .../clientpositive/groupby7_noskew.q.out|   4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |   4 +-
 .../test/results/clientpositive/groupby8.q.out  |   8 +-
 .../results/clientpositive/groupby8_map.q.out   |   4 +-
 .../clientpositive/groupby8_map_skew.q.out  |   4 +-
 .../clientpositive/groupby8_noskew.q.out|   4 +-
 .../test/results/clientpositive/groupby9.q.out  |  20 +-
 .../clientpositive/groupby_cube_multi_gby.q.out |   2 +-
 .../clientpositive/groupby_position.q.out   |   8 +-
 .../clientpositive/infer_bucket_sort.q.out  |   6 +-
 .../infer_bucket_sort_grouping_operators.q.out  |  30 +-
 .../infer_bucket_sort_map_operators.q.out   |  53 ++--
 .../infer_bucket_sort_reducers_power_two.q.out  |   6 +-
 ql/src/test/results/clientpositive/input8.q.out |   4 +-
 ql/src/test/results/clientpositive/input9.q.out |   4 +-
 .../results/clientpositive/input_part10.q.out   |  24 +-
 ql/src/test/results/clientpositive/join38.q.out |  22 +-
 .../clientpositive/literal_decimal.q.out|   6 +-
 .../clientpositive/llap/autoColumnStats_2.q.out |   4 +-
 .../clientpositive/llap/bucketmapjoin1.q.out|  12 +-
 .../clientpositive/llap/bucketmapjoin2.q.out|  18 +-
 .../clientpositive/llap/bucketmapjoin3.q.out|  12 +-
 .../clientpositive/llap/bucketmapjoin4.q.out|  12 +-
 .../clientpositive/llap/explainuser_2.q.out | 296 ++-
 .../llap/filter_join_breaktask2.q.out   |  46 +--
 .../llap/multi_insert_lateral_view.q.out| 224 --
 .../results/clientpositive/llap/stats11.q.out   |  12 +-
 .../llap/table_access_keys_stats.q.out  |   2 +-
 .../llap/tez_union_multiinsert.q.out| 182 +++-
 .../clientpositive/llap/unionDistinct_1.q.out   |  42 +--
 .../llap/vector_null_projection.q.out   |  31 +-
 .../clientpositive/llap/vector_nvl.q.out|  18 +-
 .../llap/vectorization_short_regress.q.out  |   4 +-
 .../clientpositive/multi_insert_mixed.q.out |   4 +-
 .../results/clientpositive/null_column.q.out|   4 +-
 .../clientpositive/ppd_constant_expr.q.out  |   8 +-
 .../results/clientpositive/smb_mapjoin_20.q.out |   6 +-
 .../spark/add_part_multiple.q.out   |  16 +-
 .../spark/bucket_map_join_spark1.q.out  |   8 +-
 .../spark/bucket_map_join_spark2.q.out  |   8 +-
 .../spark/bucket_map_join_spark3.q.out  |   8 +-
 .../clientpositive/spark/bucketmapjoin1.q.out   |  12 +-
 .../clientpositive/spark/bucketmapjoin2.q.out   |  18 +-
 

[2/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
http://git-wip-us.apache.org/repos/asf/hive/blob/23ac04d3/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
index 2598d34..6ac97a1 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
@@ -300,7 +300,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col4
 Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
+  expressions: UDFToString(_col0) (type: string), _col1 (type: string), _col4 (type: string)
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -388,7 +388,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -433,7 +433,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -661,7 +661,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col4
 Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
+  expressions: UDFToString(_col0) (type: string), _col1 (type: string), _col4 (type: string)
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -749,7 +749,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -794,7 +794,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 
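
The lineage flips from SIMPLE to EXPRESSION because the int join key no longer lands in the string key column untouched: it now passes through UDFToString, and the lineage hook records any computed column as an EXPRESSION. The statement behind these plans is roughly (paraphrased from bucketmapjoin3.q; bucketmapjoin_tmp_result is assumed to declare key, value1, and value2 all as string):

INSERT OVERWRITE TABLE bucketmapjoin_tmp_result
SELECT a.key, a.value, b.value   -- a.key is int; the target key column is string
FROM srcbucket_mapjoin_part_2 a
JOIN srcbucket_mapjoin_part b ON a.key = b.key;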

[5/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f1e0d56b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f1e0d56b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f1e0d56b

Branch: refs/heads/branch-2.3
Commit: f1e0d56b2dcefa5d3786ea6611610ed9d803f2a4
Parents: e283305
Author: Pengcheng Xiong 
Authored: Tue Apr 4 14:39:44 2017 -0700
Committer: Pengcheng Xiong 
Committed: Tue Apr 4 14:40:51 2017 -0700

--
 .../hadoop/hive/ql/optimizer/ColumnPruner.java  |  13 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   8 +-
 .../column_pruner_multiple_children.q   |  19 ++
 .../clientnegative/udf_assert_true.q.out|  72 +++--
 .../clientpositive/add_part_multiple.q.out  |  16 +-
 .../alter_partition_coltype.q.out   |   2 +-
 .../clientpositive/annotate_stats_select.q.out  |   4 +-
 .../clientpositive/autoColumnStats_7.q.out  |   2 +-
 .../clientpositive/autoColumnStats_8.q.out  |   4 +-
 .../results/clientpositive/ba_table_udfs.q.out  |   2 +-
 .../clientpositive/bucket_map_join_spark1.q.out |   8 +-
 .../clientpositive/bucket_map_join_spark2.q.out |   8 +-
 .../clientpositive/bucket_map_join_spark3.q.out |   8 +-
 .../results/clientpositive/bucketmapjoin5.q.out |  12 +-
 .../clientpositive/bucketmapjoin_negative.q.out |   2 +-
 .../bucketmapjoin_negative2.q.out   |   2 +-
 .../bucketsortoptimize_insert_3.q.out   |   4 +-
 ql/src/test/results/clientpositive/cast1.q.out  |   6 +-
 ql/src/test/results/clientpositive/char_1.q.out |   8 +-
 .../column_pruner_multiple_children.q.out   | 189 
 .../clientpositive/dynamic_rdd_cache.q.out  |   4 +-
 .../clientpositive/filter_join_breaktask2.q.out |  46 +--
 .../test/results/clientpositive/fold_case.q.out |   6 +-
 .../test/results/clientpositive/groupby12.q.out |   2 +-
 .../test/results/clientpositive/groupby5.q.out  |   2 +-
 .../clientpositive/groupby5_noskew.q.out|   2 +-
 .../results/clientpositive/groupby7_map.q.out   |   4 +-
 .../groupby7_map_multi_single_reducer.q.out |   4 +-
 .../clientpositive/groupby7_map_skew.q.out  |   4 +-
 .../clientpositive/groupby7_noskew.q.out|   4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |   4 +-
 .../test/results/clientpositive/groupby8.q.out  |   8 +-
 .../results/clientpositive/groupby8_map.q.out   |   4 +-
 .../clientpositive/groupby8_map_skew.q.out  |   4 +-
 .../clientpositive/groupby8_noskew.q.out|   4 +-
 .../test/results/clientpositive/groupby9.q.out  |  20 +-
 .../clientpositive/groupby_cube_multi_gby.q.out |   2 +-
 .../clientpositive/groupby_position.q.out   |   8 +-
 .../clientpositive/infer_bucket_sort.q.out  |   6 +-
 .../infer_bucket_sort_grouping_operators.q.out  |  30 +-
 .../infer_bucket_sort_map_operators.q.out   |  53 ++--
 .../infer_bucket_sort_reducers_power_two.q.out  |   6 +-
 ql/src/test/results/clientpositive/input8.q.out |   4 +-
 ql/src/test/results/clientpositive/input9.q.out |   4 +-
 .../results/clientpositive/input_part10.q.out   |  24 +-
 ql/src/test/results/clientpositive/join38.q.out |  22 +-
 .../clientpositive/literal_decimal.q.out|   6 +-
 .../clientpositive/llap/autoColumnStats_2.q.out |   4 +-
 .../clientpositive/llap/bucketmapjoin1.q.out|  12 +-
 .../clientpositive/llap/bucketmapjoin2.q.out|  18 +-
 .../clientpositive/llap/bucketmapjoin3.q.out|  12 +-
 .../clientpositive/llap/bucketmapjoin4.q.out|  12 +-
 .../clientpositive/llap/explainuser_2.q.out | 296 ++-
 .../llap/filter_join_breaktask2.q.out   |  46 +--
 .../llap/multi_insert_lateral_view.q.out| 224 --
 .../results/clientpositive/llap/stats11.q.out   |  12 +-
 .../llap/table_access_keys_stats.q.out  |   2 +-
 .../llap/tez_union_multiinsert.q.out| 182 +++-
 .../clientpositive/llap/unionDistinct_1.q.out   |  42 +--
 .../llap/vector_null_projection.q.out   |  31 +-
 .../clientpositive/llap/vector_nvl.q.out|  18 +-
 .../llap/vectorization_short_regress.q.out  |   4 +-
 .../clientpositive/multi_insert_mixed.q.out |   4 +-
 .../results/clientpositive/null_column.q.out|   4 +-
 .../clientpositive/ppd_constant_expr.q.out  |   8 +-
 .../results/clientpositive/smb_mapjoin_20.q.out |   6 +-
 .../spark/add_part_multiple.q.out   |  16 +-
 .../spark/bucket_map_join_spark1.q.out  |   8 +-
 .../spark/bucket_map_join_spark2.q.out  |   8 +-
 .../spark/bucket_map_join_spark3.q.out  |   8 +-
 .../clientpositive/spark/bucketmapjoin1.q.out   |  12 +-
 .../clientpositive/spark/bucketmapjoin2.q.out   |  18 +-
 

[3/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
http://git-wip-us.apache.org/repos/asf/hive/blob/f1e0d56b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
--
diff --git a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
index e8a3610..2745958 100644
--- a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
@@ -72,7 +72,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col5
 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: string), _col5 (type: double)
+  expressions: _col0 (type: string), UDFToString(_col5) (type: string)
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -94,7 +94,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col5
   Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: _col0 (type: string), _col5 (type: double)
+expressions: _col0 (type: string), UDFToString(_col5) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -115,7 +115,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col5
 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: string), _col5 (type: double)
+  expressions: _col0 (type: string), UDFToString(_col5) (type: string)
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -137,7 +137,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col5
   Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: _col0 (type: string), _col5 (type: double)
+expressions: _col0 (type: string), UDFToString(_col5) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -383,14 +383,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
-File Output Operator
-  compressed: false
+Select Operator
+  expressions: _col0 (type: string), UDFToString(_col1) (type: string)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.src_lv1
+  File Output Operator
+    compressed: false
+    Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
+    table:
+        input format: org.apache.hadoop.mapred.TextInputFormat
+        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        name: default.src_lv1
 Reducer 3 
 Execution mode: llap
 Reduce Operator Tree:
@@ -400,14 +404,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
-File Output Operator
-  compressed: false
+Select Operator
+  expressions: _col0 (type: 

[1/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
Repository: hive
Updated Branches:
  refs/heads/branch-2.3 e28330535 -> f1e0d56b2


http://git-wip-us.apache.org/repos/asf/hive/blob/f1e0d56b/ql/src/test/results/clientpositive/spark/union17.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/union17.q.out b/ql/src/test/results/clientpositive/spark/union17.q.out
index 9063e64..bcb95e4 100644
--- a/ql/src/test/results/clientpositive/spark/union17.q.out
+++ b/ql/src/test/results/clientpositive/spark/union17.q.out
@@ -109,14 +109,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest1
+Select Operator
+  expressions: _col0 (type: string), UDFToString(_col1) (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 1 Data size: 272 Basic stats: COMPLETE Column stats: PARTIAL
+  File Output Operator
+    compressed: false
+    Statistics: Num rows: 1 Data size: 272 Basic stats: COMPLETE Column stats: PARTIAL
+    table:
+        input format: org.apache.hadoop.mapred.TextInputFormat
+        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        name: default.dest1
 Reducer 4 
 Reduce Operator Tree:
   Group By Operator
@@ -125,14 +129,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest2
+Select Operator
+  expressions: _col0 (type: string), _col1 (type: string), UDFToString(_col2) (type: string)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 1 Data size: 456 Basic stats: COMPLETE Column stats: PARTIAL
+  File Output Operator
+    compressed: false
+    Statistics: Num rows: 1 Data size: 456 Basic stats: COMPLETE Column stats: PARTIAL
+    table:
+        input format: org.apache.hadoop.mapred.TextInputFormat
+        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        name: default.dest2
 Reducer 8 
 Reduce Operator Tree:
   Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/f1e0d56b/ql/src/test/results/clientpositive/spark/union19.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/union19.q.out b/ql/src/test/results/clientpositive/spark/union19.q.out
index f7168fe..31795b2 100644
--- a/ql/src/test/results/clientpositive/spark/union19.q.out
+++ b/ql/src/test/results/clientpositive/spark/union19.q.out
@@ -135,14 +135,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest1
+

[2/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
http://git-wip-us.apache.org/repos/asf/hive/blob/f1e0d56b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
index 2598d34..6ac97a1 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
@@ -300,7 +300,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col4
 Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
+  expressions: UDFToString(_col0) (type: string), _col1 (type: string), _col4 (type: string)
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -388,7 +388,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -433,7 +433,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -661,7 +661,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col4
 Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
+  expressions: UDFToString(_col0) (type: string), _col1 (type: string), _col4 (type: string)
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -749,7 +749,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -794,7 +794,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 

[4/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
http://git-wip-us.apache.org/repos/asf/hive/blob/f1e0d56b/ql/src/test/results/clientpositive/groupby9.q.out
--
diff --git a/ql/src/test/results/clientpositive/groupby9.q.out b/ql/src/test/results/clientpositive/groupby9.q.out
index c840df8..f4f1163 100644
--- a/ql/src/test/results/clientpositive/groupby9.q.out
+++ b/ql/src/test/results/clientpositive/groupby9.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -120,7 +120,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col2 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToString(_col2) (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -862,7 +862,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -904,7 +904,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col1) (type: int), _col0 (type: string), _col2 (type: bigint)
+expressions: UDFToInteger(_col1) (type: int), _col0 (type: string), UDFToString(_col2) (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -1646,7 +1646,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -1688,7 +1688,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col2 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToString(_col2) (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -2431,7 +2431,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -2474,7 +2474,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col2 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToString(_col2) (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 250 Data size: 2656 Basic 
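
groupby9.q is the plain two-table multi-insert over src, and it shows both casts side by side: the string key was already wrapped in UDFToInteger for the int key columns, and the fix adds the matching UDFToString over the bigint counts going into string value columns. Roughly the query shape (paraphrased, not verbatim; dest1/dest2 assumed to be (key int, value string)-style tables):

FROM src
INSERT OVERWRITE TABLE dest1
  -- string key -> UDFToInteger(int column); bigint count -> UDFToString(string column)
  SELECT src.key, count(DISTINCT substr(src.value, 5)) GROUP BY src.key
INSERT OVERWRITE TABLE dest2
  SELECT src.key, src.value, count(DISTINCT substr(src.value, 5))
  GROUP BY src.key, src.value;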

[5/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/96c2a2c0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/96c2a2c0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/96c2a2c0

Branch: refs/heads/branch-2
Commit: 96c2a2c0869f5e267346dc7a422e6a25d9b89087
Parents: d0df902
Author: Pengcheng Xiong 
Authored: Tue Apr 4 14:39:44 2017 -0700
Committer: Pengcheng Xiong 
Committed: Tue Apr 4 14:50:23 2017 -0700

--
 .../hadoop/hive/ql/optimizer/ColumnPruner.java  |  13 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   8 +-
 .../column_pruner_multiple_children.q   |  19 ++
 .../clientnegative/udf_assert_true.q.out|  72 +++--
 .../clientpositive/add_part_multiple.q.out  |  16 +-
 .../alter_partition_coltype.q.out   |   2 +-
 .../clientpositive/annotate_stats_select.q.out  |   4 +-
 .../clientpositive/autoColumnStats_7.q.out  |   2 +-
 .../clientpositive/autoColumnStats_8.q.out  |   4 +-
 .../results/clientpositive/ba_table_udfs.q.out  |   2 +-
 .../clientpositive/bucket_map_join_spark1.q.out |   8 +-
 .../clientpositive/bucket_map_join_spark2.q.out |   8 +-
 .../clientpositive/bucket_map_join_spark3.q.out |   8 +-
 .../results/clientpositive/bucketmapjoin5.q.out |  12 +-
 .../clientpositive/bucketmapjoin_negative.q.out |   2 +-
 .../bucketmapjoin_negative2.q.out   |   2 +-
 .../bucketsortoptimize_insert_3.q.out   |   4 +-
 ql/src/test/results/clientpositive/cast1.q.out  |   6 +-
 ql/src/test/results/clientpositive/char_1.q.out |   8 +-
 .../column_pruner_multiple_children.q.out   | 189 
 .../clientpositive/dynamic_rdd_cache.q.out  |   4 +-
 .../clientpositive/filter_join_breaktask2.q.out |  46 +--
 .../test/results/clientpositive/fold_case.q.out |   6 +-
 .../test/results/clientpositive/groupby12.q.out |   2 +-
 .../test/results/clientpositive/groupby5.q.out  |   2 +-
 .../clientpositive/groupby5_noskew.q.out|   2 +-
 .../results/clientpositive/groupby7_map.q.out   |   4 +-
 .../groupby7_map_multi_single_reducer.q.out |   4 +-
 .../clientpositive/groupby7_map_skew.q.out  |   4 +-
 .../clientpositive/groupby7_noskew.q.out|   4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |   4 +-
 .../test/results/clientpositive/groupby8.q.out  |   8 +-
 .../results/clientpositive/groupby8_map.q.out   |   4 +-
 .../clientpositive/groupby8_map_skew.q.out  |   4 +-
 .../clientpositive/groupby8_noskew.q.out|   4 +-
 .../test/results/clientpositive/groupby9.q.out  |  20 +-
 .../clientpositive/groupby_cube_multi_gby.q.out |   2 +-
 .../clientpositive/groupby_position.q.out   |   8 +-
 .../clientpositive/infer_bucket_sort.q.out  |   6 +-
 .../infer_bucket_sort_grouping_operators.q.out  |  30 +-
 .../infer_bucket_sort_map_operators.q.out   |  53 ++--
 .../infer_bucket_sort_reducers_power_two.q.out  |   6 +-
 ql/src/test/results/clientpositive/input8.q.out |   4 +-
 ql/src/test/results/clientpositive/input9.q.out |   4 +-
 .../results/clientpositive/input_part10.q.out   |  24 +-
 ql/src/test/results/clientpositive/join38.q.out |  22 +-
 .../clientpositive/literal_decimal.q.out|   6 +-
 .../clientpositive/llap/autoColumnStats_2.q.out |   4 +-
 .../clientpositive/llap/bucketmapjoin1.q.out|  12 +-
 .../clientpositive/llap/bucketmapjoin2.q.out|  18 +-
 .../clientpositive/llap/bucketmapjoin3.q.out|  12 +-
 .../clientpositive/llap/bucketmapjoin4.q.out|  12 +-
 .../clientpositive/llap/explainuser_2.q.out | 296 ++-
 .../llap/filter_join_breaktask2.q.out   |  46 +--
 .../llap/multi_insert_lateral_view.q.out| 224 --
 .../results/clientpositive/llap/stats11.q.out   |  12 +-
 .../llap/table_access_keys_stats.q.out  |   2 +-
 .../llap/tez_union_multiinsert.q.out| 182 +++-
 .../clientpositive/llap/unionDistinct_1.q.out   |  42 +--
 .../llap/vector_null_projection.q.out   |  31 +-
 .../clientpositive/llap/vector_nvl.q.out|  18 +-
 .../llap/vectorization_short_regress.q.out  |   4 +-
 .../clientpositive/multi_insert_mixed.q.out |   4 +-
 .../results/clientpositive/null_column.q.out|   4 +-
 .../clientpositive/ppd_constant_expr.q.out  |   8 +-
 .../results/clientpositive/smb_mapjoin_20.q.out |   6 +-
 .../spark/add_part_multiple.q.out   |  16 +-
 .../spark/bucket_map_join_spark1.q.out  |   8 +-
 .../spark/bucket_map_join_spark2.q.out  |   8 +-
 .../spark/bucket_map_join_spark3.q.out  |   8 +-
 .../clientpositive/spark/bucketmapjoin1.q.out   |  12 +-
 .../clientpositive/spark/bucketmapjoin2.q.out   |  18 +-
 

[1/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
Repository: hive
Updated Branches:
  refs/heads/branch-2 d0df902e4 -> 96c2a2c08


http://git-wip-us.apache.org/repos/asf/hive/blob/96c2a2c0/ql/src/test/results/clientpositive/spark/union17.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/union17.q.out b/ql/src/test/results/clientpositive/spark/union17.q.out
index 9063e64..bcb95e4 100644
--- a/ql/src/test/results/clientpositive/spark/union17.q.out
+++ b/ql/src/test/results/clientpositive/spark/union17.q.out
@@ -109,14 +109,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest1
+Select Operator
+  expressions: _col0 (type: string), UDFToString(_col1) (type: string)
+  outputColumnNames: _col0, _col1
+  Statistics: Num rows: 1 Data size: 272 Basic stats: COMPLETE Column stats: PARTIAL
+  File Output Operator
+    compressed: false
+    Statistics: Num rows: 1 Data size: 272 Basic stats: COMPLETE Column stats: PARTIAL
+    table:
+        input format: org.apache.hadoop.mapred.TextInputFormat
+        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        name: default.dest1
 Reducer 4 
 Reduce Operator Tree:
   Group By Operator
@@ -125,14 +129,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 280 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest2
+Select Operator
+  expressions: _col0 (type: string), _col1 (type: string), UDFToString(_col2) (type: string)
+  outputColumnNames: _col0, _col1, _col2
+  Statistics: Num rows: 1 Data size: 456 Basic stats: COMPLETE Column stats: PARTIAL
+  File Output Operator
+    compressed: false
+    Statistics: Num rows: 1 Data size: 456 Basic stats: COMPLETE Column stats: PARTIAL
+    table:
+        input format: org.apache.hadoop.mapred.TextInputFormat
+        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        name: default.dest2
 Reducer 8 
 Reduce Operator Tree:
   Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/96c2a2c0/ql/src/test/results/clientpositive/spark/union19.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/union19.q.out b/ql/src/test/results/clientpositive/spark/union19.q.out
index f7168fe..31795b2 100644
--- a/ql/src/test/results/clientpositive/spark/union19.q.out
+++ b/ql/src/test/results/clientpositive/spark/union19.q.out
@@ -135,14 +135,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-File Output Operator
-  compressed: false
-  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
-  table:
-      input format: org.apache.hadoop.mapred.TextInputFormat
-      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      name: default.dest1
+  

[4/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
http://git-wip-us.apache.org/repos/asf/hive/blob/96c2a2c0/ql/src/test/results/clientpositive/groupby9.q.out
--
diff --git a/ql/src/test/results/clientpositive/groupby9.q.out b/ql/src/test/results/clientpositive/groupby9.q.out
index c840df8..f4f1163 100644
--- a/ql/src/test/results/clientpositive/groupby9.q.out
+++ b/ql/src/test/results/clientpositive/groupby9.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -120,7 +120,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col2 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToString(_col2) (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -862,7 +862,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -904,7 +904,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col1) (type: int), _col0 (type: string), _col2 (type: bigint)
+expressions: UDFToInteger(_col1) (type: int), _col0 (type: string), UDFToString(_col2) (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -1646,7 +1646,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -1688,7 +1688,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col2 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToString(_col2) (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -2431,7 +2431,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), UDFToString(_col1) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -2474,7 +2474,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Select Operator
-expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col2 (type: bigint)
+expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToString(_col2) (type: string)
 outputColumnNames: _col0, _col1, _col2
 Statistics: Num rows: 250 Data size: 2656 Basic 

[2/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
http://git-wip-us.apache.org/repos/asf/hive/blob/96c2a2c0/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
--
diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
index 2598d34..6ac97a1 100644
--- a/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin3.q.out
@@ -300,7 +300,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col4
 Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
+  expressions: UDFToString(_col0) (type: string), _col1 (type: string), _col4 (type: string)
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -388,7 +388,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -433,7 +433,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -661,7 +661,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col1, _col4
 Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
+  expressions: UDFToString(_col0) (type: string), _col1 (type: string), _col4 (type: string)
   outputColumnNames: _col0, _col1, _col2
   Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE
   File Output Operator
@@ -749,7 +749,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:value, type:string, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value2 SIMPLE [(srcbucket_mapjoin_part)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select count(1) from bucketmapjoin_tmp_result
@@ -794,7 +794,7 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2
 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@ds=2008-04-08
 POSTHOOK: Output: default@bucketmapjoin_tmp_result
-POSTHOOK: Lineage: bucketmapjoin_tmp_result.key SIMPLE [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: bucketmapjoin_tmp_result.key EXPRESSION [(srcbucket_mapjoin_part_2)a.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: bucketmapjoin_tmp_result.value1 SIMPLE 

[3/5] hive git commit: HIVE-16293: Column pruner should continue to work when SEL has more than 1 child (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

2017-04-04 Thread pxiong
http://git-wip-us.apache.org/repos/asf/hive/blob/96c2a2c0/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
--
diff --git 
a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out 
b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
index e8a3610..2745958 100644
--- a/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/llap/multi_insert_lateral_view.q.out
@@ -72,7 +72,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col5
 Statistics: Num rows: 20 Data size: 208 Basic stats: 
COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: string), _col5 (type: 
double)
+  expressions: _col0 (type: string), 
UDFToString(_col5) (type: string)
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 20 Data size: 208 Basic stats: 
COMPLETE Column stats: NONE
   File Output Operator
@@ -94,7 +94,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col5
   Statistics: Num rows: 20 Data size: 208 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
-expressions: _col0 (type: string), _col5 (type: 
double)
+expressions: _col0 (type: string), 
UDFToString(_col5) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 20 Data size: 208 Basic 
stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -115,7 +115,7 @@ STAGE PLANS:
 outputColumnNames: _col0, _col5
 Statistics: Num rows: 20 Data size: 208 Basic stats: 
COMPLETE Column stats: NONE
 Select Operator
-  expressions: _col0 (type: string), _col5 (type: 
double)
+  expressions: _col0 (type: string), 
UDFToString(_col5) (type: string)
   outputColumnNames: _col0, _col1
   Statistics: Num rows: 20 Data size: 208 Basic stats: 
COMPLETE Column stats: NONE
   File Output Operator
@@ -137,7 +137,7 @@ STAGE PLANS:
   outputColumnNames: _col0, _col5
   Statistics: Num rows: 20 Data size: 208 Basic stats: 
COMPLETE Column stats: NONE
   Select Operator
-expressions: _col0 (type: string), _col5 (type: 
double)
+expressions: _col0 (type: string), 
UDFToString(_col5) (type: string)
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 20 Data size: 208 Basic 
stats: COMPLETE Column stats: NONE
 File Output Operator
@@ -383,14 +383,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
+Select Operator
+  expressions: _col0 (type: string), UDFToString(_col1) (type: 
string)
+  outputColumnNames: _col0, _col1
   Statistics: Num rows: 10 Data size: 104 Basic stats: 
COMPLETE Column stats: NONE
-  table:
-  input format: org.apache.hadoop.mapred.TextInputFormat
-  output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-  name: default.src_lv1
+  File Output Operator
+compressed: false
+Statistics: Num rows: 10 Data size: 104 Basic stats: 
COMPLETE Column stats: NONE
+table:
+input format: org.apache.hadoop.mapred.TextInputFormat
+output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+name: default.src_lv1
 Reducer 3 
 Execution mode: llap
 Reduce Operator Tree:
@@ -400,14 +404,18 @@ STAGE PLANS:
 mode: mergepartial
 outputColumnNames: _col0, _col1
 Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE 
Column stats: NONE
-File Output Operator
-  compressed: false
+Select Operator
+  expressions: _col0 (type: 

[2/2] hive git commit: HIVE-16297: Improving hive logging configuration variables (Vihang Karajgaonkar, reviewed by Peter Vary & Aihua Xu)

2017-04-04 Thread aihuaxu
HIVE-16297: Improving hive logging configuration variables (Vihang 
Karajgaonkar, reviewed by Peter Vary & Aihua Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4e60ea3f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4e60ea3f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4e60ea3f

Branch: refs/heads/master
Commit: 4e60ea3f786ed597b0594c2452dd7d64c44645b9
Parents: c7a44eb
Author: Aihua Xu 
Authored: Tue Apr 4 19:14:28 2017 -0400
Committer: Aihua Xu 
Committed: Tue Apr 4 19:28:46 2017 -0400

--
 .../java/org/apache/hadoop/hive/conf/HiveConfUtil.java | 13 -
 common/src/java/org/apache/hive/http/ConfServlet.java  | 10 +++---
 .../apache/hadoop/hive/ql/exec/FileSinkOperator.java   | 10 --
 .../hive/ql/exec/spark/RemoteHiveSparkClient.java  |  9 +++--
 4 files changed, 26 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4e60ea3f/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
--
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
index 9ba08e5..dc02803 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java
@@ -94,11 +94,22 @@ public class HiveConfUtil {
  public static void stripConfigurations(Configuration conf, Set<String> hiddenSet) {
 for (String name : hiddenSet) {
   if (conf.get(name) != null) {
-conf.set(name, "");
+conf.set(name, StringUtils.EMPTY);
   }
 }
   }
 
+  /**
+   * Searches the given configuration object and replaces all the 
configuration values for keys
+   * defined in hive.conf.hidden.list with an empty String
+   *
+   * @param conf - Configuration object which needs to be modified to remove 
sensitive keys
+   */
+  public static void stripConfigurations(Configuration conf) {
+Set<String> hiddenSet = getHiddenSet(conf);
+stripConfigurations(conf, hiddenSet);
+  }
+
   public static void dumpConfig(Configuration originalConf, StringBuilder sb) {
 Set<String> hiddenSet = getHiddenSet(originalConf);
 sb.append("Values omitted for security reason if present: 
").append(hiddenSet).append("\n");

http://git-wip-us.apache.org/repos/asf/hive/blob/4e60ea3f/common/src/java/org/apache/hive/http/ConfServlet.java
--
diff --git a/common/src/java/org/apache/hive/http/ConfServlet.java 
b/common/src/java/org/apache/hive/http/ConfServlet.java
index 253df4f..856a5d2 100644
--- a/common/src/java/org/apache/hive/http/ConfServlet.java
+++ b/common/src/java/org/apache/hive/http/ConfServlet.java
@@ -26,6 +26,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConfUtil;
 
 /**
  * A servlet to print out the running configuration data.
@@ -81,11 +82,14 @@ public class ConfServlet extends HttpServlet {
* Guts of the servlet - extracted for easy testing.
*/
   static void writeResponse(Configuration conf, Writer out, String format)
-throws IOException, BadFormatException {
+  throws IOException, BadFormatException {
+//redact the sensitive information from the configuration values
+Configuration hconf = new Configuration(conf);
+HiveConfUtil.stripConfigurations(hconf);
 if (FORMAT_JSON.equals(format)) {
-  Configuration.dumpConfiguration(conf, out);
+  Configuration.dumpConfiguration(hconf, out);
 } else if (FORMAT_XML.equals(format)) {
-  conf.writeXml(out);
+  hconf.writeXml(out);
 } else {
   throw new BadFormatException("Bad format: " + format);
 }
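
A hedged sketch of exercising writeResponse (the method is package-private 
and, per its comment, extracted for easy testing; the class name 
ConfServletRedactionCheck and same-package placement are assumptions):

    import java.io.StringWriter;
    import org.apache.hadoop.conf.Configuration;

    public class ConfServletRedactionCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(false);
        conf.set("hive.conf.hidden.list", "javax.jdo.option.ConnectionPassword");
        conf.set("javax.jdo.option.ConnectionPassword", "secret");
        StringWriter out = new StringWriter();
        ConfServlet.writeResponse(conf, out, "json");
        // The dump must not leak the secret...
        if (out.toString().contains("secret")) throw new AssertionError("leaked");
        // ...and the caller's conf stays intact, since writeResponse copies it first.
        if (!"secret".equals(conf.get("javax.jdo.option.ConnectionPassword")))
          throw new AssertionError("live conf was mutated");
      }
    }

Copying into a fresh Configuration before stripping is the key design choice 
here: the servlet redacts its own view without blanking values the running 
server still needs.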

http://git-wip-us.apache.org/repos/asf/hive/blob/4e60ea3f/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index a9d03d0..4d727ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import 

[1/2] hive git commit: HIVE-16335: Beeline user HS2 connection file should use /etc/hive/conf instead of /etc/conf/hive (Vihang Karajgaonkar, reviewed by Aihua Xu)

2017-04-04 Thread aihuaxu
Repository: hive
Updated Branches:
  refs/heads/master 23ac04d3b -> 4e60ea3f7


HIVE-16335: Beeline user HS2 connection file should use /etc/hive/conf instead 
of /etc/conf/hive (Vihang Karajgaonkar, reviewed by Aihua Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c7a44eb7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c7a44eb7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c7a44eb7

Branch: refs/heads/master
Commit: c7a44eb707084c65722a9779d3fc32746b36ed09
Parents: 23ac04d
Author: Aihua Xu 
Authored: Tue Apr 4 19:12:50 2017 -0400
Committer: Aihua Xu 
Committed: Tue Apr 4 19:28:16 2017 -0400

--
 .../hive/beeline/hs2connection/UserHS2ConnectionFileParser.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/c7a44eb7/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
--
diff --git 
a/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
 
b/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
index 93a6231..7d7d9ae 100644
--- 
a/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
+++ 
b/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
@@ -44,7 +44,7 @@ public class UserHS2ConnectionFileParser implements 
HS2ConnectionFileParser {
   + (System.getProperty("os.name").toLowerCase().indexOf("windows") != 
-1 ? "" : ".")
   + "beeline" + File.separator;
   public static final String ETC_HIVE_CONF_LOCATION =
-  File.separator + "etc" + File.separator + "conf" + File.separator + 
"hive";
+  File.separator + "etc" + File.separator + "hive" + File.separator + 
"conf";
 
  private final List<String> locations = new ArrayList<>();
   private static final Logger log = 
LoggerFactory.getLogger(UserHS2ConnectionFileParser.class);
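
For clarity, a minimal illustration of what the path swap fixes (output shown 
assumes a Unix-like File.separator of "/"):

    import java.io.File;

    public class ConnectionFileLocation {
      public static void main(String[] args) {
        String fixed = File.separator + "etc" + File.separator + "hive"
            + File.separator + "conf";
        // "/etc/hive/conf" -- before the fix this resolved to "/etc/conf/hive"
        System.out.println(fixed);
      }
    }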



[2/2] hive git commit: HIVE-16164: Provide mechanism for passing HMS notification ID between transactional and non-transactional listeners. (Sergio Pena, reviewed by Mohit Sabharwal, Alexander Kolbaso

2017-04-04 Thread spena
HIVE-16164: Provide mechanism for passing HMS notification ID between 
transactional and non-transactional listeners. (Sergio Pena, reviewed by Mohit 
Sabharwal, Alexander Kolbasov)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/aa29cd9d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/aa29cd9d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/aa29cd9d

Branch: refs/heads/master
Commit: aa29cd9d6044edb7d425b99ff347c92079475d1d
Parents: 2985262
Author: Sergio Pena 
Authored: Tue Apr 4 09:42:06 2017 -0500
Committer: Sergio Pena 
Committed: Tue Apr 4 09:43:05 2017 -0500

--
 .../listener/DbNotificationListener.java|  46 +-
 .../MetaStoreEventListenerConstants.java|  33 ++
 .../listener/TestDbNotificationListener.java| 190 +++
 .../hadoop/hive/metastore/HiveAlterHandler.java |  60 +--
 .../hadoop/hive/metastore/HiveMetaStore.java| 529 +++
 .../metastore/MetaStoreListenerNotifier.java| 224 
 .../hive/metastore/events/ListenerEvent.java| 106 
 .../hadoop/hive/metastore/TestObjectStore.java  |  50 ++
 8 files changed, 959 insertions(+), 279 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/aa29cd9d/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
--
diff --git 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index ea6cb79..bbfbc36 100644
--- 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -57,6 +57,7 @@ import 
org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.InsertEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.metastore.messaging.PartitionFiles;
@@ -137,7 +138,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildCreateTableMessage(t, new 
FileIterator(t.getSd().getLocation())).toString());
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-process(event);
+process(event, tableEvent);
   }
 
   /**
@@ -152,7 +153,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildDropTableMessage(t).toString());
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-process(event);
+process(event, tableEvent);
   }
 
   /**
@@ -168,7 +169,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildAlterTableMessage(before, after).toString());
 event.setDbName(after.getDbName());
 event.setTableName(after.getTableName());
-process(event);
+process(event, tableEvent);
   }
 
  class FileIterator implements Iterator<String> {
@@ -276,7 +277,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 new NotificationEvent(0, now(), EventType.ADD_PARTITION.toString(), 
msg);
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-process(event);
+process(event, partitionEvent);
   }
 
   /**
@@ -291,7 +292,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildDropPartitionMessage(t, 
partitionEvent.getPartitionIterator()).toString());
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-process(event);
+process(event, partitionEvent);
   }
 
   /**
@@ -307,7 +308,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildAlterPartitionMessage(partitionEvent.getTable(), before, 
after).toString());
 event.setDbName(before.getDbName());
 event.setTableName(before.getTableName());
-process(event);
+process(event, partitionEvent);
   }
 
   /**
@@ -321,7 +322,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 new NotificationEvent(0, now(), EventType.CREATE_DATABASE.toString(), 
msgFactory
 .buildCreateDatabaseMessage(db).toString());
 event.setDbName(db.getName());
-process(event);
+process(event, dbEvent);
   }
 
   /**
@@ 
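
Every call site above gains the originating ListenerEvent as a second 
argument. A hedged sketch of the shape the new process overload plausibly 
takes (names such as rs, putParameter, and DB_NOTIFICATION_EVENT_ID_KEY_NAME 
are assumptions, not verbatim patch code):

    // Persist the notification, then copy the DB-generated event id back onto
    // the ListenerEvent so non-transactional listeners that run afterwards
    // can read the same id.
    private void process(NotificationEvent event, ListenerEvent listenerEvent) throws MetaException {
      rs.addNotificationEvent(event); // the RawStore assigns the persistent id
      listenerEvent.putParameter(
          MetaStoreEventListenerConstants.DB_NOTIFICATION_EVENT_ID_KEY_NAME,
          Long.toString(event.getEventId()));
    }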

[1/2] hive git commit: HIVE-16164: Provide mechanism for passing HMS notification ID between transactional and non-transactional listeners. (Sergio Pena, reviewed by Mohit Sabharwal, Alexander Kolbaso

2017-04-04 Thread spena
Repository: hive
Updated Branches:
  refs/heads/master 2985262b8 -> aa29cd9d6


http://git-wip-us.apache.org/repos/asf/hive/blob/aa29cd9d/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
new file mode 100644
index 000..20011cc
--- /dev/null
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience.Private;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.InsertEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+
+import java.util.List;
+import java.util.Map;
+
+import static 
org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
+
+/**
+ * This class is used to notify a list of listeners about specific MetaStore 
events.
+ */
+@Private
+public class MetaStoreListenerNotifier {
+  private interface EventNotifier {
+void notify(MetaStoreEventListener listener, ListenerEvent event) throws 
MetaException;
+  }
+
+  private static Map<EventType, EventNotifier> notificationEvents = 
Maps.newHashMap(
+  ImmutableMap.<EventType, EventNotifier>builder()
+  .put(EventType.CREATE_DATABASE, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onCreateDatabase((CreateDatabaseEvent)event);
+}
+  })
+  .put(EventType.DROP_DATABASE, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onDropDatabase((DropDatabaseEvent)event);
+}
+  })
+  .put(EventType.CREATE_TABLE, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onCreateTable((CreateTableEvent)event);
+}
+  })
+  .put(EventType.DROP_TABLE, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onDropTable((DropTableEvent)event);
+}
+  })
+  .put(EventType.ADD_PARTITION, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onAddPartition((AddPartitionEvent)event);
+}
+  })
+  .put(EventType.DROP_PARTITION, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener 
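
The map above pairs each EventType with a callback that downcasts to the 
matching event class. A hedged sketch of the dispatch method this enables 
(the name notifyEvent and exact signature are assumptions based on the file 
summary):

    // Notify every listener of one event via the type-keyed callback table.
    public static ListenerEvent notifyEvent(List<MetaStoreEventListener> listeners,
        EventType eventType, ListenerEvent event) throws MetaException {
      Preconditions.checkNotNull(listeners, "Listener list must not be null.");
      Preconditions.checkNotNull(event, "The event must not be null.");
      for (MetaStoreEventListener listener : listeners) {
        notificationEvents.get(eventType).notify(listener, event);
      }
      return event;
    }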

[2/2] hive git commit: HIVE-16164: Provide mechanism for passing HMS notification ID between transactional and non-transactional listeners. (Sergio Pena, reviewed by Mohit Sabharwal, Alexander Kolbaso

2017-04-04 Thread spena
HIVE-16164: Provide mechanism for passing HMS notification ID between 
transactional and non-transactional listeners. (Sergio Pena, reviewed by Mohit 
Sabharwal, Alexander Kolbasov)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d0df902e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d0df902e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d0df902e

Branch: refs/heads/branch-2
Commit: d0df902e41354d6d80a6d192e0964da8c043467b
Parents: e283305
Author: Sergio Pena 
Authored: Tue Apr 4 09:42:06 2017 -0500
Committer: Sergio Pena 
Committed: Tue Apr 4 09:46:29 2017 -0500

--
 .../listener/DbNotificationListener.java|  46 +-
 .../MetaStoreEventListenerConstants.java|  33 ++
 .../listener/TestDbNotificationListener.java| 190 +++
 .../hadoop/hive/metastore/HiveAlterHandler.java |  60 +--
 .../hadoop/hive/metastore/HiveMetaStore.java| 529 +++
 .../metastore/MetaStoreListenerNotifier.java| 224 
 .../hive/metastore/events/ListenerEvent.java| 106 
 .../hadoop/hive/metastore/TestObjectStore.java  |  50 ++
 8 files changed, 959 insertions(+), 279 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/d0df902e/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
--
diff --git 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index ea6cb79..bbfbc36 100644
--- 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -57,6 +57,7 @@ import 
org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.InsertEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.metastore.messaging.PartitionFiles;
@@ -137,7 +138,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildCreateTableMessage(t, new 
FileIterator(t.getSd().getLocation())).toString());
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-process(event);
+process(event, tableEvent);
   }
 
   /**
@@ -152,7 +153,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildDropTableMessage(t).toString());
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-process(event);
+process(event, tableEvent);
   }
 
   /**
@@ -168,7 +169,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildAlterTableMessage(before, after).toString());
 event.setDbName(after.getDbName());
 event.setTableName(after.getTableName());
-process(event);
+process(event, tableEvent);
   }
 
  class FileIterator implements Iterator<String> {
@@ -276,7 +277,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 new NotificationEvent(0, now(), EventType.ADD_PARTITION.toString(), 
msg);
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-process(event);
+process(event, partitionEvent);
   }
 
   /**
@@ -291,7 +292,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildDropPartitionMessage(t, 
partitionEvent.getPartitionIterator()).toString());
 event.setDbName(t.getDbName());
 event.setTableName(t.getTableName());
-process(event);
+process(event, partitionEvent);
   }
 
   /**
@@ -307,7 +308,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 .buildAlterPartitionMessage(partitionEvent.getTable(), before, 
after).toString());
 event.setDbName(before.getDbName());
 event.setTableName(before.getTableName());
-process(event);
+process(event, partitionEvent);
   }
 
   /**
@@ -321,7 +322,7 @@ public class DbNotificationListener extends 
MetaStoreEventListener {
 new NotificationEvent(0, now(), EventType.CREATE_DATABASE.toString(), 
msgFactory
 .buildCreateDatabaseMessage(db).toString());
 event.setDbName(db.getName());
-process(event);
+process(event, dbEvent);
   }
 
   /**
@@ 

[1/2] hive git commit: HIVE-16164: Provide mechanism for passing HMS notification ID between transactional and non-transactional listeners. (Sergio Pena, reviewed by Mohit Sabharwal, Alexander Kolbaso

2017-04-04 Thread spena
Repository: hive
Updated Branches:
  refs/heads/branch-2 e28330535 -> d0df902e4


http://git-wip-us.apache.org/repos/asf/hive/blob/d0df902e/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
new file mode 100644
index 000..20011cc
--- /dev/null
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.hive.common.classification.InterfaceAudience.Private;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.InsertEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+
+import java.util.List;
+import java.util.Map;
+
+import static 
org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
+
+/**
+ * This class is used to notify a list of listeners about specific MetaStore 
events.
+ */
+@Private
+public class MetaStoreListenerNotifier {
+  private interface EventNotifier {
+void notify(MetaStoreEventListener listener, ListenerEvent event) throws 
MetaException;
+  }
+
+  private static Map<EventType, EventNotifier> notificationEvents = 
Maps.newHashMap(
+  ImmutableMap.<EventType, EventNotifier>builder()
+  .put(EventType.CREATE_DATABASE, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onCreateDatabase((CreateDatabaseEvent)event);
+}
+  })
+  .put(EventType.DROP_DATABASE, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onDropDatabase((DropDatabaseEvent)event);
+}
+  })
+  .put(EventType.CREATE_TABLE, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onCreateTable((CreateTableEvent)event);
+}
+  })
+  .put(EventType.DROP_TABLE, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onDropTable((DropTableEvent)event);
+}
+  })
+  .put(EventType.ADD_PARTITION, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener listener, ListenerEvent 
event) throws MetaException {
+  listener.onAddPartition((AddPartitionEvent)event);
+}
+  })
+  .put(EventType.DROP_PARTITION, new EventNotifier() {
+@Override
+public void notify(MetaStoreEventListener 

hive git commit: HIVE-16254 : metadata for values temporary tables for INSERTs are getting replicated during bootstrap (Anishek Agarwal, reviewed by Sushanth Sowmyan)

2017-04-04 Thread khorgath
Repository: hive
Updated Branches:
  refs/heads/master aa29cd9d6 -> 4e162e01f


HIVE-16254 : metadata for values temporary tables for INSERTs are getting 
replicated during bootstrap (Anishek Agarwal, reviewed by Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4e162e01
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4e162e01
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4e162e01

Branch: refs/heads/master
Commit: 4e162e01f92bc5bf434bd2552bfafb24fa41f6b3
Parents: aa29cd9
Author: Sushanth Sowmyan 
Authored: Tue Apr 4 09:38:50 2017 -0700
Committer: Sushanth Sowmyan 
Committed: Tue Apr 4 09:39:28 2017 -0700

--
 .../hive/ql/TestReplicationScenarios.java   | 43 +-
 .../hive/metastore/messaging/EventUtils.java| 87 ++--
 .../messaging/event/filters/AndFilter.java  | 22 +
 .../messaging/event/filters/BasicFilter.java| 16 
 .../event/filters/DatabaseAndTableFilter.java   | 35 
 .../event/filters/EventBoundaryFilter.java  | 17 
 .../event/filters/MessageFormatFilter.java  | 19 +
 ql/pom.xml  |  6 ++
 .../ql/parse/ReplicationSemanticAnalyzer.java   | 32 +--
 .../parse/TestReplicationSemanticAnalyzer.java  | 22 -
 10 files changed, 187 insertions(+), 112 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/4e162e01/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
--
diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
index 2688f35..4c9a1a2 100644
--- 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
+++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestReplicationScenarios.java
@@ -28,19 +28,20 @@ import 
org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.messaging.EventUtils;
+import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter;
+import 
org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
+import 
org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter;
+import 
org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.ql.parse.ReplicationSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec.ReplStateMap;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.util.Shell;
 import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -1177,8 +1178,8 @@ public class TestReplicationScenarios {
 // events to those that match the dbname and tblname provided to the 
filter.
 // If the tblname passed in to the filter is null, then it restricts itself
 // to dbname-matching alone.
-IMetaStoreClient.NotificationFilter dbTblFilter = 
EventUtils.getDbTblNotificationFilter(dbname,tblname);
-IMetaStoreClient.NotificationFilter dbFilter = 
EventUtils.getDbTblNotificationFilter(dbname,null);
+IMetaStoreClient.NotificationFilter dbTblFilter = new 
DatabaseAndTableFilter(dbname,tblname);
+IMetaStoreClient.NotificationFilter dbFilter = new 
DatabaseAndTableFilter(dbname,null);
 
 assertFalse(dbTblFilter.accept(null));
 assertTrue(dbTblFilter.accept(createDummyEvent(dbname, tblname, 0)));
@@ -1195,7 +1196,7 @@ public class TestReplicationScenarios {
 // within a range specified.
 long evBegin = 50;
 long evEnd = 75;
-IMetaStoreClient.NotificationFilter evRangeFilter = 
EventUtils.getEventBoundaryFilter(evBegin,evEnd);
+IMetaStoreClient.NotificationFilter evRangeFilter = new 
EventBoundaryFilter(evBegin,evEnd);
 
 assertTrue(evBegin < evEnd);
 assertFalse(evRangeFilter.accept(null));
@@ -1211,9 +1212,9 @@ public class TestReplicationScenarios {
 // that match a provided message format
 
 IMetaStoreClient.NotificationFilter restrictByDefaultMessageFormat =
-
EventUtils.restrictByMessageFormat(MessageFactory.getInstance().getMessageFormat());
+
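
Taken together, the new classes let the test (and ReplicationSemanticAnalyzer) 
compose filters instead of calling EventUtils helpers; a minimal sketch, 
assuming AndFilter takes a varargs list of filters as the test changes 
suggest:

    IMetaStoreClient.NotificationFilter composite = new AndFilter(
        new DatabaseAndTableFilter("mydb", "mytable"),
        new EventBoundaryFilter(50, 75),
        new MessageFormatFilter(MessageFactory.getInstance().getMessageFormat()));
    // composite.accept(event) is true only if every sub-filter accepts the event.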

hive git commit: HIVE-15724 : getPrimaryKeys and getForeignKeys in metastore does not normalize db and table name (Daniel Dai via Pengcheng Xiong)

2017-04-04 Thread hashutosh
Repository: hive
Updated Branches:
  refs/heads/master 4e162e01f -> 90f2a047a


HIVE-15724 : getPrimaryKeys and getForeignKeys in metastore does not normalize 
db and table name (Daniel Dai via Pengcheng Xiong)

Signed-off-by: Ashutosh Chauhan 


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/90f2a047
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/90f2a047
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/90f2a047

Branch: refs/heads/master
Commit: 90f2a047af1788c070900c999a181a4863bfe045
Parents: 4e162e0
Author: Daniel Dai 
Authored: Tue Jan 24 23:22:00 2017 -0800
Committer: Ashutosh Chauhan 
Committed: Tue Apr 4 09:49:58 2017 -0700

--
 .../hadoop/hive/metastore/ObjectStore.java  | 19 +--
 .../hadoop/hive/metastore/hbase/HBaseStore.java | 22 
 .../clientpositive/create_with_constraints.q| 24 -
 .../create_with_constraints.q.out   | 56 ++--
 4 files changed, 76 insertions(+), 45 deletions(-)
--
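
The core of the fix is normalizing identifiers at the RawStore API boundary 
before lookup. HiveStringUtils.normalizeIdentifier trims and lower-cases, so, 
for illustration:

    String db = HiveStringUtils.normalizeIdentifier(" Default ");  // -> "default"
    String tbl = HiveStringUtils.normalizeIdentifier("MyTable");   // -> "mytable"
    // Mixed-case getPrimaryKeys/getForeignKeys arguments now match metadata
    // stored in lower case instead of silently returning nothing.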


http://git-wip-us.apache.org/repos/asf/hive/blob/90f2a047/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index a63519a..6b21751 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1139,6 +1139,9 @@ public class ObjectStore implements RawStore, 
Configurable {
 
  private List<MConstraint> listAllTableConstraintsWithOptionalConstraintName
 (String dbName, String tableName, String constraintname) {
+dbName = HiveStringUtils.normalizeIdentifier(dbName);
+tableName = HiveStringUtils.normalizeIdentifier(tableName);
+constraintname = 
constraintname!=null?HiveStringUtils.normalizeIdentifier(constraintname):null;
 List<MConstraint> mConstraints = null;
 List<String> constraintNames = new ArrayList<String>();
 Query query = null;
@@ -8540,10 +8543,12 @@ public class ObjectStore implements RawStore, 
Configurable {
 }
   }
 
-  protected List<SQLPrimaryKey> getPrimaryKeysInternal(final String db_name,
-final String tbl_name,
+  protected List<SQLPrimaryKey> getPrimaryKeysInternal(final String 
db_name_input,
+final String tbl_name_input,
 boolean allowSql, boolean allowJdo)
   throws MetaException, NoSuchObjectException {
+final String db_name = HiveStringUtils.normalizeIdentifier(db_name_input);
+final String tbl_name = 
HiveStringUtils.normalizeIdentifier(tbl_name_input);
 return new GetListHelper<SQLPrimaryKey>(db_name, tbl_name, allowSql, 
allowJdo) {
 
   @Override
@@ -8637,9 +8642,13 @@ public class ObjectStore implements RawStore, 
Configurable {
 }
   }
 
-  protected List<SQLForeignKey> getForeignKeysInternal(final String 
parent_db_name,
-final String parent_tbl_name, final String foreign_db_name, final String 
foreign_tbl_name,
-boolean allowSql, boolean allowJdo) throws MetaException, 
NoSuchObjectException {
+  protected List<SQLForeignKey> getForeignKeysInternal(final String 
parent_db_name_input,
+final String parent_tbl_name_input, final String foreign_db_name_input,
+final String foreign_tbl_name_input, boolean allowSql, boolean allowJdo) 
throws MetaException, NoSuchObjectException {
+final String parent_db_name = parent_db_name_input;
+final String parent_tbl_name = parent_tbl_name_input;
+final String foreign_db_name = foreign_db_name_input;
+final String foreign_tbl_name = foreign_tbl_name_input;
 return new GetListHelper<SQLForeignKey>(foreign_db_name, foreign_tbl_name, 
allowSql, allowJdo) {
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/90f2a047/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
--
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 6593fa6..f9619e5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -2692,6 +2692,8 @@ public class HBaseStore implements RawStore {
 
   @Override
   public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name) 
throws MetaException {
+db_name = HiveStringUtils.normalizeIdentifier(db_name);
+tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name);
 boolean commit = false;
 openTransaction();
 try {
@@ -2710,6 +2712,10 @@ public class HBaseStore implements RawStore {
   public List<SQLForeignKey> getForeignKeys(String parent_db_name, String 

hive git commit: HIVE-15996: Implement multiargument GROUPING function (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

2017-04-04 Thread jcamacho
Repository: hive
Updated Branches:
  refs/heads/master 90f2a047a -> 1a1e8357b


HIVE-15996: Implement multiargument GROUPING function (Jesus Camacho Rodriguez, 
reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1a1e8357
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1a1e8357
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1a1e8357

Branch: refs/heads/master
Commit: 1a1e8357bcb09ab7b775f26b83f00d6f687bbc23
Parents: 90f2a04
Author: Jesus Camacho Rodriguez 
Authored: Mon Feb 27 09:24:06 2017 +
Committer: Jesus Camacho Rodriguez 
Committed: Tue Apr 4 19:54:16 2017 +0200

--
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  80 ++--
 .../hive/ql/udf/generic/GenericUDFGrouping.java |  45 +-
 .../groupby_grouping_sets_grouping.q|  36 ++
 .../vector_groupby_grouping_sets_grouping.q |  36 ++
 .../groupby_grouping_sets_grouping.q.out| 366 -
 .../vector_groupby_grouping_sets_grouping.q.out | 410 ++-
 .../results/clientpositive/perf/query36.q.out   |   4 +-
 .../results/clientpositive/perf/query70.q.out   |   4 +-
 .../results/clientpositive/perf/query86.q.out   |   4 +-
 9 files changed, 903 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hive/blob/1a1e8357/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
--
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index f2a6ade..b2e1c88 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -53,7 +53,6 @@ import org.antlr.runtime.tree.TreeWizard;
 import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
 import org.apache.calcite.rel.RelNode;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.mutable.MutableBoolean;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -3062,8 +3061,6 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
 
   protected static ASTNode rewriteGroupingFunctionAST(final List<ASTNode> 
grpByAstExprs, ASTNode targetNode,
   final boolean noneSet) throws SemanticException {
-final MutableBoolean visited = new MutableBoolean(false);
-final MutableBoolean found = new MutableBoolean(false);
 
 TreeVisitorAction action = new TreeVisitorAction() {
 
@@ -3075,45 +3072,62 @@ public class SemanticAnalyzer extends 
BaseSemanticAnalyzer {
   @Override
   public Object post(Object t) {
 ASTNode root = (ASTNode) t;
-if (root.getType() == HiveParser.TOK_FUNCTION && root.getChildCount() 
== 2) {
+if (root.getType() == HiveParser.TOK_FUNCTION) {
   ASTNode func = (ASTNode) ParseDriver.adaptor.getChild(root, 0);
-  if (func.getText().equals("grouping")) {
-ASTNode c = (ASTNode) ParseDriver.adaptor.getChild(root, 1);
-visited.setValue(true);
-for (int i = 0; i < grpByAstExprs.size(); i++) {
-  ASTNode grpByExpr = grpByAstExprs.get(i);
-  if (grpByExpr.toStringTree().equals(c.toStringTree())) {
-ASTNode child1;
-if (noneSet) {
-  // Query does not contain CUBE, ROLLUP, or GROUPING SETS, 
and thus,
-  // grouping should return 0
-  child1 = (ASTNode) 
ParseDriver.adaptor.create(HiveParser.IntegralLiteral,
-String.valueOf(0));
-} else {
-  // We refer to grouping_id column
-  child1 = (ASTNode) ParseDriver.adaptor.create(
-  HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL");
-  ParseDriver.adaptor.addChild(child1, 
ParseDriver.adaptor.create(
-  HiveParser.Identifier, 
VirtualColumn.GROUPINGID.getName()));
+  if (func.getText().equals("grouping") && func.getChildCount() == 0) {
+int numberOperands = ParseDriver.adaptor.getChildCount(root);
+// We implement this logic using replaceChildren instead of 
replacing
+// the root node itself because windowing logic stores multiple
+// pointers to the AST, and replacing root might lead to some 
pointers
+// leading to non-rewritten version
+ASTNode newRoot = new ASTNode();
+// Rewritten grouping function
+ASTNode groupingFunc = (ASTNode) ParseDriver.adaptor.create(
+HiveParser.Identifier, "grouping");
+