HIVE-12656: Turn hive.compute.query.using.stats on by default (Pengcheng Xiong, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8763c7aa
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8763c7aa
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8763c7aa

Branch: refs/heads/master
Commit: 8763c7aa287b8a07c54d22a227bc9d368aa8b626
Parents: 0e05914
Author: Pengcheng Xiong <[email protected]>
Authored: Tue Aug 16 15:20:33 2016 -0700
Committer: Pengcheng Xiong <[email protected]>
Committed: Tue Aug 16 15:20:33 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java      |   2 +-
 .../test/queries/clientnegative/insert_into6.q     |   1 +
 .../lockneg_query_tbl_in_locked_db.q               |   1 +
 .../queries/clientpositive/alter_merge_orc.q       |   1 +
 .../clientpositive/bucketizedhiveinputformat.q     |   1 +
 .../test/queries/clientpositive/cbo_udf_udaf.q     |   1 +
 .../clientpositive/dynamic_partition_pruning.q     |   1 +
 .../dynpart_sort_opt_vectorization.q               |   1 +
 .../clientpositive/dynpart_sort_optimization.q     |   1 +
 ql/src/test/queries/clientpositive/escape1.q       |   1 +
 ql/src/test/queries/clientpositive/escape2.q       |   1 +
 .../queries/clientpositive/orc_llap_counters.q     |   1 +
 ql/src/test/queries/clientpositive/orc_merge1.q    |   1 +
 .../test/queries/clientpositive/orc_merge10.q      |   1 +
 .../queries/clientpositive/orc_merge_diff_fs.q     |   1 +
 .../test/queries/clientpositive/orc_ppd_basic.q    |   1 +
 .../clientpositive/partition_coltype_literals.q    |   1 +
 .../clientpositive/stats_aggregator_error_1.q      |   1 +
 .../clientpositive/stats_publisher_error_1.q       |   1 +
 .../clientpositive/symlink_text_input_format.q     |   2 +
 ql/src/test/queries/clientpositive/tez_union.q     |   1 +
 .../queries/clientpositive/vector_complex_all.q    |   2 +
 .../vectorization_short_regress.q                  |   1 +
 .../vectorized_dynamic_partition_pruning.q         |   1 +
 .../clientpositive/alter_merge_2_orc.q.out         |   2 -
 .../clientpositive/alter_merge_orc.q.out           |   2 -
 .../alter_partition_coltype.q.out                  | 384 +---------------
 .../clientpositive/annotate_stats_select.q.out     |  72 +--
 .../clientpositive/avro_partitioned.q.out          |  16 -
 .../bucketsortoptimize_insert_1.q.out              |   4 -
 .../bucketsortoptimize_insert_3.q.out              |   4 -
 .../clientpositive/cbo_rp_udf_udaf.q.out           |   2 -
 .../cbo_rp_udf_udaf_stats_opt.q.out                |   2 -
 .../results/clientpositive/cbo_udf_udaf.q.out      |   2 -
 .../test/results/clientpositive/combine2.q.out     | 444 +------------------
 .../dynpart_sort_opt_vectorization.q.out           | 104 +----
 .../dynpart_sort_optimization.q.out                |  32 --
 .../clientpositive/explain_dependency2.q.out       |   2 +-
 .../results/clientpositive/fileformat_mix.q.out    |   4 -
 .../test/results/clientpositive/fold_case.q.out    |  36 +-
 .../test/results/clientpositive/input24.q.out      |  38 +-
 .../list_bucket_query_multiskew_1.q.out            |   2 -
 .../list_bucket_query_multiskew_2.q.out            |   2 -
 .../list_bucket_query_multiskew_3.q.out            | 109 +----
 .../merge_dynamic_partition4.q.out                 |   4 -
 .../merge_dynamic_partition5.q.out                 |   4 -
 .../results/clientpositive/orc_merge1.q.out        |  12 -
 .../results/clientpositive/orc_merge10.q.out       |  12 -
 .../clientpositive/partition_boolexpr.q.out        | 144 +-----
 .../results/clientpositive/partition_date.q.out    |  32 --
 .../clientpositive/partition_decode_name.q.out     |  12 -
 .../clientpositive/partition_special_char.q.out    |  12 -
 .../clientpositive/partition_timestamp.q.out       |  28 --
 .../clientpositive/partition_varchar1.q.out        |  22 -
 .../test/results/clientpositive/plan_json.q.out    |   2 +-
 .../clientpositive/ppd_constant_where.q.out        |  40 +-
 .../rename_partition_location.q.out                |   1 -
 .../clientpositive/select_unquote_and.q.out        |   4 -
 .../clientpositive/select_unquote_not.q.out        |   4 -
 .../clientpositive/select_unquote_or.q.out         |   4 -
 .../results/clientpositive/smb_mapjoin_18.q.out    |   6 -
 .../results/clientpositive/smb_mapjoin_19.q.out    |   4 -
 .../results/clientpositive/smb_mapjoin_20.q.out    |   4 -
 .../spark/list_bucket_dml_2.q.out                  |   6 -
 .../clientpositive/spark/smb_mapjoin_18.q.out      |   6 -
 .../clientpositive/spark/smb_mapjoin_19.q.out      |   4 -
 .../clientpositive/spark/smb_mapjoin_20.q.out      |   4 -
 .../results/clientpositive/spark/stats3.q.out      |   4 -
 .../clientpositive/spark/stats_noscan_2.q.out      |   2 -
 .../clientpositive/spark/union_view.q.out          | 129 +----
 ql/src/test/results/clientpositive/stats3.q.out    |   4 -
 .../results/clientpositive/stats_noscan_2.q.out    |   2 -
 .../test/results/clientpositive/udf_count.q.out    | 110 +----
 .../results/clientpositive/union_view.q.out        | 183 +-------
 .../clientpositive/updateAccessTime.q.out          |   2 -
 75 files changed, 78 insertions(+), 2019 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 0abb788..2bd2eea 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1912,7 +1912,7 @@ public class HiveConf extends Configuration {
         "final aggregations in single reduce task. If this is set true, Hive delegates final aggregation\n" +
         "stage to fetch task, possibly decreasing the query time."),
-    HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", false,
+    HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", true,
         "When set to true Hive will answer a few queries like count(1) purely using stats\n" +
         "stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.\n" +
         "For more advanced stats collection need to run analyze table queries."),
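
A minimal HiveQL sketch of what this default enables, assuming a hypothetical table t whose basic stats were gathered on write (hive.stats.autogather already defaults to true):

    set hive.compute.query.using.stats=true;   -- the default after this commit
    create table t (key int);
    insert into t values (1), (2), (3);        -- the write updates numRows in the metastore
    select count(1) from t;                    -- answered purely from metastore stats, no MR job
    set hive.compute.query.using.stats=false;  -- per-session opt-out, forces a real scan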

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientnegative/insert_into6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/insert_into6.q b/ql/src/test/queries/clientnegative/insert_into6.q
index 0feb00e..588cf86 100644
--- a/ql/src/test/queries/clientnegative/insert_into6.q
+++ b/ql/src/test/queries/clientnegative/insert_into6.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 
 DROP TABLE IF EXISTS insert_into6_neg;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q b/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q
index 4966f2b..6ccdae3 100644
--- a/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q
+++ b/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 create database lockneg1;
 use lockneg1;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/alter_merge_orc.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_merge_orc.q b/ql/src/test/queries/clientpositive/alter_merge_orc.q
index 9b836a6..aac98a8 100644
--- a/ql/src/test/queries/clientpositive/alter_merge_orc.q
+++ b/ql/src/test/queries/clientpositive/alter_merge_orc.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 
 create table src_orc_merge_test(key int, value string) stored as orc;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q b/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
index dc48ee6..a87fa1a 100644
--- a/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
+++ b/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set mapred.max.split.size = 32000000;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/cbo_udf_udaf.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_udf_udaf.q b/ql/src/test/queries/clientpositive/cbo_udf_udaf.q
index 34d5985..8534cec 100644
--- a/ql/src/test/queries/clientpositive/cbo_udf_udaf.q
+++ b/ql/src/test/queries/clientpositive/cbo_udf_udaf.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.cbo.enable=true;
 set hive.exec.check.crossproducts=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
index 9e60fe8..d28da6e 100644
--- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
+++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.optimize.ppd=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
index 48a2f87..a300f91 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.optimize.sort.dynamic.partition=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
index b4ff0e8..5ef8ead 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.optimize.sort.dynamic.partition=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/escape1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/escape1.q b/ql/src/test/queries/clientpositive/escape1.q
index 967db78..a28dba8 100644
--- a/ql/src/test/queries/clientpositive/escape1.q
+++ b/ql/src/test/queries/clientpositive/escape1.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions.pernode=200;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/escape2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/escape2.q b/ql/src/test/queries/clientpositive/escape2.q
index 416d2e4..5814650 100644
--- a/ql/src/test/queries/clientpositive/escape2.q
+++ b/ql/src/test/queries/clientpositive/escape2.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.exec.dynamic.partition=true;
 set hive.exec.max.dynamic.partitions.pernode=200;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/orc_llap_counters.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_llap_counters.q b/ql/src/test/queries/clientpositive/orc_llap_counters.q
index 1bd55d3..cc0e991 100644
--- a/ql/src/test/queries/clientpositive/orc_llap_counters.q
+++ b/ql/src/test/queries/clientpositive/orc_llap_counters.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 SET hive.optimize.index.filter=true;
 SET hive.cbo.enable=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/orc_merge1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge1.q b/ql/src/test/queries/clientpositive/orc_merge1.q
index d0f0b28..f704a1c 100644
--- a/ql/src/test/queries/clientpositive/orc_merge1.q
+++ b/ql/src/test/queries/clientpositive/orc_merge1.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.merge.orcfile.stripe.level=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/orc_merge10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge10.q b/ql/src/test/queries/clientpositive/orc_merge10.q
index 98d3aa3..b84ed80 100644
--- a/ql/src/test/queries/clientpositive/orc_merge10.q
+++ b/ql/src/test/queries/clientpositive/orc_merge10.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.merge.orcfile.stripe.level=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q b/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q
index 1787f08..94c0e6a 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.merge.orcfile.stripe.level=false;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/orc_ppd_basic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_basic.q b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
index c367848..43f2c85 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_basic.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 SET hive.fetch.task.conversion=none;
 SET hive.optimize.index.filter=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/partition_coltype_literals.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/partition_coltype_literals.q b/ql/src/test/queries/clientpositive/partition_coltype_literals.q
index b918dd3..0c2365a 100644
--- a/ql/src/test/queries/clientpositive/partition_coltype_literals.q
+++ b/ql/src/test/queries/clientpositive/partition_coltype_literals.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 drop table if exists partcoltypenum;
 
 create table partcoltypenum (key int, value string) partitioned by (tint tinyint, sint smallint, bint bigint);

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q b/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q
index 5e6b0aa..d6f84ed 100644
--- a/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q
+++ b/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q
@@ -10,6 +10,7 @@ set hive.stats.dbclass=custom;
 set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
 set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.DummyStatsAggregator;
 set hive.stats.reliable=false;
+set hive.compute.query.using.stats=false;
 
 set hive.test.dummystats.aggregator=connect;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/stats_publisher_error_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_publisher_error_1.q b/ql/src/test/queries/clientpositive/stats_publisher_error_1.q
index 513b8e7..50751f7 100644
--- a/ql/src/test/queries/clientpositive/stats_publisher_error_1.q
+++ b/ql/src/test/queries/clientpositive/stats_publisher_error_1.q
@@ -10,6 +10,7 @@ set hive.stats.dbclass=custom;
 set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
 set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.DummyStatsAggregator;
 set hive.stats.reliable=false;
+set hive.compute.query.using.stats=false;
 
 set hive.test.dummystats.publisher=connect;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/symlink_text_input_format.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/symlink_text_input_format.q b/ql/src/test/queries/clientpositive/symlink_text_input_format.q
index d89aad4..d7759c6 100644
--- a/ql/src/test/queries/clientpositive/symlink_text_input_format.q
+++ b/ql/src/test/queries/clientpositive/symlink_text_input_format.q
@@ -1,4 +1,6 @@
 set hive.mapred.mode=nonstrict;
+set hive.compute.query.using.stats=false;
+
 DROP TABLE IF EXISTS symlink_text_input_format;
 
 EXPLAIN

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/tez_union.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/tez_union.q b/ql/src/test/queries/clientpositive/tez_union.q
index c49c96d..fba543c 100644
--- a/ql/src/test/queries/clientpositive/tez_union.q
+++ b/ql/src/test/queries/clientpositive/tez_union.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.auto.convert.join=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/vector_complex_all.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_complex_all.q b/ql/src/test/queries/clientpositive/vector_complex_all.q
index 1f23b60..91a7368 100644
--- a/ql/src/test/queries/clientpositive/vector_complex_all.q
+++ b/ql/src/test/queries/clientpositive/vector_complex_all.q
@@ -1,3 +1,5 @@
+set hive.compute.query.using.stats=false;
+set hive.compute.query.using.stats=false;
 set hive.cli.print.header=true;
 set hive.explain.user=false;
 set hive.fetch.task.conversion=none;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/vectorization_short_regress.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorization_short_regress.q b/ql/src/test/queries/clientpositive/vectorization_short_regress.q
index 3772329..114a3e2 100644
--- a/ql/src/test/queries/clientpositive/vectorization_short_regress.q
+++ b/ql/src/test/queries/clientpositive/vectorization_short_regress.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
index 2dc1271..2d3788d 100644
--- a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
+++ b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.optimize.ppd=true;

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out b/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out
index 7e30942..caa41b2 100644
--- a/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out
+++ b/ql/src/test/results/clientpositive/alter_merge_2_orc.q.out
@@ -64,12 +64,10 @@ POSTHOOK: Lineage: src_orc_merge_test_part PARTITION(ds=2012-01-03,ts=2012-01-03
 PREHOOK: query: select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_orc_merge_test_part
-PREHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
 #### A masked pattern was here ####
 POSTHOOK: query: select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_orc_merge_test_part
-POSTHOOK: Input: default@src_orc_merge_test_part@ds=2012-01-03/ts=2012-01-03+14%3A46%3A31
 #### A masked pattern was here ####
 610
 PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31'

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/alter_merge_orc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_merge_orc.q.out b/ql/src/test/results/clientpositive/alter_merge_orc.q.out
index b5a6d04..aa83fce 100644
--- a/ql/src/test/results/clientpositive/alter_merge_orc.q.out
+++ b/ql/src/test/results/clientpositive/alter_merge_orc.q.out
@@ -179,12 +179,10 @@ minFileSize:2515
 PREHOOK: query: select count(1) from src_orc_merge_test_part
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src_orc_merge_test_part
-PREHOOK: Input: default@src_orc_merge_test_part@ds=2011
 #### A masked pattern was here ####
 POSTHOOK: query: select count(1) from src_orc_merge_test_part
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src_orc_merge_test_part
-POSTHOOK: Input: default@src_orc_merge_test_part@ds=2011
 #### A masked pattern was here ####
 1500
 PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part
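
The golden-file updates from here on show the two visible effects of the new default: a count answered from the metastore no longer lists the individual partitions it would have scanned as query inputs, and explain plans collapse from a Map Reduce stage into a single metadata-only fetch. The .q files above opt out, presumably because they exercise the scan paths (input formats, file merges, vectorized execution) that a stats-only answer would bypass. As a sketch of the plan change, assuming a table t with accurate basic stats:

    explain select count(1) from t;
    -- with hive.compute.query.using.stats=false: Stage-1 (Map Reduce) feeding Stage-0 (Fetch, limit: -1)
    -- with the new default, roughly:
    --   STAGE DEPENDENCIES:
    --     Stage-0 is a root stage
    --   STAGE PLANS:
    --     Stage: Stage-0
    --       Fetch Operator
    --         limit: 1
    --         Processor Tree:
    --           ListSink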

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
index d6f607c..703a8e3 100644
--- a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
@@ -40,13 +40,11 @@ PREHOOK: query: -- select with paritition predicate.
 select count(*) from alter_coltype where dt = '100'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_coltype
-PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
 #### A masked pattern was here ####
 POSTHOOK: query: -- select with paritition predicate.
 select count(*) from alter_coltype where dt = '100'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_coltype
-POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
 #### A masked pattern was here ####
 25
 PREHOOK: query: -- alter partition key column data type for dt column.
@@ -74,15 +72,11 @@ PREHOOK: query: -- make sure the partition predicate still works.
 select count(*) from alter_coltype where dt = '100'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_coltype
-PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
-PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
 #### A masked pattern was here ####
 POSTHOOK: query: -- make sure the partition predicate still works.
 select count(*) from alter_coltype where dt = '100'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_coltype
-POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
-POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
 #### A masked pattern was here ####
 50
 PREHOOK: query: explain extended select count(*) from alter_coltype where dt = '100'
@@ -90,162 +84,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) from alter_coltype where dt = '100'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: alter_coltype
-            Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  null sort order: 
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col0 (type: bigint)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-        Partition
-          base file name: ts=3.0
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            dt 100
-            ts 3.0
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key,value
-            columns.comments 
-            columns.types string:string
-#### A masked pattern was here ####
-            name default.alter_coltype
-            numFiles 1
-            numRows 25
-            partition_columns dt/ts
-            partition_columns.types int:string
-            rawDataSize 191
-            serialization.ddl struct alter_coltype { string key, string value}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 216
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.alter_coltype
-              partition_columns dt/ts
-              partition_columns.types int:string
-              serialization.ddl struct alter_coltype { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.alter_coltype
-          name: default.alter_coltype
-#### A masked pattern was here ####
-        Partition
-          base file name: ts=6.30
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            dt 100
-            ts 6.30
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key,value
-            columns.comments 
-            columns.types string:string
-#### A masked pattern was here ####
-            name default.alter_coltype
-            numFiles 1
-            numRows 25
-            partition_columns dt/ts
-            partition_columns.types int:string
-            rawDataSize 191
-            serialization.ddl struct alter_coltype { string key, string value}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 216
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.alter_coltype
-              partition_columns dt/ts
-              partition_columns.types int:string
-              serialization.ddl struct alter_coltype { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.alter_coltype
-          name: default.alter_coltype
-      Truncated Path -> Alias:
-        /alter_coltype/dt=100/ts=3.0 [alter_coltype]
-        /alter_coltype/dt=100/ts=6.30 [alter_coltype]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
-#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0
-                  columns.types bigint
-                  escape.delim \
-                  hive.serialization.extend.additional.nesting.levels true
-                  serialization.escape.crlf true
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 
@@ -281,13 +125,11 @@ PREHOOK: query: -- validate partition key column predicate can still work.
 select count(*) from alter_coltype where ts = '6.30'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_coltype
-PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
 #### A masked pattern was here ####
 POSTHOOK: query: -- validate partition key column predicate can still work.
 select count(*) from alter_coltype where ts = '6.30'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_coltype
-POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
 #### A masked pattern was here ####
 25
 PREHOOK: query: explain extended select count(*) from alter_coltype where ts = '6.30'
@@ -295,115 +137,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) from alter_coltype where ts = '6.30'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: alter_coltype
-            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  null sort order: 
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col0 (type: bigint)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-        Partition
-          base file name: ts=6.30
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            dt 100
-            ts 6.30
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key,value
-            columns.comments 
-            columns.types string:string
-#### A masked pattern was here ####
-            name default.alter_coltype
-            numFiles 1
-            numRows 25
-            partition_columns dt/ts
-            partition_columns.types string:double
-            rawDataSize 191
-            serialization.ddl struct alter_coltype { string key, string value}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 216
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.alter_coltype
-              partition_columns dt/ts
-              partition_columns.types string:double
-              serialization.ddl struct alter_coltype { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.alter_coltype
-          name: default.alter_coltype
-      Truncated Path -> Alias:
-        /alter_coltype/dt=100/ts=6.30 [alter_coltype]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
-#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0
-                  columns.types bigint
-                  escape.delim \
-                  hive.serialization.extend.additional.nesting.levels true
-                  serialization.escape.crlf true
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 
@@ -412,14 +151,12 @@ PREHOOK: query: -- validate partition key column predicate on two different par
 select count(*) from alter_coltype where ts = 3.0 and dt=100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_coltype
-PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
 #### A masked pattern was here ####
 POSTHOOK: query: -- validate partition key column predicate on two different partition column data type
 -- can still work.
 select count(*) from alter_coltype where ts = 3.0 and dt=100
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_coltype
-POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
 #### A masked pattern was here ####
 25
 PREHOOK: query: explain extended select count(*) from alter_coltype where ts = 3.0 and dt=100
@@ -427,115 +164,12 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) from alter_coltype where ts = 3.0 and dt=100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: alter_coltype
-            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  null sort order: 
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col0 (type: bigint)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-        Partition
-          base file name: ts=3.0
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            dt 100
-            ts 3.0
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key,value
-            columns.comments 
-            columns.types string:string
-#### A masked pattern was here ####
-            name default.alter_coltype
-            numFiles 1
-            numRows 25
-            partition_columns dt/ts
-            partition_columns.types string:double
-            rawDataSize 191
-            serialization.ddl struct alter_coltype { string key, string value}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 216
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.alter_coltype
-              partition_columns dt/ts
-              partition_columns.types string:double
-              serialization.ddl struct alter_coltype { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.alter_coltype
-          name: default.alter_coltype
-      Truncated Path -> Alias:
-        /alter_coltype/dt=100/ts=3.0 [alter_coltype]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
-#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0
-                  columns.types bigint
-                  escape.delim \
-                  hive.serialization.extend.additional.nesting.levels true
-                  serialization.escape.crlf true
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 
@@ -717,27 +351,21 @@ STAGE PLANS:
 PREHOOK: query: select count(*) from alter_coltype where ts = 3.0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_coltype
-PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from alter_coltype where ts = 3.0
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_coltype
-POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
 #### A masked pattern was here ####
 25
 PREHOOK: query: -- make sure the partition predicate still works.
 select count(*) from alter_coltype where dt = '100'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alter_coltype
-PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0
-PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30
 #### A masked pattern was here ####
 POSTHOOK: query: -- make sure the partition predicate still works.
 select count(*) from alter_coltype where dt = '100'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alter_coltype
-POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0
-POSTHOOK: Input: default@alter_coltype@dt=100/ts=6.30
 #### A masked pattern was here ####
 50
 PREHOOK: query: desc alter_coltype
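
The HiveConf description above says more advanced stats come from "analyze table queries"; for reference, a minimal HiveQL sketch against a hypothetical table t (not part of this commit):

    analyze table t compute statistics;                  -- basic stats: numRows, rawDataSize, numFiles, totalSize
    analyze table t compute statistics for columns key;  -- column-level stats in the metastore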

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/annotate_stats_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
index c51b895..75401ae 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_select.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out
@@ -685,44 +685,12 @@ POSTHOOK: query: -- COUNT(*) is projected as new column. It is not projected as
 explain select count(*) from alltypes_orc
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: alltypes_orc
-            Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink
 
@@ -735,44 +703,12 @@ POSTHOOK: query: -- COUNT(1) is projected as new column. It is not projected as
 explain select count(1) from alltypes_orc
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: alltypes_orc
-            Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: count(1)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: -1
+      limit: 1
       Processor Tree:
         ListSink

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/avro_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_partitioned.q.out b/ql/src/test/results/clientpositive/avro_partitioned.q.out
index c642736..bd45978 100644
--- a/ql/src/test/results/clientpositive/avro_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/avro_partitioned.q.out
@@ -250,26 +250,10 @@ POSTHOOK: Output: default@episodes_partitioned@doctor_pt=7
 PREHOOK: query: SELECT COUNT(*) FROM episodes_partitioned
 PREHOOK: type: QUERY
 PREHOOK: Input: default@episodes_partitioned
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=1
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=11
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=2
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=4
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=5
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=6
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=7
-PREHOOK: Input: default@episodes_partitioned@doctor_pt=9
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT COUNT(*) FROM episodes_partitioned
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@episodes_partitioned
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=11
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=2
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=4
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=5
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=7
-POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9
 #### A masked pattern was here ####
 8
 PREHOOK: query: -- Verify that reading from an Avro partition works

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out
index 9faa0d0..48de423 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out
@@ -113,12 +113,10 @@ POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.Fiel
 PREHOOK: query: select count(*) from test_table2 where ds = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from test_table2 where ds = '1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
 #### A masked pattern was here ####
 500
 PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
@@ -220,12 +218,10 @@ POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.Fiel
 PREHOOK: query: select count(*) from test_table2 where ds = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from test_table2 where ds = '1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
 #### A masked pattern was here ####
 500
 PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
index e778e35..8831080 100644
--- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
+++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out
@@ -107,12 +107,10 @@ POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.Fiel
 PREHOOK: query: select count(*) from test_table2 where ds = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from test_table2 where ds = '1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
 #### A masked pattern was here ####
 500
 PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'
@@ -231,12 +229,10 @@ POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(test_table1)a.Fiel
 PREHOOK: query: select count(*) from test_table2 where ds = '1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test_table2
-PREHOOK: Input: default@test_table2@ds=1
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from test_table2 where ds = '1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@test_table2
-POSTHOOK: Input: default@test_table2@ds=1
 #### A masked pattern was here ####
 500
 PREHOOK: query: select count(*) from test_table2 tablesample (bucket 1 out of 2) s where ds = '1'

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/cbo_rp_udf_udaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_udf_udaf.q.out b/ql/src/test/results/clientpositive/cbo_rp_udf_udaf.q.out
index 156d02f..b30d9da 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_udf_udaf.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_udf_udaf.q.out
@@ -53,12 +53,10 @@ POSTHOOK: Input: default@cbo_t1@dt=2014
 PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 1	20	1	18
 PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out b/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out
index a1e7fd8..3a589b4 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_udf_udaf_stats_opt.q.out
@@ -54,12 +54,10 @@ POSTHOOK: Input: default@cbo_t1@dt=2014
 PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 1	20	1	18
 PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/cbo_udf_udaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_udf_udaf.q.out b/ql/src/test/results/clientpositive/cbo_udf_udaf.q.out
index 156d02f..b30d9da 100644
--- a/ql/src/test/results/clientpositive/cbo_udf_udaf.q.out
+++ b/ql/src/test/results/clientpositive/cbo_udf_udaf.q.out
@@ -53,12 +53,10 @@ POSTHOOK: Input: default@cbo_t1@dt=2014
 PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cbo_t1
-PREHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from cbo_t1) cbo_t1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@cbo_t1
-POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 1	20	1	18
 PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from cbo_t1) cbo_t1

http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/combine2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/combine2.q.out b/ql/src/test/results/clientpositive/combine2.q.out
index fb9ef84..6616f66 100644
--- a/ql/src/test/results/clientpositive/combine2.q.out
+++ b/ql/src/test/results/clientpositive/combine2.q.out
@@ -153,462 +153,22 @@ POSTHOOK: query: explain extended
 select count(1) from combine2 where value is not null
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: combine2
-            Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count(1)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  null sort order: 
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col0 (type: bigint)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-        Partition
-          base file name: value=2010-04-21 09%3A45%3A00
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            value 2010-04-21 09:45:00
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key
-            columns.comments 
-            columns.types string
-#### A masked pattern was here ####
-            name default.combine2
-            numFiles 1
-            numRows 1
-            partition_columns value
-            partition_columns.types string
-            rawDataSize 2
-            serialization.ddl struct combine2 { string key}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 3
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key
-              columns.comments 
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              partition_columns value
-              partition_columns.types string
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.combine2
-          name: default.combine2
-#### A masked pattern was here ####
-        Partition
-          base file name: value=val_0
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            value val_0
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key
-            columns.comments 
-            columns.types string
-#### A masked pattern was here ####
-            name default.combine2
-            numFiles 3
-            numRows 3
-            partition_columns value
-            partition_columns.types string
-            rawDataSize 3
-            serialization.ddl struct combine2 { string key}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 6
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key
-              columns.comments 
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              partition_columns value
-              partition_columns.types string
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.combine2
-          name: default.combine2
-#### A masked pattern was here ####
-        Partition
-          base file name: value=val_2
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            value val_2
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key
-            columns.comments 
-            columns.types string
-#### A masked pattern was here ####
-            name default.combine2
-            numFiles 1
-            numRows 1
-            partition_columns value
-            partition_columns.types string
-            rawDataSize 1
-            serialization.ddl struct combine2 { string key}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 2
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key
-              columns.comments 
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              partition_columns value
-              partition_columns.types string
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.combine2
-          name: default.combine2
-#### A masked pattern was here ####
-        Partition
-          base file name: value=val_4
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            value val_4
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key
-            columns.comments 
-            columns.types string
-#### A masked pattern was here ####
-            name default.combine2
-            numFiles 1
-            numRows 1
-            partition_columns value
-            partition_columns.types string
-            rawDataSize 1
-            serialization.ddl struct combine2 { string key}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 2
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key
-              columns.comments 
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              partition_columns value
-              partition_columns.types string
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.combine2
-          name: default.combine2
-#### A masked pattern was here ####
-        Partition
-          base file name: value=val_5
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            value val_5
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key
-            columns.comments 
-            columns.types string
-#### A masked pattern was here ####
-            name default.combine2
-            numFiles 3
-            numRows 3
-            partition_columns value
-            partition_columns.types string
-            rawDataSize 3
-            serialization.ddl struct combine2 { string key}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 6
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key
-              columns.comments 
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              partition_columns value
-              partition_columns.types string
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.combine2
-          name: default.combine2
-#### A masked pattern was here ####
-        Partition
-          base file name: value=val_8
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            value val_8
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key
-            columns.comments 
-            columns.types string
-#### A masked pattern was here ####
-            name default.combine2
-            numFiles 1
-            numRows 1
-            partition_columns value
-            partition_columns.types string
-            rawDataSize 1
-            serialization.ddl struct combine2 { string key}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 2
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key
-              columns.comments 
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              partition_columns value
-              partition_columns.types string
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.combine2
-          name: default.combine2
-#### A masked pattern was here ####
-        Partition
-          base file name: value=val_9
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            value val_9
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key
-            columns.comments 
-            columns.types string
-#### A masked pattern was here ####
-            name default.combine2
-            numFiles 1
-            numRows 1
-            partition_columns value
-            partition_columns.types string
-            rawDataSize 1
-            serialization.ddl struct combine2 { string key}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 2
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              bucket_count -1
-              columns key
-              columns.comments 
-              columns.types string
-#### A masked pattern was here ####
-              name default.combine2
-              partition_columns value
-              partition_columns.types string
-              serialization.ddl struct combine2 { string key}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            name: default.combine2
-          name: default.combine2
-#### A masked pattern was here ####
-        Partition
-          base file name: value=|
-          input format: org.apache.hadoop.mapred.TextInputFormat
-          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-          partition values:
-            value |
-          properties:
-            COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-            bucket_count -1
-            columns key
-            columns.comments 
-            columns.types string
-#### A masked pattern was here ####
-            name default.combine2
-            numFiles 1
-            numRows 1
-            partition_columns value
-            partition_columns.types string
-            rawDataSize 2
-            serialization.ddl struct combine2 { string key}
-            serialization.format 1
-            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            totalSize 3
-#### A masked pattern was here ####
-          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format:
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key - columns.comments - columns.types string -#### A masked pattern was here #### - name default.combine2 - partition_columns value - partition_columns.types string - serialization.ddl struct combine2 { string key} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.combine2 - name: default.combine2 - Truncated Path -> Alias: - /combine2/value=2010-04-21 09%3A45%3A00 [$hdt$_0:combine2] - /combine2/value=val_0 [$hdt$_0:combine2] - /combine2/value=val_2 [$hdt$_0:combine2] - /combine2/value=val_4 [$hdt$_0:combine2] - /combine2/value=val_5 [$hdt$_0:combine2] - /combine2/value=val_8 [$hdt$_0:combine2] - /combine2/value=val_9 [$hdt$_0:combine2] - /combine2/value=| [$hdt$_0:combine2] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0 - columns.types bigint - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink PREHOOK: query: select count(1) from combine2 where value is not null PREHOOK: type: QUERY PREHOOK: Input: default@combine2 -PREHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00 -PREHOOK: Input: default@combine2@value=val_0 -PREHOOK: Input: default@combine2@value=val_2 -PREHOOK: Input: default@combine2@value=val_4 -PREHOOK: Input: default@combine2@value=val_5 -PREHOOK: Input: default@combine2@value=val_8 -PREHOOK: Input: default@combine2@value=val_9 -PREHOOK: Input: default@combine2@value=| #### A masked pattern was here #### POSTHOOK: query: select count(1) from combine2 where value is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@combine2 -POSTHOOK: Input: default@combine2@value=2010-04-21 09%3A45%3A00 -POSTHOOK: Input: default@combine2@value=val_0 -POSTHOOK: Input: default@combine2@value=val_2 -POSTHOOK: Input: default@combine2@value=val_4 -POSTHOOK: Input: default@combine2@value=val_5 -POSTHOOK: Input: default@combine2@value=val_8 -POSTHOOK: Input: default@combine2@value=val_9 -POSTHOOK: Input: default@combine2@value=| #### A masked pattern was here #### 12 PREHOOK: query: explain http://git-wip-us.apache.org/repos/asf/hive/blob/8763c7aa/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out ---------------------------------------------------------------------- diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out index fc4f483..cbfc7be 100644 --- 
a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out @@ -1158,53 +1158,37 @@ Storage Desc Params: PREHOOK: query: select count(*) from over1k_part_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part_orc -PREHOOK: Input: default@over1k_part_orc@ds=foo/t=27 -PREHOOK: Input: default@over1k_part_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select count(*) from over1k_part_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part_orc -POSTHOOK: Input: default@over1k_part_orc@ds=foo/t=27 -POSTHOOK: Input: default@over1k_part_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 38 PREHOOK: query: select count(*) from over1k_part_limit_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part_limit_orc -PREHOOK: Input: default@over1k_part_limit_orc@ds=foo/t=27 -PREHOOK: Input: default@over1k_part_limit_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select count(*) from over1k_part_limit_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part_limit_orc -POSTHOOK: Input: default@over1k_part_limit_orc@ds=foo/t=27 -POSTHOOK: Input: default@over1k_part_limit_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 20 PREHOOK: query: select count(*) from over1k_part_buck_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part_buck_orc -PREHOOK: Input: default@over1k_part_buck_orc@t=27 -PREHOOK: Input: default@over1k_part_buck_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select count(*) from over1k_part_buck_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part_buck_orc -POSTHOOK: Input: default@over1k_part_buck_orc@t=27 -POSTHOOK: Input: default@over1k_part_buck_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 38 PREHOOK: query: select count(*) from over1k_part_buck_sort_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part_buck_sort_orc -PREHOOK: Input: default@over1k_part_buck_sort_orc@t=27 -PREHOOK: Input: default@over1k_part_buck_sort_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select count(*) from over1k_part_buck_sort_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part_buck_sort_orc -POSTHOOK: Input: default@over1k_part_buck_sort_orc@t=27 -POSTHOOK: Input: default@over1k_part_buck_sort_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 38 PREHOOK: query: -- tests for HIVE-6883 @@ -1705,14 +1689,10 @@ POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ PREHOOK: query: select count(*) from over1k_part2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2_orc -PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 -PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select count(*) from over1k_part2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2_orc -POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 -POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 19 PREHOOK: query: insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i @@ -1852,14 +1832,10 @@ POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ 
PREHOOK: query: select count(*) from over1k_part2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2_orc -PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 -PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select count(*) from over1k_part2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2_orc -POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 -POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 19 PREHOOK: query: -- hadoop-1 does not honor number of reducers in local mode. There is always only 1 reducer irrespective of the number of buckets. @@ -2164,58 +2140,22 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from over1k_part_buck_sort2_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: over1k_part_buck_sort2_orc - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE - Select Operator - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink PREHOOK: query: select count(*) from over1k_part_buck_sort2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part_buck_sort2_orc -PREHOOK: Input: default@over1k_part_buck_sort2_orc@t=27 -PREHOOK: Input: default@over1k_part_buck_sort2_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select count(*) from over1k_part_buck_sort2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part_buck_sort2_orc -POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=27 -POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 19 PREHOOK: query: insert overwrite table over1k_part_buck_sort2_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27 @@ -2374,57 +2314,21 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select count(*) from over1k_part_buck_sort2_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: over1k_part_buck_sort2_orc - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE - Select Operator - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE - Group By Operator - 
aggregations: count() - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator - limit: -1 + limit: 1 Processor Tree: ListSink PREHOOK: query: select count(*) from over1k_part_buck_sort2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part_buck_sort2_orc -PREHOOK: Input: default@over1k_part_buck_sort2_orc@t=27 -PREHOOK: Input: default@over1k_part_buck_sort2_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### POSTHOOK: query: select count(*) from over1k_part_buck_sort2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part_buck_sort2_orc -POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=27 -POSTHOOK: Input: default@over1k_part_buck_sort2_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 19
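
The golden-file churn above all follows one pattern: with hive.compute.query.using.stats now defaulting to true, a count(*) or count(1) whose answer is already recorded in metastore basic stats (numRows) compiles to a metadata lookup — the Stage-1 Map Reduce block disappears, the Fetch Operator's limit becomes 1, and no partitions are listed as query inputs. A minimal sketch of how to observe the two plan shapes, or opt out per session; the table name src_part is illustrative and not part of this commit:

    -- Sketch only: src_part stands in for any partitioned table whose
    -- partitions carry COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}.
    set hive.compute.query.using.stats=true;     -- the new default
    explain select count(*) from src_part;       -- expect: Stage-0 Fetch Operator, limit: 1

    set hive.compute.query.using.stats=false;    -- per-session opt-out
    explain select count(*) from src_part;       -- expect: Stage-1 Map Reduce + Stage-0 Fetch, limit: -1

The shortcut applies only while basic stats are accurate; a partition whose stats are missing or stale falls back to the scanning plan.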
