Modified: hive/branches/llap/ql/src/test/queries/clientpositive/annotate_stats_groupby.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/annotate_stats_groupby.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/annotate_stats_groupby.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/annotate_stats_groupby.q Tue Oct 14 19:06:45 2014 @@ -1,4 +1,25 @@ set hive.stats.fetch.column.stats=true; +set hive.map.aggr.hash.percentmemory=0.0f; + +-- hash aggregation is disabled + +-- There are different cases for Group By depending on map/reduce side, hash aggregation, +-- grouping sets and column stats. If we don't have column stats, we just assume hash +-- aggregation is disabled. Following are the possible cases and rule for cardinality +-- estimation + +-- MAP SIDE: +-- Case 1: NO column stats, NO hash aggregation, NO grouping sets → numRows +-- Case 2: NO column stats, NO hash aggregation, grouping sets → numRows * sizeOfGroupingSet +-- Case 3: column stats, hash aggregation, NO grouping sets → Min(numRows / 2, ndvProduct * parallelism) +-- Case 4: column stats, hash aggregation, grouping sets → Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) +-- Case 5: column stats, NO hash aggregation, NO grouping sets → numRows +-- Case 6: column stats, NO hash aggregation, grouping sets → numRows * sizeOfGroupingSet + +-- REDUCE SIDE: +-- Case 7: NO column stats → numRows / 2 +-- Case 8: column stats, grouping sets → Min(numRows, ndvProduct * sizeOfGroupingSet) +-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) create table if not exists loc_staging ( state string, @@ -29,71 +50,91 @@ from ( select state as a, locid as b, co ) sq1 group by a,c; -analyze table 
loc_orc compute statistics for columns state,locid,year; --- only one distinct value in year column + 1 NULL value --- map-side GBY: numRows: 8 (map-side will not do any reduction) --- reduce-side GBY: numRows: 2 +-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 9: column stats, NO grouping sets - cardinality = 2 explain select year from loc_orc group by year; --- map-side GBY: numRows: 8 --- reduce-side GBY: numRows: 4 +-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 9: column stats, NO grouping sets - cardinality = 8 explain select state,locid from loc_orc group by state,locid; --- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 8: column stats, grouping sets - cardinality = 32 explain select state,locid from loc_orc group by state,locid with cube; --- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 8: column stats, grouping sets - cardinality = 24 explain select state,locid from loc_orc group by state,locid with rollup; --- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8 +-- Case 8: column stats, grouping sets - cardinality = 8 explain select state,locid from loc_orc group by state,locid grouping sets((state)); --- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16 +-- Case 8: column stats, grouping sets - cardinality = 16 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)); --- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 8: column stats, grouping sets - cardinality = 24 explain select state,locid 
from loc_orc group by state,locid grouping sets((state),(locid),()); --- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 8: column stats, grouping sets - cardinality = 32 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()); -set hive.stats.map.parallelism=10; +set hive.map.aggr.hash.percentmemory=0.5f; +set mapred.max.split.size=80; +-- map-side parallelism will be 10 --- map-side GBY: numRows: 80 (map-side will not do any reduction) --- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2) +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 +-- Case 9: column stats, NO grouping sets - cardinality = 2 explain select year from loc_orc group by year; --- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7) +-- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16 +-- Case 8: column stats, grouping sets - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube; +-- ndvProduct becomes 0 as zip does not have column stats +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 +-- Case 9: column stats, NO grouping sets - cardinality = 2 +explain select state,zip from loc_orc group by state,zip; + +set mapred.max.split.size=1000; set hive.stats.fetch.column.stats=false; -set hive.stats.map.parallelism=1; --- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube; --- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 +-- Case 7: NO 
column stats - cardinality = 12 explain select state,locid from loc_orc group by state,locid with rollup; --- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 7: NO column stats - cardinality = 4 explain select state,locid from loc_orc group by state,locid grouping sets((state)); --- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16 +-- Case 7: NO column stats - cardinality = 8 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)); --- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 +-- Case 7: NO column stats - cardinality = 12 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()); --- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()); -set hive.stats.map.parallelism=10; +set mapred.max.split.size=80; --- map-side GBY: numRows: 80 (map-side will not do any reduction) --- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2) +-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 7: NO column stats - cardinality = 4 explain select year from loc_orc group by year; --- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7) +-- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube;
Modified: hive/branches/llap/ql/src/test/queries/clientpositive/annotate_stats_part.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/annotate_stats_part.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/annotate_stats_part.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/annotate_stats_part.q Tue Oct 14 19:06:45 2014 @@ -65,6 +65,9 @@ explain select zip from loc_orc; -- basicStatState: COMPLETE colStatState: PARTIAL explain select state from loc_orc; +-- basicStatState: COMPLETE colStatState: COMPLETE +explain select year from loc_orc; + -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL -- basicStatState: COMPLETE colStatState: PARTIAL explain select state,locid from loc_orc; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/authorization_cli_createtab.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/authorization_cli_createtab.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/authorization_cli_createtab.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/authorization_cli_createtab.q Tue Oct 14 19:06:45 2014 @@ -1,6 +1,5 @@ -set hive.test.authz.sstd.hs2.mode=true; set hive.users.in.admin.role=hive_admin_user; -set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory; set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; set 
user.name=hive_test_user; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/authorization_role_grant2.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/authorization_role_grant2.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/authorization_role_grant2.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/authorization_role_grant2.q Tue Oct 14 19:06:45 2014 @@ -20,6 +20,7 @@ show principals src_role_wadmin; set user.name=user2; set role src_role_WadMin; +show principals src_role_wadmin; -- grant role to another user grant src_Role_wadmin to user user3; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/create_func1.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/create_func1.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/create_func1.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/create_func1.q Tue Oct 14 19:06:45 2014 @@ -2,11 +2,16 @@ -- qtest_get_java_boolean should already be created during test initialization select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1; +describe function extended qtest_get_java_boolean; + create database mydb; create function mydb.func1 as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper'; show functions mydb.func1; +describe function extended mydb.func1; + + select mydb.func1('abc') from src limit 1; drop function mydb.func1; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/decimal_udf.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/decimal_udf.q?rev=1631841&r1=1631840&r2=1631841&view=diff 
============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/decimal_udf.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/decimal_udf.q Tue Oct 14 19:06:45 2014 @@ -39,6 +39,9 @@ SELECT key - '1.0' FROM DECIMAL_UDF; EXPLAIN SELECT key * key FROM DECIMAL_UDF; SELECT key * key FROM DECIMAL_UDF; +EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0; +SELECT key, value FROM DECIMAL_UDF where key * value > 0; + EXPLAIN SELECT key * value FROM DECIMAL_UDF; SELECT key * value FROM DECIMAL_UDF; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,10 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.exec.reducers.max = 1; -create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_danp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/delete_all_partitioned.q URL: 
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/delete_all_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/delete_all_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/delete_all_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,10 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; -create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dap partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10; insert into table acid_dap partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 1000 order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/delete_orig_table.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/delete_orig_table.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/delete_orig_table.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/delete_orig_table.q Tue Oct 14 19:06:45 2014 @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/delete_orig_table; @@ -18,7 +17,7 @@ create table acid_dot( ctimestamp1 TIMESTAMP, ctimestamp2 TIMESTAMP, cboolean1 BOOLEAN, - cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/delete_orig_table'; + cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/delete_orig_table' TBLPROPERTIES ('transactional'='true'); select count(*) from acid_dot; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/delete_tmp_table.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/delete_tmp_table.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/delete_tmp_table.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/delete_tmp_table.q Tue Oct 14 19:06:45 2014 @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_no_match.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_no_match.q?rev=1631841&r1=1631840&r2=1631841&view=diff 
============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_no_match.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_no_match.q Tue Oct 14 19:06:45 2014 @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dwnm select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dwnp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; 
Modified: hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/delete_where_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,10 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; -create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dwp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10; insert into table acid_dwp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/delete_whole_partition.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/delete_whole_partition.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/delete_whole_partition.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/delete_whole_partition.q Tue Oct 14 19:06:45 2014 @@ -1,10 +1,8 @@ set 
hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; -create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dwhp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10; insert into table acid_dwhp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/drop_index.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/drop_index.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/drop_index.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/drop_index.q Tue Oct 14 19:06:45 2014 @@ -1,2 +1,3 @@ SET hive.exec.drop.ignorenonexistent=false; DROP INDEX IF EXISTS UnknownIndex ON src; +DROP INDEX IF EXISTS UnknownIndex ON UnknownTable; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q (original) +++ 
hive/branches/llap/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q Tue Oct 14 19:06:45 2014 @@ -19,6 +19,9 @@ load data local inpath '../../data/files load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2); load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3); +analyze table dim_shops compute statistics; +analyze table agg_01 partition (dim_shops_id) compute statistics; + select * from dim_shops; select * from agg_01; @@ -40,6 +43,73 @@ d1.label in ('foo', 'bar') GROUP BY d1.label ORDER BY d1.label; +set hive.tez.dynamic.partition.pruning.max.event.size=1000000; +set hive.tez.dynamic.partition.pruning.max.data.size=1; + +EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +EXPLAIN SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id; + +SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id; + +EXPLAIN SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1; + +SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1; + +set hive.tez.dynamic.partition.pruning.max.event.size=1; +set hive.tez.dynamic.partition.pruning.max.data.size=1000000; + +EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER 
BY d1.label; + +set hive.tez.dynamic.partition.pruning.max.event.size=100000; +set hive.tez.dynamic.partition.pruning.max.data.size=1000000; + EXPLAIN SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' UNION ALL @@ -47,4 +117,4 @@ SELECT amount FROM agg_01, dim_shops WHE SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' UNION ALL -SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; \ No newline at end of file +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q Tue Oct 14 19:06:45 2014 @@ -108,6 +108,13 @@ set hive.optimize.sort.dynamic.partition explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i; set hive.optimize.sort.dynamic.partition=true; explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i; +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27; + +set hive.optimize.sort.dynamic.partition=false; +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t; +set hive.optimize.sort.dynamic.partition=true; +-- tests for HIVE-8162, only partition column 't' 
should be in last RS operator +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t; set hive.optimize.sort.dynamic.partition=false; insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q Tue Oct 14 19:06:45 2014 @@ -102,6 +102,13 @@ set hive.optimize.sort.dynamic.partition explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; set hive.optimize.sort.dynamic.partition=true; explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27; + +set hive.optimize.sort.dynamic.partition=false; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t; +set hive.optimize.sort.dynamic.partition=true; +-- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t; set hive.optimize.sort.dynamic.partition=false; insert overwrite table over1k_part2 
partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/insert_into1.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_into1.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/insert_into1.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_into1.q Tue Oct 14 19:06:45 2014 @@ -1,3 +1,4 @@ +set hive.compute.query.using.stats=true; DROP TABLE insert_into1; CREATE TABLE insert_into1 (key int, value string); @@ -7,14 +8,18 @@ INSERT INTO TABLE insert_into1 SELECT * SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 ) t; - +explain +select count(*) from insert_into1; +select count(*) from insert_into1; EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100; INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100; SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 ) t; +explain SELECT COUNT(*) FROM insert_into1; +select count(*) from insert_into1; EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10; INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10; @@ -22,5 +27,10 @@ SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1 ) t; +explain +SELECT COUNT(*) FROM insert_into1; +select count(*) from insert_into1; DROP TABLE insert_into1; + +set hive.compute.query.using.stats=false; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/insert_into2.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_into2.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- 
hive/branches/llap/ql/src/test/queries/clientpositive/insert_into2.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_into2.q Tue Oct 14 19:06:45 2014 @@ -1,3 +1,4 @@ +set hive.compute.query.using.stats=true; DROP TABLE insert_into2; CREATE TABLE insert_into2 (key int, value string) PARTITIONED BY (ds string); @@ -5,7 +6,12 @@ CREATE TABLE insert_into2 (key int, valu EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src LIMIT 100; INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100; +explain +select count (*) from insert_into2 where ds = '1'; +select count (*) from insert_into2 where ds = '1'; INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100; +explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='1'; SELECT COUNT(*) FROM insert_into2 WHERE ds='1'; SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 @@ -19,6 +25,9 @@ INSERT OVERWRITE TABLE insert_into2 PART SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 ) t; +explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='2'; +SELECT COUNT(*) FROM insert_into2 WHERE ds='2'; EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') SELECT * FROM src LIMIT 50; @@ -27,5 +36,11 @@ INSERT OVERWRITE TABLE insert_into2 PART SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2 ) t; +explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='2'; +SELECT COUNT(*) FROM insert_into2 WHERE ds='2'; + DROP TABLE insert_into2; + +set hive.compute.query.using.stats=false; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/insert_orig_table.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_orig_table.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- 
hive/branches/llap/ql/src/test/queries/clientpositive/insert_orig_table.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_orig_table.q Tue Oct 14 19:06:45 2014 @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; create table acid_iot( @@ -15,7 +14,7 @@ create table acid_iot( ctimestamp1 TIMESTAMP, ctimestamp2 TIMESTAMP, cboolean1 BOOLEAN, - cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc; + cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true'); LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_iot; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/insert_update_delete.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_update_delete.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/insert_update_delete.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_update_delete.q Tue Oct 14 19:06:45 2014 @@ -1,10 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; -create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_iud select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10; Modified: 
hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,13 +1,11 @@ set hive.exec.dynamic.partition.mode=nonstrict; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; create table ivdp(i int, de decimal(5,2), - vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc; + vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table ivdp partition (ds) values (1, 109.23, 'and everywhere that mary went', 'today'), Modified: hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set 
hive.enforce.bucketing=true; create table acid_ivnp(ti tinyint, @@ -12,12 +11,14 @@ create table acid_ivnp(ti tinyint, de decimal(5,2), t timestamp, dt date, + b boolean, s string, vc varchar(128), - ch char(12)) clustered by (i) into 2 buckets stored as orc; + ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_ivnp values - (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'), - (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue' ); + (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'), + (null, null, null, null, null, null, null, null, null, null, null, null, null), + (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' ); -select ti, si, i, bi, f, d, de, t, dt, s, vc, ch from acid_ivnp order by ti; +select * from acid_ivnp order by ti; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_orig_table.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_orig_table.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_orig_table.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_orig_table.q Tue Oct 14 19:06:45 2014 @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; 
create table acid_ivot( @@ -15,7 +14,7 @@ create table acid_ivot( ctimestamp1 TIMESTAMP, ctimestamp2 TIMESTAMP, cboolean1 BOOLEAN, - cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc; + cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true'); LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,8 +1,6 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.exec.dynamic.partition.mode=nonstrict; create table acid_ivp(ti tinyint, si smallint, @@ -15,7 +13,7 @@ create table acid_ivp(ti tinyint, dt date, s string, vc varchar(128), - ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc; + ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_ivp partition (ds='today') values (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'), Modified: hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_tmp_table.q URL: 
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_tmp_table.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_tmp_table.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/insert_values_tmp_table.q Tue Oct 14 19:06:45 2014 @@ -1,12 +1,12 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc; +create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_ivtt values (1, 109.23, 'mary had a little lamb'), - (429496729, 0.14, 'its fleece was white as snow'); + (429496729, 0.14, 'its fleece was white as snow'), + (-29496729, -0.14, 'negative values test'); select i, de, vc from acid_ivtt order by i; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q Tue Oct 14 19:06:45 2014 @@ -46,6 +46,8 @@ explain select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010; select count(*), count(1), 
sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010; +select count(*) from stats_tbl_part; +select count(*)/2 from stats_tbl_part; drop table stats_tbl_part; set hive.compute.query.using.stats=false; set hive.stats.dbclass=jdbc:derby; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/optimize_nullscan.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/optimize_nullscan.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/optimize_nullscan.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/optimize_nullscan.q Tue Oct 14 19:06:45 2014 @@ -5,6 +5,10 @@ select key from src where false; select key from src where false; explain extended +select count(key) from srcpart where 1=2 group by key; +select count(key) from srcpart where 1=2 group by key; + +explain extended select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key; select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/parquet_types.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/parquet_types.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/parquet_types.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/parquet_types.q Tue Oct 14 19:06:45 2014 @@ -10,9 +10,14 @@ CREATE TABLE parquet_types_staging ( cstring1 string, t timestamp, cchar char(5), - cvarchar varchar(10) + cvarchar varchar(10), + m1 map<string, varchar(3)>, + l1 array<int>, + st1 struct<c1:int, 
c2:char(1)> ) ROW FORMAT DELIMITED -FIELDS TERMINATED BY '|'; +FIELDS TERMINATED BY '|' +COLLECTION ITEMS TERMINATED BY ',' +MAP KEYS TERMINATED BY ':'; CREATE TABLE parquet_types ( cint int, @@ -23,7 +28,10 @@ CREATE TABLE parquet_types ( cstring1 string, t timestamp, cchar char(5), - cvarchar varchar(10) + cvarchar varchar(10), + m1 map<string, varchar(3)>, + l1 array<int>, + st1 struct<c1:int, c2:char(1)> ) STORED AS PARQUET; LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging; @@ -32,6 +40,8 @@ INSERT OVERWRITE TABLE parquet_types SEL SELECT * FROM parquet_types; +SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types; + SELECT ctinyint, MAX(cint), MIN(csmallint), Modified: hive/branches/llap/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q Tue Oct 14 19:06:45 2014 @@ -1,4 +1,4 @@ - +-- SORT_BEFORE_DIFF create table partition_test_partitioned(key string, value string) partitioned by (dt string); Modified: hive/branches/llap/ql/src/test/queries/clientpositive/ptf_matchpath.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/ptf_matchpath.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/ptf_matchpath.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/ptf_matchpath.q Tue Oct 14 19:06:45 2014 @@ -32,5 +32,15 @@ from matchpath(on arg2('LATE'), arg3(arr_delay > 15), 
arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) -where fl_num = 1142; +where fl_num = 1142; + +-- 3. empty partition. +select origin_city_name, fl_num, year, month, day_of_month, sz, tpath +from matchpath(on + (select * from flights_tiny where fl_num = -1142) flights_tiny + sort by fl_num, year, month, day_of_month + arg1('LATE.LATE+'), + arg2('LATE'), arg3(arr_delay > 15), + arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') + ); \ No newline at end of file Modified: hive/branches/llap/ql/src/test/queries/clientpositive/quote2.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/quote2.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/quote2.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/quote2.q Tue Oct 14 19:06:45 2014 @@ -10,6 +10,7 @@ SELECT 'abc\\\\\'', "abc\\\\\"", 'abc\\\\\\', "abc\\\\\\", 'abc""""\\', "abc''''\\", + 'mysql_%\\_\%', 'mysql\\\_\\\\\%', "awk '{print NR\"\\t\"$0}'", 'tab\ttab', "tab\ttab" FROM src @@ -24,6 +25,7 @@ SELECT 'abc\\\\\'', "abc\\\\\"", 'abc\\\\\\', "abc\\\\\\", 'abc""""\\', "abc''''\\", + 'mysql_%\\_\%', 'mysql\\\_\\\\\%', "awk '{print NR\"\\t\"$0}'", 'tab\ttab', "tab\ttab" FROM src Modified: hive/branches/llap/ql/src/test/queries/clientpositive/show_functions.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/show_functions.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/show_functions.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/show_functions.q Tue Oct 14 19:06:45 2014 @@ -9,3 +9,23 @@ SHOW FUNCTIONS 'log.*'; SHOW 
FUNCTIONS '.*date.*'; SHOW FUNCTIONS '***'; + +SHOW FUNCTIONS LIKE 'When'; + +SHOW FUNCTIONS LIKE 'max|min'; + +SHOW FUNCTIONS LIKE 'xpath*|m*'; + +SHOW FUNCTIONS LIKE 'nomatch'; + +SHOW FUNCTIONS LIKE "log"; + +SHOW FUNCTIONS LIKE 'log'; + +SHOW FUNCTIONS LIKE `log`; + +SHOW FUNCTIONS LIKE 'log*'; + +SHOW FUNCTIONS LIKE "log*"; + +SHOW FUNCTIONS LIKE `log*`; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q Tue Oct 14 19:06:45 2014 @@ -1,13 +1,11 @@ set hive.exec.dynamic.partition.mode=nonstrict; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; create table acid_uami(i int, de decimal(5,2), - vc varchar(128)) clustered by (i) into 2 buckets stored as orc; + vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_uami values (1, 109.23, 'mary had a little lamb'), Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_all_non_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_all_non_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_all_non_partitioned.q (original) +++ 
hive/branches/llap/ql/src/test/queries/clientpositive/update_all_non_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_uanp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_all_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_all_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_all_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_all_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,10 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; -create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_uap partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10; insert into table acid_uap partition (ds='tomorrow') select cint, 
cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 10 order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_all_types.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_all_types.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_all_types.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_all_types.q Tue Oct 14 19:06:45 2014 @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; create table acid_uat(ti tinyint, @@ -15,7 +14,7 @@ create table acid_uat(ti tinyint, s string, vc varchar(128), ch char(36), - b boolean) clustered by (i) into 2 buckets stored as orc; + b boolean) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_uat select ctinyint, @@ -53,4 +52,11 @@ update acid_uat set select * from acid_uat order by i; +update acid_uat set + ti = ti * 2, + si = cast(f as int), + d = floor(de) + where s = 'aw724t8c5558x2xneC624'; + +select * from acid_uat order by i; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_orig_table.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_orig_table.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_orig_table.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_orig_table.q Tue Oct 14 19:06:45 2014 @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set 
hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/update_orig_table; @@ -18,7 +17,7 @@ create table acid_uot( ctimestamp1 TIMESTAMP, ctimestamp2 TIMESTAMP, cboolean1 BOOLEAN, - cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/update_orig_table'; + cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/update_orig_table' TBLPROPERTIES ('transactional'='true'); update acid_uot set cstring1 = 'fred' where cint < -1070551679; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_tmp_table.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_tmp_table.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_tmp_table.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_tmp_table.q Tue Oct 14 19:06:45 2014 @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc; +create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_utt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_two_cols.q URL: 
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_two_cols.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_two_cols.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_two_cols.q Tue Oct 14 19:06:45 2014 @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc; +create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_utc select cint, cast(cstring1 as varchar(128)), cfloat from alltypesorc where cint < 0 order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_where_no_match.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_where_no_match.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_where_no_match.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_where_no_match.q Tue Oct 14 19:06:45 2014 @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table 
acid_wnm select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_where_non_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_where_non_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_where_non_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_where_non_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_uwnp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/update_where_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/update_where_partitioned.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/update_where_partitioned.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/update_where_partitioned.q Tue Oct 14 19:06:45 2014 @@ -1,10 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set 
hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; -create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_uwp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10; insert into table acid_uwp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 100 order by cint limit 10; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/vector_char_simple.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/vector_char_simple.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/vector_char_simple.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/vector_char_simple.q Tue Oct 14 19:06:45 2014 @@ -41,3 +41,16 @@ order by key desc limit 5; drop table char_2; + + +-- Implicit conversion. Occurs in reduce-side under Tez. 
+create table char_3 ( + field char(12) +) stored as orc; + +explain +insert into table char_3 select cint from alltypesorc limit 10; + +insert into table char_3 select cint from alltypesorc limit 10; + +drop table char_3; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/vector_varchar_simple.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/vector_varchar_simple.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/vector_varchar_simple.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/vector_varchar_simple.q Tue Oct 14 19:06:45 2014 @@ -1,12 +1,12 @@ SET hive.vectorized.execution.enabled=true; -drop table char_2; +drop table varchar_2; -create table char_2 ( +create table varchar_2 ( key varchar(10), value varchar(20) ) stored as orc; -insert overwrite table char_2 select * from src; +insert overwrite table varchar_2 select * from src; select key, value from src @@ -14,13 +14,13 @@ order by key asc limit 5; explain select key, value -from char_2 +from varchar_2 order by key asc limit 5; -- should match the query from src select key, value -from char_2 +from varchar_2 order by key asc limit 5; @@ -30,14 +30,26 @@ order by key desc limit 5; explain select key, value -from char_2 +from varchar_2 order by key desc limit 5; -- should match the query from src select key, value -from char_2 +from varchar_2 order by key desc limit 5; -drop table char_2; +drop table varchar_2; + +-- Implicit conversion. Occurs in reduce-side under Tez. 
+create table varchar_3 ( + field varchar(25) +) stored as orc; + +explain +insert into table varchar_3 select cint from alltypesorc limit 10; + +insert into table varchar_3 select cint from alltypesorc limit 10; + +drop table varchar_3; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/vectorization_0.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/vectorization_0.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/vectorization_0.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/vectorization_0.q Tue Oct 14 19:06:45 2014 @@ -1,4 +1,180 @@ SET hive.vectorized.execution.enabled=true; + +-- Use ORDER BY clauses to generate 2 stages. +EXPLAIN +SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1; + +SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1; + +EXPLAIN +SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1; + +SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1; + +EXPLAIN +SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1; + +SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1; + +EXPLAIN +SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1; + +SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1; + +EXPLAIN +SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1; + +SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY 
c1; + +EXPLAIN +SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1; + +SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1; + +EXPLAIN +SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1; + +SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1; + +EXPLAIN +SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1; + +SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1; + +EXPLAIN +SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1; + +SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1; + +EXPLAIN +SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (cbigint < cdouble))) + OR ((ctinyint >= csmallint) + AND ((cboolean2 = 1) + AND (3569 = ctinyint)))); + SELECT AVG(cbigint), (-(AVG(cbigint))), (-6432 + AVG(cbigint)), 
Modified: hive/branches/llap/ql/src/test/queries/clientpositive/vectorization_short_regress.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/vectorization_short_regress.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/vectorization_short_regress.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/vectorization_short_regress.q Tue Oct 14 19:06:45 2014 @@ -850,3 +850,52 @@ WHERE (((cboolean1 IS NOT NULL)) GROUP BY cboolean1 ORDER BY cboolean1; +-- These tests verify COUNT on empty or null colulmns work correctly. +create table test_count(i int) stored as orc; + +explain +select count(*) from test_count; + +select count(*) from test_count; + +explain +select count(i) from test_count; + +select count(i) from test_count; + +create table alltypesnull like alltypesorc; +alter table alltypesnull set fileformat textfile; + +insert into table alltypesnull select null, null, null, null, null, null, null, null, null, null, null, null from alltypesorc; + +create table alltypesnullorc stored as orc as select * from alltypesnull; + +explain +select count(*) from alltypesnullorc; + +select count(*) from alltypesnullorc; + +explain +select count(ctinyint) from alltypesnullorc; + +select count(ctinyint) from alltypesnullorc; + +explain +select count(cint) from alltypesnullorc; + +select count(cint) from alltypesnullorc; + +explain +select count(cfloat) from alltypesnullorc; + +select count(cfloat) from alltypesnullorc; + +explain +select count(cstring1) from alltypesnullorc; + +select count(cstring1) from alltypesnullorc; + +explain +select count(cboolean1) from alltypesnullorc; + +select count(cboolean1) from alltypesnullorc; Modified: hive/branches/llap/ql/src/test/queries/clientpositive/vectorized_date_funcs.q URL: 
http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/vectorized_date_funcs.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/vectorized_date_funcs.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/vectorized_date_funcs.q Tue Oct 14 19:06:45 2014 @@ -122,4 +122,20 @@ SELECT FROM date_udf_flight_orc LIMIT 10; -- Test extracting the date part of expression that includes time -SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1; \ No newline at end of file +SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1; + +EXPLAIN SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1; + +SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1; \ No newline at end of file Modified: hive/branches/llap/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q Tue Oct 14 19:06:45 2014 @@ -1,6 +1,7 @@ -SET hive.vectorized.execution.enabled = true; - -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. +-- Turning on vectorization has been temporarily moved after filling the test table +-- due to bug HIVE-8197. 
+ CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC; @@ -11,6 +12,8 @@ SELECT FROM alltypesorc LIMIT 40; +SET hive.vectorized.execution.enabled = true; + CREATE TABLE alltypesorc_wrong(stimestamp1 string) STORED AS ORC; INSERT INTO TABLE alltypesorc_wrong SELECT 'abcd' FROM alltypesorc LIMIT 1; @@ -122,3 +125,48 @@ SELECT second(stimestamp1) FROM alltypesorc_wrong ORDER BY c1; + +EXPLAIN SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string; + +SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string; + +-- SUM of timestamps are not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)... +EXPLAIN SELECT + sum(ctimestamp1) +FROM alltypesorc_string; + +SELECT + sum(ctimestamp1) +FROM alltypesorc_string; + +EXPLAIN SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string; + +SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string; \ No newline at end of file Modified: hive/branches/llap/ql/src/test/queries/clientpositive/windowing.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/windowing.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/clientpositive/windowing.q (original) +++ hive/branches/llap/ql/src/test/queries/clientpositive/windowing.q Tue Oct 14 19:06:45 2014 @@ -444,3 +444,7 @@ select p_retailprice, avg(p_retailprice) sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 
following) from part where p_mfgr='Manufacturer#1'; + +-- 47. empty partition +select sum(p_size) over (partition by p_mfgr ) +from part where p_mfgr = 'm1'; Modified: hive/branches/llap/ql/src/test/queries/positive/udf6.q URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/positive/udf6.q?rev=1631841&r1=1631840&r2=1631841&view=diff ============================================================================== --- hive/branches/llap/ql/src/test/queries/positive/udf6.q (original) +++ hive/branches/llap/ql/src/test/queries/positive/udf6.q Tue Oct 14 19:06:45 2014 @@ -1 +1 @@ -FROM src SELECT CONCAT('a', 'b'), IF(TRUE, 1 ,2) +FROM src SELECT CONCAT('a', 'b'), IF(TRUE, 1 ,2) + key
