http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q 
b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
index f215852..87f2624 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
@@ -51,7 +51,7 @@ set hive.merge.mapredfiles=false;
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 
 -- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
+create table list_bucketing_dynamic_part_n2 (key String, value String) 
     partitioned by (ds String, hr String) 
     skewed by (key, value) on 
(('484','val_484'),('51','val_14'),('103','val_103'))
     stored as DIRECTORIES
@@ -59,32 +59,32 @@ create table list_bucketing_dynamic_part (key String, value 
String)
 
 -- list bucketing DML without merge. use bucketize to generate a few small 
files.
 explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = 
'2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08';
 
-insert overwrite table list_bucketing_dynamic_part partition (ds = 
'2008-04-08', hr)
+insert overwrite table list_bucketing_dynamic_part_n2 partition (ds = 
'2008-04-08', hr)
 select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = 
'2008-04-08';
 
 -- check DML result
-show partitions list_bucketing_dynamic_part;
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr='a1');       
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr='b1');
+show partitions list_bucketing_dynamic_part_n2;
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', 
hr='a1');    
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', 
hr='b1');
 
 -- concatenate the partition and it will merge files
-alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') 
concatenate;
+alter table list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', 
hr='b1') concatenate;
 
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', 
hr='b1');
+desc formatted list_bucketing_dynamic_part_n2 partition (ds='2008-04-08', 
hr='b1');
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 select count(1) from srcpart where ds = '2008-04-08';
-select count(*) from list_bucketing_dynamic_part;
+select count(*) from list_bucketing_dynamic_part_n2;
 explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 
'val_484';
-select * from list_bucketing_dynamic_part where key = '484' and value = 
'val_484';
+select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 
'val_484';
+select * from list_bucketing_dynamic_part_n2 where key = '484' and value = 
'val_484';
 select * from srcpart where ds = '2008-04-08' and key = '484' and value = 
'val_484' order by hr;
 
 -- clean up
-drop table list_bucketing_dynamic_part;
+drop table list_bucketing_dynamic_part_n2;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q 
b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
index b9de430..e130f05 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
@@ -27,7 +27,7 @@ set hive.merge.mapredfiles=false;
 -- 118 000001_0
 
 -- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
+create table list_bucketing_static_part_n0 (key String, value String) 
     partitioned by (ds String, hr String) 
     skewed by (key) on ('484','103')
     stored as DIRECTORIES
@@ -35,39 +35,39 @@ create table list_bucketing_static_part (key String, value 
String)
 
 -- list bucketing DML without merge. use bucketize to generate a few small 
files.
 explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+insert overwrite table list_bucketing_static_part_n0 partition (ds = 
'2008-04-08',  hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08', hr = '11')
+insert overwrite table list_bucketing_static_part_n0 partition (ds = 
'2008-04-08', hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 
 -- check DML result
-show partitions list_bucketing_static_part;
-desc formatted list_bucketing_static_part partition (ds='2008-04-08', 
hr='11');        
+show partitions list_bucketing_static_part_n0;
+desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', 
hr='11');     
 
 set hive.merge.mapfiles=true;  
 set hive.merge.mapredfiles=true; 
 -- list bucketing DML with merge. use bucketize to generate a few small files.
 explain extended
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+insert overwrite table list_bucketing_static_part_n0 partition (ds = 
'2008-04-08',  hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 
-insert overwrite table list_bucketing_static_part partition (ds = 
'2008-04-08',  hr = '11')
+insert overwrite table list_bucketing_static_part_n0 partition (ds = 
'2008-04-08',  hr = '11')
 select key, value from srcpart where ds = '2008-04-08';
 
 -- check DML result
-show partitions list_bucketing_static_part;
-desc formatted list_bucketing_static_part partition (ds='2008-04-08', 
hr='11');        
+show partitions list_bucketing_static_part_n0;
+desc formatted list_bucketing_static_part_n0 partition (ds='2008-04-08', 
hr='11');     
 
 select count(1) from srcpart where ds = '2008-04-08';
-select count(*) from list_bucketing_static_part;
+select count(*) from list_bucketing_static_part_n0;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.optimize.listbucketing=true;
 explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = 
'11' and key = '484' and value = 'val_484';
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = 
'11' and key = '484' and value = 'val_484';
+select * from list_bucketing_static_part_n0 where ds = '2008-04-08' and  hr = 
'11' and key = '484' and value = 'val_484';
+select * from list_bucketing_static_part_n0 where ds = '2008-04-08' and  hr = 
'11' and key = '484' and value = 'val_484';
 select * from srcpart where ds = '2008-04-08' and key = '484' and value = 
'val_484';
 
 -- clean up
-drop table list_bucketing_static_part;
+drop table list_bucketing_static_part_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
index 9af9132..a5f5522 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_2.q
@@ -18,35 +18,35 @@ set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- 2. query result is right
 
 -- create a skewed table
-create table fact_daily (key String, value String) 
+create table fact_daily_n2 (key String, value String) 
 partitioned by (ds String, hr String) 
 skewed by (key, value) on (('484','val_484'),('238','val_238')) 
 stored as DIRECTORIES;
 
-insert overwrite table fact_daily partition (ds = '1', hr = '4')
+insert overwrite table fact_daily_n2 partition (ds = '1', hr = '4')
 select key, value from src;
 
-describe formatted fact_daily PARTITION (ds = '1', hr='4');
+describe formatted fact_daily_n2 PARTITION (ds = '1', hr='4');
        
-SELECT count(1) FROM fact_daily WHERE ds='1' and hr='4';       
+SELECT count(1) FROM fact_daily_n2 WHERE ds='1' and hr='4';    
 
 -- pruner only pick up default directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and 
value= 'val_484';
+explain extended SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' 
and value= 'val_484';
 -- List Bucketing Query
-SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484';
+SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and value= 
'val_484';
 
 -- pruner only pick up default directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= 
'406';
+explain extended SELECT key FROM fact_daily_n2 WHERE ds='1' and hr='4' and 
key= '406';
 -- List Bucketing Query
-SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406';
+SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and key= '406';
 
 -- pruner only pick up skewed-value directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and 
( (key='484' and value ='val_484')  or (key='238' and value= 'val_238'));
+explain extended SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' 
and ( (key='484' and value ='val_484')  or (key='238' and value= 'val_238'));
 -- List Bucketing Query
-SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and 
value ='val_484')  or (key='238' and value= 'val_238'));
+SELECT key, value FROM fact_daily_n2 WHERE ds='1' and hr='4' and ( (key='484' 
and value ='val_484')  or (key='238' and value= 'val_238'));
 
 -- clean up
-drop table fact_daily;
+drop table fact_daily_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
index b68ec64..4020063 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_multiskew_3.q
@@ -19,40 +19,40 @@ set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- 2. query result is right
 
 -- create a skewed table
-create table fact_daily (key String, value String) 
+create table fact_daily_n3 (key String, value String) 
 partitioned by (ds String, hr String) ;
 
 -- partition no skew
-insert overwrite table fact_daily partition (ds = '1', hr = '1')
+insert overwrite table fact_daily_n3 partition (ds = '1', hr = '1')
 select key, value from src;
-describe formatted fact_daily PARTITION (ds = '1', hr='1');
+describe formatted fact_daily_n3 PARTITION (ds = '1', hr='1');
 
 -- partition. skewed value is 484/238
-alter table fact_daily skewed by (key, value) on 
(('484','val_484'),('238','val_238')) stored as DIRECTORIES;
-insert overwrite table fact_daily partition (ds = '1', hr = '2')
+alter table fact_daily_n3 skewed by (key, value) on 
(('484','val_484'),('238','val_238')) stored as DIRECTORIES;
+insert overwrite table fact_daily_n3 partition (ds = '1', hr = '2')
 select key, value from src;
-describe formatted fact_daily PARTITION (ds = '1', hr='2');
+describe formatted fact_daily_n3 PARTITION (ds = '1', hr='2');
 
 -- another partition. skewed value is 327
-alter table fact_daily skewed by (key, value) on (('327','val_327')) stored as 
DIRECTORIES;
-insert overwrite table fact_daily partition (ds = '1', hr = '3')
+alter table fact_daily_n3 skewed by (key, value) on (('327','val_327')) stored 
as DIRECTORIES;
+insert overwrite table fact_daily_n3 partition (ds = '1', hr = '3')
 select key, value from src;
-describe formatted fact_daily PARTITION (ds = '1', hr='3');
+describe formatted fact_daily_n3 PARTITION (ds = '1', hr='3');
 
 -- query non-skewed partition
 explain extended
-select * from fact_daily where ds = '1' and  hr='1' and key='145';
-select * from fact_daily where ds = '1' and  hr='1' and key='145';
+select * from fact_daily_n3 where ds = '1' and  hr='1' and key='145';
+select * from fact_daily_n3 where ds = '1' and  hr='1' and key='145';
 explain extended
-select count(*) from fact_daily where ds = '1' and  hr='1';
-select count(*) from fact_daily where ds = '1' and  hr='1';
+select count(*) from fact_daily_n3 where ds = '1' and  hr='1';
+select count(*) from fact_daily_n3 where ds = '1' and  hr='1';
        
 -- query skewed partition
 explain extended
-SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and 
value='val_484');  
-SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and 
value='val_484');
+SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='2' and (key='484' and 
value='val_484');       
+SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='2' and (key='484' and 
value='val_484');
 
 -- query another skewed partition
 explain extended
-SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and 
value='val_327');  
-SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and 
value='val_327');  
+SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='3' and (key='327' and 
value='val_327');       
+SELECT * FROM fact_daily_n3 WHERE ds='1' and hr='3' and (key='327' and 
value='val_327');       

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
index 9032542..54ab75e 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_1.q
@@ -16,19 +16,19 @@ set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- 1. pruner only pick up right directory
 -- 2. query result is right
 
--- create 2 tables: fact_daily and fact_tz
--- fact_daily will be used for list bucketing query
--- fact_tz is a table used to prepare data and test directories        
-CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING);     
-CREATE TABLE fact_tz(x int) PARTITIONED BY (ds STRING, hr STRING)      
+-- create 2 tables: fact_daily_n4 and fact_tz_n1
+-- fact_daily_n4 will be used for list bucketing query
+-- fact_tz_n1 is a table used to prepare data and test directories     
+CREATE TABLE fact_daily_n4(x int) PARTITIONED BY (ds STRING);  
+CREATE TABLE fact_tz_n1(x int) PARTITIONED BY (ds STRING, hr STRING)   
 LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz';   
 
 -- create /fact_tz/ds=1/hr=1 directory 
-INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1')      
+INSERT OVERWRITE TABLE fact_tz_n1 PARTITION (ds='1', hr='1')   
 SELECT key FROM src WHERE key=484;     
 
 -- create /fact_tz/ds=1/hr=2 directory 
-INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2')      
+INSERT OVERWRITE TABLE fact_tz_n1 PARTITION (ds='1', hr='2')   
 SELECT key+11 FROM src WHERE key=484;
 
 dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1;
@@ -36,28 +36,28 @@ dfs -mv 
${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=1 ${hiveconf:hi
 dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=2 
${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME;
 dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1;
 
--- switch fact_daily to skewed table and point its location to /fact_tz/ds=1
-alter table fact_daily skewed by (x) on (484);
-ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE');   
-ALTER TABLE fact_daily ADD PARTITION (ds='1')  
+-- switch fact_daily_n4 to skewed table and point its location to /fact_tz/ds=1
+alter table fact_daily_n4 skewed by (x) on (484);
+ALTER TABLE fact_daily_n4 SET TBLPROPERTIES('EXTERNAL'='TRUE');        
+ALTER TABLE fact_daily_n4 ADD PARTITION (ds='1')       
 LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1';      
 
 -- set List Bucketing location map
-alter table fact_daily PARTITION (ds = '1') set skewed location 
(484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME');
-describe formatted fact_daily PARTITION (ds = '1');
+alter table fact_daily_n4 PARTITION (ds = '1') set skewed location 
(484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME');
+describe formatted fact_daily_n4 PARTITION (ds = '1');
        
-SELECT * FROM fact_daily WHERE ds='1';
+SELECT * FROM fact_daily_n4 WHERE ds='1';
 
 -- pruner only pick up skewed-value directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT x FROM fact_daily WHERE ds='1' and x=484;
+explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=484;
 -- List Bucketing Query
-SELECT x FROM fact_daily WHERE ds='1' and x=484;
+SELECT x FROM fact_daily_n4 WHERE ds='1' and x=484;
 
 -- pruner only pick up default directory since x equal to non-skewed value
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT x FROM fact_daily WHERE ds='1' and x=495;
+explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=495;
 -- List Bucketing Query
-SELECT x FROM fact_daily WHERE ds='1' and x=495;
-explain extended SELECT x FROM fact_daily WHERE ds='1' and x=1;
-SELECT x FROM fact_daily WHERE ds='1' and x=1;
+SELECT x FROM fact_daily_n4 WHERE ds='1' and x=495;
+explain extended SELECT x FROM fact_daily_n4 WHERE ds='1' and x=1;
+SELECT x FROM fact_daily_n4 WHERE ds='1' and x=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
index 1fa0638..77974cf 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_2.q
@@ -17,19 +17,19 @@ set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- 1. pruner only pick up right directory
 -- 2. query result is right
 
--- create 2 tables: fact_daily and fact_tz
--- fact_daily will be used for list bucketing query
--- fact_tz is a table used to prepare data and test directories        
-CREATE TABLE fact_daily(x int, y STRING) PARTITIONED BY (ds STRING);   
-CREATE TABLE fact_tz(x int, y STRING) PARTITIONED BY (ds STRING, hr STRING)    
+-- create 2 tables: fact_daily_n5 and fact_tz_n2
+-- fact_daily_n5 will be used for list bucketing query
+-- fact_tz_n2 is a table used to prepare data and test directories     
+CREATE TABLE fact_daily_n5(x int, y STRING) PARTITIONED BY (ds STRING);        
+CREATE TABLE fact_tz_n2(x int, y STRING) PARTITIONED BY (ds STRING, hr STRING) 
 LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz';   
 
 -- create /fact_tz/ds=1/hr=1 directory 
-INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1')      
+INSERT OVERWRITE TABLE fact_tz_n2 PARTITION (ds='1', hr='1')   
 SELECT key, value FROM src WHERE key=484;      
 
 -- create /fact_tz/ds=1/hr=2 directory 
-INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2')      
+INSERT OVERWRITE TABLE fact_tz_n2 PARTITION (ds='1', hr='2')   
 SELECT key+11, value FROM src WHERE key=484;
 
 dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1;
@@ -37,39 +37,39 @@ dfs -mv 
${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=1 ${hiveconf:hi
 dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=2 
${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME;
 dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1;
 
--- switch fact_daily to skewed table and point its location to /fact_tz/ds=1
-alter table fact_daily skewed by (x) on (484);
-ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE');   
-ALTER TABLE fact_daily ADD PARTITION (ds='1')  
+-- switch fact_daily_n5 to skewed table and point its location to /fact_tz/ds=1
+alter table fact_daily_n5 skewed by (x) on (484);
+ALTER TABLE fact_daily_n5 SET TBLPROPERTIES('EXTERNAL'='TRUE');        
+ALTER TABLE fact_daily_n5 ADD PARTITION (ds='1')       
 LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1';      
 
 -- set List Bucketing location map
-alter table fact_daily PARTITION (ds = '1') set skewed location 
(484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME');
-describe formatted fact_daily PARTITION (ds = '1');
+alter table fact_daily_n5 PARTITION (ds = '1') set skewed location 
(484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484','HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME');
+describe formatted fact_daily_n5 PARTITION (ds = '1');
        
-SELECT * FROM fact_daily WHERE ds='1';
+SELECT * FROM fact_daily_n5 WHERE ds='1';
 
 -- The first subquery
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended select x from (select x from fact_daily where ds = '1') subq 
where x = 484;
+explain extended select x from (select x from fact_daily_n5 where ds = '1') 
subq where x = 484;
 -- List Bucketing Query
-select x from (select * from fact_daily where ds = '1') subq where x = 484;
+select x from (select * from fact_daily_n5 where ds = '1') subq where x = 484;
 
 -- The second subquery
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily 
where ds ='1') subq where x1 = 484;
+explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily_n5 
where ds ='1') subq where x1 = 484;
 -- List Bucketing Query
-select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq 
where x1 = 484;
+select x1, y1 from(select x as x1, y as y1 from fact_daily_n5 where ds ='1') 
subq where x1 = 484;
 
 
 -- The third subquery
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended  select y, count(1) from fact_daily where ds ='1' and x = 484 
group by y;
+explain extended  select y, count(1) from fact_daily_n5 where ds ='1' and x = 
484 group by y;
 -- List Bucketing Query
-select y, count(1) from fact_daily where ds ='1' and x = 484 group by y;
+select y, count(1) from fact_daily_n5 where ds ='1' and x = 484 group by y;
 
 -- The fourth subquery
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended  select x, c from (select x, count(1) as c from fact_daily 
where ds = '1' group by x) subq where x = 484;;
+explain extended  select x, c from (select x, count(1) as c from fact_daily_n5 
where ds = '1' group by x) subq where x = 484;;
 -- List Bucketing Query
-select x, c from (select x, count(1) as c from fact_daily where ds = '1' group 
by x) subq where x = 484;
+select x, c from (select x, count(1) as c from fact_daily_n5 where ds = '1' 
group by x) subq where x = 484;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q 
b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
index 7ff3d8d..bf6b227 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_query_oneskew_3.q
@@ -17,10 +17,10 @@ set 
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- 1. pruner only pick up right directory
 -- 2. query result is right
 
--- create 2 tables: fact_daily and fact_tz
--- fact_daily will be used for list bucketing query
+-- create 2 tables: fact_daily_n0 and fact_tz
+-- fact_daily_n0 will be used for list bucketing query
 -- fact_tz is a table used to prepare data and test directories        
-CREATE TABLE fact_daily(x int, y STRING, z STRING) PARTITIONED BY (ds STRING); 
+CREATE TABLE fact_daily_n0(x int, y STRING, z STRING) PARTITIONED BY (ds 
STRING);      
 CREATE TABLE fact_tz(x int, y STRING, z STRING) PARTITIONED BY (ds STRING, hr 
STRING)  
 LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz';   
 
@@ -42,22 +42,22 @@ dfs -mv 
${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=2 ${hiveconf:hi
 dfs -mv ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/hr=3 
${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=238;
 dfs -lsr ${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1;        
 
--- switch fact_daily to skewed table and point its location to /fact_tz/ds=1
-alter table fact_daily skewed by (x) on (484,238);
-ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE');   
-ALTER TABLE fact_daily ADD PARTITION (ds='1')  
+-- switch fact_daily_n0 to skewed table and point its location to /fact_tz/ds=1
+alter table fact_daily_n0 skewed by (x) on (484,238);
+ALTER TABLE fact_daily_n0 SET TBLPROPERTIES('EXTERNAL'='TRUE');        
+ALTER TABLE fact_daily_n0 ADD PARTITION (ds='1')       
 LOCATION '${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1';      
 
 -- set List Bucketing location map
-alter table fact_daily PARTITION (ds = '1') set skewed location 
(484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484',
+alter table fact_daily_n0 PARTITION (ds = '1') set skewed location 
(484='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=484',
 238='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/x=238',
 
'HIVE_DEFAULT_LIST_BUCKETING_KEY'='${hiveconf:hive.metastore.warehouse.dir}/fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME');
-describe formatted fact_daily PARTITION (ds = '1');
+describe formatted fact_daily_n0 PARTITION (ds = '1');
        
-SELECT * FROM fact_daily WHERE ds='1';
+SELECT * FROM fact_daily_n0 WHERE ds='1';
 
 -- pruner  pick up right directory
 -- explain plan shows which directory selected: Truncated Path -> Alias
-explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86);
+explain extended SELECT x FROM fact_daily_n0 WHERE ds='1' and not (x = 86);
 -- List Bucketing Query
-SELECT x FROM fact_daily WHERE ds='1' and not (x = 86);
+SELECT x FROM fact_daily_n0 WHERE ds='1' and not (x = 86);

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/llap_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_acid.q 
b/ql/src/test/queries/clientpositive/llap_acid.q
index dfc7b04..754461d 100644
--- a/ql/src/test/queries/clientpositive/llap_acid.q
+++ b/ql/src/test/queries/clientpositive/llap_acid.q
@@ -13,9 +13,9 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n1;
 
-CREATE TABLE orc_llap (
+CREATE TABLE orc_llap_n1 (
     cint INT,
     cbigint BIGINT,
     cfloat FLOAT,
@@ -23,37 +23,37 @@ CREATE TABLE orc_llap (
 partitioned by (csmallint smallint)
 clustered by (cint) into 2 buckets stored as orc;
 
-insert into table orc_llap partition (csmallint = 1)
+insert into table orc_llap_n1 partition (csmallint = 1)
 select cint, cbigint, cfloat, cdouble from alltypesorc
 where cdouble is not null order by cdouble asc limit 10;
-insert into table orc_llap partition (csmallint = 2)
+insert into table orc_llap_n1 partition (csmallint = 2)
 select cint, cbigint, cfloat, cdouble from alltypesorc
 where cdouble is not null order by cdouble asc limit 10;
 
-alter table orc_llap SET TBLPROPERTIES ('transactional'='true');
+alter table orc_llap_n1 SET TBLPROPERTIES ('transactional'='true');
 
-insert into table orc_llap partition (csmallint = 3)
+insert into table orc_llap_n1 partition (csmallint = 3)
 select cint, cbigint, cfloat, cdouble from alltypesorc
 where cdouble is not null order by cdouble desc limit 10;
 
 SET hive.llap.io.enabled=true;
 
 explain vectorization only detail
-select cint, csmallint, cbigint from orc_llap where cint is not null order
+select cint, csmallint, cbigint from orc_llap_n1 where cint is not null order
 by csmallint, cint;
-select cint, csmallint, cbigint from orc_llap where cint is not null order
+select cint, csmallint, cbigint from orc_llap_n1 where cint is not null order
 by csmallint, cint;
 
-insert into table orc_llap partition (csmallint = 1) values (1, 1, 1, 1);
+insert into table orc_llap_n1 partition (csmallint = 1) values (1, 1, 1, 1);
 
 explain vectorization only detail
-update orc_llap set cbigint = 2 where cint = 1;
-update orc_llap set cbigint = 2 where cint = 1;
+update orc_llap_n1 set cbigint = 2 where cint = 1;
+update orc_llap_n1 set cbigint = 2 where cint = 1;
 
 explain vectorization only detail
-select cint, csmallint, cbigint from orc_llap where cint is not null order
+select cint, csmallint, cbigint from orc_llap_n1 where cint is not null order
 by csmallint, cint;
-select cint, csmallint, cbigint from orc_llap where cint is not null order
+select cint, csmallint, cbigint from orc_llap_n1 where cint is not null order
 by csmallint, cint;
 
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/llap_acid2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_acid2.q 
b/ql/src/test/queries/clientpositive/llap_acid2.q
index 108f00a..a409c26 100644
--- a/ql/src/test/queries/clientpositive/llap_acid2.q
+++ b/ql/src/test/queries/clientpositive/llap_acid2.q
@@ -13,9 +13,9 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n2;
 
-CREATE TABLE orc_llap (
+CREATE TABLE orc_llap_n2 (
     cint INT,
     cbigint BIGINT,
     cfloat FLOAT,
@@ -33,7 +33,7 @@ CREATE TABLE orc_llap (
 )  stored as orc TBLPROPERTIES ('transactional'='true');
 
 
-insert into table orc_llap
+insert into table orc_llap_n2
 select cint, cbigint, cfloat, cdouble,
  cint as c1, cbigint as c2, cfloat as c3, cdouble as c4,
  cint as c8, cbigint as c7, cfloat as c6, cdouble as c5,
@@ -73,13 +73,13 @@ update orc_llap2 set cstring1 = 'testvalue' where cstring1 
= 'N016jPED08o';
 
 SET hive.llap.io.enabled=true;
 
-select cstring1 from orc_llap;
-select cfloat2, cint from orc_llap;
-select * from orc_llap;
+select cstring1 from orc_llap_n2;
+select cfloat2, cint from orc_llap_n2;
+select * from orc_llap_n2;
 
 select cstring1 from orc_llap2;
 select cfloat2, cint from orc_llap2;
 select * from orc_llap2;
 
 
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/llap_nullscan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_nullscan.q 
b/ql/src/test/queries/clientpositive/llap_nullscan.q
index c123e9e..c0057a5 100644
--- a/ql/src/test/queries/clientpositive/llap_nullscan.q
+++ b/ql/src/test/queries/clientpositive/llap_nullscan.q
@@ -8,24 +8,24 @@ set hive.auto.convert.join=false;
 set hive.vectorized.execution.enabled=true;
 set hive.llap.io.enabled=true;
  
-drop table if exists src_orc;
+drop table if exists src_orc_n1;
 
-create table src_orc stored as orc as select * from srcpart limit 10;
+create table src_orc_n1 stored as orc as select * from srcpart limit 10;
 
 explain extended
-select * from src_orc where 1=2;
-select * from src_orc where 1=2;
+select * from src_orc_n1 where 1=2;
+select * from src_orc_n1 where 1=2;
 
 explain
-select * from (select key from src_orc where false) a left outer join (select 
key from src_orc limit 0) b on a.key=b.key;
-select * from (select key from src_orc where false) a left outer join (select 
key from src_orc limit 0) b on a.key=b.key;
+select * from (select key from src_orc_n1 where false) a left outer join 
(select key from src_orc_n1 limit 0) b on a.key=b.key;
+select * from (select key from src_orc_n1 where false) a left outer join 
(select key from src_orc_n1 limit 0) b on a.key=b.key;
  
 explain
-select count(key) from src_orc where false union all select count(key) from 
src_orc ;
-select count(key) from src_orc where false union all select count(key) from 
src_orc ;
+select count(key) from src_orc_n1 where false union all select count(key) from 
src_orc_n1 ;
+select count(key) from src_orc_n1 where false union all select count(key) from 
src_orc_n1 ;
 
 explain 
-select * from src_orc s1, src_orc s2 where false and s1.value = s2.value;
-select * from src_orc s1, src_orc s2 where false and s1.value = s2.value;
+select * from src_orc_n1 s1, src_orc_n1 s2 where false and s1.value = s2.value;
+select * from src_orc_n1 s1, src_orc_n1 s2 where false and s1.value = s2.value;
 
-drop table if exists src_orc;
+drop table if exists src_orc_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/llap_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_partitioned.q 
b/ql/src/test/queries/clientpositive/llap_partitioned.q
index 2ba38ce..0bbd7f6 100644
--- a/ql/src/test/queries/clientpositive/llap_partitioned.q
+++ b/ql/src/test/queries/clientpositive/llap_partitioned.q
@@ -48,7 +48,7 @@ SELECT csmallint, cint, cbigint, cfloat, cdouble, cstring1, 
cstring1, cstring1,
 INSERT OVERWRITE TABLE orc_llap_dim_part PARTITION (ctinyint)
 SELECT null, null, sum(cbigint) as cbigint, null, null, null, null, null, 
null, null, ctinyint FROM alltypesorc WHERE ctinyint > 10 AND ctinyint < 21 
GROUP BY ctinyint;
 
-drop table llap_temp_table;
+drop table llap_temp_table_n0;
 
 set hive.cbo.enable=false;
 SET hive.llap.io.enabled=true;
@@ -57,14 +57,14 @@ SET hive.vectorized.execution.enabled=true;
 explain vectorization detail
 SELECT oft.ctinyint, oft.cint, oft.cchar1, oft.cvchar1 FROM orc_llap_part oft
   INNER JOIN orc_llap_dim_part od ON oft.ctinyint = od.ctinyint;
-create table llap_temp_table as
+create table llap_temp_table_n0 as
 SELECT oft.ctinyint, oft.cint, oft.cchar1, oft.cvchar1 FROM orc_llap_part oft
   INNER JOIN orc_llap_dim_part od ON oft.ctinyint = od.ctinyint;
 
 explain vectorization detail
-select sum(hash(*)) from llap_temp_table;
-select sum(hash(*)) from llap_temp_table;
-drop table llap_temp_table;
+select sum(hash(*)) from llap_temp_table_n0;
+select sum(hash(*)) from llap_temp_table_n0;
+drop table llap_temp_table_n0;
 
 
 DROP TABLE orc_llap_part;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/llap_reader.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_reader.q 
b/ql/src/test/queries/clientpositive/llap_reader.q
index ac0624d..53baf5d 100644
--- a/ql/src/test/queries/clientpositive/llap_reader.q
+++ b/ql/src/test/queries/clientpositive/llap_reader.q
@@ -3,21 +3,21 @@ SET hive.llap.io.enabled=true;
 SET hive.map.aggr=false;
 SET hive.exec.post.hooks=;
 
-CREATE TABLE test(f1 int, f2 int, f3 int) stored as orc;
-INSERT INTO TABLE test VALUES (1,1,1), (2,2,2), (3,3,3), (4,4,4);
+CREATE TABLE test_n7(f1 int, f2 int, f3 int) stored as orc;
+INSERT INTO TABLE test_n7 VALUES (1,1,1), (2,2,2), (3,3,3), (4,4,4);
 
-ALTER TABLE test CHANGE f1 f1 bigint;
-ALTER TABLE test CHANGE f2 f2 bigint;
-ALTER TABLE test CHANGE f3 f3 bigint;
+ALTER TABLE test_n7 CHANGE f1 f1 bigint;
+ALTER TABLE test_n7 CHANGE f2 f2 bigint;
+ALTER TABLE test_n7 CHANGE f3 f3 bigint;
 
 -- llap counters with data and meta cache
 SET 
hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
-SELECT count(f1) FROM test GROUP BY f1;
-SELECT count(f1) FROM test GROUP BY f1;
+SELECT count(f1) FROM test_n7 GROUP BY f1;
+SELECT count(f1) FROM test_n7 GROUP BY f1;
 
 SET hive.exec.post.hooks=;
 CREATE TABLE test_bigint(f1 bigint, f2 bigint, f3 bigint) stored as orc;
-INSERT OVERWRITE TABLE test_bigint select * from test;
+INSERT OVERWRITE TABLE test_bigint select * from test_n7;
 ALTER TABLE test_bigint CHANGE f1 f1 double;
 ALTER TABLE test_bigint CHANGE f2 f2 double;
 ALTER TABLE test_bigint CHANGE f3 f3 double;
@@ -32,9 +32,9 @@ select count(f1) from test_bigint group by f1;
 SET hive.exec.post.hooks=;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.support.concurrency=true;
-CREATE TABLE test_acid (f1 int, f2 int, val string) clustered by (val) into 2 
buckets stored as orc TBLPROPERTIES ('transactional'='true');
-INSERT INTO TABLE test_acid VALUES (1,1,'b1'), (2,2,'b2'), (3,3,'b3'), 
(4,4,'b4');
+CREATE TABLE test_acid_n0 (f1 int, f2 int, val string) clustered by (val) into 
2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+INSERT INTO TABLE test_acid_n0 VALUES (1,1,'b1'), (2,2,'b2'), (3,3,'b3'), 
(4,4,'b4');
 
 -- should not have llap counters
 SET 
hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
-SELECT count(f1) FROM test_acid GROUP BY f1;
+SELECT count(f1) FROM test_acid_n0 GROUP BY f1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/llap_udf.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_udf.q 
b/ql/src/test/queries/clientpositive/llap_udf.q
index 65d930e..919e7cc 100644
--- a/ql/src/test/queries/clientpositive/llap_udf.q
+++ b/ql/src/test/queries/clientpositive/llap_udf.q
@@ -6,15 +6,15 @@ set hive.llap.execution.mode=all;
 set hive.fetch.task.conversion=none;
 set hive.llap.allow.permanent.fns=true;
 
-drop table if exists src_orc;
-create table src_orc stored as orc as select * from src;
+drop table if exists src_orc_n0;
+create table src_orc_n0 stored as orc as select * from src;
 
 -- Not using GenericUDFTestGetJavaBoolean; that is already registered when 
tests begin
 
 CREATE TEMPORARY FUNCTION test_udf0 AS 
'org.apache.hadoop.hive.ql.udf.generic.GenericUDFEvaluateNPE';
 
 set hive.llap.execution.mode=auto;
-EXPLAIN SELECT test_udf0(cast(key as string)) from src_orc;
+EXPLAIN SELECT test_udf0(cast(key as string)) from src_orc_n0;
 
 set hive.llap.execution.mode=all;
 CREATE FUNCTION test_udf2 AS 
'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString';
@@ -22,32 +22,32 @@ CREATE FUNCTION test_udf3 AS 
'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTe
 CREATE FUNCTION test_udf4 AS 
'org.apache.hadoop.hive.ql.udf.generic.GenericUDFEvaluateNPE';
 
 EXPLAIN
-SELECT test_udf2(cast(key as string)), test_udf3(cast(key as string)), 
test_udf4(cast(key as string)) from src_orc;
+SELECT test_udf2(cast(key as string)), test_udf3(cast(key as string)), 
test_udf4(cast(key as string)) from src_orc_n0;
 
 set hive.llap.execution.mode=auto;
 -- Verification is based on classes, so 0 would work based on 4.
 EXPLAIN
-SELECT test_udf0(cast(key as string)) from src_orc;
+SELECT test_udf0(cast(key as string)) from src_orc_n0;
 
 DROP FUNCTION test_udf2;
 
 set hive.llap.execution.mode=all;
 -- ...verify that 3 still works
 EXPLAIN
-SELECT test_udf3(cast(key as string)), test_udf4(cast(key as string)) from 
src_orc;
+SELECT test_udf3(cast(key as string)), test_udf4(cast(key as string)) from 
src_orc_n0;
 
 DROP FUNCTION test_udf4;
 
 set hive.llap.execution.mode=auto;
 -- ...now 0 should stop working
 EXPLAIN
-SELECT test_udf0(cast(key as string)) from src_orc;
+SELECT test_udf0(cast(key as string)) from src_orc_n0;
 
 set hive.llap.allow.permanent.fns=false;
 
 EXPLAIN
-SELECT test_udf3(cast(key as string)) from src_orc;
+SELECT test_udf3(cast(key as string)) from src_orc_n0;
 
 
-drop table if exists src_orc;
+drop table if exists src_orc_n0;
 set hive.execution.mode=container;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/llap_uncompressed.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llap_uncompressed.q 
b/ql/src/test/queries/clientpositive/llap_uncompressed.q
index 0282457..875356c 100644
--- a/ql/src/test/queries/clientpositive/llap_uncompressed.q
+++ b/ql/src/test/queries/clientpositive/llap_uncompressed.q
@@ -7,12 +7,12 @@ SET hive.exec.orc.default.row.index.stride=1000;
 SET hive.optimize.index.filter=true;
 set hive.auto.convert.join=false;
 
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n0;
 
 set hive.auto.convert.join=true;
 SET hive.llap.io.enabled=false;
 
-CREATE TABLE orc_llap(
+CREATE TABLE orc_llap_n0(
     ctinyint TINYINT,
     csmallint SMALLINT,
     cint INT,
@@ -27,7 +27,7 @@ CREATE TABLE orc_llap(
     cboolean2 BOOLEAN)
     STORED AS ORC tblproperties ("orc.compress"="NONE");
 
-insert into table orc_llap
+insert into table orc_llap_n0
 select ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, 
cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2
 from alltypesorc;
 
@@ -36,14 +36,14 @@ SET hive.llap.io.enabled=true;
 
 drop table llap_temp_table;
 explain
-select * from orc_llap where cint > 10 and cbigint is not null;
+select * from orc_llap_n0 where cint > 10 and cbigint is not null;
 create table llap_temp_table as
-select * from orc_llap where cint > 10 and cbigint is not null;
+select * from orc_llap_n0 where cint > 10 and cbigint is not null;
 select sum(hash(*)) from llap_temp_table;
 
 explain
-select * from orc_llap where cint > 10 and cint < 5000000;
-select * from orc_llap where cint > 10 and cint < 5000000;
+select * from orc_llap_n0 where cint > 10 and cint < 5000000;
+select * from orc_llap_n0 where cint > 10 and cint < 5000000;
 
-DROP TABLE orc_llap;
+DROP TABLE orc_llap_n0;
 drop table llap_temp_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/llapdecider.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/llapdecider.q 
b/ql/src/test/queries/clientpositive/llapdecider.q
index 86f4c6e..17a57bc 100644
--- a/ql/src/test/queries/clientpositive/llapdecider.q
+++ b/ql/src/test/queries/clientpositive/llapdecider.q
@@ -8,63 +8,63 @@ set hive.llap.execution.mode=auto;
 -- simple query with multiple reduce stages
 EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt;
 
-create table src_orc stored as orc as select * from src;
+create table src_orc_n2 stored as orc as select * from src;
 
-EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt;
+EXPLAIN SELECT key, count(value) as cnt FROM src_orc_n2 GROUP BY key ORDER BY 
cnt;
 
 set hive.llap.auto.enforce.stats=false;
 
-EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt;
+EXPLAIN SELECT key, count(value) as cnt FROM src_orc_n2 GROUP BY key ORDER BY 
cnt;
 
 set hive.llap.auto.enforce.stats=true;
 
-analyze table src_orc compute statistics for columns;
+analyze table src_orc_n2 compute statistics for columns;
 
-EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt;
+EXPLAIN SELECT key, count(value) as cnt FROM src_orc_n2 GROUP BY key ORDER BY 
cnt;
 
-EXPLAIN SELECT * from src_orc join src on (src_orc.key = src.key) order by 
src.value;
+EXPLAIN SELECT * from src_orc_n2 join src on (src_orc_n2.key = src.key) order 
by src.value;
 
-EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by 
s2.value;
+EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) 
order by s2.value;
 
 set hive.llap.auto.enforce.tree=false;
 
-EXPLAIN SELECT * from src_orc join src on (src_orc.key = src.key) order by 
src.value;
+EXPLAIN SELECT * from src_orc_n2 join src on (src_orc_n2.key = src.key) order 
by src.value;
 
 set hive.llap.auto.enforce.tree=true;
 
 set hive.llap.auto.max.input.size=10;
 
-EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by 
s2.value;
+EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) 
order by s2.value;
 
 set hive.llap.auto.max.input.size=1000000000;
 set hive.llap.auto.max.output.size=10;
 
-EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by 
s2.value;
+EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) 
order by s2.value;
 
 set hive.llap.auto.max.output.size=1000000000;
 
 set hive.llap.execution.mode=map;
 
-EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by 
s2.value;
+EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) 
order by s2.value;
 
 set hive.llap.execution.mode=none;
 
-EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by 
s2.value;
+EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) 
order by s2.value;
 
 set hive.llap.execution.mode=all;
 
-EXPLAIN SELECT * from src_orc s1 join src_orc s2 on (s1.key = s2.key) order by 
s2.value;
+EXPLAIN SELECT * from src_orc_n2 s1 join src_orc_n2 s2 on (s1.key = s2.key) 
order by s2.value;
 
 CREATE TEMPORARY FUNCTION test_udf_get_java_string AS 
'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString';
 
 set hive.llap.execution.mode=auto;
 
-EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc where cast(key as int) > 
1;
-EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) 
+ 1) from src_orc where cast(key as int) > 1;
-EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc where 
cast(test_udf_get_java_string(cast(key as string)) as int) > 1;
+EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc_n2 where cast(key as 
int) > 1;
+EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) 
+ 1) from src_orc_n2 where cast(key as int) > 1;
+EXPLAIN SELECT sum(cast(key as int) + 1) from src_orc_n2 where 
cast(test_udf_get_java_string(cast(key as string)) as int) > 1;
 
 set hive.llap.skip.compile.udf.check=true;
 
-EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) 
+ 1) from src_orc where cast(key as int) > 1;
+EXPLAIN SELECT sum(cast(test_udf_get_java_string(cast(key as string)) as int) 
+ 1) from src_orc_n2 where cast(key as int) > 1;
 
 set hive.execution.mode=container;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_binary_data.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_binary_data.q 
b/ql/src/test/queries/clientpositive/load_binary_data.q
index 653918a..81d41e8 100644
--- a/ql/src/test/queries/clientpositive/load_binary_data.q
+++ b/ql/src/test/queries/clientpositive/load_binary_data.q
@@ -1,13 +1,13 @@
-CREATE TABLE mytable(key binary, value int)
+CREATE TABLE mytable_n2(key binary, value int)
 ROW FORMAT DELIMITED
 FIELDS TERMINATED BY '9'
 STORED AS TEXTFILE;
 -- this query loads native binary data, stores in a table and then queries it. 
Note that string.txt contains binary data. Also uses transform clause and then 
length udf.
 
-LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable;
+LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable_n2;
 
-create table dest1 (key binary, value int);
+create table dest1_n155 (key binary, value int);
 
-insert overwrite table dest1 select transform(*) using 'cat' as key binary, 
value int from mytable; 
+insert overwrite table dest1_n155 select transform(*) using 'cat' as key 
binary, value int from mytable_n2; 
 
-select key, value, length (key) from dest1;
+select key, value, length (key) from dest1_n155;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_data_using_job.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_data_using_job.q 
b/ql/src/test/queries/clientpositive/load_data_using_job.q
index 3659b6e..b760d9b 100644
--- a/ql/src/test/queries/clientpositive/load_data_using_job.q
+++ b/ql/src/test/queries/clientpositive/load_data_using_job.q
@@ -11,84 +11,84 @@ set 
hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hado
 
 -- Single partition
 -- Regular load happens.
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string) STORED AS TEXTFILE;
-explain load data local inpath '../../data/files/bmj/000000_0' INTO TABLE 
srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE 
srcbucket_mapjoin partition(ds='2008-04-08');
-select * from srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string) STORED AS TEXTFILE;
+explain load data local inpath '../../data/files/bmj/000000_0' INTO TABLE 
srcbucket_mapjoin_n8 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE 
srcbucket_mapjoin_n8 partition(ds='2008-04-08');
+select * from srcbucket_mapjoin_n8;
 
-drop table srcbucket_mapjoin;
+drop table srcbucket_mapjoin_n8;
 
 -- Triggers a Tez job as partition info is missing from load data.
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string) STORED AS TEXTFILE;
-explain load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin;
-load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string) STORED AS TEXTFILE;
+explain load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin_n8;
+load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Multi partitions
 -- Triggers a Tez job as partition info is missing from load data.
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string, hr int) STORED AS TEXTFILE;
-explain load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin;
-load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string, hr int) STORED AS TEXTFILE;
+explain load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin_n8;
+load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Multi partitions and directory with files (no sub dirs)
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string, hr int) STORED AS TEXTFILE;
-explain load data local inpath 
'../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO 
TABLE srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string, hr int) STORED AS TEXTFILE;
+explain load data local inpath 
'../../data/files/load_data_job/partitions/subdir' INTO TABLE 
srcbucket_mapjoin_n8;
+load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO 
TABLE srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Bucketing
-CREATE TABLE srcbucket_mapjoin(key int, value string) clustered by (key) 
sorted by (key) into 5 buckets STORED AS TEXTFILE;
-explain load data local inpath '../../data/files/load_data_job/bucketing.txt' 
INTO TABLE srcbucket_mapjoin;
-load data local inpath '../../data/files/load_data_job/bucketing.txt' INTO 
TABLE srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) clustered by (key) 
sorted by (key) into 5 buckets STORED AS TEXTFILE;
+explain load data local inpath '../../data/files/load_data_job/bucketing.txt' 
INTO TABLE srcbucket_mapjoin_n8;
+load data local inpath '../../data/files/load_data_job/bucketing.txt' INTO 
TABLE srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Single partition and bucketing
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE;
-explain load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin;
-load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string) clustered by (key) sorted by (key) into 5 buckets STORED AS TEXTFILE;
+explain load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin_n8;
+load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Multiple partitions and bucketing
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS 
TEXTFILE;
-explain load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin;
-load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS 
TEXTFILE;
+explain load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin_n8;
+load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Multiple partitions, bucketing, and directory with files (no sub dirs)
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS 
TEXTFILE;
-explain load data local inpath 
'../../data/files/load_data_job/partitions/subdir' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO 
TABLE srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS 
TEXTFILE;
+explain load data local inpath 
'../../data/files/load_data_job/partitions/subdir' INTO TABLE 
srcbucket_mapjoin_n8;
+load data local inpath '../../data/files/load_data_job/partitions/subdir' INTO 
TABLE srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Multiple partitions, bucketing, and directory with files and sub dirs
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS 
TEXTFILE;
-explain load data local inpath '../../data/files/load_data_job/partitions' 
INTO TABLE srcbucket_mapjoin;
-load data local inpath '../../data/files/load_data_job/partitions' INTO TABLE 
srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string, hr int) clustered by (key) sorted by (key) into 5 buckets STORED AS 
TEXTFILE;
+explain load data local inpath '../../data/files/load_data_job/partitions' 
INTO TABLE srcbucket_mapjoin_n8;
+load data local inpath '../../data/files/load_data_job/partitions' INTO TABLE 
srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Single partition, multiple buckets
-CREATE TABLE srcbucket_mapjoin(key int, value string, ds string) partitioned 
by (hr int) clustered by (key, value) sorted by (key, value) into 5 buckets 
STORED AS TEXTFILE;
-explain load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin;
-load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin;
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string, ds string) 
partitioned by (hr int) clustered by (key, value) sorted by (key, value) into 5 
buckets STORED AS TEXTFILE;
+explain load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin_n8;
+load data local inpath 
'../../data/files/load_data_job/partitions/load_data_2_partitions.txt' INTO 
TABLE srcbucket_mapjoin_n8;
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
 
 -- Load into ORC table using text files
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds 
string) STORED AS ORC;
-explain load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin
+CREATE TABLE srcbucket_mapjoin_n8(key int, value string) partitioned by (ds 
string) STORED AS ORC;
+explain load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin_n8
 INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
 SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
-load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin
+load data local inpath 
'../../data/files/load_data_job/load_data_1_partition.txt' INTO TABLE 
srcbucket_mapjoin_n8
 INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'
 SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
-select * from srcbucket_mapjoin;
-drop table srcbucket_mapjoin;
\ No newline at end of file
+select * from srcbucket_mapjoin_n8;
+drop table srcbucket_mapjoin_n8;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_dyn_part1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part1.q 
b/ql/src/test/queries/clientpositive/load_dyn_part1.q
index bac1f1e..8004f89 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part1.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part1.q
@@ -7,28 +7,28 @@ show partitions srcpart;
 
 
 
-create table if not exists nzhang_part1 like srcpart;
-create table if not exists nzhang_part2 like srcpart;
-describe extended nzhang_part1;
+create table if not exists nzhang_part1_n0 like srcpart;
+create table if not exists nzhang_part2_n0 like srcpart;
+describe extended nzhang_part1_n0;
 
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.exec.dynamic.partition=true;
 
 explain
 from srcpart
-insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, 
hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, 
value, hr where ds > '2008-04-08';
+insert overwrite table nzhang_part1_n0 partition (ds, hr) select key, value, 
ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2_n0 partition(ds='2008-12-31', hr) select 
key, value, hr where ds > '2008-04-08';
 
 from srcpart
-insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, 
hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, 
value, hr where ds > '2008-04-08';
+insert overwrite table nzhang_part1_n0 partition (ds, hr) select key, value, 
ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part2_n0 partition(ds='2008-12-31', hr) select 
key, value, hr where ds > '2008-04-08';
 
 
-show partitions nzhang_part1;
-show partitions nzhang_part2;
+show partitions nzhang_part1_n0;
+show partitions nzhang_part2_n0;
 
-select * from nzhang_part1 where ds is not null and hr is not null;
-select * from nzhang_part2 where ds is not null and hr is not null;
+select * from nzhang_part1_n0 where ds is not null and hr is not null;
+select * from nzhang_part2_n0 where ds is not null and hr is not null;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_dyn_part11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part11.q 
b/ql/src/test/queries/clientpositive/load_dyn_part11.q
index 9e46d76..1e61981 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part11.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part11.q
@@ -2,17 +2,17 @@
 show partitions srcpart;
 
 
-create table if not exists nzhang_part like srcpart;
-describe extended nzhang_part;
+create table if not exists nzhang_part_n0 like srcpart;
+describe extended nzhang_part_n0;
 
 set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set hive.exec.compress.output=true;
 set hive.exec.dynamic.partition=true;
 
-insert overwrite table nzhang_part partition (ds="2010-03-03", hr) select key, 
value, hr from srcpart where ds is not null and hr is not null;
+insert overwrite table nzhang_part_n0 partition (ds="2010-03-03", hr) select 
key, value, hr from srcpart where ds is not null and hr is not null;
 
-select * from nzhang_part where ds = '2010-03-03' and hr = '11';
-select * from nzhang_part where ds = '2010-03-03' and hr = '12';
+select * from nzhang_part_n0 where ds = '2010-03-03' and hr = '11';
+select * from nzhang_part_n0 where ds = '2010-03-03' and hr = '12';
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_dyn_part14.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part14.q 
b/ql/src/test/queries/clientpositive/load_dyn_part14.q
index 13bcc6f..74a74d1 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part14.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part14.q
@@ -1,16 +1,16 @@
 --! qt:dataset:src
 -- SORT_QUERY_RESULTS
 
-create table if not exists nzhang_part14 (key string)
+create table if not exists nzhang_part14_n0 (key string)
   partitioned by (value string);
 
-describe extended nzhang_part14;
+describe extended nzhang_part14_n0;
 
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 
 explain
-insert overwrite table nzhang_part14 partition(value) 
+insert overwrite table nzhang_part14_n0 partition(value) 
 select key, value from (
  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
   union all
@@ -19,7 +19,7 @@ select key, value from (
   select * from (select 'k3' as key, ' ' as value from src limit 2)c
 ) T;
 
-insert overwrite table nzhang_part14 partition(value) 
+insert overwrite table nzhang_part14_n0 partition(value) 
 select key, value from (
  select * from (select 'k1' as key, cast(null as string) as value from src limit 2)a 
   union all
@@ -29,8 +29,8 @@ select key, value from (
 ) T;
 
 
-show partitions nzhang_part14;
+show partitions nzhang_part14_n0;
 
-select * from nzhang_part14 where value <> 'a';
+select * from nzhang_part14_n0 where value <> 'a';
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_dyn_part8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_dyn_part8.q b/ql/src/test/queries/clientpositive/load_dyn_part8.q
index 4330e0c..a74be62 100644
--- a/ql/src/test/queries/clientpositive/load_dyn_part8.q
+++ b/ql/src/test/queries/clientpositive/load_dyn_part8.q
@@ -5,8 +5,8 @@ show partitions srcpart;
 
 
 
-create table if not exists nzhang_part8 like srcpart;
-describe extended nzhang_part8;
+create table if not exists nzhang_part8_n0 like srcpart;
+describe extended nzhang_part8_n0;
 
 set hive.merge.mapfiles=false;
 set hive.exec.dynamic.partition=true;
@@ -14,14 +14,14 @@ set hive.exec.dynamic.partition.mode=nonstrict;
 
 explain extended
 from srcpart
-insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+insert overwrite table nzhang_part8_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
 
 from srcpart
-insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
-insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
+insert overwrite table nzhang_part8_n0 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
+insert overwrite table nzhang_part8_n0 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08';
 
-show partitions nzhang_part8;
+show partitions nzhang_part8_n0;
 
-select * from nzhang_part8 where ds is not null and hr is not null;
+select * from nzhang_part8_n0 where ds is not null and hr is not null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q b/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q
index 1ce4824..922e132 100644
--- a/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q
+++ b/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q
@@ -1,6 +1,6 @@
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
-create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile;
-alter table hive_test_src add partition (pcol1 = 'test_part');
+create table hive_test_src_n1 ( col1 string ) partitioned by (pcol1 string) stored as textfile;
+alter table hive_test_src_n1 add partition (pcol1 = 'test_part');
 set hive.security.authorization.enabled=true;
-grant Update on table hive_test_src to user hive_test_user;
-load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
+grant Update on table hive_test_src_n1 to user hive_test_user;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n1 partition (pcol1 = 'test_part');

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q b/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q
index 6bac47f..60a4a0d 100644
--- a/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q
+++ b/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q
@@ -1,6 +1,6 @@
 -- test for loading into tables with the file with space in the name
 
 
-CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT);
-LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name;
-LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name;
+CREATE TABLE load_file_with_space_in_the_name_n0(name STRING, age INT);
+LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name_n0;
+LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_fs2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_fs2.q b/ql/src/test/queries/clientpositive/load_fs2.q
index a75758a..d247c0e 100644
--- a/ql/src/test/queries/clientpositive/load_fs2.q
+++ b/ql/src/test/queries/clientpositive/load_fs2.q
@@ -1,20 +1,20 @@
 -- HIVE-3300 [jira] LOAD DATA INPATH fails if a hdfs file with same name is added to table
 -- 'loader' table is used only for uploading kv1.txt to HDFS (!hdfs -put is not working on minMRDriver)
 
-create table result (key string, value string);
+create table result_n2 (key string, value string);
 create table loader (key string, value string);
 
 load data local inpath '../../data/files/kv1.txt' into table loader;
 
-load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result;
-show table extended like result;
+load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result_n2;
+show table extended like result_n2;
 
 load data local inpath '../../data/files/kv1.txt' into table loader;
 
-load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result;
-show table extended like result;
+load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result_n2;
+show table extended like result_n2;
 
 load data local inpath '../../data/files/kv1.txt' into table loader;
 
-load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result;
-show table extended like result;
+load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result_n2;
+show table extended like result_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_fs_overwrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_fs_overwrite.q b/ql/src/test/queries/clientpositive/load_fs_overwrite.q
index 51a8031..ec76b15 100644
--- a/ql/src/test/queries/clientpositive/load_fs_overwrite.q
+++ b/ql/src/test/queries/clientpositive/load_fs_overwrite.q
@@ -1,20 +1,20 @@
 --HIVE 6209
 
-drop table target;
+drop table target_n0;
 drop table temp;
 
-create table target (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/target';
+create table target_n0 (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/target';
 create table temp (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/temp';
 
 set fs.pfile.impl.disable.cache=false;
 
 load data local inpath '../../data/files/kv1.txt' into table temp;
-load data inpath '${system:test.tmp.dir}/temp/kv1.txt' overwrite into table target;
-select count(*) from target;
+load data inpath '${system:test.tmp.dir}/temp/kv1.txt' overwrite into table target_n0;
+select count(*) from target_n0;
 
 load data local inpath '../../data/files/kv2.txt' into table temp;
-load data inpath '${system:test.tmp.dir}/temp/kv2.txt' overwrite into table target;
-select count(*) from target;
+load data inpath '${system:test.tmp.dir}/temp/kv2.txt' overwrite into table target_n0;
+select count(*) from target_n0;
 
-drop table target;
+drop table target_n0;
 drop table temp;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_non_hdfs_path.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_non_hdfs_path.q b/ql/src/test/queries/clientpositive/load_non_hdfs_path.q
index 824ce69..d787278 100644
--- a/ql/src/test/queries/clientpositive/load_non_hdfs_path.q
+++ b/ql/src/test/queries/clientpositive/load_non_hdfs_path.q
@@ -2,5 +2,5 @@ dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/non_hdfs_path;
 dfs -touchz ${system:test.tmp.dir}/non_hdfs_path/1.txt;
 dfs -chmod 555 ${system:test.tmp.dir}/non_hdfs_path/1.txt;
 
-create table t1(i int);
-load data inpath 'pfile:${system:test.tmp.dir}/non_hdfs_path/' overwrite into table t1;
+create table t1_n129(i int);
+load data inpath 'pfile:${system:test.tmp.dir}/non_hdfs_path/' overwrite into table t1_n129;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_orc.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_orc.q b/ql/src/test/queries/clientpositive/load_orc.q
index 2eaf098..219d07d 100644
--- a/ql/src/test/queries/clientpositive/load_orc.q
+++ b/ql/src/test/queries/clientpositive/load_orc.q
@@ -1,10 +1,10 @@
 set hive.default.fileformat=ORC;
-create table orc_staging (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp);
-create table orc_test (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp);
+create table orc_staging_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp);
+create table orc_test_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp);
 
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_staging/;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_staging_n0;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_staging_n0/;
 
-load data inpath '${hiveconf:hive.metastore.warehouse.dir}/orc_staging/orc_split_elim.orc' into table orc_test;
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/;
+load data inpath '${hiveconf:hive.metastore.warehouse.dir}/orc_staging_n0/orc_split_elim.orc' into table orc_test_n1;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test_n1;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test_n1/;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_overwrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_overwrite.q b/ql/src/test/queries/clientpositive/load_overwrite.q
index e9e6280..50edda3 100644
--- a/ql/src/test/queries/clientpositive/load_overwrite.q
+++ b/ql/src/test/queries/clientpositive/load_overwrite.q
@@ -1,16 +1,16 @@
 --! qt:dataset:src
-create table load_overwrite like src;
+create table load_overwrite_n0 like src;
 
-insert overwrite table load_overwrite select * from src;
-show table extended like load_overwrite;
-select count(*) from load_overwrite;
+insert overwrite table load_overwrite_n0 select * from src;
+show table extended like load_overwrite_n0;
+select count(*) from load_overwrite_n0;
 
 
-load data local inpath '../../data/files/kv1.txt' into table load_overwrite;
-show table extended like load_overwrite;
-select count(*) from load_overwrite;
+load data local inpath '../../data/files/kv1.txt' into table load_overwrite_n0;
+show table extended like load_overwrite_n0;
+select count(*) from load_overwrite_n0;
 
 
-load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite;
-show table extended like load_overwrite;
-select count(*) from load_overwrite;
+load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite_n0;
+show table extended like load_overwrite_n0;
+select count(*) from load_overwrite_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/load_part_authsuccess.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_part_authsuccess.q b/ql/src/test/queries/clientpositive/load_part_authsuccess.q
index 868fd6c..47c471e 100644
--- a/ql/src/test/queries/clientpositive/load_part_authsuccess.q
+++ b/ql/src/test/queries/clientpositive/load_part_authsuccess.q
@@ -1,5 +1,5 @@
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
-create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile;
+create table hive_test_src_n0 ( col1 string ) partitioned by (pcol1 string) stored as textfile;
 set hive.security.authorization.enabled=true;
-grant Update on table hive_test_src to user hive_test_user;
-load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
+grant Update on table hive_test_src_n0 to user hive_test_user;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n0 partition (pcol1 = 'test_part');

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/loadpart1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/loadpart1.q b/ql/src/test/queries/clientpositive/loadpart1.q
index 735befe..7c11f24 100644
--- a/ql/src/test/queries/clientpositive/loadpart1.q
+++ b/ql/src/test/queries/clientpositive/loadpart1.q
@@ -1,14 +1,14 @@
 
 
 
-create table hive_test_src ( col1 string ) stored as textfile ;
-load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ;
+create table hive_test_src_n2 ( col1 string ) stored as textfile ;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src_n2 ;
 
 create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile;
-insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src ;
+insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src_n2 ;
 select * from hive_test_dst where pcol1='test_part' and pcol2='test_Part';
 
-insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src ;
+insert overwrite table hive_test_dst partition ( pCol1='test_part', pcol2='test_Part') select col1 from hive_test_src_n2 ;
 select * from hive_test_dst where pcol1='test_part' and pcol2='test_part';
 
 select * from hive_test_dst where pcol1='test_part';

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/lock1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lock1.q b/ql/src/test/queries/clientpositive/lock1.q
index a6b2c5b..c05a118 100644
--- a/ql/src/test/queries/clientpositive/lock1.q
+++ b/ql/src/test/queries/clientpositive/lock1.q
@@ -1,30 +1,30 @@
 --! qt:dataset:src
-drop table tstsrc;
-create table tstsrc like src;
-insert overwrite table tstsrc select key, value from src;
+drop table tstsrc_n1;
+create table tstsrc_n1 like src;
+insert overwrite table tstsrc_n1 select key, value from src;
 
 SHOW LOCKS;
-SHOW LOCKS tstsrc;
+SHOW LOCKS tstsrc_n1;
 
-LOCK TABLE tstsrc shared;
+LOCK TABLE tstsrc_n1 shared;
 SHOW LOCKS;
-SHOW LOCKS tstsrc;
-SHOW LOCKS tstsrc extended;
+SHOW LOCKS tstsrc_n1;
+SHOW LOCKS tstsrc_n1 extended;
 
-UNLOCK TABLE tstsrc;
+UNLOCK TABLE tstsrc_n1;
 SHOW LOCKS;
 SHOW LOCKS extended;
-SHOW LOCKS tstsrc;
-lock TABLE tstsrc SHARED;
+SHOW LOCKS tstsrc_n1;
+lock TABLE tstsrc_n1 SHARED;
 SHOW LOCKS;
 SHOW LOCKS extended;
-SHOW LOCKS tstsrc;
-LOCK TABLE tstsrc SHARED;
+SHOW LOCKS tstsrc_n1;
+LOCK TABLE tstsrc_n1 SHARED;
 SHOW LOCKS;
 SHOW LOCKS extended;
-SHOW LOCKS tstsrc;
-UNLOCK TABLE tstsrc;
+SHOW LOCKS tstsrc_n1;
+UNLOCK TABLE tstsrc_n1;
 SHOW LOCKS;
 SHOW LOCKS extended;
-SHOW LOCKS tstsrc;
-drop table tstsrc;
+SHOW LOCKS tstsrc_n1;
+drop table tstsrc_n1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/lock2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lock2.q b/ql/src/test/queries/clientpositive/lock2.q
index e990312..81b995c 100644
--- a/ql/src/test/queries/clientpositive/lock2.q
+++ b/ql/src/test/queries/clientpositive/lock2.q
@@ -4,35 +4,35 @@ drop table tstsrc;
 create table tstsrc like src;
 insert overwrite table tstsrc select key, value from src;
 
-drop table tstsrcpart;
-create table tstsrcpart like srcpart;
+drop table tstsrcpart_n0;
+create table tstsrcpart_n0 like srcpart;
 
-insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11')
+insert overwrite table tstsrcpart_n0 partition (ds='2008-04-08', hr='11')
 select key, value from srcpart where ds='2008-04-08' and hr='11';
 
 LOCK TABLE tstsrc SHARED;
-LOCK TABLE tstsrcpart SHARED;
-LOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE;
+LOCK TABLE tstsrcpart_n0 SHARED;
+LOCK TABLE tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') EXCLUSIVE;
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11');
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11') extended;
+SHOW LOCKS tstsrcpart_n0;
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11') extended;
 
 UNLOCK TABLE tstsrc;
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11');
+SHOW LOCKS tstsrcpart_n0;
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
 
-UNLOCK TABLE tstsrcpart;
+UNLOCK TABLE tstsrcpart_n0;
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11');
+SHOW LOCKS tstsrcpart_n0;
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
 
-UNLOCK TABLE tstsrcpart PARTITION(ds='2008-04-08', hr='11');
+UNLOCK TABLE tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
-SHOW LOCKS tstsrcpart PARTITION(ds='2008-04-08', hr='11');
+SHOW LOCKS tstsrcpart_n0;
+SHOW LOCKS tstsrcpart_n0 PARTITION(ds='2008-04-08', hr='11');
 
 
 drop table tstsrc;
-drop table tstsrcpart;
+drop table tstsrcpart_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/lock3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lock3.q b/ql/src/test/queries/clientpositive/lock3.q
index dd1843f..1da387f 100644
--- a/ql/src/test/queries/clientpositive/lock3.q
+++ b/ql/src/test/queries/clientpositive/lock3.q
@@ -1,9 +1,9 @@
 --! qt:dataset:srcpart
-drop table tstsrcpart;
-create table tstsrcpart like srcpart;
+drop table tstsrcpart_n4;
+create table tstsrcpart_n4 like srcpart;
 
 from srcpart
-insert overwrite table tstsrcpart partition (ds='2008-04-08',hr='11')
+insert overwrite table tstsrcpart_n4 partition (ds='2008-04-08',hr='11')
 select key, value where ds='2008-04-08' and hr='11';
 
 set hive.exec.dynamic.partition.mode=nonstrict;
@@ -11,23 +11,23 @@ set hive.exec.dynamic.partition=true;
 
 
 from srcpart
-insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08';
+insert overwrite table tstsrcpart_n4 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08';
 
 from srcpart
-insert overwrite table tstsrcpart partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08';
+insert overwrite table tstsrcpart_n4 partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08';
 
 
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
+SHOW LOCKS tstsrcpart_n4;
 
-drop table tstsrcpart;
+drop table tstsrcpart_n4;
 
-drop table tst1;
-create table tst1 (key string, value string) partitioned by (a string, b string, c string, d string);
+drop table tst1_n4;
+create table tst1_n4 (key string, value string) partitioned by (a string, b string, c string, d string);
 
 
 from srcpart
-insert overwrite table tst1 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08';
+insert overwrite table tst1_n4 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08';
 
 
-drop table tst1;
+drop table tst1_n4;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/lock4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lock4.q b/ql/src/test/queries/clientpositive/lock4.q
index f113659..256ca9d 100644
--- a/ql/src/test/queries/clientpositive/lock4.q
+++ b/ql/src/test/queries/clientpositive/lock4.q
@@ -1,10 +1,10 @@
 --! qt:dataset:srcpart
 set hive.lock.mapred.only.operation=true;
-drop table tstsrcpart;
-create table tstsrcpart like srcpart;
+drop table tstsrcpart_n3;
+create table tstsrcpart_n3 like srcpart;
 
 from srcpart
-insert overwrite table tstsrcpart partition (ds='2008-04-08',hr='11')
+insert overwrite table tstsrcpart_n3 partition (ds='2008-04-08',hr='11')
 select key, value where ds='2008-04-08' and hr='11';
 
 set hive.exec.dynamic.partition.mode=nonstrict;
@@ -12,23 +12,23 @@ set hive.exec.dynamic.partition=true;
 
 
 from srcpart
-insert overwrite table tstsrcpart partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08';
+insert overwrite table tstsrcpart_n3 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08';
 
 from srcpart
-insert overwrite table tstsrcpart partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08';
+insert overwrite table tstsrcpart_n3 partition (ds ='2008-04-08', hr) select key, value, hr where ds = '2008-04-08';
 
 
 SHOW LOCKS;
-SHOW LOCKS tstsrcpart;
+SHOW LOCKS tstsrcpart_n3;
 
-drop table tstsrcpart;
+drop table tstsrcpart_n3;
 
-drop table tst1;
-create table tst1 (key string, value string) partitioned by (a string, b string, c string, d string);
+drop table tst1_n3;
+create table tst1_n3 (key string, value string) partitioned by (a string, b string, c string, d string);
 
 
 from srcpart
-insert overwrite table tst1 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08';
+insert overwrite table tst1_n3 partition (a='1', b='2', c, d) select key, value, ds, hr where ds = '2008-04-08';
 
 
-drop table tst1;
+drop table tst1_n3;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/mapjoin2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mapjoin2.q b/ql/src/test/queries/clientpositive/mapjoin2.q
index b132e49..e194bd0 100644
--- a/ql/src/test/queries/clientpositive/mapjoin2.q
+++ b/ql/src/test/queries/clientpositive/mapjoin2.q
@@ -1,16 +1,16 @@
 set hive.mapred.mode=nonstrict;
 set hive.auto.convert.join=true;
 
-create table tbl (n bigint, t string); 
+create table tbl_n1 (n bigint, t string); 
 
-insert into tbl values (1, 'one'); 
-insert into tbl values(2, 'two');
+insert into tbl_n1 values (1, 'one'); 
+insert into tbl_n1 values(2, 'two');
 
-select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a  left outer join  (select * from tbl where 1 = 2) b on a.n = b.n;
+select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl_n1 where n = 1) a  left outer join  (select * from tbl_n1 where 1 = 2) b on a.n = b.n;
 
-select isnull(a.n), isnull(a.t), b.n, b.t from (select * from tbl where 2 = 1) a  right outer join  (select * from tbl where n = 2) b on a.n = b.n;
+select isnull(a.n), isnull(a.t), b.n, b.t from (select * from tbl_n1 where 2 = 1) a  right outer join  (select * from tbl_n1 where n = 2) b on a.n = b.n;
 
-select isnull(a.n), isnull(a.t), isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a  full outer join  (select * from tbl where n = 2) b on a.n = b.n;
+select isnull(a.n), isnull(a.t), isnull(b.n), isnull(b.t) from (select * from tbl_n1 where n = 1) a  full outer join  (select * from tbl_n1 where n = 2) b on a.n = b.n;
 
 select a.key, a.a_one, b.b_one, a.a_zero, b.b_zero from ( SELECT 11 key, 0 confuse_you, 1 a_one, 0 a_zero ) a join ( SELECT 11 key, 0 confuse_you, 1 b_one, 0 b_zero ) b on a.key = b.key;
 

Reply via email to