http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q b/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
index 5b29ebb..644a0ce 100644
--- a/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
+++ b/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
@@ -1,25 +1,25 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n1;
 
-create table exim_employee ( emp_id int comment "employee id")         
+create table exim_employee_n1 ( emp_id int comment "employee id")      
        comment "employee table"
       partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
        stored as textfile      
        tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat" 
-       into table exim_employee partition (emp_country="in", emp_state="tn");
+       into table exim_employee_n1 partition (emp_country="in", emp_state="tn");
 load data local inpath "../../data/files/test.dat" 
-       into table exim_employee partition (emp_country="in", emp_state="ka");
+       into table exim_employee_n1 partition (emp_country="in", emp_state="ka");
 load data local inpath "../../data/files/test.dat" 
-       into table exim_employee partition (emp_country="us", emp_state="tn");
+       into table exim_employee_n1 partition (emp_country="us", emp_state="tn");
 load data local inpath "../../data/files/test.dat" 
-       into table exim_employee partition (emp_country="us", emp_state="ka");
+       into table exim_employee_n1 partition (emp_country="us", emp_state="ka");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n1 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n1;
 
 create database importer;
 use importer;
@@ -27,16 +27,16 @@ use importer;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
 
-import table exim_employee partition (emp_country="us", emp_state="tn") 
+import table exim_employee_n1 partition (emp_country="us", emp_state="tn") 
        from 'ql/test/data/exports/exim_employee'
        location 'ql/test/data/tablestore/exim_employee';
-describe extended exim_employee;       
-show table extended like exim_employee;
-show table extended like exim_employee partition (emp_country="us", emp_state="tn");
+describe extended exim_employee_n1;
+show table extended like exim_employee_n1;
+show table extended like exim_employee_n1 partition (emp_country="us", emp_state="tn");
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-select * from exim_employee;
+select * from exim_employee_n1;
 dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
-select * from exim_employee;
-drop table exim_employee;
+select * from exim_employee_n1;
+drop table exim_employee_n1;
 
 drop database importer;

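[Note: the exim_20 test above exercises Hive's EXPORT/IMPORT round trip for a partitioned managed table. A minimal sketch of the same pattern, with a placeholder table name and /tmp paths that are not part of the patch:

-- EXPORT writes a _metadata file plus one subdirectory per partition
export table emp to '/tmp/exports/emp';
drop table emp;
-- IMPORT can restore a single partition and place its data at a caller-chosen
-- location, which is what the "managed location" variant verifies
import table emp partition (emp_country="us", emp_state="tn")
       from '/tmp/exports/emp'
       location '/tmp/tablestore/emp';
]
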
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q b/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
index 1e3eaee..9d4cd04 100644
--- a/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
+++ b/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
@@ -2,15 +2,15 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
 
-create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../../data/files/test.dat" into table exim_department;
+create table exim_department_n3 ( dep_id int) stored as textfile;
+load data local inpath "../../data/files/test.dat" into table exim_department_n3;
 
 set hive.security.authorization.enabled=true;
 
-grant Select on table exim_department to user hive_test_user;
+grant Select on table exim_department_n3 to user hive_test_user;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
+export table exim_department_n3 to 'ql/test/data/exports/exim_department';
 
 set hive.security.authorization.enabled=false;
-drop table exim_department;
+drop table exim_department_n3;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q b/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q
index 606f9af..a27ef45 100644
--- a/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q
+++ b/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q
@@ -1,27 +1,27 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n1,exim_employee;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
 
-create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../../data/files/test.dat" into table exim_department;
+create table exim_department_n1 ( dep_id int) stored as textfile;
+load data local inpath "../../data/files/test.dat" into table exim_department_n1;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n1 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n1;
 
 create database importer;
 use importer;
 
-create table exim_department ( dep_id int) stored as textfile;
+create table exim_department_n1 ( dep_id int) stored as textfile;
 set hive.security.authorization.enabled=true;
-grant Alter on table exim_department to user hive_test_user;
-grant Update on table exim_department to user hive_test_user;
+grant Alter on table exim_department_n1 to user hive_test_user;
+grant Update on table exim_department_n1 to user hive_test_user;
 import from 'ql/test/data/exports/exim_department';
 
 set hive.security.authorization.enabled=false;
-select * from exim_department;
-drop table exim_department;
+select * from exim_department_n1;
+drop table exim_department_n1;
 drop database importer;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q b/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q
index 316f2e0..888448d 100644
--- a/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q
+++ b/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q
@@ -1,36 +1,36 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n7;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
 
-create table exim_employee ( emp_id int comment "employee id")         
+create table exim_employee_n7 ( emp_id int comment "employee id")      
        comment "employee table"
       partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
        stored as textfile      
        tblproperties("creator"="krishna");
 load data local inpath "../../data/files/test.dat" 
-       into table exim_employee partition (emp_country="in", emp_state="tn");
+       into table exim_employee_n7 partition (emp_country="in", emp_state="tn");
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n7 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n7;
 
 create database importer;
 use importer;
-create table exim_employee ( emp_id int comment "employee id")         
+create table exim_employee_n7 ( emp_id int comment "employee id")      
        comment "employee table"
       partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
        stored as textfile      
        tblproperties("creator"="krishna");
 
 set hive.security.authorization.enabled=true;
-grant Alter on table exim_employee to user hive_test_user;
-grant Update on table exim_employee to user hive_test_user;
+grant Alter on table exim_employee_n7 to user hive_test_user;
+grant Update on table exim_employee_n7 to user hive_test_user;
 import from 'ql/test/data/exports/exim_employee';
 
 set hive.security.authorization.enabled=false;
-select * from exim_employee;
+select * from exim_employee_n7;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-drop table exim_employee;
+drop table exim_employee_n7;
 drop database importer;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q b/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q
index 8ded70b..e403418 100644
--- a/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q
+++ b/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q
@@ -1,14 +1,14 @@
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department_n6,exim_employee;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
 
-create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../../data/files/test.dat" into table exim_department;
+create table exim_department_n6 ( dep_id int) stored as textfile;
+load data local inpath "../../data/files/test.dat" into table exim_department_n6;
 dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/test;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
-export table exim_department to 'ql/test/data/exports/exim_department';
-drop table exim_department;
+export table exim_department_n6 to 'ql/test/data/exports/exim_department';
+drop table exim_department_n6;
 
 create database importer;
 use importer;
@@ -18,8 +18,8 @@ grant Create on database importer to user hive_test_user;
 import from 'ql/test/data/exports/exim_department';
 
 set hive.security.authorization.enabled=false;
-select * from exim_department;
-drop table exim_department;
+select * from exim_department_n6;
+drop table exim_department_n6;
 drop database importer;
 dfs -rmr target/tmp/ql/test/data/exports/exim_department;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/exim_hidden_files.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/exim_hidden_files.q b/ql/src/test/queries/clientpositive/exim_hidden_files.q
index 18bf3ad..f046411 100644
--- a/ql/src/test/queries/clientpositive/exim_hidden_files.q
+++ b/ql/src/test/queries/clientpositive/exim_hidden_files.q
@@ -1,23 +1,23 @@
 set hive.mapred.mode=nonstrict;
 set hive.test.mode=true;
 set hive.test.mode.prefix=;
-set hive.test.mode.nosamplelist=exim_department,exim_employee;
+set hive.test.mode.nosamplelist=exim_department,exim_employee_n6;
 
-create table exim_employee ( emp_id int) partitioned by (emp_country string);
-load data local inpath "../../data/files/test.dat" into table exim_employee partition (emp_country="in");
+create table exim_employee_n6 ( emp_id int) partitioned by (emp_country string);
+load data local inpath "../../data/files/test.dat" into table exim_employee_n6 partition (emp_country="in");
 
 dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/exim_employee/emp_country=in/_logs;
 dfs -touchz ${system:test.warehouse.dir}/exim_employee/emp_country=in/_logs/job.xml;
-export table exim_employee to 'ql/test/data/exports/exim_employee';
-drop table exim_employee;
+export table exim_employee_n6 to 'ql/test/data/exports/exim_employee';
+drop table exim_employee_n6;
 
 create database importer;
 use importer;
 
 import from 'ql/test/data/exports/exim_employee';
-describe formatted exim_employee;
-select * from exim_employee;
+describe formatted exim_employee_n6;
+select * from exim_employee_n6;
 dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
-drop table exim_employee;
+drop table exim_employee_n6;
 drop database importer;
 use default;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explain_ddl.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explain_ddl.q b/ql/src/test/queries/clientpositive/explain_ddl.q
index b873088..16f7201 100644
--- a/ql/src/test/queries/clientpositive/explain_ddl.q
+++ b/ql/src/test/queries/clientpositive/explain_ddl.q
@@ -2,18 +2,18 @@
 -- This test is used for testing explain for DDL/DML statements
 
 -- Create some views and tables
-CREATE VIEW V1 AS SELECT key, value from src;
-select count(*) from V1 where key > 0;
+CREATE VIEW V1_n0 AS SELECT key, value from src;
+select count(*) from V1_n0 where key > 0;
 
 CREATE TABLE M1 AS SELECT key, value from src;
 select count(*) from M1 where key > 0;
 
 EXPLAIN CREATE TABLE M1 AS select * from src;
 EXPLAIN CREATE TABLE M1 AS select * from M1;
-EXPLAIN CREATE TABLE M1 AS select * from V1;
+EXPLAIN CREATE TABLE M1 AS select * from V1_n0;
 
-EXPLAIN CREATE TABLE V1 AS select * from M1;
-EXPLAIN CREATE VIEW V1 AS select * from M1;
+EXPLAIN CREATE TABLE V1_n0 AS select * from M1;
+EXPLAIN CREATE VIEW V1_n0 AS select * from M1;
 
 EXPLAIN CREATE TABLE M1 LIKE src;
 EXPLAIN CREATE TABLE M1 LIKE M1;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explain_dependency.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explain_dependency.q b/ql/src/test/queries/clientpositive/explain_dependency.q
index df3cb7a..33c31eb 100644
--- a/ql/src/test/queries/clientpositive/explain_dependency.q
+++ b/ql/src/test/queries/clientpositive/explain_dependency.q
@@ -5,14 +5,14 @@ set hive.mapred.mode=nonstrict;
 -- This test is used for testing EXPLAIN DEPENDENCY command
 
 -- Create some views
-CREATE VIEW V1 AS SELECT key, value from src;
-CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL;
+CREATE VIEW V1_n6 AS SELECT key, value from src;
+CREATE VIEW V2_n1 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL;
 CREATE VIEW V3 AS 
-  SELECT src1.key, src2.value FROM V2 src1 
+  SELECT src1.key, src2.value FROM V2_n1 src1 
   JOIN src src2 ON src1.key = src2.key WHERE src1.ds IS NOT NULL;
 CREATE VIEW V4 AS 
   SELECT src1.key, src2.value as value1, src3.value as value2 
-  FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key;
+  FROM V1_n6 src1 JOIN V2_n1 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key;
 
 -- Simple select queries, union queries and join queries
 EXPLAIN DEPENDENCY 
@@ -27,8 +27,8 @@ EXPLAIN DEPENDENCY
   SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL;
 
 -- With views
-EXPLAIN DEPENDENCY SELECT * FROM V1;
-EXPLAIN DEPENDENCY SELECT * FROM V2;
+EXPLAIN DEPENDENCY SELECT * FROM V1_n6;
+EXPLAIN DEPENDENCY SELECT * FROM V2_n1;
 EXPLAIN DEPENDENCY SELECT * FROM V3;
 EXPLAIN DEPENDENCY SELECT * FROM V4;
 

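[Note: EXPLAIN DEPENDENCY prints a JSON summary of the input tables and input partitions a query would read, expanding views down to their base tables, so the renamed views still resolve to src and srcpart in the expected output. A minimal sketch with a placeholder view name:

create view order_view as select key, value from src;
-- output is roughly {"input_tables":[...,"default@order_view",...,"default@src",...],"input_partitions":[...]}
explain dependency select * from order_view;
]
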
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explain_logical.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explain_logical.q b/ql/src/test/queries/clientpositive/explain_logical.q
index 261b50a..96b8672 100644
--- a/ql/src/test/queries/clientpositive/explain_logical.q
+++ b/ql/src/test/queries/clientpositive/explain_logical.q
@@ -5,14 +5,14 @@ set hive.mapred.mode=nonstrict;
 -- This test is used for testing EXPLAIN LOGICAL command
 
 -- Create some views
-CREATE VIEW V1 AS SELECT key, value from src;
-CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL;
-CREATE VIEW V3 AS 
-  SELECT src1.key, src2.value FROM V2 src1 
+CREATE VIEW V1_n8 AS SELECT key, value from src;
+CREATE VIEW V2_n3 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL;
+CREATE VIEW V3_n1 AS 
+  SELECT src1.key, src2.value FROM V2_n3 src1 
   JOIN src src2 ON src1.key = src2.key WHERE src1.ds IS NOT NULL;
-CREATE VIEW V4 AS 
+CREATE VIEW V4_n1 AS 
   SELECT src1.key, src2.value as value1, src3.value as value2 
-  FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key;
+  FROM V1_n8 src1 JOIN V2_n3 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key;
 
 -- Simple select queries, union queries and join queries
 EXPLAIN LOGICAL 
@@ -27,14 +27,14 @@ EXPLAIN LOGICAL
   SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL;
 
 -- With views
-EXPLAIN LOGICAL SELECT * FROM V1;
-EXPLAIN LOGICAL SELECT * FROM V2;
-EXPLAIN LOGICAL SELECT * FROM V3;
-EXPLAIN LOGICAL SELECT * FROM V4;
+EXPLAIN LOGICAL SELECT * FROM V1_n8;
+EXPLAIN LOGICAL SELECT * FROM V2_n3;
+EXPLAIN LOGICAL SELECT * FROM V3_n1;
+EXPLAIN LOGICAL SELECT * FROM V4_n1;
 
 -- The table should show up in the explain logical even if none
 -- of the partitions are selected.
-CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10';
-EXPLAIN LOGICAL SELECT * FROM V5;
+CREATE VIEW V5_n0 as SELECT * FROM srcpart where ds = '10';
+EXPLAIN LOGICAL SELECT * FROM V5_n0;
 
 EXPLAIN LOGICAL SELECT s1.key, s1.cnt, s2.value FROM (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 JOIN src s2 ON (s1.key = s2.key) ORDER BY s1.key;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explain_outputs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explain_outputs.q b/ql/src/test/queries/clientpositive/explain_outputs.q
index d53b66e..d42847d 100644
--- a/ql/src/test/queries/clientpositive/explain_outputs.q
+++ b/ql/src/test/queries/clientpositive/explain_outputs.q
@@ -1,15 +1,15 @@
 
-create table t1 (id int);
-create table t2 (id int);
+create table t1_n22 (id int);
+create table t2_n14 (id int);
 
-insert into t1 values (1),(10);
-insert into t2 values (1),(2),(3),(4),(5);
+insert into t1_n22 values (1),(10);
+insert into t2_n14 values (1),(2),(3),(4),(5);
 
 explain
-select sum(t1.id) from t1 join t2 on (t1.id=t2.id);
+select sum(t1_n22.id) from t1_n22 join t2_n14 on (t1_n22.id=t2_n14.id);
 
 explain analyze
-select sum(t1.id) from t1 join t2 on (t1.id=t2.id);
+select sum(t1_n22.id) from t1_n22 join t2_n14 on (t1_n22.id=t2_n14.id);
 
 explain reoptimization
-select sum(t1.id) from t1 join t2 on (t1.id=t2.id);
+select sum(t1_n22.id) from t1_n22 join t2_n14 on (t1_n22.id=t2_n14.id);

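[Note: the three statements above differ in how much work they do. Plain EXPLAIN only compiles the query; EXPLAIN ANALYZE executes it first and then re-renders the plan with actual row counts alongside the estimates; EXPLAIN REOPTIMIZATION, part of Hive's query re-execution work, shows the plan using recorded runtime statistics. A minimal sketch with a placeholder table:

create table t_small (id int);
insert into t_small values (1),(10);
-- executes the query, then prints operator statistics of (roughly) the form
-- "Num rows: <estimated>/<actual>" in the plan
explain analyze select count(*) from t_small;
]
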
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explain_rearrange.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explain_rearrange.q b/ql/src/test/queries/clientpositive/explain_rearrange.q
index 51644c1..5c1cc09 100644
--- a/ql/src/test/queries/clientpositive/explain_rearrange.q
+++ b/ql/src/test/queries/clientpositive/explain_rearrange.q
@@ -2,8 +2,8 @@
 set hive.mapred.mode=nonstrict;
 -- query from auto_sortmerge_join_9.q
 
-CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl1_n9(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl2_n8(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
 
 set hive.auto.convert.join=true;
 set hive.optimize.bucketmapjoin = true;
@@ -20,14 +20,14 @@ select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key
   ) subq1 group by key
 ) src1
 join
 (
   select key, count(*) as cnt1 from
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key
   ) subq2 group by key
 ) src2
 on src1.key = src2.key
@@ -42,14 +42,14 @@ select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key
   ) subq1 group by key
 ) src1
 join
 (
   select key, count(*) as cnt1 from
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key
   ) subq2 group by key
 ) src2
 on src1.key = src2.key
@@ -64,14 +64,14 @@ select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key
   ) subq1 group by key
 ) src1
 join
 (
   select key, count(*) as cnt1 from
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key
   ) subq2 group by key
 ) src2
 on src1.key = src2.key
@@ -86,14 +86,14 @@ select src1.key, src1.cnt1, src2.cnt1 from
 (
   select key, count(*) as cnt1 from
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key
   ) subq1 group by key
 ) src1
 join
 (
   select key, count(*) as cnt1 from
   (
-    select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+    select a.key as key, a.value as val1, b.value as val2 from tbl1_n9 a join tbl2_n8 b on a.key = b.key
   ) subq2 group by key
 ) src2
 on src1.key = src2.key

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explainanalyze_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainanalyze_1.q b/ql/src/test/queries/clientpositive/explainanalyze_1.q
index 9a2b9c5..9043826 100644
--- a/ql/src/test/queries/clientpositive/explainanalyze_1.q
+++ b/ql/src/test/queries/clientpositive/explainanalyze_1.q
@@ -7,11 +7,11 @@ explain analyze select * from src a union all select * from src b limit 10;
 
 explain analyze select key from src;
 
-explain analyze create table t as select key from src;
+explain analyze create table t_n28 as select key from src;
 
-create table t as select key from src;
+create table t_n28 as select key from src;
 
-explain analyze insert overwrite table t select key from src;
+explain analyze insert overwrite table t_n28 select key from src;
 
 explain analyze select key from src limit 10;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explainanalyze_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainanalyze_2.q b/ql/src/test/queries/clientpositive/explainanalyze_2.q
index 10b605f..7e48a8a 100644
--- a/ql/src/test/queries/clientpositive/explainanalyze_2.q
+++ b/ql/src/test/queries/clientpositive/explainanalyze_2.q
@@ -35,31 +35,31 @@ set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;
 set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
 
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n11(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n7 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_n11 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n11 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n11 partition(ds='2008-04-08');
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n11 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n11 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n11 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n11 partition(ds='2008-04-08');
 
 
 
 set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n7 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n11;
 
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n6(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n6 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n11;
 
-CREATE TABLE tab2(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab2 partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab2_n3(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab2_n3 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n11;
 
 set hive.convert.join.bucket.mapjoin.tez = false;
 set hive.auto.convert.sortmerge.join = true;
@@ -67,32 +67,32 @@ set hive.auto.convert.sortmerge.join = true;
 set hive.auto.convert.join.noconditionaltask.size=500;
 
 explain analyze 
-select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key;
+select s1.key as key, s1.value as value from tab_n6 s1 join tab_n6 s3 on s1.key=s3.key;
 
 explain analyze 
-select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value;
+select s1.key as key, s1.value as value from tab_n6 s1 join tab_n6 s3 on s1.key=s3.key join tab_n6 s2 on s1.value=s2.value;
 
 explain analyze 
-select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key;
+select s1.key as key, s1.value as value from tab_n6 s1 join tab2_n3 s3 on s1.key=s3.key;
 
 explain analyze 
-select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key join tab2 s2 on s1.value=s2.value;
+select s1.key as key, s1.value as value from tab_n6 s1 join tab2_n3 s3 on s1.key=s3.key join tab2_n3 s2 on s1.value=s2.value;
 
 explain analyze
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+select count(*) from (select s1.key as key, s1.value as value from tab_n6 s1 join tab_n6 s3 on s1.key=s3.key
 UNION  ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key);
+select s2.key as key, s2.value as value from tab_n6 s2
+) a_n14 join tab_part_n7 b_n10 on (a_n14.key = b_n10.key);
 
 explain analyze
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value
+select count(*) from (select s1.key as key, s1.value as value from tab_n6 s1 join tab_n6 s3 on s1.key=s3.key join tab_n6 s2 on s1.value=s2.value
 UNION  ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key);
+select s2.key as key, s2.value as value from tab_n6 s2
+) a_n14 join tab_part_n7 b_n10 on (a_n14.key = b_n10.key);
 
-CREATE TABLE a(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE b(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE c(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE a_n14(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE b_n10(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE c_n3(key STRING, value STRING) STORED AS TEXTFILE;
 
 explain analyze
 from
@@ -109,9 +109,9 @@ SELECT x.key, y.value
 FROM src1 x JOIN src1 y ON (x.key = y.key) 
 JOIN (select key, value from src1 union all select key, value from src union all select key, value from src union all select key, value from src)z ON (x.value = z.value)
 ) tmp
-INSERT OVERWRITE TABLE a SELECT tmp.key, tmp.value
-INSERT OVERWRITE TABLE b SELECT tmp.key, tmp.value
-INSERT OVERWRITE TABLE c SELECT tmp.key, tmp.value;
+INSERT OVERWRITE TABLE a_n14 SELECT tmp.key, tmp.value
+INSERT OVERWRITE TABLE b_n10 SELECT tmp.key, tmp.value
+INSERT OVERWRITE TABLE c_n3 SELECT tmp.key, tmp.value;
 
 explain analyze
 FROM
@@ -125,22 +125,22 @@ union
 SELECT x.key as key, y.value as value from src1 x JOIN src1 y ON (x.key = y.key) 
 JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value)
 ) tmp
-INSERT OVERWRITE TABLE a SELECT tmp.key, tmp.value
-INSERT OVERWRITE TABLE b SELECT tmp.key, tmp.value
-INSERT OVERWRITE TABLE c SELECT tmp.key, tmp.value;
+INSERT OVERWRITE TABLE a_n14 SELECT tmp.key, tmp.value
+INSERT OVERWRITE TABLE b_n10 SELECT tmp.key, tmp.value
+INSERT OVERWRITE TABLE c_n3 SELECT tmp.key, tmp.value;
 
 
-CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n105(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n29(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
 
 explain analyze 
 FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION DISTINCT  
-      select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value;
+      select s2.key as key, s2.value as value from src s2) unionsrc_n3
+INSERT OVERWRITE TABLE DEST1_n105 SELECT unionsrc_n3.key, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) GROUP BY unionsrc_n3.key
+INSERT OVERWRITE TABLE DEST2_n29 SELECT unionsrc_n3.key, unionsrc_n3.value, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) GROUP BY unionsrc_n3.key, unionsrc_n3.value;
 
-explain analyze FROM UNIQUEJOIN PRESERVE src a (a.key), PRESERVE src1 b (b.key), PRESERVE srcpart c (c.key) SELECT a.key, b.key, c.key;
+explain analyze FROM UNIQUEJOIN PRESERVE src a_n14 (a_n14.key), PRESERVE src1 b_n10 (b_n10.key), PRESERVE srcpart c_n3 (c_n3.key) SELECT a_n14.key, b_n10.key, c_n3.key;
 
 
 explain analyze
@@ -148,20 +148,20 @@ FROM (
       select key, value from (
       select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1
                          UNION all 
-      select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub
+      select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub_n10
                          UNION all
       select key, value from src s0
-                             ) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) 
-GROUP BY unionsrc.key, unionsrc.value;
+                             ) unionsrc_n3
+INSERT OVERWRITE TABLE DEST1_n105 SELECT unionsrc_n3.key, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) GROUP BY unionsrc_n3.key
+INSERT OVERWRITE TABLE DEST2_n29 SELECT unionsrc_n3.key, unionsrc_n3.value, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) 
+GROUP BY unionsrc_n3.key, unionsrc_n3.value;
 
 explain analyze
 FROM (
       select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1
                          UNION all 
       select s2.key as key, s2.value as value, 'tst1' as value2 from src s2
-                             ) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) 
-GROUP BY unionsrc.key, unionsrc.value;
+                             ) unionsrc_n3
+INSERT OVERWRITE TABLE DEST1_n105 SELECT unionsrc_n3.key, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) GROUP BY unionsrc_n3.key
+INSERT OVERWRITE TABLE DEST2_n29 SELECT unionsrc_n3.key, unionsrc_n3.value, COUNT(DISTINCT SUBSTR(unionsrc_n3.value,5)) 
+GROUP BY unionsrc_n3.key, unionsrc_n3.value;

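[Note: UNIQUEJOIN, exercised at the end of this hunk, is Hive's n-way join over one join expression per table; PRESERVE keeps a table's rows even when its key has no match in the other tables, similar to a full outer join side. A minimal sketch with placeholder tables t1 and t2:

FROM UNIQUEJOIN PRESERVE t1 a (a.key), PRESERVE t2 b (b.key)
SELECT a.key, b.key;
]
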
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explainanalyze_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainanalyze_3.q b/ql/src/test/queries/clientpositive/explainanalyze_3.q
index d5583db..3d5b3a8 100644
--- a/ql/src/test/queries/clientpositive/explainanalyze_3.q
+++ b/ql/src/test/queries/clientpositive/explainanalyze_3.q
@@ -34,13 +34,13 @@ explain analyze use newDB;
 
 use newDB;
 
-create table tab (name string);
+create table tab_n2 (name string);
 
-explain analyze alter table tab rename to newName;
+explain analyze alter table tab_n2 rename to newName;
 
-explain analyze drop table tab;
+explain analyze drop table tab_n2;
 
-drop table tab;
+drop table tab_n2;
 
 explain analyze use default;
 
@@ -65,24 +65,24 @@ explain analyze SELECT SIGMOID(2) FROM src LIMIT 1;
 explain analyze DROP TEMPORARY MACRO SIGMOID;
 DROP TEMPORARY MACRO SIGMOID;
 
-explain analyze create table src_autho_test as select * from src;
-create table src_autho_test as select * from src;
+explain analyze create table src_autho_test_n4 as select * from src;
+create table src_autho_test_n4 as select * from src;
 
 set hive.security.authorization.enabled=true;
 
-explain analyze grant select on table src_autho_test to user hive_test_user;
-grant select on table src_autho_test to user hive_test_user;
+explain analyze grant select on table src_autho_test_n4 to user hive_test_user;
+grant select on table src_autho_test_n4 to user hive_test_user;
 
-explain analyze show grant user hive_test_user on table src_autho_test;
-explain analyze show grant user hive_test_user on table src_autho_test(key);
+explain analyze show grant user hive_test_user on table src_autho_test_n4;
+explain analyze show grant user hive_test_user on table src_autho_test_n4(key);
 
-select key from src_autho_test order by key limit 20;
+select key from src_autho_test_n4 order by key limit 20;
 
-explain analyze revoke select on table src_autho_test from user hive_test_user;
+explain analyze revoke select on table src_autho_test_n4 from user hive_test_user;
 
-explain analyze grant select(key) on table src_autho_test to user hive_test_user;
+explain analyze grant select(key) on table src_autho_test_n4 to user hive_test_user;
 
-explain analyze revoke select(key) on table src_autho_test from user hive_test_user;
+explain analyze revoke select(key) on table src_autho_test_n4 from user hive_test_user;
 
 explain analyze 
 create role sRc_roLE;
@@ -100,19 +100,19 @@ explain analyze drop role sRc_roLE;
 drop role sRc_roLE;
 
 set hive.security.authorization.enabled=false;
-drop table src_autho_test;
+drop table src_autho_test_n4;
 
-explain analyze drop view v;
+explain analyze drop view v_n5;
 
-explain analyze create view v as with cte as (select * from src  order by key limit 5)
+explain analyze create view v_n5 as with cte as (select * from src  order by key limit 5)
 select * from cte;
 
 explain analyze with cte as (select * from src  order by key limit 5)
 select * from cte;
 
-create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
 
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n1;
 
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
@@ -131,40 +131,40 @@ set hive.merge.tezfiles=true;
 set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 
-explain analyze insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain analyze insert overwrite table orc_merge5_n1 select userid,string1,subtype,decimal1,ts from orc_merge5_n1 where userid<=13;
 
-drop table orc_merge5;
+drop table orc_merge5_n1;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;
 
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n4(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n3 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_n5 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n4 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n4 partition(ds='2008-04-08');
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n5 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n5 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n5 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n5 partition(ds='2008-04-08');
 
 
 
 set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n3 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n5;
 
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n2(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n2 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n4;
 
 set hive.convert.join.bucket.mapjoin.tez = true;
 explain analyze
 select a.key, a.value, b.value
-from tab a join tab_part b on a.key = b.key;
+from tab_n2 a join tab_part_n3 b on a.key = b.key;
 
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explainanalyze_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainanalyze_5.q b/ql/src/test/queries/clientpositive/explainanalyze_5.q
index 5f2c840..754d6e2 100644
--- a/ql/src/test/queries/clientpositive/explainanalyze_5.q
+++ b/ql/src/test/queries/clientpositive/explainanalyze_5.q
@@ -6,25 +6,25 @@ set hive.map.aggr=false;
 
 set hive.stats.column.autogather=true;
 
-drop table src_stats;
+drop table src_stats_n0;
 
-create table src_stats as select * from src;
+create table src_stats_n0 as select * from src;
 
-explain analyze analyze table src_stats compute statistics;
+explain analyze analyze table src_stats_n0 compute statistics;
 
-explain analyze analyze table src_stats compute statistics for columns;
+explain analyze analyze table src_stats_n0 compute statistics for columns;
 
-drop table src_multi2;
+drop table src_multi2_n7;
 
-create table src_multi2 like src;
+create table src_multi2_n7 like src;
 
-explain analyze insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key;
+explain analyze insert overwrite table src_multi2_n7 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key;
 
 select count(*) from (select * from src union select * from src1)subq;
 
-insert overwrite table src_multi2 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key;
+insert overwrite table src_multi2_n7 select subq.key, src.value from (select * from src union select * from src1)subq join src on subq.key=src.key;
 
-describe formatted src_multi2;
+describe formatted src_multi2_n7;
 
 
 set hive.mapred.mode=nonstrict;
@@ -35,27 +35,27 @@ set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
 -- SORT_QUERY_RESULTS
 
-create table acid_uami(i int,
+create table acid_uami_n2(i int,
                  de decimal(5,2),
                  de decimal(5,2),
                  vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
-insert into table acid_uami values 
+insert into table acid_uami_n2 values 
     (1, 109.23, 'mary had a little lamb'),
     (6553, 923.19, 'its fleece was white as snow');
 
-insert into table acid_uami values 
+insert into table acid_uami_n2 values 
     (10, 119.23, 'and everywhere that mary went'),
     (65530, 823.19, 'the lamb was sure to go');
 
-select * from acid_uami order by de;
+select * from acid_uami_n2 order by de;
 
-explain analyze update acid_uami set de = 3.14 where de = 109.23 or de = 119.23;
+explain analyze update acid_uami_n2 set de = 3.14 where de = 109.23 or de = 119.23;
 
-select * from acid_uami order by de;
+select * from acid_uami_n2 order by de;
 
-update acid_uami set de = 3.14 where de = 109.23 or de = 119.23;
+update acid_uami_n2 set de = 3.14 where de = 109.23 or de = 119.23;
 
-select * from acid_uami order by de;
+select * from acid_uami_n2 order by de;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
@@ -64,7 +64,7 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/delete_orig_table;
 dfs -copyFromLocal ../../data/files/alltypesorc ${system:test.tmp.dir}/delete_orig_table/00000_0; 
 
-create table acid_dot(
+create table acid_dot_n0(
     ctinyint TINYINT,
     csmallint SMALLINT,
     cint INT,
@@ -78,14 +78,14 @@ create table acid_dot(
     cboolean1 BOOLEAN,
     cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/delete_orig_table' TBLPROPERTIES ('transactional'='true');
 
-select count(*) from acid_dot;
+select count(*) from acid_dot_n0;
 
-explain analyze delete from acid_dot where cint < -1070551679;
+explain analyze delete from acid_dot_n0 where cint < -1070551679;
 
-select count(*) from acid_dot;
+select count(*) from acid_dot_n0;
 
-delete from acid_dot where cint < -1070551679;
+delete from acid_dot_n0 where cint < -1070551679;
 
-select count(*) from acid_dot;
+select count(*) from acid_dot_n0;
 
 dfs -rmr ${system:test.tmp.dir}/delete_orig_table;

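[Note: the UPDATE and DELETE statements in this test compile only because the target tables meet Hive's ACID prerequisites of this era: ORC storage, bucketing, 'transactional'='true', and a session using DbTxnManager with concurrency enabled. A minimal sketch with a placeholder table:

set hive.support.concurrency=true;
set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
create table acid_demo (i int, vc varchar(128))
  clustered by (i) into 2 buckets
  stored as orc tblproperties ('transactional'='true');
insert into acid_demo values (1, 'mary'), (2, 'lamb');
update acid_demo set vc = 'sheep' where i = 2;
delete from acid_demo where i = 1;
]
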
http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explainuser_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_1.q b/ql/src/test/queries/clientpositive/explainuser_1.q
index 0772fb9..23bdb79 100644
--- a/ql/src/test/queries/clientpositive/explainuser_1.q
+++ b/ql/src/test/queries/clientpositive/explainuser_1.q
@@ -13,26 +13,26 @@ set hive.strict.checks.bucketing=false;
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=true;
 
-explain create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc;
-create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc;
+explain create table src_orc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as orc;
+create table src_orc_merge_test_part_n1(key int, value string) partitioned by (ds string, ts string) stored as orc;
 
-alter table src_orc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-desc extended src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+alter table src_orc_merge_test_part_n1 add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+desc extended src_orc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
 
-explain insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src;
-insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src;
-explain insert into table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100;
+explain insert overwrite table src_orc_merge_test_part_n1 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src;
+insert overwrite table src_orc_merge_test_part_n1 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src;
+explain insert into table src_orc_merge_test_part_n1 partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src limit 100;
 
-explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+explain select count(1) from src_orc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
 
-alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate;
+alter table src_orc_merge_test_part_n1 partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate;
 
 
-explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
-explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+explain select count(1) from src_orc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
+explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part_n1 where ds='2012-01-03' and ts='2012-01-03+14:46:31';
 
-drop table src_orc_merge_test_part;
+drop table src_orc_merge_test_part_n1;
 
 set hive.auto.convert.join=true;
 
@@ -128,7 +128,7 @@ having not exists
   )
 ;
 
-create view cv1 as 
+create view cv1_n5 as 
 select * 
 from src_cbo b 
 where exists
@@ -137,7 +137,7 @@ where exists
   where b.value = a.value  and a.key = b.key and a.value > 'val_9')
 ;
 
-explain select * from cv1;
+explain select * from cv1_n5;
 
 explain select * 
 from (select * 
@@ -250,59 +250,59 @@ FROM (select x.key AS key, count(1) AS cnt
       FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key)
       GROUP BY x.key) tmp;
 
-explain create table abcd (a int, b int, c int, d int);
-create table abcd (a int, b int, c int, d int);
-LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd;
+explain create table abcd_n1 (a int, b int, c int, d int);
+create table abcd_n1 (a int, b int, c int, d int);
+LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd_n1;
 
 set hive.map.aggr=true;
-explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
+explain select a, count(distinct b), count(distinct c), sum(d) from abcd_n1 group by a;
 
 set hive.map.aggr=false;
-explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a;
+explain select a, count(distinct b), count(distinct c), sum(d) from abcd_n1 group by a;
 
-explain create table src_rc_merge_test(key int, value string) stored as rcfile;
-create table src_rc_merge_test(key int, value string) stored as rcfile;
+explain create table src_rc_merge_test_n0(key int, value string) stored as rcfile;
+create table src_rc_merge_test_n0(key int, value string) stored as rcfile;
 
-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test;
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_n0;
 
 set hive.exec.compress.output = true;
 
-explain create table tgt_rc_merge_test(key int, value string) stored as rcfile;
-create table tgt_rc_merge_test(key int, value string) stored as rcfile;
-insert into table tgt_rc_merge_test select * from src_rc_merge_test;
+explain create table tgt_rc_merge_test_n0(key int, value string) stored as rcfile;
+create table tgt_rc_merge_test_n0(key int, value string) stored as rcfile;
+insert into table tgt_rc_merge_test_n0 select * from src_rc_merge_test_n0;
 
-show table extended like `tgt_rc_merge_test`;
+show table extended like `tgt_rc_merge_test_n0`;
 
-explain select count(1) from tgt_rc_merge_test;
-explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test;
+explain select count(1) from tgt_rc_merge_test_n0;
+explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n0;
 
-alter table tgt_rc_merge_test concatenate;
+alter table tgt_rc_merge_test_n0 concatenate;
 
-show table extended like `tgt_rc_merge_test`;
+show table extended like `tgt_rc_merge_test_n0`;
 
-explain select count(1) from tgt_rc_merge_test;
-explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test;
+explain select count(1) from tgt_rc_merge_test_n0;
+explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test_n0;
 
-drop table src_rc_merge_test;
-drop table tgt_rc_merge_test;
+drop table src_rc_merge_test_n0;
+drop table tgt_rc_merge_test_n0;
 
 explain select src.key from src cross join src src2;
 
 
-explain create table nzhang_Tmp(a int, b string);
-create table nzhang_Tmp(a int, b string);
+explain create table nzhang_Tmp_n1(a int, b string);
+create table nzhang_Tmp_n1(a int, b string);
 
-explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
-create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10;
+explain create table nzhang_CTAS1_n1 as select key k, value from src sort by k, value limit 10;
+create table nzhang_CTAS1_n1 as select key k, value from src sort by k, value limit 10;
 
 
-explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
+explain create table nzhang_ctas3_n1 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
 
-create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
+create table nzhang_ctas3_n1 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10;
 
-explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
+explain create table if not exists nzhang_ctas3_n1 as select key, value from src sort by key, value limit 2;
 
-create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2;
+create table if not exists nzhang_ctas3_n1 as select key, value from src sort by key, value limit 2;
 
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
@@ -324,70 +324,70 @@ select src1.key as k1, src1.value as v1,
   SORT BY k1, v1, k2, v2;
 
 
-CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1;
+CREATE TABLE myinput1_n7(key int, value int);
+LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1_n7;
 
-explain select * from myinput1 a join myinput1 b on a.key<=>b.value;
+explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value;
 
-explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key;
+explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value join myinput1_n7 c on a.key=c.key;

-explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key;
+explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value join myinput1_n7 c on a.key<=>c.key;

-explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value;
+explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value AND a.value=b.key join myinput1_n7 c on a.key<=>c.key AND a.value=c.value;

-explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value;
+explain select * from myinput1_n7 a join myinput1_n7 b on a.key<=>b.value AND a.value<=>b.key join myinput1_n7 c on a.key<=>c.key AND a.value<=>c.value;
 
-explain select * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value;
-explain select * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value;
-explain select * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value;
+explain select * FROM myinput1_n7 a LEFT OUTER JOIN myinput1_n7 b ON a.key<=>b.value;
+explain select * FROM myinput1_n7 a RIGHT OUTER JOIN myinput1_n7 b ON a.key<=>b.value;
+explain select * FROM myinput1_n7 a FULL OUTER JOIN myinput1_n7 b ON a.key<=>b.value;
 
-explain select /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value;
+explain select /*+ MAPJOIN(b) */ * FROM myinput1_n7 a JOIN myinput1_n7 b ON a.key<=>b.value;
 
-CREATE TABLE smb_input(key int, value int);
-LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input;
-LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input;
+CREATE TABLE smb_input_n0(key int, value int);
+LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input_n0;
+LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input_n0;
 
 
 ;
 
-CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
-CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
+CREATE TABLE smb_input1_n2(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE smb_input2_n2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
 
-from smb_input
-insert overwrite table smb_input1 select *
-insert overwrite table smb_input2 select *;
+from smb_input_n0
+insert overwrite table smb_input1_n2 select *
+insert overwrite table smb_input2_n2 select *;
 
 SET hive.optimize.bucketmapjoin = true;
 SET hive.optimize.bucketmapjoin.sortedmerge = true;
 SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 
-analyze table smb_input1 compute statistics;
+analyze table smb_input1_n2 compute statistics;
 
-explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key;
-explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value;
-explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key;
-explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key;
-explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key;
+explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n2 a JOIN smb_input1_n2 b ON a.key <=> b.key;
+explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n2 a JOIN smb_input1_n2 b ON a.key <=> b.key AND a.value <=> b.value;
+explain select /*+ MAPJOIN(a) */ * FROM smb_input1_n2 a RIGHT OUTER JOIN smb_input1_n2 b ON a.key <=> b.key;
+explain select /*+ MAPJOIN(b) */ * FROM smb_input1_n2 a JOIN smb_input1_n2 b ON a.key <=> b.key;
+explain select /*+ MAPJOIN(b) */ * FROM smb_input1_n2 a LEFT OUTER JOIN smb_input1_n2 b ON a.key <=> b.key;
 
-drop table sales;
-drop table things;
+drop table sales_n0;
+drop table things_n0;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
-CREATE TABLE sales (name STRING, id INT)
+CREATE TABLE sales_n0 (name STRING, id INT)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
 
-CREATE TABLE things (id INT, name STRING) partitioned by (ds string)
+CREATE TABLE things_n0 (id INT, name STRING) partitioned by (ds string)
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
 
-load data local inpath '../../data/files/sales.txt' INTO TABLE sales;
-load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23');
-load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24');
+load data local inpath '../../data/files/sales.txt' INTO TABLE sales_n0;
+load data local inpath '../../data/files/things.txt' INTO TABLE things_n0 partition(ds='2011-10-23');
+load data local inpath '../../data/files/things2.txt' INTO TABLE things_n0 partition(ds='2011-10-24');
 
-explain select name,id FROM sales LEFT SEMI JOIN things ON (sales.id = things.id);
+explain select name,id FROM sales_n0 LEFT SEMI JOIN things_n0 ON (sales_n0.id = things_n0.id);
 
-drop table sales;
-drop table things;
+drop table sales_n0;
+drop table things_n0;
 
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
@@ -525,13 +525,13 @@ order by p_name
 ;
 
   
-explain create view IF NOT EXISTS mfgr_price_view as 
+explain create view IF NOT EXISTS mfgr_price_view_n3 as 
 select p_mfgr, p_brand, 
 sum(p_retailprice) as s 
 from part 
 group by p_mfgr, p_brand;
 
-CREATE TABLE part_4( 
+CREATE TABLE part_4_n1( 
 p_mfgr STRING, 
 p_name STRING, 
 p_size INT, 
@@ -539,7 +539,7 @@ r INT,
 dr INT, 
 s DOUBLE);
 
-CREATE TABLE part_5( 
+CREATE TABLE part_5_n1( 
 p_mfgr STRING, 
 p_name STRING, 
 p_size INT, 
@@ -553,11 +553,11 @@ explain
 from noop(on part 
 partition by p_mfgr 
 order by p_name) 
-INSERT OVERWRITE TABLE part_4 select p_mfgr, p_name, p_size, 
+INSERT OVERWRITE TABLE part_4_n1 select p_mfgr, p_name, p_size, 
 rank() over (distribute by p_mfgr sort by p_name) as r, 
 dense_rank() over (distribute by p_mfgr sort by p_name) as dr, 
 sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row)  as s  
-INSERT OVERWRITE TABLE part_5 select  p_mfgr,p_name, p_size,  
+INSERT OVERWRITE TABLE part_5_n1 select  p_mfgr,p_name, p_size,  
 round(sum(p_size) over (distribute by p_mfgr sort by p_size range between 5 preceding and current row),1) as s2,
 rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as r, 
 dense_rank() over (distribute by p_mfgr sort by p_mfgr, p_name) as dr, 
@@ -622,41 +622,41 @@ explain select explode(array('a', 'b'));
 set hive.optimize.skewjoin = true;
 set hive.skewjoin.key = 2;
 
-CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE;
-CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE T1_n119(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T2_n70(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T3_n26(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE T4_n15(key STRING, val STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n16(key INT, value STRING) STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n119;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2_n70;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3_n26;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4_n15;
 
 
 explain
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value;
+INSERT OVERWRITE TABLE dest_j1_n16 select src1.key, src2.value;
 
 FROM src src1 JOIN src src2 ON (src1.key = src2.key)
-INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value;
+INSERT OVERWRITE TABLE dest_j1_n16 select src1.key, src2.value;
 
 
 
 explain
 select /*+ STREAMTABLE(a) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key;
+FROM T1_n119 a JOIN T2_n70 b ON a.key = b.key
+          JOIN T3_n26 c ON b.key = c.key
+          JOIN T4_n15 d ON c.key = d.key;
 
 explain
 select /*+ STREAMTABLE(a,c) */ *
-FROM T1 a JOIN T2 b ON a.key = b.key
-          JOIN T3 c ON b.key = c.key
-          JOIN T4 d ON c.key = d.key;
+FROM T1_n119 a JOIN T2_n70 b ON a.key = b.key
+          JOIN T3_n26 c ON b.key = c.key
+          JOIN T4_n15 d ON c.key = d.key;
 
-explain FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
-FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+explain FROM T1_n119 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+FROM T1_n119 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
 
 explain 
 select * FROM 
@@ -666,16 +666,16 @@ JOIN
 ON (x.key = Y.key);
 
 
-explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val;
+explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n119 k join T1_n119 v on k.key=v.val;

-explain select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key;
+explain select sum(hash(k.key)), sum(hash(v.val)) from T1_n119 k join T1_n119 v on k.key=v.key;
 
-explain select count(1) from  T1 a join T1 b on a.key = b.key;
+explain select count(1) from  T1_n119 a join T1_n119 b on a.key = b.key;
 
-explain FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+explain FROM T1_n119 a LEFT OUTER JOIN T2_n70 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));

-explain FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+explain FROM T1_n119 a RIGHT OUTER JOIN T2_n70 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));

-explain FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));
+explain FROM T1_n119 a FULL OUTER JOIN T2_n70 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key));

-explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key;
+explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1_n119 k left outer join T1_n119 v on k.key+1=v.key;
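
[Note on the null-safe joins exercised above: the <=> operator is Hive's null-safe equality comparison. Unlike =, it evaluates to true when both operands are NULL, so rows with NULL join keys can still match. A minimal sketch, assuming a hypothetical table t(val int) that is not part of this patch:

    -- matches ordinary equal values as well as pairs of NULLs
    select a.val, b.val
    from t a join t b on a.val <=> b.val;

With a plain = join condition, the NULL-keyed rows would be dropped.]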

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/explainuser_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_2.q b/ql/src/test/queries/clientpositive/explainuser_2.q
index 1b32b47..1423cc7 100644
--- a/ql/src/test/queries/clientpositive/explainuser_2.q
+++ b/ql/src/test/queries/clientpositive/explainuser_2.q
@@ -8,15 +8,15 @@ set hive.metastore.aggregate.stats.cache.enabled=false;
 
 -- SORT_QUERY_RESULTS
 
-CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE dest_j1_n25(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;

-CREATE TABLE ss(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE;
+CREATE TABLE ss_n1(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE;
 
 CREATE TABLE sr(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE;

 CREATE TABLE cs(k1 STRING,v1 STRING,k2 STRING,v2 STRING,k3 STRING,v3 STRING) STORED AS TEXTFILE;
 
-INSERT OVERWRITE TABLE ss
+INSERT OVERWRITE TABLE ss_n1
 SELECT x.key,x.value,y.key,y.value,z.key,z.value
 FROM src1 x 
 JOIN src y ON (x.key = y.key) 
@@ -35,8 +35,8 @@ JOIN src y ON (x.key = y.key)
 JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08');
 
 
-ANALYZE TABLE ss COMPUTE STATISTICS;
-ANALYZE TABLE ss COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3;
+ANALYZE TABLE ss_n1 COMPUTE STATISTICS;
+ANALYZE TABLE ss_n1 COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3;
 
 ANALYZE TABLE sr COMPUTE STATISTICS;
 ANALYZE TABLE sr COMPUTE STATISTICS FOR COLUMNS k1,v1,k2,v2,k3,v3;
@@ -53,20 +53,20 @@ JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
 EXPLAIN
 select 
-ss.k1,sr.k2,cs.k3,count(ss.v1),count(sr.v2),count(cs.v3)
+ss_n1.k1,sr.k2,cs.k3,count(ss_n1.v1),count(sr.v2),count(cs.v3)
 FROM 
-ss,sr,cs,src d1,src d2,src d3,src1,srcpart
+ss_n1,sr,cs,src d1,src d2,src d3,src1,srcpart
 where
-    ss.k1 = d1.key 
+    ss_n1.k1 = d1.key 
 and sr.k1 = d2.key 
 and cs.k1 = d3.key 
-and ss.k2 = sr.k2
-and ss.k3 = sr.k3
-and ss.v1 = src1.value
-and ss.v2 = srcpart.value
+and ss_n1.k2 = sr.k2
+and ss_n1.k3 = sr.k3
+and ss_n1.v1 = src1.value
+and ss_n1.v2 = srcpart.value
 and sr.v2 = cs.v2
 and sr.v3 = cs.v3
-and ss.v3='ssv3'
+and ss_n1.v3='ssv3'
 and sr.v1='srv1'
 and src1.key = 'src1key'
 and srcpart.key = 'srcpartkey'
@@ -74,9 +74,9 @@ and d1.value = 'd1value'
 and d2.value in ('2000Q1','2000Q2','2000Q3')
 and d3.value in ('2000Q1','2000Q2','2000Q3')
 group by 
-ss.k1,sr.k2,cs.k3
+ss_n1.k1,sr.k2,cs.k3
 order by 
-ss.k1,sr.k2,cs.k3
+ss_n1.k1,sr.k2,cs.k3
 limit 100;
 
 explain
@@ -115,20 +115,20 @@ JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11);
 
 EXPLAIN
 select 
-ss.k1,sr.k2,cs.k3,count(ss.v1),count(sr.v2),count(cs.v3)
+ss_n1.k1,sr.k2,cs.k3,count(ss_n1.v1),count(sr.v2),count(cs.v3)
 FROM 
-ss,sr,cs,src d1,src d2,src d3,src1,srcpart
+ss_n1,sr,cs,src d1,src d2,src d3,src1,srcpart
 where
-    ss.k1 = d1.key 
+    ss_n1.k1 = d1.key 
 and sr.k1 = d2.key 
 and cs.k1 = d3.key 
-and ss.k2 = sr.k2
-and ss.k3 = sr.k3
-and ss.v1 = src1.value
-and ss.v2 = srcpart.value
+and ss_n1.k2 = sr.k2
+and ss_n1.k3 = sr.k3
+and ss_n1.v1 = src1.value
+and ss_n1.v2 = srcpart.value
 and sr.v2 = cs.v2
 and sr.v3 = cs.v3
-and ss.v3='ssv3'
+and ss_n1.v3='ssv3'
 and sr.v1='srv1'
 and src1.key = 'src1key'
 and srcpart.key = 'srcpartkey'
@@ -136,9 +136,9 @@ and d1.value = 'd1value'
 and d2.value in ('2000Q1','2000Q2','2000Q3')
 and d3.value in ('2000Q1','2000Q2','2000Q3')
 group by 
-ss.k1,sr.k2,cs.k3
+ss_n1.k1,sr.k2,cs.k3
 order by 
-ss.k1,sr.k2,cs.k3
+ss_n1.k1,sr.k2,cs.k3
 limit 100;
 
 explain
@@ -169,31 +169,31 @@ set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;
 set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
 
-CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_n22(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part_n14 (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part_n23 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_n22 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin_n22 partition(ds='2008-04-08');
 
-load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part_n23 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part_n23 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part_n23 partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part_n23 partition(ds='2008-04-08');
 
 
 
 set hive.optimize.bucketingsorting=false;
-insert overwrite table tab_part partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin_part;
+insert overwrite table tab_part_n14 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part_n23;
 
-CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab_n15(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab_n15 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n22;
 
-CREATE TABLE tab2(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-insert overwrite table tab2 partition (ds='2008-04-08')
-select key,value from srcbucket_mapjoin;
+CREATE TABLE tab2_n7(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab2_n7 partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_n22;
 
 set hive.convert.join.bucket.mapjoin.tez = false;
 set hive.auto.convert.sortmerge.join = true;
@@ -201,28 +201,28 @@ set hive.auto.convert.sortmerge.join = true;
 set hive.auto.convert.join.noconditionaltask.size=500;
 
 explain 
-select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key;
+select s1.key as key, s1.value as value from tab_n15 s1 join tab_n15 s3 on s1.key=s3.key;
 
 explain 
-select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value;
+select s1.key as key, s1.value as value from tab_n15 s1 join tab_n15 s3 on s1.key=s3.key join tab_n15 s2 on s1.value=s2.value;
 
 explain 
-select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key;
+select s1.key as key, s1.value as value from tab_n15 s1 join tab2_n7 s3 on s1.key=s3.key;
 
 explain 
-select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key join tab2 s2 on s1.value=s2.value;
+select s1.key as key, s1.value as value from tab_n15 s1 join tab2_n7 s3 on s1.key=s3.key join tab2_n7 s2 on s1.value=s2.value;
 
 explain
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+select count(*) from (select s1.key as key, s1.value as value from tab_n15 s1 join tab_n15 s3 on s1.key=s3.key
 UNION  ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key);
+select s2.key as key, s2.value as value from tab_n15 s2
+) a_n19 join tab_part_n14 b_n15 on (a_n19.key = b_n15.key);
 
 explain
-select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value
+select count(*) from (select s1.key as key, s1.value as value from tab_n15 s1 join tab_n15 s3 on s1.key=s3.key join tab_n15 s2 on s1.value=s2.value
 UNION  ALL
-select s2.key as key, s2.value as value from tab s2
-) a join tab_part b on (a.key = b.key);set hive.explain.user=true;
+select s2.key as key, s2.value as value from tab_n15 s2
+) a_n19 join tab_part_n14 b_n15 on (a_n19.key = b_n15.key);set hive.explain.user=true;
 
 explain
 SELECT x.key, y.value
@@ -250,9 +250,9 @@ SELECT x.key, y.value
 FROM src1 x JOIN src1 y ON (x.key = y.key) 
 JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value);
 
-CREATE TABLE a(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE b(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE c(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE a_n19(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE b_n15(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE c_n4(key STRING, value STRING) STORED AS TEXTFILE;
 
 explain
 from
@@ -269,9 +269,9 @@ SELECT x.key, y.value
 FROM src1 x JOIN src1 y ON (x.key = y.key) 
 JOIN (select key, value from src1 union all select key, value from src union all select key, value from src union all select key, value from src)z ON (x.value = z.value)
 ) tmp
-INSERT OVERWRITE TABLE a SELECT tmp.key, tmp.value
-INSERT OVERWRITE TABLE b SELECT tmp.key, tmp.value
-INSERT OVERWRITE TABLE c SELECT tmp.key, tmp.value;
+INSERT OVERWRITE TABLE a_n19 SELECT tmp.key, tmp.value
+INSERT OVERWRITE TABLE b_n15 SELECT tmp.key, tmp.value
+INSERT OVERWRITE TABLE c_n4 SELECT tmp.key, tmp.value;
 
 explain
 FROM
@@ -285,50 +285,50 @@ union
 SELECT x.key as key, y.value as value from src1 x JOIN src1 y ON (x.key = y.key) 
 JOIN (select key, value from src1 union select key, value from src union select key, value from src union select key, value from src)z ON (x.value = z.value)
 ) tmp
-INSERT OVERWRITE TABLE a SELECT tmp.key, tmp.value
-INSERT OVERWRITE TABLE b SELECT tmp.key, tmp.value
-INSERT OVERWRITE TABLE c SELECT tmp.key, tmp.value;
+INSERT OVERWRITE TABLE a_n19 SELECT tmp.key, tmp.value
+INSERT OVERWRITE TABLE b_n15 SELECT tmp.key, tmp.value
+INSERT OVERWRITE TABLE c_n4 SELECT tmp.key, tmp.value;
 
 
-CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE;
-CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST1_n172(key STRING, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2_n43(key STRING, val1 STRING, val2 STRING) STORED AS TEXTFILE;
 
 explain 
 FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1
                          UNION DISTINCT  
-      select s2.key as key, s2.value as value from src s2) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value;
+      select s2.key as key, s2.value as value from src s2) unionsrc_n4
+INSERT OVERWRITE TABLE DEST1_n172 SELECT unionsrc_n4.key, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) GROUP BY unionsrc_n4.key
+INSERT OVERWRITE TABLE DEST2_n43 SELECT unionsrc_n4.key, unionsrc_n4.value, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) GROUP BY unionsrc_n4.key, unionsrc_n4.value;
 
-EXPLAIN FROM UNIQUEJOIN PRESERVE src a (a.key), PRESERVE src1 b (b.key), PRESERVE srcpart c (c.key) SELECT a.key, b.key, c.key;
+EXPLAIN FROM UNIQUEJOIN PRESERVE src a_n19 (a_n19.key), PRESERVE src1 b_n15 (b_n15.key), PRESERVE srcpart c_n4 (c_n4.key) SELECT a_n19.key, b_n15.key, c_n4.key;
 
 set hive.entity.capture.transform=true;
 
 EXPLAIN
 SELECT 
-TRANSFORM(a.key, a.value) USING 'cat' AS (tkey, tvalue)
-FROM src a join src b
-on a.key = b.key;
+TRANSFORM(a_n19.key, a_n19.value) USING 'cat' AS (tkey, tvalue)
+FROM src a_n19 join src b_n15
+on a_n19.key = b_n15.key;
 
 explain
 FROM (
       select key, value from (
       select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1
                          UNION all 
-      select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub
+      select s2.key as key, s2.value as value, 'tst1' as value2 from src s2) unionsub_n15
                          UNION all
       select key, value from src s0
-                             ) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) 
-GROUP BY unionsrc.key, unionsrc.value;
+                             ) unionsrc_n4
+INSERT OVERWRITE TABLE DEST1_n172 SELECT unionsrc_n4.key, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) GROUP BY unionsrc_n4.key
+INSERT OVERWRITE TABLE DEST2_n43 SELECT unionsrc_n4.key, unionsrc_n4.value, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) 
+GROUP BY unionsrc_n4.key, unionsrc_n4.value;
 
 explain
 FROM (
       select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1
                          UNION all 
       select s2.key as key, s2.value as value, 'tst1' as value2 from src s2
-                             ) unionsrc
-INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key
-INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) 
-GROUP BY unionsrc.key, unionsrc.value;
+                             ) unionsrc_n4
+INSERT OVERWRITE TABLE DEST1_n172 SELECT unionsrc_n4.key, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) GROUP BY unionsrc_n4.key
+INSERT OVERWRITE TABLE DEST2_n43 SELECT unionsrc_n4.key, unionsrc_n4.value, COUNT(DISTINCT SUBSTR(unionsrc_n4.value,5)) 
+GROUP BY unionsrc_n4.key, unionsrc_n4.value;
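
[Note on the multi-insert statements above: HiveQL lets a single FROM clause feed several INSERT OVERWRITE targets in one statement, so the source is scanned only once. A minimal sketch, using the standard test table src and two hypothetical targets dest_a and dest_b that are not part of this patch:

    from src
    insert overwrite table dest_a select key
    insert overwrite table dest_b select value;]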
