http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_llap_counters.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_llap_counters.q b/ql/src/test/queries/clientpositive/orc_llap_counters.q
index 1136b55..9f8e3bb 100644
--- a/ql/src/test/queries/clientpositive/orc_llap_counters.q
+++ b/ql/src/test/queries/clientpositive/orc_llap_counters.q
@@ -8,7 +8,7 @@ SET hive.llap.io.enabled=true;
 SET hive.map.aggr=false;
 -- disabling map side aggregation as that can lead to different intermediate record counts
 
-CREATE TABLE staging(t tinyint,
+CREATE TABLE staging_n6(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -22,10 +22,10 @@ CREATE TABLE staging(t tinyint,
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
-LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n6;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging_n6;
 
-CREATE TABLE orc_ppd_staging(t tinyint,
+CREATE TABLE orc_ppd_staging_n0(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -40,14 +40,14 @@ CREATE TABLE orc_ppd_staging(t tinyint,
            bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
 
-insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging order by t, s;
+insert overwrite table orc_ppd_staging_n0 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n6 order by t, s;
 
 -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
 -- which makes it hard to test bloom filters
-insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging limit 1;
-insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging limit 1;
+insert into orc_ppd_staging_n0 select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging_n6 limit 1;
+insert into orc_ppd_staging_n0 select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging_n6 limit 1;
 
-CREATE TABLE orc_ppd(t tinyint,
+CREATE TABLE orc_ppd_n1(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -62,9 +62,9 @@ CREATE TABLE orc_ppd(t tinyint,
            bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
 
-insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by t, s;
+insert overwrite table orc_ppd_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n0 order by t, s;
 
-describe formatted orc_ppd;
+describe formatted orc_ppd_n1;
 
 SET hive.fetch.task.conversion=none;
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
@@ -75,50 +75,50 @@ SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrint
 -- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
 
 -- INPUT_RECORDS: 2100 (all row groups)
-select count(*) from orc_ppd;
+select count(*) from orc_ppd_n1;
 
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n1 where t > 127;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
-select count(*) from orc_ppd where t <=> 50;
-select count(*) from orc_ppd where t <=> 100;
+select count(*) from orc_ppd_n1 where t = 55;
+select count(*) from orc_ppd_n1 where t <=> 50;
+select count(*) from orc_ppd_n1 where t <=> 100;
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = "54";
+select count(*) from orc_ppd_n1 where t = "54";
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = -10.0;
+select count(*) from orc_ppd_n1 where t = -10.0;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = cast(53 as float);
-select count(*) from orc_ppd where t = cast(53 as double);
+select count(*) from orc_ppd_n1 where t = cast(53 as float);
+select count(*) from orc_ppd_n1 where t = cast(53 as double);
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t < 100;
+select count(*) from orc_ppd_n1 where t < 100;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t < 100 and t > 98;
+select count(*) from orc_ppd_n1 where t < 100 and t > 98;
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t <= 100;
+select count(*) from orc_ppd_n1 where t <= 100;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t is null;
+select count(*) from orc_ppd_n1 where t is null;
 
 -- INPUT_RECORDS: 1100 (2 row groups)
-select count(*) from orc_ppd where t in (5, 120);
+select count(*) from orc_ppd_n1 where t in (5, 120);
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t between 60 and 80;
+select count(*) from orc_ppd_n1 where t between 60 and 80;
 
 -- bloom filter tests
 -- INPUT_RECORDS: 0
-select count(*) from orc_ppd where t = -100;
-select count(*) from orc_ppd where t <=> -100;
-select count(*) from orc_ppd where t = 125;
-select count(*) from orc_ppd where t IN (-100, 125, 200);
+select count(*) from orc_ppd_n1 where t = -100;
+select count(*) from orc_ppd_n1 where t <=> -100;
+select count(*) from orc_ppd_n1 where t = 125;
+select count(*) from orc_ppd_n1 where t IN (-100, 125, 200);
 
 -- Row group statistics for column s:
 -- Entry 0: count: 1000 hasNull: false min:  max: zach young sum: 12907 positions: 0,0,0
@@ -126,59 +126,59 @@ select count(*) from orc_ppd where t IN (-100, 125, 200);
 -- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373
 
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where s > "zzz";
+select count(*) from orc_ppd_n1 where s > "zzz";
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where s = "zach young";
-select count(*) from orc_ppd where s <=> "zach zipper";
-select count(*) from orc_ppd where s <=> "";
+select count(*) from orc_ppd_n1 where s = "zach young";
+select count(*) from orc_ppd_n1 where s <=> "zach zipper";
+select count(*) from orc_ppd_n1 where s <=> "";
 
 -- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s is null;
+select count(*) from orc_ppd_n1 where s is null;
 
 -- INPUT_RECORDS: 2100
-select count(*) from orc_ppd where s is not null;
+select count(*) from orc_ppd_n1 where s is not null;
 
 -- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s = cast("zach young" as char(50));
+select count(*) from orc_ppd_n1 where s = cast("zach young" as char(50));
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where s = cast("zach young" as char(10));
-select count(*) from orc_ppd where s = cast("zach young" as varchar(10));
-select count(*) from orc_ppd where s = cast("zach young" as varchar(50));
+select count(*) from orc_ppd_n1 where s = cast("zach young" as char(10));
+select count(*) from orc_ppd_n1 where s = cast("zach young" as varchar(10));
+select count(*) from orc_ppd_n1 where s = cast("zach young" as varchar(50));
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s < "b";
+select count(*) from orc_ppd_n1 where s < "b";
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s > "alice" and s < "bob";
+select count(*) from orc_ppd_n1 where s > "alice" and s < "bob";
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s in ("alice allen", "");
+select count(*) from orc_ppd_n1 where s in ("alice allen", "");
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s between "" and "alice allen";
+select count(*) from orc_ppd_n1 where s between "" and "alice allen";
 
 -- INPUT_RECORDS: 100 (1 row group)
-select count(*) from orc_ppd where s between "zz" and "zzz";
+select count(*) from orc_ppd_n1 where s between "zz" and "zzz";
 
 -- INPUT_RECORDS: 1100 (2 row groups)
-select count(*) from orc_ppd where s between "zach zipper" and "zzz";
+select count(*) from orc_ppd_n1 where s between "zach zipper" and "zzz";
 
 -- bloom filter tests
 -- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s = "hello world";
-select count(*) from orc_ppd where s <=> "apache hive";
-select count(*) from orc_ppd where s IN ("a", "z");
+select count(*) from orc_ppd_n1 where s = "hello world";
+select count(*) from orc_ppd_n1 where s <=> "apache hive";
+select count(*) from orc_ppd_n1 where s IN ("a", "z");
 
 -- INPUT_RECORDS: 100
-select count(*) from orc_ppd where s = "sarah ovid";
+select count(*) from orc_ppd_n1 where s = "sarah ovid";
 
 -- INPUT_RECORDS: 1100
-select count(*) from orc_ppd where s = "wendy king";
+select count(*) from orc_ppd_n1 where s = "wendy king";
 
 -- INPUT_RECORDS: 1000
-select count(*) from orc_ppd where s = "wendy king" and t < 0;
+select count(*) from orc_ppd_n1 where s = "wendy king" and t < 0;
 
 -- INPUT_RECORDS: 100
-select count(*) from orc_ppd where s = "wendy king" and t > 100;
+select count(*) from orc_ppd_n1 where s = "wendy king" and t > 100;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_llap_nonvector.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_llap_nonvector.q b/ql/src/test/queries/clientpositive/orc_llap_nonvector.q
index 6fd676d..4dfb259 100644
--- a/ql/src/test/queries/clientpositive/orc_llap_nonvector.q
+++ b/ql/src/test/queries/clientpositive/orc_llap_nonvector.q
@@ -12,12 +12,12 @@ SET hive.optimize.index.filter=true;
 set hive.auto.convert.join=false;
 set hive.fetch.task.conversion=none;
 
-DROP TABLE orc_create_staging;
+DROP TABLE orc_create_staging_n3;
 DROP TABLE orc_create_complex;
 DROP TABLE orc_llap_nonvector;
 
 
-CREATE TABLE orc_create_staging (
+CREATE TABLE orc_create_staging_n3 (
   str STRING,
   mp  MAP<STRING,STRING>,
   lst ARRAY<STRING>,
@@ -26,7 +26,7 @@ CREATE TABLE orc_create_staging (
     FIELDS TERMINATED BY '|'
     COLLECTION ITEMS TERMINATED BY ','
     MAP KEYS TERMINATED BY ':';
-LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging;
+LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n3;
 
 create table orc_llap_nonvector stored as orc as select *, rand(1234) rdm from alltypesorc order by rdm;
 
@@ -41,5 +41,5 @@ explain
 select cint, cstring1 from orc_llap_nonvector limit 1025;
 select cint, cstring1 from orc_llap_nonvector limit 1025;
 
-DROP TABLE orc_create_staging;
+DROP TABLE orc_create_staging_n3;
 DROP TABLE orc_llap_nonvector;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge1.q b/ql/src/test/queries/clientpositive/orc_merge1.q
index a4f3861..41e604f 100644
--- a/ql/src/test/queries/clientpositive/orc_merge1.q
+++ b/ql/src/test/queries/clientpositive/orc_merge1.q
@@ -20,28 +20,28 @@ set hive.merge.sparkfiles=false;
 
 -- SORT_QUERY_RESULTS
 
-DROP TABLE orcfile_merge1;
-DROP TABLE orcfile_merge1b;
-DROP TABLE orcfile_merge1c;
+DROP TABLE orcfile_merge1_n1;
+DROP TABLE orcfile_merge1b_n1;
+DROP TABLE orcfile_merge1c_n1;
 
-CREATE TABLE orcfile_merge1 (key INT, value STRING) 
+CREATE TABLE orcfile_merge1_n1 (key INT, value STRING) 
     PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
-CREATE TABLE orcfile_merge1b (key INT, value STRING) 
+CREATE TABLE orcfile_merge1b_n1 (key INT, value STRING) 
     PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
-CREATE TABLE orcfile_merge1c (key INT, value STRING) 
+CREATE TABLE orcfile_merge1c_n1 (key INT, value STRING) 
     PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
 
 -- merge disabled
 EXPLAIN
-    INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+    INSERT OVERWRITE TABLE orcfile_merge1_n1 PARTITION (ds='1', part)
         SELECT key, value, PMOD(HASH(key), 2) as part
         FROM src;
 
-INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1_n1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n1/ds=1/part=0/;
 
 set hive.merge.tezfiles=true;
 set hive.merge.mapfiles=true;
@@ -49,50 +49,50 @@ set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;
 -- auto-merge slow way
 EXPLAIN
-    INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+    INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part)
         SELECT key, value, PMOD(HASH(key), 2) as part
         FROM src;
 
-INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1b_n1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b_n1/ds=1/part=0/;
 
 set hive.merge.orcfile.stripe.level=true;
 -- auto-merge fast way
 EXPLAIN
-    INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+    INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part)
         SELECT key, value, PMOD(HASH(key), 2) as part
         FROM src;
 
-INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1c_n1 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c_n1/ds=1/part=0/;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- Verify
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge1 WHERE ds='1'
+    FROM orcfile_merge1_n1 WHERE ds='1'
 ) t;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge1b WHERE ds='1'
+    FROM orcfile_merge1b_n1 WHERE ds='1'
 ) t;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge1c WHERE ds='1'
+    FROM orcfile_merge1c_n1 WHERE ds='1'
 ) t;
 
-select count(*) from orcfile_merge1;
-select count(*) from orcfile_merge1b;
-select count(*) from orcfile_merge1c;
+select count(*) from orcfile_merge1_n1;
+select count(*) from orcfile_merge1b_n1;
+select count(*) from orcfile_merge1c_n1;
 
-DROP TABLE orcfile_merge1;
-DROP TABLE orcfile_merge1b;
-DROP TABLE orcfile_merge1c;
+DROP TABLE orcfile_merge1_n1;
+DROP TABLE orcfile_merge1b_n1;
+DROP TABLE orcfile_merge1c_n1;
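
For context on the three cases above: with merging disabled every mapper's output file is kept; the "slow way" (hive.merge.tezfiles and friends) runs an extra merge task that re-reads and rewrites rows; the "fast way" (hive.merge.orcfile.stripe.level=true) combines small ORC files by copying whole stripes without decoding them. A minimal standalone sketch of the fast path, using a hypothetical table name:

  -- hypothetical table, not part of this test suite
  CREATE TABLE merge_demo (key INT) STORED AS ORC;
  INSERT INTO merge_demo VALUES (1);   -- writes one small file
  INSERT INTO merge_demo VALUES (2);   -- writes a second small file
  SET hive.merge.orcfile.stripe.level=true;
  ALTER TABLE merge_demo CONCATENATE;  -- stripe-level merge into fewer files
  SELECT COUNT(*) FROM merge_demo;     -- row count unchanged (2)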

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge11.q b/ql/src/test/queries/clientpositive/orc_merge11.q
index 746ba21..d5add84 100644
--- a/ql/src/test/queries/clientpositive/orc_merge11.q
+++ b/ql/src/test/queries/clientpositive/orc_merge11.q
@@ -1,19 +1,19 @@
 set hive.vectorized.execution.enabled=false;
 
-DROP TABLE orcfile_merge1;
-DROP TABLE orc_split_elim;
+DROP TABLE orcfile_merge1_n2;
+DROP TABLE orc_split_elim_n0;
 
-create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_split_elim_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
 
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_n0;
 
-create table orcfile_merge1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096");
+create table orcfile_merge1_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc tblproperties("orc.compress.size"="4096");
 
-insert overwrite table orcfile_merge1 select * from orc_split_elim;
-insert into table orcfile_merge1 select * from orc_split_elim;
+insert overwrite table orcfile_merge1_n2 select * from orc_split_elim_n0;
+insert into table orcfile_merge1_n2 select * from orc_split_elim_n0;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n2/;
 
 set hive.merge.tezfiles=true;
 set hive.merge.mapfiles=true;
@@ -25,25 +25,25 @@ set tez.grouping.split-count=1;
 set hive.exec.orc.default.buffer.size=120;
 
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
-select * from orcfile_merge1 limit 1;
+select * from orcfile_merge1_n2 limit 1;
 SET hive.exec.post.hooks=;
 
 -- concatenate
-ALTER TABLE  orcfile_merge1 CONCATENATE;
+ALTER TABLE  orcfile_merge1_n2 CONCATENATE;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n2/;
 
-select count(*) from orc_split_elim;
+select count(*) from orc_split_elim_n0;
 -- will have double the number of rows
-select count(*) from orcfile_merge1;
+select count(*) from orcfile_merge1_n2;
 
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
-select * from orcfile_merge1 limit 1;
+select * from orcfile_merge1_n2 limit 1;
 SET hive.exec.post.hooks=;
 
 SET mapreduce.job.reduces=2;
 
-INSERT OVERWRITE DIRECTORY 'output' stored as orcfile select * from orc_split_elim;
+INSERT OVERWRITE DIRECTORY 'output' stored as orcfile select * from orc_split_elim_n0;
 
-DROP TABLE orc_split_elim;
-DROP TABLE orcfile_merge1;
+DROP TABLE orc_split_elim_n0;
+DROP TABLE orcfile_merge1_n2;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge2.q b/ql/src/test/queries/clientpositive/orc_merge2.q
index e6fdf39..2e50a35 100644
--- a/ql/src/test/queries/clientpositive/orc_merge2.q
+++ b/ql/src/test/queries/clientpositive/orc_merge2.q
@@ -8,27 +8,27 @@ set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.merge.sparkfiles=true;
 
-DROP TABLE orcfile_merge2a;
+DROP TABLE orcfile_merge2a_n0;
 
-CREATE TABLE orcfile_merge2a (key INT, value STRING)
+CREATE TABLE orcfile_merge2a_n0 (key INT, value STRING)
     PARTITIONED BY (one string, two string, three string)
     STORED AS ORC;
 
-EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a_n0 PARTITION (one='1', two, three)
     SELECT key, value, PMOD(HASH(key), 10) as two, 
         PMOD(HASH(value), 10) as three
     FROM src;
 
-INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three)
+INSERT OVERWRITE TABLE orcfile_merge2a_n0 PARTITION (one='1', two, three)
     SELECT key, value, PMOD(HASH(key), 10) as two, 
         PMOD(HASH(value), 10) as three
     FROM src;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge2a/one=1/two=0/three=2/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge2a_n0/one=1/two=0/three=2/;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge2a
+    FROM orcfile_merge2a_n0
 ) t;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -39,5 +39,5 @@ SELECT SUM(HASH(c)) FROM (
     FROM src
 ) t;
 
-DROP TABLE orcfile_merge2a;
+DROP TABLE orcfile_merge2a_n0;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge3.q b/ql/src/test/queries/clientpositive/orc_merge3.q
index 730e4a3..8b79545 100644
--- a/ql/src/test/queries/clientpositive/orc_merge3.q
+++ b/ql/src/test/queries/clientpositive/orc_merge3.q
@@ -6,38 +6,38 @@ set hive.explain.user=false;
 set hive.merge.orcfile.stripe.level=true;
 set hive.merge.sparkfiles=true;
 
-DROP TABLE orcfile_merge3a;
-DROP TABLE orcfile_merge3b;
+DROP TABLE orcfile_merge3a_n0;
+DROP TABLE orcfile_merge3b_n0;
 
-CREATE TABLE orcfile_merge3a (key int, value string) 
+CREATE TABLE orcfile_merge3a_n0 (key int, value string) 
     PARTITIONED BY (ds string) STORED AS TEXTFILE;
-CREATE TABLE orcfile_merge3b (key int, value string) STORED AS ORC;
+CREATE TABLE orcfile_merge3b_n0 (key int, value string) STORED AS ORC;
 
-INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='1')
+INSERT OVERWRITE TABLE orcfile_merge3a_n0 PARTITION (ds='1')
     SELECT * FROM src;
-INSERT OVERWRITE TABLE orcfile_merge3a PARTITION (ds='2')
+INSERT OVERWRITE TABLE orcfile_merge3a_n0 PARTITION (ds='2')
     SELECT * FROM src;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b
-    SELECT key, value FROM orcfile_merge3a;
+EXPLAIN INSERT OVERWRITE TABLE orcfile_merge3b_n0
+    SELECT key, value FROM orcfile_merge3a_n0;
 
-INSERT OVERWRITE TABLE orcfile_merge3b
-    SELECT key, value FROM orcfile_merge3a;
+INSERT OVERWRITE TABLE orcfile_merge3b_n0
+    SELECT key, value FROM orcfile_merge3a_n0;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3b/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge3b_n0/;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
-    FROM orcfile_merge3a
+    FROM orcfile_merge3a_n0
 ) t;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
-    FROM orcfile_merge3b
+    FROM orcfile_merge3b_n0
 ) t;
 
-DROP TABLE orcfile_merge3a;
-DROP TABLE orcfile_merge3b;
+DROP TABLE orcfile_merge3a_n0;
+DROP TABLE orcfile_merge3b_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge5.q b/ql/src/test/queries/clientpositive/orc_merge5.q
index 810f1de..190c6e0 100644
--- a/ql/src/test/queries/clientpositive/orc_merge5.q
+++ b/ql/src/test/queries/clientpositive/orc_merge5.q
@@ -3,10 +3,10 @@ set hive.explain.user=false;
 
 -- SORT_QUERY_RESULTS
 
-create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5b_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
 
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n5;
 
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
@@ -22,13 +22,13 @@ set tez.grouping.max-size=50000;
 set hive.merge.sparkfiles=false;
 
 -- 3 mappers
-explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
+insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
 
 -- 3 files total
-analyze table orc_merge5b compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
-select * from orc_merge5b;
+analyze table orc_merge5b_n0 compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b_n0/;
+select * from orc_merge5b_n0;
 
 set hive.merge.orcfile.stripe.level=true;
 set hive.merge.tezfiles=true;
@@ -37,13 +37,13 @@ set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;
 
 -- 3 mappers
-explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
+insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
 
 -- 1 file after merging
-analyze table orc_merge5b compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
-select * from orc_merge5b;
+analyze table orc_merge5b_n0 compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b_n0/;
+select * from orc_merge5b_n0;
 
 set hive.merge.orcfile.stripe.level=false;
 set hive.merge.tezfiles=false;
@@ -51,17 +51,17 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set hive.merge.sparkfiles=false;
 
-insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-analyze table orc_merge5b compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
-select * from orc_merge5b;
+insert overwrite table orc_merge5b_n0 select userid,string1,subtype,decimal1,ts from orc_merge5_n5 where userid<=13;
+analyze table orc_merge5b_n0 compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b_n0/;
+select * from orc_merge5b_n0;
 
 set hive.merge.orcfile.stripe.level=true;
-explain alter table orc_merge5b concatenate;
-alter table orc_merge5b concatenate;
+explain alter table orc_merge5b_n0 concatenate;
+alter table orc_merge5b_n0 concatenate;
 
 -- 1 file after merging
-analyze table orc_merge5b compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b/;
-select * from orc_merge5b;
+analyze table orc_merge5b_n0 compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5b_n0/;
+select * from orc_merge5b_n0;
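
A note on the repeated "analyze table ... compute statistics noscan" calls above: noscan refreshes basic statistics (file count, total size) from file metadata without reading rows, and the dfs -ls that follows shows the actual files on disk. A sketch of reading the same counters from the metastore side (standard HiveQL; parameter names may vary by version):

  DESCRIBE FORMATTED orc_merge5b_n0;   -- numFiles / totalSize appear under Table Parameters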
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge6.q b/ql/src/test/queries/clientpositive/orc_merge6.q
index af80f47..fabe656 100644
--- a/ql/src/test/queries/clientpositive/orc_merge6.q
+++ b/ql/src/test/queries/clientpositive/orc_merge6.q
@@ -4,10 +4,10 @@ set hive.explain.user=false;
 -- SORT_QUERY_RESULTS
 
 -- orc file merge tests for static partitions
-create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc;
+create table orc_merge5_n4 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5a_n1 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (year string, hour int) stored as orc;
 
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n4;
 
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
@@ -23,17 +23,17 @@ set tez.grouping.max-size=50000;
 set hive.merge.sparkfiles=false;
 
 -- 3 mappers
-explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
+insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
+insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
 
 -- 3 files total
-analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan;
-analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/;
-show partitions orc_merge5a;
-select * from orc_merge5a;
+analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan;
+analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2000/hour=24/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2001/hour=24/;
+show partitions orc_merge5a_n1;
+select * from orc_merge5a_n1;
 
 set hive.merge.orcfile.stripe.level=true;
 set hive.merge.tezfiles=true;
@@ -42,17 +42,17 @@ set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;
 
 -- 3 mappers
-explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
+insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
+insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
 
 -- 1 file after merging
-analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan;
-analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/;
-show partitions orc_merge5a;
-select * from orc_merge5a;
+analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan;
+analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2000/hour=24/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2001/hour=24/;
+show partitions orc_merge5a_n1;
+select * from orc_merge5a_n1;
 
 set hive.merge.orcfile.stripe.level=false;
 set hive.merge.tezfiles=false;
@@ -60,25 +60,25 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set hive.merge.sparkfiles=false;
 
-insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert overwrite table orc_merge5a partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan;
-analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/;
-show partitions orc_merge5a;
-select * from orc_merge5a;
+insert overwrite table orc_merge5a_n1 partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
+insert overwrite table orc_merge5a_n1 partition (year="2001",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5_n4 where userid<=13;
+analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan;
+analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2000/hour=24/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2001/hour=24/;
+show partitions orc_merge5a_n1;
+select * from orc_merge5a_n1;
 
 set hive.merge.orcfile.stripe.level=true;
-explain alter table orc_merge5a partition(year="2000",hour=24) concatenate;
-alter table orc_merge5a partition(year="2000",hour=24) concatenate;
-alter table orc_merge5a partition(year="2001",hour=24) concatenate;
+explain alter table orc_merge5a_n1 partition(year="2000",hour=24) concatenate;
+alter table orc_merge5a_n1 partition(year="2000",hour=24) concatenate;
+alter table orc_merge5a_n1 partition(year="2001",hour=24) concatenate;
 
 -- 1 file after merging
-analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan;
-analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2000/hour=24/;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/year=2001/hour=24/;
-show partitions orc_merge5a;
-select * from orc_merge5a;
+analyze table orc_merge5a_n1 partition(year="2000",hour=24) compute statistics noscan;
+analyze table orc_merge5a_n1 partition(year="2001",hour=24) compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2000/hour=24/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n1/year=2001/hour=24/;
+show partitions orc_merge5a_n1;
+select * from orc_merge5a_n1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge7.q b/ql/src/test/queries/clientpositive/orc_merge7.q
index 2acff2c..2558797 100644
--- a/ql/src/test/queries/clientpositive/orc_merge7.q
+++ b/ql/src/test/queries/clientpositive/orc_merge7.q
@@ -5,10 +5,10 @@ set hive.explain.user=false;
 
 -- orc merge file tests for dynamic partition case
 
-create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
-create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc;
+create table orc_merge5_n2 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5a_n0 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc;
 
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n2;
 
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
@@ -27,17 +27,17 @@ set hive.optimize.sort.dynamic.partition=false;
 set hive.merge.sparkfiles=false;
 
 -- 3 mappers
-explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
-insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
-insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
+explain insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2;
+insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2;
+insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2;
 
 -- 3 files total
-analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
-analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
-show partitions orc_merge5a;
-select * from orc_merge5a where userid<=13;
+analyze table orc_merge5a_n0 partition(st=80.0) compute statistics noscan;
+analyze table orc_merge5a_n0 partition(st=0.8) compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=0.8/;
+show partitions orc_merge5a_n0;
+select * from orc_merge5a_n0 where userid<=13;
 
 set hive.merge.orcfile.stripe.level=true;
 set hive.merge.tezfiles=true;
@@ -46,17 +46,17 @@ set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;
 
 -- 3 mappers
-explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
-insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
-insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
+explain insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2;
+insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2;
+insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2;
 
 -- 1 file after merging
-analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
-analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
-show partitions orc_merge5a;
-select * from orc_merge5a where userid<=13;
+analyze table orc_merge5a_n0 partition(st=80.0) compute statistics noscan;
+analyze table orc_merge5a_n0 partition(st=0.8) compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=0.8/;
+show partitions orc_merge5a_n0;
+select * from orc_merge5a_n0 where userid<=13;
 
 set hive.merge.orcfile.stripe.level=false;
 set hive.merge.tezfiles=false;
@@ -64,25 +64,25 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set hive.merge.sparkfiles=false;
 
-insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
-insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5;
-analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
-analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
-show partitions orc_merge5a;
-select * from orc_merge5a where userid<=13;
+insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2;
+insert overwrite table orc_merge5a_n0 partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5_n2;
+analyze table orc_merge5a_n0 partition(st=80.0) compute statistics noscan;
+analyze table orc_merge5a_n0 partition(st=0.8) compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=0.8/;
+show partitions orc_merge5a_n0;
+select * from orc_merge5a_n0 where userid<=13;
 
 set hive.merge.orcfile.stripe.level=true;
-explain alter table orc_merge5a partition(st=80.0) concatenate;
-alter table orc_merge5a partition(st=80.0) concatenate;
-alter table orc_merge5a partition(st=0.8) concatenate;
+explain alter table orc_merge5a_n0 partition(st=80.0) concatenate;
+alter table orc_merge5a_n0 partition(st=80.0) concatenate;
+alter table orc_merge5a_n0 partition(st=0.8) concatenate;
 
 -- 1 file after merging
-analyze table orc_merge5a partition(st=80.0) compute statistics noscan;
-analyze table orc_merge5a partition(st=0.8) compute statistics noscan;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=80.0/;
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a/st=0.8/;
-show partitions orc_merge5a;
-select * from orc_merge5a where userid<=13;
+analyze table orc_merge5a_n0 partition(st=80.0) compute statistics noscan;
+analyze table orc_merge5a_n0 partition(st=0.8) compute statistics noscan;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=80.0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=0.8/;
+show partitions orc_merge5a_n0;
+select * from orc_merge5a_n0 where userid<=13;
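
In the dynamic-partition case above, file merging and CONCATENATE operate per partition directory: st=80.0 and st=0.8 are listed and concatenated independently, and a merge never combines files across sibling partitions. The per-partition form, repeated from the statements above for a single partition:

  ALTER TABLE orc_merge5a_n0 PARTITION (st=80.0) CONCATENATE;
  dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_merge5a_n0/st=80.0/;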
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge8.q b/ql/src/test/queries/clientpositive/orc_merge8.q
index d24b2e6..b6a4260 100644
--- a/ql/src/test/queries/clientpositive/orc_merge8.q
+++ b/ql/src/test/queries/clientpositive/orc_merge8.q
@@ -1,6 +1,6 @@
 set hive.vectorized.execution.enabled=false;
 
-create table if not exists alltypes (
+create table if not exists alltypes_n1 (
  bo boolean,
  ti tinyint,
  si smallint,
@@ -21,10 +21,10 @@ create table if not exists alltypes (
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
 
-create table alltypes_orc like alltypes;
-alter table alltypes_orc set fileformat orc;
+create table alltypes_orc_n1 like alltypes_n1;
+alter table alltypes_orc_n1 set fileformat orc;
 
-load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes;
+load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n1;
 
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET hive.optimize.index.filter=true;
@@ -34,10 +34,10 @@ set hive.merge.mapfiles=false;
 set hive.merge.mapredfiles=false;
 set hive.merge.sparkfiles=false;
 
-insert overwrite table alltypes_orc select * from alltypes;
-insert into table alltypes_orc select * from alltypes;
+insert overwrite table alltypes_orc_n1 select * from alltypes_n1;
+insert into table alltypes_orc_n1 select * from alltypes_n1;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/alltypes_orc/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/alltypes_orc_n1/;
 
 set hive.merge.orcfile.stripe.level=true;
 set hive.merge.tezfiles=true;
@@ -45,6 +45,6 @@ set hive.merge.mapfiles=true;
 set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;
 
-alter table alltypes_orc concatenate;
+alter table alltypes_orc_n1 concatenate;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/alltypes_orc/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/alltypes_orc_n1/;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q b/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q
index 9cfff44..5d1e8c8 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_diff_fs.q
@@ -22,28 +22,28 @@ set hive.metastore.warehouse.dir=pfile://${system:test.tmp.dir}/orc_merge_diff_f
 
 -- SORT_QUERY_RESULTS
 
-DROP TABLE orcfile_merge1;
-DROP TABLE orcfile_merge1b;
-DROP TABLE orcfile_merge1c;
+DROP TABLE orcfile_merge1_n0;
+DROP TABLE orcfile_merge1b_n0;
+DROP TABLE orcfile_merge1c_n0;
 
-CREATE TABLE orcfile_merge1 (key INT, value STRING) 
+CREATE TABLE orcfile_merge1_n0 (key INT, value STRING) 
     PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
-CREATE TABLE orcfile_merge1b (key INT, value STRING) 
+CREATE TABLE orcfile_merge1b_n0 (key INT, value STRING) 
     PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
-CREATE TABLE orcfile_merge1c (key INT, value STRING) 
+CREATE TABLE orcfile_merge1c_n0 (key INT, value STRING) 
     PARTITIONED BY (ds STRING, part STRING) STORED AS ORC;
 
 -- merge disabled
 EXPLAIN
-    INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+    INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part)
         SELECT key, value, PMOD(HASH(key), 2) as part
         FROM src;
 
-INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1_n0/ds=1/part=0/;
 
 set hive.merge.tezfiles=true;
 set hive.merge.mapfiles=true;
@@ -51,50 +51,50 @@ set hive.merge.mapredfiles=true;
 set hive.merge.sparkfiles=true;
 -- auto-merge slow way
 EXPLAIN
-    INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+    INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part)
         SELECT key, value, PMOD(HASH(key), 2) as part
         FROM src;
 
-INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1b_n0/ds=1/part=0/;
 
 set hive.merge.orcfile.stripe.level=true;
 -- auto-merge fast way
 EXPLAIN
-    INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+    INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part)
         SELECT key, value, PMOD(HASH(key), 2) as part
         FROM src;
 
-INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part)
+INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part)
     SELECT key, value, PMOD(HASH(key), 2) as part
     FROM src;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c/ds=1/part=0/;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orcfile_merge1c_n0/ds=1/part=0/;
 
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 -- Verify
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge1 WHERE ds='1'
+    FROM orcfile_merge1_n0 WHERE ds='1'
 ) t;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge1b WHERE ds='1'
+    FROM orcfile_merge1b_n0 WHERE ds='1'
 ) t;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
-    FROM orcfile_merge1c WHERE ds='1'
+    FROM orcfile_merge1c_n0 WHERE ds='1'
 ) t;
 
-select count(*) from orcfile_merge1;
-select count(*) from orcfile_merge1b;
-select count(*) from orcfile_merge1c;
+select count(*) from orcfile_merge1_n0;
+select count(*) from orcfile_merge1b_n0;
+select count(*) from orcfile_merge1c_n0;
 
-DROP TABLE orcfile_merge1;
-DROP TABLE orcfile_merge1b;
-DROP TABLE orcfile_merge1c;
+DROP TABLE orcfile_merge1_n0;
+DROP TABLE orcfile_merge1b_n0;
+DROP TABLE orcfile_merge1c_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
index 60458d0..aba4617 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q
@@ -3,10 +3,10 @@ set hive.explain.user=false;
 
 -- SORT_QUERY_RESULTS
 
-create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+create table orc_merge5_n3 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
 create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
 
-load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5;
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5_n3;
 
 SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.merge.orcfile.stripe.level=false;
@@ -15,15 +15,15 @@ set hive.merge.mapredfiles=false;
 set hive.merge.sparkfiles=false;
 
 -- 3 mappers
-explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13;
 set hive.exec.orc.write.format=0.12;
-insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13;
+insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13;
+insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13;
 set hive.exec.orc.write.format=0.11;
-insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
-insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13;
+insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13;
+insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13;
+insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5_n3 where userid<=13;
 
 -- 5 files total
 analyze table orc_merge5b compute statistics noscan;
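
For context, hive.exec.orc.write.format=0.12/0.11 pins the ORC file version the writer emits, so the inserts above deliberately leave files with mixed versions in the table; stripe-level merge only combines files whose versions match, which is why 5 files remain. A sketch for inspecting per-file metadata with the dump hook used elsewhere in this patch:

  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
  select * from orc_merge5b limit 1;   -- dump is expected to include each file's version
  SET hive.exec.post.hooks=;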

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q b/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q
index 2396194..17ccb00 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_incompat_schema.q
@@ -2,7 +2,7 @@ SET hive.vectorized.execution.enabled=false;
 
 set hive.metastore.disallow.incompatible.col.type.changes=false;
 
-CREATE TABLE orc_create_staging (
+CREATE TABLE orc_create_staging_n2 (
   str STRING,
   mp  MAP<STRING,STRING>,
   lst ARRAY<STRING>,
@@ -12,9 +12,9 @@ CREATE TABLE orc_create_staging (
     COLLECTION ITEMS TERMINATED BY ','
     MAP KEYS TERMINATED BY ':';
 
-LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging;
+LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging_n2;
 
-CREATE TABLE orc_create_complex (
+CREATE TABLE orc_create_complex_n2 (
   str STRING,
   mp  MAP<STRING,STRING>,
   lst ARRAY<STRING>,
@@ -22,28 +22,28 @@ CREATE TABLE orc_create_complex (
   val INT
 ) STORED AS ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="1000", "orc.compress.size"="10000");
 
-INSERT OVERWRITE TABLE orc_create_complex SELECT str,mp,lst,strct,0 FROM orc_create_staging;
-INSERT INTO TABLE orc_create_complex SELECT str,mp,lst,strct,0 FROM orc_create_staging;
+INSERT OVERWRITE TABLE orc_create_complex_n2 SELECT str,mp,lst,strct,0 FROM orc_create_staging_n2;
+INSERT INTO TABLE orc_create_complex_n2 SELECT str,mp,lst,strct,0 FROM orc_create_staging_n2;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex/;
-select sum(hash(*)) from orc_create_complex;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex_n2/;
+select sum(hash(*)) from orc_create_complex_n2;
 
 -- will be merged as the schema is the same
-ALTER TABLE orc_create_complex CONCATENATE;
+ALTER TABLE orc_create_complex_n2 CONCATENATE;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex/;
-select sum(hash(*)) from orc_create_complex;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex_n2/;
+select sum(hash(*)) from orc_create_complex_n2;
 
-ALTER TABLE orc_create_complex
+ALTER TABLE orc_create_complex_n2
 CHANGE COLUMN strct strct STRUCT<A:STRING,B:STRING,C:STRING>;
 
-INSERT INTO TABLE orc_create_complex SELECT str,mp,lst,NAMED_STRUCT('A',strct.A,'B',strct.B,'C','c'),0 FROM orc_create_staging;
+INSERT INTO TABLE orc_create_complex_n2 SELECT str,mp,lst,NAMED_STRUCT('A',strct.A,'B',strct.B,'C','c'),0 FROM orc_create_staging_n2;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex/;
-select sum(hash(*)) from orc_create_complex;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex_n2/;
+select sum(hash(*)) from orc_create_complex_n2;
 
 -- schema is different for both files, will not be merged
-ALTER TABLE orc_create_complex CONCATENATE;
+ALTER TABLE orc_create_complex_n2 CONCATENATE;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex/;
-select sum(hash(*)) from orc_create_complex;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_create_complex_n2/;
+select sum(hash(*)) from orc_create_complex_n2;
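
As the comments above note, CONCATENATE only merges ORC files whose schemas match: after the CHANGE COLUMN, the newest file carries a three-field strct while the earlier merged file does not, so the second CONCATENATE leaves both files untouched. A sketch for confirming the per-file type with the same dump hook:

  SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecOrcFileDump;
  select * from orc_create_complex_n2 limit 1;   -- dump is expected to include each file's type description
  SET hive.exec.post.hooks=;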

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q b/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q
index 139098c..04bb978 100644
--- a/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q
+++ b/ql/src/test/queries/clientpositive/orc_merge_incompat_writer_version.q
@@ -2,8 +2,8 @@
 
 set hive.vectorized.execution.enabled=false;
 
-DROP TABLE part_orc;
-CREATE TABLE part_orc(
+DROP TABLE part_orc_n0;
+CREATE TABLE part_orc_n0(
   p_partkey int,
   p_name string,
   p_mfgr string,
@@ -17,18 +17,18 @@ CREATE TABLE part_orc(
 STORED AS ORC;
 
 -- writer version for this file is HIVE_13083
-LOAD DATA LOCAL INPATH '../../data/files/part.orc' OVERWRITE INTO TABLE part_orc;
+LOAD DATA LOCAL INPATH '../../data/files/part.orc' OVERWRITE INTO TABLE part_orc_n0;
 
-create table part_orc_staging as select * from part_orc;
+create table part_orc_staging as select * from part_orc_n0;
 
 -- will be written with current writer version
-insert into table part_orc select * from part_orc_staging;
+insert into table part_orc_n0 select * from part_orc_staging;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/part_orc/;
-select sum(hash(*)) from part_orc;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/part_orc_n0/;
+select sum(hash(*)) from part_orc_n0;
 
 -- will not be merged as writer version is not matching
-ALTER TABLE part_orc CONCATENATE;
+ALTER TABLE part_orc_n0 CONCATENATE;
 
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/part_orc/;
-select sum(hash(*)) from part_orc;
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/part_orc_n0/;
+select sum(hash(*)) from part_orc_n0;
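
One way to see why the final CONCATENATE above is expected to be a no-op is to dump the file metadata: the ORC file dump utility reports each file's writer version, and CONCATENATE leaves files with mismatched writer versions unmerged. A hedged sketch (the warehouse path and file name below are illustrative, not from the patch):

hive --orcfiledump /user/hive/warehouse/part_orc_n0/000000_0
-- the dump header reports the file and writer versions; the loaded part.orc
-- should show HIVE_13083 while the re-inserted rows carry the current writer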

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_min_max.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_min_max.q b/ql/src/test/queries/clientpositive/orc_min_max.q
index 58a35d0..18a21b0 100644
--- a/ql/src/test/queries/clientpositive/orc_min_max.q
+++ b/ql/src/test/queries/clientpositive/orc_min_max.q
@@ -1,6 +1,6 @@
 set hive.vectorized.execution.enabled=false;
 
-create table if not exists alltypes (
+create table if not exists alltypes_n2 (
  bo boolean,
  ti tinyint,
  si smallint,
@@ -21,14 +21,14 @@ create table if not exists alltypes (
 collection items terminated by ','
 map keys terminated by ':' stored as textfile;
 
-create table alltypes_orc like alltypes;
-alter table alltypes_orc set fileformat orc;
+create table alltypes_orc_n3 like alltypes_n2;
+alter table alltypes_orc_n3 set fileformat orc;
 
-load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes;
+load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes_n2;
 
-insert overwrite table alltypes_orc select * from alltypes;
+insert overwrite table alltypes_orc_n3 select * from alltypes_n2;
 
-select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes;
+select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_n2;
 
-select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc;
+select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc_n3;
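
The LIKE/SET FILEFORMAT pair above is the q-file idiom for cloning a text table's column list into an ORC table before comparing aggregates across the two formats. A minimal sketch with hypothetical names:

create table demo_txt (id int, val string) row format delimited fields terminated by ',' stored as textfile;
create table demo_orc like demo_txt;      -- copies the columns, not the file format
alter table demo_orc set fileformat orc;  -- switch the empty clone to ORC
insert overwrite table demo_orc select * from demo_txt;
-- identical min/max results over demo_txt and demo_orc exercise ORC's column statistics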
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q b/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q
index 700fdd4..91dc873 100644
--- a/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q
+++ b/ql/src/test/queries/clientpositive/orc_nested_column_pruning.q
@@ -4,12 +4,12 @@ set hive.exec.dynamic.partition.mode = nonstrict;
 set hive.strict.checks.cartesian.product=false;
 
 -- First, create source tables
-DROP TABLE IF EXISTS dummy;
-CREATE TABLE dummy (i int);
-INSERT INTO TABLE dummy VALUES (42);
+DROP TABLE IF EXISTS dummy_n4;
+CREATE TABLE dummy_n4 (i int);
+INSERT INTO TABLE dummy_n4 VALUES (42);
 
-DROP TABLE IF EXISTS nested_tbl_1;
-CREATE TABLE nested_tbl_1 (
+DROP TABLE IF EXISTS nested_tbl_1_n0;
+CREATE TABLE nested_tbl_1_n0 (
   a int,
   s1 struct<f1: boolean, f2: string, f3: struct<f4: int, f5: double>, f6: int>,
   s2 struct<f7: string, f8: struct<f9 : boolean, f10: array<int>, f11: map<string, boolean>>>,
@@ -19,7 +19,7 @@ CREATE TABLE nested_tbl_1 (
   s6 map<string, struct<f20:array<struct<f21:struct<f22:int>>>>>
 ) STORED AS ORC;
 
-INSERT INTO TABLE nested_tbl_1 SELECT
+INSERT INTO TABLE nested_tbl_1_n0 SELECT
   1, named_struct('f1', false, 'f2', 'foo', 'f3', named_struct('f4', 4, 'f5', cast(5.0 as double)), 'f6', 4),
   named_struct('f7', 'f7', 'f8', named_struct('f9', true, 'f10', array(10, 11), 'f11', map('key1', true, 'key2', false))),
   named_struct('f12', array(named_struct('f13', 'foo', 'f14', 14), named_struct('f13', 'bar', 'f14', 28))),
@@ -27,12 +27,12 @@ INSERT INTO TABLE nested_tbl_1 SELECT
   named_struct('f16', array(named_struct('f17', 'foo', 'f18', named_struct('f19', 14)), named_struct('f17', 'bar', 'f18', named_struct('f19', 28)))),
   map('key1', named_struct('f20', array(named_struct('f21', named_struct('f22', 1)))),
       'key2', named_struct('f20', array(named_struct('f21', named_struct('f22', 2)))))
-FROM dummy;
+FROM dummy_n4;
 
-DROP TABLE IF EXISTS nested_tbl_2;
-CREATE TABLE nested_tbl_2 LIKE nested_tbl_1;
+DROP TABLE IF EXISTS nested_tbl_2_n0;
+CREATE TABLE nested_tbl_2_n0 LIKE nested_tbl_1_n0;
 
-INSERT INTO TABLE nested_tbl_2 SELECT
+INSERT INTO TABLE nested_tbl_2_n0 SELECT
   2, named_struct('f1', true, 'f2', 'bar', 'f3', named_struct('f4', 4, 'f5', cast(6.5 as double)), 'f6', 4),
   named_struct('f7', 'f72', 'f8', named_struct('f9', false, 'f10', array(20, 22), 'f11', map('key3', true, 'key4', false))),
   named_struct('f12', array(named_struct('f13', 'bar', 'f14', 28), named_struct('f13', 'foo', 'f14', 56))),
@@ -40,175 +40,175 @@ INSERT INTO TABLE nested_tbl_2 SELECT
   named_struct('f16', array(named_struct('f17', 'bar', 'f18', named_struct('f19', 28)), named_struct('f17', 'foo', 'f18', named_struct('f19', 56)))),
   map('key3', named_struct('f20', array(named_struct('f21', named_struct('f22', 3)))),
       'key4', named_struct('f20', array(named_struct('f21', named_struct('f22', 4)))))
-FROM dummy;
+FROM dummy_n4;
 
 -- Testing only select statements
 
-EXPLAIN SELECT a FROM nested_tbl_1;
-SELECT a FROM nested_tbl_1;
+EXPLAIN SELECT a FROM nested_tbl_1_n0;
+SELECT a FROM nested_tbl_1_n0;
 
-EXPLAIN SELECT s1.f1 FROM nested_tbl_1;
-SELECT s1.f1 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f1 FROM nested_tbl_1_n0;
+SELECT s1.f1 FROM nested_tbl_1_n0;
 
-EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1;
-SELECT s1.f1, s1.f2 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f1, s1.f2 FROM nested_tbl_1_n0;
+SELECT s1.f1, s1.f2 FROM nested_tbl_1_n0;
 
 -- In this case 's1.f3' and 's1.f3.f4' should be merged
-EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1;
-SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n0;
+SELECT s1.f3, s1.f3.f4 FROM nested_tbl_1_n0;
 
 -- Testing select array and index shifting
-EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1;
-SELECT s1.f3.f5 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n0;
+SELECT s1.f3.f5 FROM nested_tbl_1_n0;
 
 -- Testing select from multiple structs
-EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1;
-SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1;
+EXPLAIN SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n0;
+SELECT s1.f3.f4, s2.f8.f9 FROM nested_tbl_1_n0;
 
 
 -- Testing select with filter
 
-EXPLAIN SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE;
-SELECT s1.f2 FROM nested_tbl_1 WHERE s1.f1 = FALSE;
+EXPLAIN SELECT s1.f2 FROM nested_tbl_1_n0 WHERE s1.f1 = FALSE;
+SELECT s1.f2 FROM nested_tbl_1_n0 WHERE s1.f1 = FALSE;
 
-EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4;
-SELECT s1.f3.f5 FROM nested_tbl_1 WHERE s1.f3.f4 = 4;
+EXPLAIN SELECT s1.f3.f5 FROM nested_tbl_1_n0 WHERE s1.f3.f4 = 4;
+SELECT s1.f3.f5 FROM nested_tbl_1_n0 WHERE s1.f3.f4 = 4;
 
-EXPLAIN SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE;
-SELECT s2.f8 FROM nested_tbl_1 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE;
+EXPLAIN SELECT s2.f8 FROM nested_tbl_1_n0 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE;
+SELECT s2.f8 FROM nested_tbl_1_n0 WHERE s1.f2 = 'foo' AND size(s2.f8.f10) > 1 AND s2.f8.f11['key1'] = TRUE;
 
 
 -- Testing lateral view
 
-EXPLAIN SELECT col1, col2 FROM nested_tbl_1
+EXPLAIN SELECT col1, col2 FROM nested_tbl_1_n0
 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1
 LATERAL VIEW explode(s3.f12) tbl2 AS col2;
-SELECT col1, col2 FROM nested_tbl_1
+SELECT col1, col2 FROM nested_tbl_1_n0
 LATERAL VIEW explode(s2.f8.f10) tbl1 AS col1
 LATERAL VIEW explode(s3.f12) tbl2 AS col2;
 
 
 -- Testing UDFs
-EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1;
-SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1;
+EXPLAIN SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n0;
+SELECT pmod(s2.f8.f10[1], s1.f3.f4) FROM nested_tbl_1_n0;
 
 
 -- Testing aggregations
 
-EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5;
-SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3.f5;
+EXPLAIN SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3.f5;
+SELECT s1.f3.f5, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3.f5;
 
-EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3;
-SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3;
+EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3;
+SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3;
 
-EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3;
-SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1 GROUP BY s1.f3 ORDER BY s1.f3;
+EXPLAIN SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 ORDER BY s1.f3;
+SELECT s1.f3, count(s1.f3.f4) FROM nested_tbl_1_n0 GROUP BY s1.f3 ORDER BY s1.f3;
 
 
 -- Testing joins
 
 EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8
-FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2
+FROM nested_tbl_1_n0 t1 JOIN nested_tbl_2_n0 t2
 ON t1.s1.f3.f4 = t2.s1.f6
 WHERE t2.s2.f8.f9 == FALSE;
 SELECT t1.s1.f3.f5, t2.s2.f8
-FROM nested_tbl_1 t1 JOIN nested_tbl_2 t2
+FROM nested_tbl_1_n0 t1 JOIN nested_tbl_2_n0 t2
 ON t1.s1.f3.f4 = t2.s1.f6
 WHERE t2.s2.f8.f9 == FALSE;
 
 EXPLAIN SELECT t1.s1.f3.f5, t2.s2.f8
-FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n0 t1 JOIN nested_tbl_1_n0 t2
 ON t1.s1.f3.f4 = t2.s1.f6
 WHERE t2.s2.f8.f9 == TRUE;
 SELECT t1.s1.f3.f5, t2.s2.f8
-FROM nested_tbl_1 t1 JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n0 t1 JOIN nested_tbl_1_n0 t2
 ON t1.s1.f3.f4 = t2.s1.f6
 WHERE t2.s2.f8.f9 == TRUE;
 
 EXPLAIN SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2
 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE;
 SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2
 ON t1.s1.f3.f4 = t2.s1.f6 AND t2.s2.f8.f9 == TRUE;
 
 EXPLAIN SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2
 ON t1.s1.f1 <> t2.s2.f8.f9;
 SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2
 ON t1.s1.f1 <> t2.s2.f8.f9;
 
 EXPLAIN SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2
 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9;
 SELECT t1.s1.f3.f5
-FROM nested_tbl_1 t1 LEFT SEMI JOIN nested_tbl_1 t2
+FROM nested_tbl_1_n0 t1 LEFT SEMI JOIN nested_tbl_1_n0 t2
 ON t1.s1.f3.f4 = t2.s1.f6 AND t1.s1.f1 <> t2.s2.f8.f9;
 
 -- Testing insert with aliases
 
-DROP TABLE IF EXISTS nested_tbl_3;
-CREATE TABLE nested_tbl_3 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS ORC;
+DROP TABLE IF EXISTS nested_tbl_3_n0;
+CREATE TABLE nested_tbl_3_n0 (f1 boolean, f2 string) PARTITIONED BY (f3 int) STORED AS ORC;
 
-INSERT OVERWRITE TABLE nested_tbl_3 PARTITION(f3)
+INSERT OVERWRITE TABLE nested_tbl_3_n0 PARTITION(f3)
 SELECT s1.f1 AS f1, S1.f2 AS f2, s1.f6 AS f3
-FROM nested_tbl_1;
+FROM nested_tbl_1_n0;
 
-SELECT * FROM nested_tbl_3;
+SELECT * FROM nested_tbl_3_n0;
 
 -- Testing select struct field from elements in array or map
 
 EXPLAIN
 SELECT count(s1.f6), s3.f12[0].f14
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s3.f12[0].f14;
 
 SELECT count(s1.f6), s3.f12[0].f14
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s3.f12[0].f14;
 
 EXPLAIN
 SELECT count(s1.f6), s4['key1'].f15
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s4['key1'].f15;
 
 SELECT count(s1.f6), s4['key1'].f15
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s4['key1'].f15;
 
 EXPLAIN
 SELECT count(s1.f6), s5.f16[0].f18.f19
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s5.f16[0].f18.f19;
 
 SELECT count(s1.f6), s5.f16[0].f18.f19
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s5.f16[0].f18.f19;
 
 EXPLAIN
 SELECT count(s1.f6), s5.f16.f18.f19
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s5.f16.f18.f19;
 
 SELECT count(s1.f6), s5.f16.f18.f19
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s5.f16.f18.f19;
 
 EXPLAIN
 SELECT count(s1.f6), s6['key1'].f20[0].f21.f22
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s6['key1'].f20[0].f21.f22;
 
 SELECT count(s1.f6), s6['key1'].f20[0].f21.f22
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s6['key1'].f20[0].f21.f22;
 
 EXPLAIN
 SELECT count(s1.f6), s6['key1'].f20.f21.f22
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s6['key1'].f20.f21.f22;
 
 SELECT count(s1.f6), s6['key1'].f20.f21.f22
-FROM nested_tbl_1
+FROM nested_tbl_1_n0
 GROUP BY s6['key1'].f20.f21.f22;
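
Two idioms above are worth spelling out. The EXPLAIN/SELECT pairs check that the scan's read schema is narrowed to the referenced sub-fields rather than whole structs, and the one-row dummy table exists because VALUES clauses cannot evaluate constructors such as named_struct, so complex literals are inserted via SELECT ... FROM a single-row table. A hedged sketch of both, with hypothetical names:

CREATE TABLE one_row (i int);
INSERT INTO TABLE one_row VALUES (1);

CREATE TABLE prune_demo (id int, s1 struct<f1:boolean, f2:string>) STORED AS ORC;
INSERT INTO TABLE prune_demo SELECT 1, named_struct('f1', true, 'f2', 'x') FROM one_row;

-- the table scan in the plan should list only s1.f2 among the columns it reads
EXPLAIN SELECT s1.f2 FROM prune_demo;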

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_ppd_basic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_basic.q b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
index fb2efce..f0b0b96 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_basic.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
@@ -8,7 +8,7 @@ SET hive.cbo.enable=false;
 SET hive.map.aggr=false;
 -- disabling map side aggregation as that can lead to different intermediate record counts
 
-CREATE TABLE staging(t tinyint,
+CREATE TABLE staging_n7(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -22,10 +22,10 @@ CREATE TABLE staging(t tinyint,
 ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
 STORED AS TEXTFILE;
 
-LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
-LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging_n7;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging_n7;
 
-CREATE TABLE orc_ppd_staging(t tinyint,
+CREATE TABLE orc_ppd_staging_n1(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -40,14 +40,14 @@ CREATE TABLE orc_ppd_staging(t tinyint,
            bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
 
-insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging order by t, s;
+insert overwrite table orc_ppd_staging_n1 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), `dec`, bin from staging_n7 order by t, s;
 
 -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
 -- which makes it hard to test bloom filters
-insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging limit 1;
-insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging limit 1;
+insert into orc_ppd_staging_n1 select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11",-71.54,"aaa" from staging_n7 limit 1;
+insert into orc_ppd_staging_n1 select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11",71.54,"zzz" from staging_n7 limit 1;
 
-CREATE TABLE orc_ppd(t tinyint,
+CREATE TABLE orc_ppd_n2(t tinyint,
            si smallint,
            i int,
            b bigint,
@@ -62,7 +62,7 @@ CREATE TABLE orc_ppd(t tinyint,
            bin binary)
 STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
 
-insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging order by t, s;
+insert overwrite table orc_ppd_n2 select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), da, `dec`, bin from orc_ppd_staging_n1 order by t, s;
 
 SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
 
@@ -72,50 +72,50 @@ SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrint
 -- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
 
 -- INPUT_RECORDS: 2100 (all row groups)
-select count(*) from orc_ppd;
+select count(*) from orc_ppd_n2;
 
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where t > 127;
+select count(*) from orc_ppd_n2 where t > 127;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = 55;
-select count(*) from orc_ppd where t <=> 50;
-select count(*) from orc_ppd where t <=> 100;
+select count(*) from orc_ppd_n2 where t = 55;
+select count(*) from orc_ppd_n2 where t <=> 50;
+select count(*) from orc_ppd_n2 where t <=> 100;
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t = "54";
+select count(*) from orc_ppd_n2 where t = "54";
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = -10.0;
+select count(*) from orc_ppd_n2 where t = -10.0;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t = cast(53 as float);
-select count(*) from orc_ppd where t = cast(53 as double);
+select count(*) from orc_ppd_n2 where t = cast(53 as float);
+select count(*) from orc_ppd_n2 where t = cast(53 as double);
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t < 100;
+select count(*) from orc_ppd_n2 where t < 100;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t < 100 and t > 98;
+select count(*) from orc_ppd_n2 where t < 100 and t > 98;
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where t <= 100;
+select count(*) from orc_ppd_n2 where t <= 100;
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t is null;
+select count(*) from orc_ppd_n2 where t is null;
 
 -- INPUT_RECORDS: 1100 (2 row groups)
-select count(*) from orc_ppd where t in (5, 120);
+select count(*) from orc_ppd_n2 where t in (5, 120);
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where t between 60 and 80;
+select count(*) from orc_ppd_n2 where t between 60 and 80;
 
 -- bloom filter tests
 -- INPUT_RECORDS: 0
-select count(*) from orc_ppd where t = -100;
-select count(*) from orc_ppd where t <=> -100;
-select count(*) from orc_ppd where t = 125;
-select count(*) from orc_ppd where t IN (-100, 125, 200);
+select count(*) from orc_ppd_n2 where t = -100;
+select count(*) from orc_ppd_n2 where t <=> -100;
+select count(*) from orc_ppd_n2 where t = 125;
+select count(*) from orc_ppd_n2 where t IN (-100, 125, 200);
 
 -- Row group statistics for column s:
 -- Entry 0: count: 1000 hasNull: false min:  max: zach young sum: 12907 positions: 0,0,0
@@ -123,79 +123,79 @@ select count(*) from orc_ppd where t IN (-100, 125, 200);
 -- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373
 
 -- INPUT_RECORDS: 0 (no row groups)
-select count(*) from orc_ppd where s > "zzz";
+select count(*) from orc_ppd_n2 where s > "zzz";
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where s = "zach young";
-select count(*) from orc_ppd where s <=> "zach zipper";
-select count(*) from orc_ppd where s <=> "";
+select count(*) from orc_ppd_n2 where s = "zach young";
+select count(*) from orc_ppd_n2 where s <=> "zach zipper";
+select count(*) from orc_ppd_n2 where s <=> "";
 
 -- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s is null;
+select count(*) from orc_ppd_n2 where s is null;
 
 -- INPUT_RECORDS: 2100
-select count(*) from orc_ppd where s is not null;
+select count(*) from orc_ppd_n2 where s is not null;
 
 -- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s = cast("zach young" as char(50));
+select count(*) from orc_ppd_n2 where s = cast("zach young" as char(50));
 
 -- INPUT_RECORDS: 1000 (1 row group)
-select count(*) from orc_ppd where s = cast("zach young" as char(10));
-select count(*) from orc_ppd where s = cast("zach young" as varchar(10));
-select count(*) from orc_ppd where s = cast("zach young" as varchar(50));
+select count(*) from orc_ppd_n2 where s = cast("zach young" as char(10));
+select count(*) from orc_ppd_n2 where s = cast("zach young" as varchar(10));
+select count(*) from orc_ppd_n2 where s = cast("zach young" as varchar(50));
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s < "b";
+select count(*) from orc_ppd_n2 where s < "b";
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s > "alice" and s < "bob";
+select count(*) from orc_ppd_n2 where s > "alice" and s < "bob";
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s in ("alice allen", "");
+select count(*) from orc_ppd_n2 where s in ("alice allen", "");
 
 -- INPUT_RECORDS: 2000 (2 row groups)
-select count(*) from orc_ppd where s between "" and "alice allen";
+select count(*) from orc_ppd_n2 where s between "" and "alice allen";
 
 -- INPUT_RECORDS: 100 (1 row group)
-select count(*) from orc_ppd where s between "zz" and "zzz";
+select count(*) from orc_ppd_n2 where s between "zz" and "zzz";
 
 -- INPUT_RECORDS: 1100 (2 row groups)
-select count(*) from orc_ppd where s between "zach zipper" and "zzz";
+select count(*) from orc_ppd_n2 where s between "zach zipper" and "zzz";
 
 -- bloom filter tests
 -- INPUT_RECORDS: 0
-select count(*) from orc_ppd where s = "hello world";
-select count(*) from orc_ppd where s <=> "apache hive";
-select count(*) from orc_ppd where s IN ("a", "z");
+select count(*) from orc_ppd_n2 where s = "hello world";
+select count(*) from orc_ppd_n2 where s <=> "apache hive";
+select count(*) from orc_ppd_n2 where s IN ("a", "z");
 
 -- INPUT_RECORDS: 100
-select count(*) from orc_ppd where s = "sarah ovid";
+select count(*) from orc_ppd_n2 where s = "sarah ovid";
 
 -- INPUT_RECORDS: 1100
-select count(*) from orc_ppd where s = "wendy king";
+select count(*) from orc_ppd_n2 where s = "wendy king";
 
 -- INPUT_RECORDS: 1000
-select count(*) from orc_ppd where s = "wendy king" and t < 0;
+select count(*) from orc_ppd_n2 where s = "wendy king" and t < 0;
 
 -- INPUT_RECORDS: 100
-select count(*) from orc_ppd where s = "wendy king" and t > 100;
+select count(*) from orc_ppd_n2 where s = "wendy king" and t > 100;
 
 set hive.cbo.enable=false;
 set hive.optimize.index.filter=false;
 -- when cbo is disabled constant gets converted to HiveDecimal
 --  74.72f + 0.0 = 74.72000122070312
-select count(*) from orc_ppd where f=74.72;
+select count(*) from orc_ppd_n2 where f=74.72;
 set hive.optimize.index.filter=true;
-select count(*) from orc_ppd where f=74.72;
+select count(*) from orc_ppd_n2 where f=74.72;
 
 set hive.cbo.enable=true;
 set hive.optimize.index.filter=false;
-select count(*) from orc_ppd where f=74.72;
+select count(*) from orc_ppd_n2 where f=74.72;
 set hive.optimize.index.filter=true;
-select count(*) from orc_ppd where f=74.72;
+select count(*) from orc_ppd_n2 where f=74.72;
 
 -- 42.47f + 0.0 == 42.470001220703125
-create temporary table orc_ppd_1 stored as orc as select * from orc_ppd_staging where d = 42.47;
+create temporary table orc_ppd_1 stored as orc as select * from orc_ppd_staging_n1 where d = 42.47;
 
 set hive.cbo.enable=false;
 set hive.optimize.index.filter=false;
@@ -256,7 +256,7 @@ drop table if exists tmp_orcppd;
 create temporary table tmp_orcppd
                     stored as orc
                     as select ts, cast(ts as date)
-                    from staging ;
+                    from staging_n7 ;
 insert into table tmp_orcppd
                     values(null, null);
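
The INPUT_RECORDS comments throughout this file encode the expected row-group elimination: with orc.row.index.stride set to 1000, each 1000-row stride carries min/max statistics, and orc.bloom.filter.columns="*" adds bloom filters, so a predicate outside a stride's min/max range, or absent from its bloom filter, skips that stride entirely. A hedged sketch of the same mechanics on a hypothetical table:

CREATE TABLE ppd_demo (t tinyint, s string)
STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
-- assume ~2100 rows loaded in sorted order, as with orc_ppd_n2 above
SET hive.optimize.index.filter=true;
select count(*) from ppd_demo where t > 127;   -- above every stride's max: zero row groups read
select count(*) from ppd_demo where t = -100;  -- inside the min/max gap: bloom filters skip all strides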
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_ppd_boolean.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_boolean.q b/ql/src/test/queries/clientpositive/orc_ppd_boolean.q
index 7cec204..4d4f56d 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_boolean.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_boolean.q
@@ -6,33 +6,33 @@ SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 
-create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as orc tblproperties("orc.stripe.size"="16777216");
+create table newtypesorc_n0(c char(10), v varchar(10), d decimal(5,3), b boolean) stored as orc tblproperties("orc.stripe.size"="16777216");
 
-insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl;
+insert overwrite table newtypesorc_n0 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, true from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, false from src src2) uniontbl;
 
 set hive.optimize.index.filter=false;
 
 -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypesorc where b=true;
+select sum(hash(*)) from newtypesorc_n0 where b=true;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where b=false;
+select sum(hash(*)) from newtypesorc_n0 where b=false;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where b!=true;
+select sum(hash(*)) from newtypesorc_n0 where b!=true;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where b!=false;
+select sum(hash(*)) from newtypesorc_n0 where b!=false;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where b<true;
+select sum(hash(*)) from newtypesorc_n0 where b<true;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where b<false;
+select sum(hash(*)) from newtypesorc_n0 where b<false;
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where b<=true;
+select sum(hash(*)) from newtypesorc_n0 where b<=true;
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where b<=false;
+select sum(hash(*)) from newtypesorc_n0 where b<=false;
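
The alternating SET hive.optimize.index.filter lines are the backbone of these PPD files: queries run alternately without and with ORC predicate pushdown, and the sum(hash(*)) results are compared across runs, so a divergence points at the row-group elimination logic rather than at the query itself. A minimal sketch of the pattern (table name hypothetical):

set hive.optimize.index.filter=false;
select sum(hash(*)) from ppd_demo where b=true;  -- baseline: no pushdown, full scan
set hive.optimize.index.filter=true;
select sum(hash(*)) from ppd_demo where b=true;  -- pushdown enabled: must yield the same hash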
 

http://git-wip-us.apache.org/repos/asf/hive/blob/38d3b8e1/ql/src/test/queries/clientpositive/orc_ppd_char.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_char.q b/ql/src/test/queries/clientpositive/orc_ppd_char.q
index d9ccf6f..5c216f9 100644
--- a/ql/src/test/queries/clientpositive/orc_ppd_char.q
+++ b/ql/src/test/queries/clientpositive/orc_ppd_char.q
@@ -7,75 +7,75 @@ SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 SET mapred.min.split.size=1000;
 SET mapred.max.split.size=5000;
 
-create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) stored as orc tblproperties("orc.stripe.size"="16777216");
+create table newtypesorc_n4(c char(10), v varchar(10), d decimal(5,3), da date) stored as orc tblproperties("orc.stripe.size"="16777216");
 
-insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+insert overwrite table newtypesorc_n4 select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
 
 set hive.optimize.index.filter=false;
 
 -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
-select sum(hash(*)) from newtypesorc where c="apple";
+select sum(hash(*)) from newtypesorc_n4 where c="apple";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c="apple";
+select sum(hash(*)) from newtypesorc_n4 where c="apple";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c!="apple";
+select sum(hash(*)) from newtypesorc_n4 where c!="apple";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c!="apple";
+select sum(hash(*)) from newtypesorc_n4 where c!="apple";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c<"hello";
+select sum(hash(*)) from newtypesorc_n4 where c<"hello";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c<"hello";
+select sum(hash(*)) from newtypesorc_n4 where c<"hello";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c<="hello";
+select sum(hash(*)) from newtypesorc_n4 where c<="hello";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c<="hello";
+select sum(hash(*)) from newtypesorc_n4 where c<="hello";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c="apple ";
+select sum(hash(*)) from newtypesorc_n4 where c="apple ";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c="apple ";
+select sum(hash(*)) from newtypesorc_n4 where c="apple ";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c in ("apple", "carrot");
+select sum(hash(*)) from newtypesorc_n4 where c in ("apple", "carrot");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c in ("apple", "carrot");
+select sum(hash(*)) from newtypesorc_n4 where c in ("apple", "carrot");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c in ("apple", "hello");
+select sum(hash(*)) from newtypesorc_n4 where c in ("apple", "hello");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c in ("apple", "hello");
+select sum(hash(*)) from newtypesorc_n4 where c in ("apple", "hello");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c in ("carrot");
+select sum(hash(*)) from newtypesorc_n4 where c in ("carrot");
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c in ("carrot");
+select sum(hash(*)) from newtypesorc_n4 where c in ("carrot");
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c between "apple" and "carrot";
+select sum(hash(*)) from newtypesorc_n4 where c between "apple" and "carrot";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c between "apple" and "carrot";
+select sum(hash(*)) from newtypesorc_n4 where c between "apple" and "carrot";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c between "apple" and "zombie";
+select sum(hash(*)) from newtypesorc_n4 where c between "apple" and "zombie";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c between "apple" and "zombie";
+select sum(hash(*)) from newtypesorc_n4 where c between "apple" and "zombie";
 
 set hive.optimize.index.filter=false;
-select sum(hash(*)) from newtypesorc where c between "carrot" and "carrot1";
+select sum(hash(*)) from newtypesorc_n4 where c between "carrot" and "carrot1";
 
 set hive.optimize.index.filter=true;
-select sum(hash(*)) from newtypesorc where c between "carrot" and "carrot1";
+select sum(hash(*)) from newtypesorc_n4 where c between "carrot" and "carrot1";
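
One detail in the char tests deserves a note: c="apple " (with a trailing space) is expected to match the same rows as c="apple", since CHAR(10) values are blank-padded and trailing padding is not significant in comparisons. A hedged sketch (table name hypothetical):

set hive.optimize.index.filter=true;
select sum(hash(*)) from char_demo where c="apple";   -- matches the padded value 'apple     '
select sum(hash(*)) from char_demo where c="apple ";  -- same rows: padding is ignored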
 
