http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q 
b/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q
new file mode 100644
index 0000000..938a0bc
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_reorder_columns2_acid.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=false;
+create table src_orc (key tinyint, val string) stored as orc TBLPROPERTIES 
('transactional'='true');
+alter table src_orc change key k tinyint after val;

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_replace_columns1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns1.q 
b/ql/src/test/queries/clientnegative/orc_replace_columns1.q
new file mode 100644
index 0000000..f6b1c06
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns1.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=true;
+create table src_orc (key tinyint, val string) stored as orc;
+alter table src_orc replace columns (k int);

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q 
b/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q
new file mode 100644
index 0000000..68a8127
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns1_acid.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=false;
+create table src_orc (key tinyint, val string) stored as orc TBLPROPERTIES 
('transactional'='true');
+alter table src_orc replace columns (k int);

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_replace_columns2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns2.q 
b/ql/src/test/queries/clientnegative/orc_replace_columns2.q
new file mode 100644
index 0000000..2a50b94
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns2.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=true;
+create table src_orc (key tinyint, val string) stored as orc;
+alter table src_orc replace columns (k smallint, val string);

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q 
b/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q
new file mode 100644
index 0000000..417a5de
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns2_acid.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=false;
+create table src_orc (key tinyint, val string) stored as orc TBLPROPERTIES 
('transactional'='true');
+alter table src_orc replace columns (k smallint, val string);

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_replace_columns3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns3.q 
b/ql/src/test/queries/clientnegative/orc_replace_columns3.q
new file mode 100644
index 0000000..b7b527f
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns3.q
@@ -0,0 +1,4 @@
+SET hive.exec.schema.evolution=true;
+create table src_orc (key smallint, val string) stored as orc;
+alter table src_orc replace columns (k int, val string, z smallint);
+alter table src_orc replace columns (k int, val string, z tinyint);

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q 
b/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q
new file mode 100644
index 0000000..b09eb37
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_replace_columns3_acid.q
@@ -0,0 +1,4 @@
+SET hive.exec.schema.evolution=false;
+create table src_orc (key smallint, val string) stored as orc TBLPROPERTIES 
('transactional'='true');
+alter table src_orc replace columns (k int, val string, z smallint);
+alter table src_orc replace columns (k int, val string, z tinyint);

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_type_promotion1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion1.q 
b/ql/src/test/queries/clientnegative/orc_type_promotion1.q
new file mode 100644
index 0000000..d7facc3
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion1.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=true;
+create table src_orc (key tinyint, val string) stored as orc;
+alter table src_orc change key key float;

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q 
b/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q
new file mode 100644
index 0000000..26e67e5
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion1_acid.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=false;
+create table src_orc (key tinyint, val string) stored as orc TBLPROPERTIES 
('transactional'='true');
+alter table src_orc change key key float;

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_type_promotion2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion2.q 
b/ql/src/test/queries/clientnegative/orc_type_promotion2.q
new file mode 100644
index 0000000..c4ee1b5
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion2.q
@@ -0,0 +1,10 @@
+SET hive.exec.schema.evolution=true;
+create table src_orc (key smallint, val string) stored as orc;
+desc src_orc;
+alter table src_orc change key key smallint;
+desc src_orc;
+alter table src_orc change key key int;
+desc src_orc;
+alter table src_orc change key key bigint;
+desc src_orc;
+alter table src_orc change val val char(100);

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q 
b/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q
new file mode 100644
index 0000000..e076d2b
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion2_acid.q
@@ -0,0 +1,10 @@
+SET hive.exec.schema.evolution=false;
+create table src_orc (key smallint, val string) stored as orc TBLPROPERTIES 
('transactional'='true');
+desc src_orc;
+alter table src_orc change key key smallint;
+desc src_orc;
+alter table src_orc change key key int;
+desc src_orc;
+alter table src_orc change key key bigint;
+desc src_orc;
+alter table src_orc change val val char(100);

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_type_promotion3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion3.q 
b/ql/src/test/queries/clientnegative/orc_type_promotion3.q
new file mode 100644
index 0000000..3ee99ec
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion3.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=true;
+create table src_orc (key tinyint, val string) stored as orc;
+alter table src_orc change key key smallint;

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q 
b/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q
new file mode 100644
index 0000000..3b7c28b
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/orc_type_promotion3_acid.q
@@ -0,0 +1,3 @@
+SET hive.exec.schema.evolution=false;
+create table src_orc (key tinyint, val string) stored as orc TBLPROPERTIES 
('transactional'='true');
+alter table src_orc change key key smallint;

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q 
b/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q
index 23076a9..a1be063 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q
@@ -41,7 +41,6 @@ alter table T4 add partition (ds='tomorrow');
 
 create table T5 (a string, b int);
 alter table T5 set fileformat orc;
-alter table T4 partition (ds='tomorrow') set fileformat RCFILE;
 
 create table T7 (a string, b int);
 alter table T7 set location 'file:///tmp';

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/load_orc_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_orc_part.q 
b/ql/src/test/queries/clientpositive/load_orc_part.q
index 2902c72..281ce4b 100644
--- a/ql/src/test/queries/clientpositive/load_orc_part.q
+++ b/ql/src/test/queries/clientpositive/load_orc_part.q
@@ -12,13 +12,3 @@ dfs -ls 
${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=10/;
 load data local inpath '../../data/files/orc_split_elim.orc' overwrite into 
table orc_staging;
 load data inpath '${hiveconf:hive.metastore.warehouse.dir}/orc_staging/' 
overwrite into table orc_test partition (ds='10');
 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=10/;
-
-alter table orc_test add partition(ds='11');
-alter table orc_test partition(ds='11') set fileformat textfile;
-load data local inpath '../../data/files/kv1.txt' into table orc_test 
partition(ds='11');
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=11/;
-
-alter table orc_test add partition(ds='12');
-alter table orc_test partition(ds='12') set fileformat textfile;
-load data local inpath '../../data/files/types/primitives' into table orc_test 
partition(ds='12');
-dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=12/;

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q 
b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
index 4a805a0..7625e6f 100644
--- a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
+++ b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
@@ -1,3 +1,4 @@
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 create table if not exists alltypes (
  bo boolean,
  ti tinyint,
@@ -51,24 +52,12 @@ alter table alltypes_orc change si si bigint;
 alter table alltypes_orc change i i bigint;
 select * from alltypes_orc;
 
-alter table alltypes_orc change l l array<bigint>;
-select * from alltypes_orc;
-
 set hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=none;
-alter table alltypes_orc change si si smallint;
-alter table alltypes_orc change i i int;
 
 explain select ti, si, i, bi from alltypes_orc;
 select ti, si, i, bi from alltypes_orc;
 
-alter table alltypes_orc change si si int;
-select ti, si, i, bi from alltypes_orc;
-
-alter table alltypes_orc change si si bigint;
-alter table alltypes_orc change i i bigint;
-select ti, si, i, bi from alltypes_orc;
-
 set hive.exec.dynamic.partition.mode=nonstrict;
 create table src_part_orc (key int, value string) partitioned by (ds string) 
stored as orc;
 insert overwrite table src_part_orc partition(ds) select key, value, ds from 
srcpart where ds is not null;
@@ -77,3 +66,4 @@ select * from src_part_orc limit 10;
 
 alter table src_part_orc change key key bigint;
 select * from src_part_orc limit 10;
+reset hive.metastore.disallow.incompatible.col.type.changes;

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/orc_schema_evolution.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_schema_evolution.q 
b/ql/src/test/queries/clientpositive/orc_schema_evolution.q
new file mode 100644
index 0000000..285acf4
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_schema_evolution.q
@@ -0,0 +1,39 @@
+set hive.fetch.task.conversion=none;
+create table src_orc (key smallint, val string) stored as orc;
+create table src_orc2 (key smallint, val string) stored as orc;
+
+-- integer type widening
+insert overwrite table src_orc select * from src;
+select sum(hash(*)) from src_orc;
+
+alter table src_orc change key key smallint;
+select sum(hash(*)) from src_orc;
+
+alter table src_orc change key key int;
+select sum(hash(*)) from src_orc;
+
+alter table src_orc change key key bigint;
+select sum(hash(*)) from src_orc;
+
+-- replace columns for adding columns and type widening
+insert overwrite table src_orc2 select * from src;
+select sum(hash(*)) from src_orc2;
+
+alter table src_orc2 replace columns (k smallint, v string);
+select sum(hash(*)) from src_orc2;
+
+alter table src_orc2 replace columns (k int, v string);
+select sum(hash(*)) from src_orc2;
+
+alter table src_orc2 replace columns (k bigint, v string);
+select sum(hash(*)) from src_orc2;
+
+alter table src_orc2 replace columns (k bigint, v string, z int);
+select sum(hash(*)) from src_orc2;
+
+alter table src_orc2 replace columns (k bigint, v string, z bigint);
+select sum(hash(*)) from src_orc2;
+
+alter table src_orc2 replace columns (k bigint, v string, z bigint, y float);
+select sum(hash(*)) from src_orc2;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q
new file mode 100644
index 0000000..480c345
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_part.q
@@ -0,0 +1,173 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=false;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Partitioned
+-- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema 
evolution is always used for ACID.
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) 
clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES 
('transactional'='true');
+
+insert into table partitioned2 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 
'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 
'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 
2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 
'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) 
clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES 
('transactional'='true');
+
+insert into table partitioned4 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 
'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
+---
+CREATE TABLE partitioned5(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned5 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned5 add columns(c int, d string);
+
+insert into table partitioned5 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned5 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned5;
+
+-- UPDATE New Columns
+update partitioned5 set c=99;
+
+select part,a,b,c,d from partitioned5;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
+---
+CREATE TABLE partitioned6(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned6 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned6 add columns(c int, d string);
+
+insert into table partitioned6 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned6 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned6;
+
+-- DELETE where old column
+delete from partitioned6 where a = 2 or a = 4 or a = 6;
+
+select part,a,b,c,d from partitioned6;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column
+---
+CREATE TABLE partitioned7(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned7 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned7 add columns(c int, d string);
+
+insert into table partitioned7 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned7 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned7;
+
+-- DELETE where new column
+delete from partitioned7 where a = 1 or c = 30 or c = 100;
+
+select part,a,b,c,d from partitioned7;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
+DROP TABLE partitioned5;
+DROP TABLE partitioned6;
+DROP TABLE partitioned7;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q
new file mode 100644
index 0000000..61ba005
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_mapwork_table.q
@@ -0,0 +1,131 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=false;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Table
+-- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema 
evolution is always used for ACID.
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) clustered by (a) into 2 buckets STORED AS 
ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) clustered by (a) into 2 buckets 
STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
+---
+CREATE TABLE table5(a INT, b STRING) clustered by (a) into 2 buckets STORED AS 
ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table5 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table5 add columns(c int, d string);
+
+insert into table table5 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table5 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+select a,b,c,d from table5;
+
+-- UPDATE New Columns
+update table5 set c=99;
+
+select a,b,c,d from table5;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
+---
+CREATE TABLE table6(a INT, b STRING) clustered by (a) into 2 buckets STORED AS 
ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table6 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table6 add columns(c int, d string);
+
+insert into table table6 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table6 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+select a,b,c,d from table6;
+
+-- DELETE where old column
+delete from table6 where a = 2 or a = 4 or a = 6;
+
+select a,b,c,d from table6;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column
+---
+CREATE TABLE table7(a INT, b STRING) clustered by (a) into 2 buckets STORED AS 
ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table7 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table7 add columns(c int, d string);
+
+insert into table table7 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table7 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+select a,b,c,d from table7;
+
+-- DELETE where new column
+delete from table7 where a = 1 or c = 30 or c = 100;
+
+select a,b,c,d from table7;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
+DROP TABLE table5;
+DROP TABLE table6;
+DROP TABLE table7;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
new file mode 100644
index 0000000..8f1d369
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
@@ -0,0 +1,173 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=false;
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, ACID Vectorized, MapWork, Partitioned
+-- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema 
evolution is always used for ACID.
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) 
clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES 
('transactional'='true');
+
+insert into table partitioned2 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 
'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 
'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 
2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 
'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) 
clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES 
('transactional'='true');
+
+insert into table partitioned4 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 
'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
+---
+CREATE TABLE partitioned5(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned5 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned5 add columns(c int, d string);
+
+insert into table partitioned5 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned5 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned5;
+
+-- UPDATE New Columns
+update partitioned5 set c=99;
+
+select part,a,b,c,d from partitioned5;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
+---
+CREATE TABLE partitioned6(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned6 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned6 add columns(c int, d string);
+
+insert into table partitioned6 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned6 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned6;
+
+-- DELETE where old column
+delete from partitioned6 where a = 2 or a = 4 or a = 6;
+
+select part,a,b,c,d from partitioned6;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column
+---
+CREATE TABLE partitioned7(a INT, b STRING) PARTITIONED BY(part INT) clustered 
by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned7 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned7 add columns(c int, d string);
+
+insert into table partitioned7 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned7 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned7;
+
+-- DELETE where new column
+delete from partitioned7 where a = 1 or c = 30 or c = 100;
+
+select part,a,b,c,d from partitioned7;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
+DROP TABLE partitioned5;
+DROP TABLE partitioned6;
+DROP TABLE partitioned7;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
new file mode 100644
index 0000000..c901ad4
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
@@ -0,0 +1,131 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=false;
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, ACID Vectorized, MapWork, Table
+-- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema 
evolution is always used for ACID.
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) clustered by (a) into 2 buckets STORED AS 
ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) clustered by (a) into 2 buckets 
STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
+---
+CREATE TABLE table5(a INT, b STRING) clustered by (a) into 2 buckets STORED AS 
ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table5 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table5 add columns(c int, d string);
+
+insert into table table5 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table5 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+select a,b,c,d from table5;
+
+-- UPDATE New Columns
+update table5 set c=99;
+
+select a,b,c,d from table5;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
+---
+CREATE TABLE table6(a INT, b STRING) clustered by (a) into 2 buckets STORED AS 
ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table6 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table6 add columns(c int, d string);
+
+insert into table table6 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table6 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+select a,b,c,d from table6;
+
+-- DELETE where old column
+delete from table6 where a = 2 or a = 4 or a = 6;
+
+select a,b,c,d from table6;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column
+---
+CREATE TABLE table7(a INT, b STRING) clustered by (a) into 2 buckets STORED AS 
ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table7 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table7 add columns(c int, d string);
+
+insert into table table7 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table7 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+select a,b,c,d from table7;
+
+-- DELETE where new column
+delete from table7 where a = 1 or c = 30 or c = 100;
+
+select a,b,c,d from table7;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
+DROP TABLE table5;
+DROP TABLE table6;
+DROP TABLE table7;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_part.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_part.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_part.q
new file mode 100644
index 0000000..cf42e9c
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_part.q
@@ -0,0 +1,97 @@
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Non-Vectorized, FetchWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
ORC;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS ORC;
+
+insert into table partitioned2 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 
'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 
'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
ORC;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 
2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 
'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS ORC;
+
+insert into table partitioned4 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 
'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
new file mode 100644
index 0000000..b239a42
--- /dev/null
+++ 
b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
@@ -0,0 +1,57 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Non-Vectorized, FetchWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS ORC;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS ORC;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_part.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_part.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_part.q
new file mode 100644
index 0000000..c120d60
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_part.q
@@ -0,0 +1,97 @@
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Non-Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
ORC;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS ORC;
+
+insert into table partitioned2 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 
'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 
'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
ORC;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 
2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 
'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS ORC;
+
+insert into table partitioned4 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 
'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
new file mode 100644
index 0000000..ece45eb
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
@@ -0,0 +1,57 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Non-Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS ORC;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS ORC;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_part.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_part.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_part.q
new file mode 100644
index 0000000..8bd6de3
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_part.q
@@ -0,0 +1,97 @@
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
ORC;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS ORC;
+
+insert into table partitioned2 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 
'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 
'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
ORC;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 
2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 
'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS ORC;
+
+insert into table partitioned4 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 
'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
new file mode 100644
index 0000000..cad22d5
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
@@ -0,0 +1,57 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS ORC;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS ORC;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
new file mode 100644
index 0000000..929524b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
@@ -0,0 +1,57 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, FetchWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS TEXTFILE;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
new file mode 100644
index 0000000..929524b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
@@ -0,0 +1,57 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two 
hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS TEXTFILE;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_part.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_part.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_part.q
new file mode 100644
index 0000000..2d78c6d
--- /dev/null
+++ 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_part.q
@@ -0,0 +1,97 @@
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, FetchWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
TEXTFILE;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS TEXTFILE;
+
+insert into table partitioned2 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 
'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 
'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
TEXTFILE;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 
2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 
'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS TEXTFILE;
+
+insert into table partitioned4 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 
'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_table.q
new file mode 100644
index 0000000..731cf77
--- /dev/null
+++ 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_table.q
@@ -0,0 +1,67 @@
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, FetchWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+select a,b from table1;
+
+-- ADD COLUMNS
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+select a,b,c,d from table1;
+
+-- ADD COLUMNS
+alter table table1 add columns(e string);
+
+insert into table table1 values(5, 'new', 100, 'hundred', 'another1'),(6, 
'new', 200, 'two hundred', 'another2');
+
+select a,b,c,d,e from table1;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table3(a smallint, b STRING) STORED AS TEXTFILE;
+
+insert into table table3 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+select a,b from table3;
+
+-- CHANGE COLUMN ... RESTRICT
+alter table table3 change column a a int;
+
+insert into table table3 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+select a,b from table3;
+
+-- ADD COLUMNS ... RESTRICT
+alter table table3 add columns(e string);
+
+insert into table table3 values(5000, 'new', 'another5'),(90000, 'new', 
'another6');
+
+select a,b from table3;
+
+
+-- CHANGE COLUMN ... RESTRICT
+alter table table3 change column a a int;
+
+select a,b from table3;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
+DROP TABLE table3;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_part.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_part.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_part.q
new file mode 100644
index 0000000..5f557c9
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_part.q
@@ -0,0 +1,97 @@
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
TEXTFILE;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 
'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 
'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 
'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS TEXTFILE;
+
+insert into table partitioned2 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 
'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 
'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS 
TEXTFILE;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 
'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 
2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 
'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) 
STORED AS TEXTFILE;
+
+insert into table partitioned4 partition(part=1) values(1000, 
'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 
'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_table.q
----------------------------------------------------------------------
diff --git 
a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_table.q 
b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_table.q
new file mode 100644
index 0000000..155602e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_table.q
@@ -0,0 +1,67 @@
+set hive.cli.print.header=true;
+SET hive.exec.schema.evolution=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 
'original'),(4, 'original');
+
+select a,b from table1;
+
+-- ADD COLUMNS
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), 
(3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+select a,b,c,d from table1;
+
+-- ADD COLUMNS
+alter table table1 add columns(e string);
+
+insert into table table1 values(5, 'new', 100, 'hundred', 'another1'),(6, 
'new', 200, 'two hundred', 'another2');
+
+select a,b,c,d,e from table1;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table3(a smallint, b STRING) STORED AS TEXTFILE;
+
+insert into table table3 values(1000, 'original'),(6737, 'original'), ('3', 
'original'),('4', 'original');
+
+select a,b from table3;
+
+-- CHANGE COLUMN ... RESTRICT
+alter table table3 change column a a int;
+
+insert into table table3 values(72909, 'new'),(200, 'new'), (32768, 
'new'),(40000, 'new');
+
+select a,b from table3;
+
+-- ADD COLUMNS ... RESTRICT
+alter table table3 add columns(e string);
+
+insert into table table3 values(5000, 'new', 'another5'),(90000, 'new', 
'another6');
+
+select a,b from table3;
+
+
+-- CHANGE COLUMN ... RESTRICT
+alter table table3 change column a a int;
+
+select a,b from table3;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
+DROP TABLE table3;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_change_fileformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_change_fileformat.q.out 
b/ql/src/test/results/clientnegative/orc_change_fileformat.q.out
new file mode 100644
index 0000000..db454fe
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_change_fileformat.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc set fileformat textfile
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Changing file format (from ORC) is not 
supported for table default.src_orc

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
----------------------------------------------------------------------
diff --git 
a/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out 
b/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
new file mode 100644
index 0000000..c29fe79
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_change_fileformat_acid.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc set fileformat textfile
+PREHOOK: type: ALTERTABLE_FILEFORMAT
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Changing file format (from ORC) is not 
supported for table default.src_orc

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_change_serde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_change_serde.q.out 
b/ql/src/test/results/clientnegative/orc_change_serde.q.out
new file mode 100644
index 0000000..7f882b5
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_change_serde.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc set serde 
'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+PREHOOK: type: ALTERTABLE_SERIALIZER
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Changing SerDe (from OrcSerde) is not 
supported for table default.src_orc. File format may be incompatible

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out 
b/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
new file mode 100644
index 0000000..01fb870
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_change_serde_acid.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc set serde 
'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+PREHOOK: type: ALTERTABLE_SERIALIZER
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Changing SerDe (from OrcSerde) is not 
supported for table default.src_orc. File format may be incompatible

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out 
b/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out
new file mode 100644
index 0000000..c581f4e
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_reorder_columns1.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc change key k tinyint first
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Reordering columns is not supported for 
table default.src_orc. SerDe may be incompatible

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out 
b/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out
new file mode 100644
index 0000000..5186081
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_reorder_columns1_acid.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc change key k tinyint first
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Reordering columns is not supported for 
table default.src_orc. SerDe may be incompatible

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out 
b/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out
new file mode 100644
index 0000000..54dcdec
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_reorder_columns2.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc change key k tinyint after val
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Reordering columns is not supported for 
table default.src_orc. SerDe may be incompatible

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out 
b/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out
new file mode 100644
index 0000000..7b65d7c
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_reorder_columns2_acid.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc change key k tinyint after val
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Reordering columns is not supported for 
table default.src_orc. SerDe may be incompatible

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_replace_columns1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_replace_columns1.q.out 
b/ql/src/test/results/clientnegative/orc_replace_columns1.q.out
new file mode 100644
index 0000000..13f3f14
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_replace_columns1.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc replace columns (k int)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Replacing columns cannot drop columns 
for table default.src_orc. SerDe may be incompatible

http://git-wip-us.apache.org/repos/asf/hive/blob/0fd9069e/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out 
b/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out
new file mode 100644
index 0000000..ec09d4b
--- /dev/null
+++ b/ql/src/test/results/clientnegative/orc_replace_columns1_acid.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_orc
+POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc 
TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_orc
+PREHOOK: query: alter table src_orc replace columns (k int)
+PREHOOK: type: ALTERTABLE_REPLACECOLS
+PREHOOK: Input: default@src_orc
+PREHOOK: Output: default@src_orc
+FAILED: Execution Error, return code 1 from 
org.apache.hadoop.hive.ql.exec.DDLTask. Replacing columns cannot drop columns 
for table default.src_orc. SerDe may be incompatible

Reply via email to