Posted to commits@hive.apache.org by om...@apache.org on 2015/11/18 23:41:00 UTC

[18/34] hive git commit: HIVE-11981: ORC Schema Evolution Issues (Vectorized, ACID, and Non-Vectorized) (Matt McCline, reviewed by Prasanth J)

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
new file mode 100644
index 0000000..6b75505
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_part.q
@@ -0,0 +1,171 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, ACID Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned2 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned4 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
+---
+CREATE TABLE partitioned5(a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned5 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned5 add columns(c int, d string);
+
+insert into table partitioned5 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned5 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned5;
+
+-- UPDATE New Columns
+update partitioned5 set c=99;
+
+select part,a,b,c,d from partitioned5;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
+---
+CREATE TABLE partitioned6(a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned6 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned6 add columns(c int, d string);
+
+insert into table partitioned6 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned6 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned6;
+
+-- DELETE where old column
+delete from partitioned6 where a = 2 or a = 4 or a = 6;
+
+select part,a,b,c,d from partitioned6;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column
+---
+CREATE TABLE partitioned7(a INT, b STRING) PARTITIONED BY(part INT) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table partitioned7 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned7 add columns(c int, d string);
+
+insert into table partitioned7 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned7 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+select part,a,b,c,d from partitioned7;
+
+-- DELETE where new column
+delete from partitioned7 where a = 1 or c = 30 or c == 100;
+
+select part,a,b,c,d from partitioned7;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
+DROP TABLE partitioned5;
+DROP TABLE partitioned6;
+DROP TABLE partitioned7;
\ No newline at end of file
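
Context for the permutation SELECTs in the file above: with a non-CASCADE ALTER TABLE ... ADD COLUMNS, rows written before the ALTER carry no data for the new columns, so the reader is expected to surface NULL for c and d on those rows. A rough spot-check one could run by hand while the partitioned1 table from this test still exists (a sketch, not part of the patch):

    -- part=1 holds the four pre-ALTER rows, so they should report NULL for c;
    -- part=2 was only populated after the ALTER, so it should not appear here.
    select part, count(*) as rows_with_null_c
    from partitioned1
    where c is null
    group by part;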

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
new file mode 100644
index 0000000..0edca16
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_mapwork_table.q
@@ -0,0 +1,129 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, ACID Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... UPDATE New Columns
+---
+CREATE TABLE table5(a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table5 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table5 add columns(c int, d string);
+
+insert into table table5 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table5 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+select a,b,c,d from table5;
+
+-- UPDATE New Columns
+update table5 set c=99;
+
+select a,b,c,d from table5;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where old column
+---
+CREATE TABLE table6(a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table6 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table6 add columns(c int, d string);
+
+insert into table table6 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table6 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+select a,b,c,d from table6;
+
+-- DELETE where old column
+delete from table6 where a = 2 or a = 4 or a = 6;
+
+select a,b,c,d from table6;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DELETE where new column
+---
+CREATE TABLE table7(a INT, b STRING) clustered by (a) into 2 buckets STORED AS ORC TBLPROPERTIES ('transactional'='true');
+
+insert into table table7 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table7 add columns(c int, d string);
+
+insert into table table7 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table7 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+select a,b,c,d from table7;
+
+-- DELETE where new column
+delete from table7 where a = 1 or c = 30 or c == 100;
+
+select a,b,c,d from table7;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
+DROP TABLE table5;
+DROP TABLE table6;
+DROP TABLE table7;
\ No newline at end of file
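
The CHANGE COLUMN sections widen column a from smallint to int before inserting values such as 32768, 40000, 72909 and 90000, all of which lie outside the smallint range of -32,768 to 32,767 quoted in the comments. A quick manual check that the widened column holds them, run before the final DROPs (a sketch against table2 from this test, not part of the patch):

    -- After the widening, values too large for smallint should be stored and read back intact.
    select min(a) as min_a, max(a) as max_a from table2;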

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_part.q
new file mode 100644
index 0000000..06dc5f8
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_part.q
@@ -0,0 +1,96 @@
+set hive.cli.print.header=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Non-Vectorized, FetchWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned2 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned4 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file
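
This FetchWork variant sets hive.fetch.task.conversion=more so the simple projections run through the fetch path instead of a launched map task, which is the read path the file name refers to. To confirm which path a given query takes, one could inspect its plan while the tables still exist (a sketch, not part of the patch):

    -- With conversion=more a plain projection is expected to show only a Fetch stage;
    -- with conversion=none the same query would show a TableScan inside a launched map task.
    explain select part,a,b from partitioned1;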

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
new file mode 100644
index 0000000..04189cd
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_fetchwork_table.q
@@ -0,0 +1,56 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Non-Vectorized, FetchWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS ORC;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS ORC;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_part.q
new file mode 100644
index 0000000..8b413cc
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_part.q
@@ -0,0 +1,96 @@
+set hive.cli.print.header=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Non-Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned2 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned4 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file
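
In the DYNAMIC INSERT sections the partition value is supplied as the trailing column of each VALUES tuple, and hive.exec.dynamic.partition.mode=nonstrict allows the whole partition spec to be resolved at run time. To see which partitions such an insert actually created, one could run the following before the DROPs at the end (a sketch against partitioned3 from this test, not part of the patch):

    -- Expected to list part=1 and part=2 once the dynamic insert has run.
    show partitions partitioned3;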

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
new file mode 100644
index 0000000..db57965
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_nonvec_mapwork_table.q
@@ -0,0 +1,56 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Non-Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS ORC;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS ORC;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_part.q
new file mode 100644
index 0000000..f56668f
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_part.q
@@ -0,0 +1,96 @@
+set hive.cli.print.header=true;
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned2 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS ORC;
+
+insert into table partitioned4 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file
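
The vectorized variants are meant to return exactly the same rows as their non-vectorized counterparts; schema evolution only changes how the ORC reader maps file columns onto the current table schema, not the query results. A manual way to compare the two read paths on the same data, run before the DROPs (a sketch, not part of the patch):

    -- Both runs should return the same multiset of rows.
    set hive.vectorized.execution.enabled=false;
    select part,a,b,c,d from partitioned1;
    set hive.vectorized.execution.enabled=true;
    select part,a,b,c,d from partitioned1;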

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
new file mode 100644
index 0000000..d80d7be
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_vec_mapwork_table.q
@@ -0,0 +1,56 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: ORC, Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS ORC;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS ORC;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
new file mode 100644
index 0000000..0c85044
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_fetchwork_table.q
@@ -0,0 +1,56 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, FetchWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS TEXTFILE;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
new file mode 100644
index 0000000..0c85044
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_mapwork_table.q
@@ -0,0 +1,56 @@
+set hive.cli.print.header=true;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table table1 values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select a,b from table1;
+select a,b,c from table1;
+select a,b,c,d from table1;
+select a,c,d from table1;
+select a,d from table1;
+select c from table1;
+select d from table1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table2(a smallint, b STRING) STORED AS TEXTFILE;
+
+insert into table table2 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table table2 change column a a int;
+
+insert into table table2 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table table2 values(5000, 'new'),(90000, 'new');
+
+select a,b from table2;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_part.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_part.q
new file mode 100644
index 0000000..cd00afe
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_part.q
@@ -0,0 +1,96 @@
+set hive.cli.print.header=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, FetchWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+insert into table partitioned2 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+insert into table partitioned4 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file
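
For the TEXTFILE variants, schema evolution is handled by the row SerDe rather than an ORC reader: rows written before ADD COLUMNS simply contain fewer delimited fields and the missing trailing fields are read back as NULL, while CHANGE COLUMN just re-parses the stored text with the new type. A small metadata check after the ALTERs, run before the final DROPs (a sketch against the tables in this test, not part of the patch):

    -- partitioned1 should now list the added c and d columns; partitioned2 should show a as int.
    describe partitioned1;
    describe partitioned2;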

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_table.q
new file mode 100644
index 0000000..67c2fc3
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_fetchwork_table.q
@@ -0,0 +1,66 @@
+set hive.cli.print.header=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=more;
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, FetchWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+select a,b from table1;
+
+-- ADD COLUMNS
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+select a,b,c,d from table1;
+
+-- ADD COLUMNS
+alter table table1 add columns(e string);
+
+insert into table table1 values(5, 'new', 100, 'hundred', 'another1'),(6, 'new', 200, 'two hundred', 'another2');
+
+select a,b,c,d,e from table1;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table3(a smallint, b STRING) STORED AS TEXTFILE;
+
+insert into table table3 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+select a,b from table3;
+
+-- CHANGE COLUMN ... RESTRICT
+alter table table3 change column a a int;
+
+insert into table table3 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+select a,b from table3;
+
+-- ADD COLUMNS ... RESTRICT
+alter table table3 add columns(e string);
+
+insert into table table3 values(5000, 'new', 'another5'),(90000, 'new', 'another6');
+
+select a,b from table3;
+
+
+-- CHANGE COLUMN ... RESTRICT
+alter table table3 change column a a int;
+
+select a,b from table3;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
+DROP TABLE table3;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_part.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_part.q
new file mode 100644
index 0000000..5d0318d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_part.q
@@ -0,0 +1,96 @@
+set hive.cli.print.header=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT
+---
+CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned1 add columns(c int, d string);
+
+insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred');
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned1;
+select part,a,b,c from partitioned1;
+select part,a,b,c,d from partitioned1;
+select part,a,c,d from partitioned1;
+select part,a,d from partitioned1;
+select part,c from partitioned1;
+select part,d from partitioned1;
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... STATIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned2(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+insert into table partitioned2 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned2 change column a a int;
+
+insert into table partitioned2 partition(part=2) values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+insert into table partitioned2 partition(part=1) values(5000, 'new'),(90000, 'new');
+
+select part,a,b from partitioned2;
+
+
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... DYNAMIC INSERT
+---
+CREATE TABLE partitioned3(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+insert into table partitioned3 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+-- Table-Non-Cascade ADD COLUMNS ...
+alter table partitioned3 add columns(c int, d string);
+
+insert into table partitioned3 partition(part) values(1, 'new', 10, 'ten', 2),(2, 'new', 20, 'twenty', 2), (3, 'new', 30, 'thirty', 2),(4, 'new', 40, 'forty', 2),
+    (5, 'new', 100, 'hundred', 1),(6, 'new', 200, 'two hundred', 1);
+
+-- SELECT permutation columns to make sure NULL defaulting works right
+select part,a,b from partitioned3;
+select part,a,b,c from partitioned3;
+select part,a,b,c,d from partitioned3;
+select part,a,c,d from partitioned3;
+select part,a,d from partitioned3;
+select part,c from partitioned3;
+select part,d from partitioned3;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN ... DYNAMIC INSERT
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE partitioned4(a smallint, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE;
+
+insert into table partitioned4 partition(part=1) values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+-- Table-Non-Cascade CHANGE COLUMNS ...
+alter table partitioned4 change column a a int;
+
+insert into table partitioned4 partition(part) values(72909, 'new', 2),(200, 'new', 2), (32768, 'new', 2),(40000, 'new', 2),
+    (5000, 'new', 1),(90000, 'new', 1);
+
+select part,a,b from partitioned4;
+
+
+DROP TABLE partitioned1;
+DROP TABLE partitioned2;
+DROP TABLE partitioned3;
+DROP TABLE partitioned4;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/30f20e99/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_table.q b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_table.q
new file mode 100644
index 0000000..499d36d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schema_evol_text_nonvec_mapwork_table.q
@@ -0,0 +1,66 @@
+set hive.cli.print.header=true;
+SET hive.vectorized.execution.enabled=false;
+set hive.fetch.task.conversion=none;
+
+-- SORT_QUERY_RESULTS
+--
+-- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Table
+--
+--
+-- SECTION VARIATION: ALTER TABLE ADD COLUMNS
+---
+CREATE TABLE table1(a INT, b STRING) STORED AS TEXTFILE;
+
+insert into table table1 values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original');
+
+select a,b from table1;
+
+-- ADD COLUMNS
+alter table table1 add columns(c int, d string);
+
+insert into table table1 values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty');
+
+select a,b,c,d from table1;
+
+-- ADD COLUMNS
+alter table table1 add columns(e string);
+
+insert into table table1 values(5, 'new', 100, 'hundred', 'another1'),(6, 'new', 200, 'two hundred', 'another2');
+
+select a,b,c,d,e from table1;
+
+
+--
+-- SECTION VARIATION: ALTER TABLE CHANGE COLUMN
+-- smallint = (2-byte signed integer, from -32,768 to 32,767)
+--
+CREATE TABLE table3(a smallint, b STRING) STORED AS TEXTFILE;
+
+insert into table table3 values(1000, 'original'),(6737, 'original'), ('3', 'original'),('4', 'original');
+
+select a,b from table3;
+
+-- CHANGE COLUMN ... RESTRICT
+alter table table3 change column a a int;
+
+insert into table table3 values(72909, 'new'),(200, 'new'), (32768, 'new'),(40000, 'new');
+
+select a,b from table3;
+
+-- ADD COLUMNS ... RESTRICT
+alter table table3 add columns(e string);
+
+insert into table table3 values(5000, 'new', 'another5'),(90000, 'new', 'another6');
+
+select a,b from table3;
+
+
+-- CHANGE COLUMN ... RESTRICT
+alter table table3 change column a a int;
+
+select a,b from table3;
+
+
+DROP TABLE table1;
+DROP TABLE table2;
+DROP TABLE table3;
\ No newline at end of file
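
In these last text-table variants, the final ALTER re-declares column a as int, which is a no-op at that point since the earlier CHANGE COLUMN already widened it from smallint. To read back the fully evolved schema, including the e column added mid-test, one could project it explicitly before the DROPs (a sketch against table3 from this test, not part of the patch):

    -- Rows inserted before e was added are expected to show NULL for it.
    select a,b,e from table3;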