Posted to commits@hive.apache.org by jc...@apache.org on 2019/01/04 00:24:33 UTC

[34/35] hive git commit: HIVE-16957: Support CTAS for auto gather column stats (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
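
The diffs below remove the explicit "analyze table ... compute statistics for columns" statements from the CTAS and materialized view q-tests: with this change, column statistics are gathered automatically as part of CREATE TABLE AS SELECT (and materialized view creation) when hive.stats.column.autogather is enabled. A minimal sketch of the new behavior, with illustrative table and column names:

set hive.stats.column.autogather=true;

-- Column stats for k and v are now collected by the CTAS job itself:
create table ctas_sketch as select key as k, value as v from src;

-- Previously needed to populate column stats; now redundant:
-- analyze table ctas_sketch compute statistics for columns;

-- Column statistics (numDVs, numNulls, etc.) should already be populated:
describe formatted ctas_sketch k;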

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_6.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_6.q
index 23fc3c1..5ff60bc 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_6.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_6.q
@@ -15,7 +15,6 @@ create table emps (
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into emps values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500),
   (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (120, 10, 'Bill', 10000, 250);
-analyze table emps compute statistics for columns;
 
 create table depts (
   deptno int,
@@ -23,21 +22,18 @@ create table depts (
   locationid int)
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into depts values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20);
-analyze table depts compute statistics for columns;
 
 create table dependents (
   empid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into dependents values (10, 'Michael'), (20, 'Jane');
-analyze table dependents compute statistics for columns;
 
 create table locations (
   locationid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into locations values (10, 'San Francisco'), (20, 'San Diego');
-analyze table locations compute statistics for columns;
 
 alter table emps add constraint pk1 primary key (empid) disable novalidate rely;
 alter table depts add constraint pk2 primary key (deptno) disable novalidate rely;
@@ -55,7 +51,6 @@ alter table depts change column locationid locationid int constraint nn2 not nul
 create materialized view mv1 as
 select name, deptno, salary, count(*) + 1 as c, sum(empid) as s
 from emps where deptno >= 10 group by name, deptno, salary;
-analyze table mv1 compute statistics for columns;
 
 explain
 select salary, sum(empid) + 1 as s
@@ -70,7 +65,6 @@ drop materialized view mv1;
 create materialized view mv1 as
 select name, deptno, salary, count(*) + 1 as c, sum(empid) as s
 from emps where deptno >= 15 group by name, deptno, salary;
-analyze table mv1 compute statistics for columns;
 
 explain
 select salary + 1, sum(empid) + 1 as s
@@ -86,7 +80,6 @@ create materialized view mv1 as
 select depts.name
 from emps
 join depts on (emps.deptno = depts.deptno);
-analyze table mv1 compute statistics for columns;
 
 explain
 select dependents.empid
@@ -106,7 +99,6 @@ create materialized view mv1 as
 select depts.name
 from emps
 join depts on (emps.deptno = depts.deptno);
-analyze table mv1 compute statistics for columns;
 
 explain
 select dependents.empid
@@ -127,7 +119,6 @@ drop materialized view mv1;
 create materialized view mv1 as
 select emps.empid, emps.deptno, emps.name as name1, emps.salary, emps.commission, dependents.name as name2
 from emps join dependents on (emps.empid = dependents.empid);
-analyze table mv1 compute statistics for columns;
 
 explain
 select emps.empid, dependents.empid, emps.deptno

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q
index 3d1cedc..9a0e3c8 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_7.q
@@ -15,7 +15,6 @@ create table emps_n8 (
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into emps_n8 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500),
   (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250);
-analyze table emps_n8 compute statistics for columns;
 
 create table depts_n6 (
   deptno int,
@@ -23,21 +22,18 @@ create table depts_n6 (
   locationid int)
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into depts_n6 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20);
-analyze table depts_n6 compute statistics for columns;
 
 create table dependents_n4 (
   empid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into dependents_n4 values (10, 'Michael'), (20, 'Jane');
-analyze table dependents_n4 compute statistics for columns;
 
 create table locations_n4 (
   locationid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into locations_n4 values (10, 'San Francisco'), (20, 'San Diego');
-analyze table locations_n4 compute statistics for columns;
 
 alter table emps_n8 add constraint pk1 primary key (empid) disable novalidate rely;
 alter table depts_n6 add constraint pk2 primary key (deptno) disable novalidate rely;
@@ -60,7 +56,6 @@ join locations_n4 on (locations_n4.name = dependents_n4.name)
 join emps_n8 on (emps_n8.deptno = depts_n6.deptno)
 where depts_n6.deptno > 11
 group by depts_n6.deptno, dependents_n4.empid;
-analyze table mv1_n4 compute statistics for columns;
 
 explain
 select dependents_n4.empid, depts_n6.deptno
@@ -90,7 +85,6 @@ join locations_n4 on (locations_n4.name = dependents_n4.name)
 join emps_n8 on (emps_n8.deptno = depts_n6.deptno)
 where depts_n6.deptno > 11 and depts_n6.deptno < 19
 group by depts_n6.deptno, dependents_n4.empid;
-analyze table mv1_n4 compute statistics for columns;
 
 explain
 select dependents_n4.empid, count(emps_n8.salary) + 1
@@ -119,7 +113,6 @@ from depts_n6
 join dependents_n4 on (depts_n6.name = dependents_n4.name)
 join emps_n8 on (emps_n8.deptno = depts_n6.deptno)
 where depts_n6.deptno >= 10;
-analyze table mv1_n4 compute statistics for columns;
 
 explain
 select dependents_n4.empid

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_8.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_8.q
index cfcfddc..919a356 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_8.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_8.q
@@ -18,7 +18,6 @@ stored AS ORC
 TBLPROPERTIES("transactional"="true");
 insert into table source_table_001
   values ('2010-10-10', 1, 1, 'env', 1, 1);
-analyze table source_table_001 compute statistics for columns;
 
 CREATE MATERIALIZED VIEW source_table_001_mv AS
 SELECT
@@ -27,7 +26,6 @@ SUM(A.UP_VOLUME) AS UP_VOLUME_SUM,
 A.MY_DATE,A.MY_ID2,A.ENVIRONMENT
 from source_table_001 AS A
 group by A.MY_ID,A.MY_ID2,A.ENVIRONMENT,A.MY_DATE;
-analyze table source_table_001_mv compute statistics for columns;
 
 
 explain

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q
index 18d5cec..40c4117 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_9.q
@@ -18,7 +18,6 @@ stored AS ORC
 TBLPROPERTIES("transactional"="true");
 insert into table source_table_001_n0
   values ('2010-10-10 00:00:00', 1, 1, 'env', 1, 1);
-analyze table source_table_001_n0 compute statistics for columns;
 
 CREATE MATERIALIZED VIEW source_table_001_mv_n0 AS
 SELECT
@@ -27,7 +26,6 @@ SUM(A.UP_VOLUME) AS UP_VOLUME_SUM,
 A.MY_ID,A.MY_DATE,A.MY_ID2,A.ENVIRONMENT
 from source_table_001_n0 AS A
 group by A.MY_ID,A.MY_ID2,A.ENVIRONMENT,A.MY_DATE;
-analyze table source_table_001_mv_n0 compute statistics for columns;
 
 explain
 select
@@ -45,7 +43,6 @@ SUM(A.UP_VOLUME) AS UP_VOLUME_SUM,
 A.MY_ID,FLOOR(A.MY_DATE to hour),A.MY_ID2,A.ENVIRONMENT
 from source_table_001_n0 AS A
 group by A.MY_ID,A.MY_ID2,A.ENVIRONMENT,FLOOR(A.MY_DATE to hour);
-analyze table source_table_001_mv_n0 compute statistics for columns;
 
 explain
 select

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_empty.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_empty.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_empty.q
index 9ae1d4e..db56a38 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_empty.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_empty.q
@@ -13,7 +13,6 @@ create table emps_mv_rewrite_empty (
   salary float,
   commission int)
 stored as orc TBLPROPERTIES ('transactional'='true');
-analyze table emps_mv_rewrite_empty compute statistics for columns;
 
 create materialized view emps_mv_rewrite_empty_mv1 as
 select * from emps_mv_rewrite_empty where empid < 150;

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt.q
index 8de9c70..353cef8 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt.q
@@ -15,7 +15,6 @@ create table emps_n30 (
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into emps_n30 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500),
   (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (120, 10, 'Bill', 10000, 250);
-analyze table emps_n30 compute statistics for columns;
 
 create table depts_n20 (
   deptno int,
@@ -23,21 +22,18 @@ create table depts_n20 (
   locationid int)
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into depts_n20 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20);
-analyze table depts_n20 compute statistics for columns;
 
 create table dependents_n20 (
   empid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into dependents_n20 values (10, 'Michael'), (20, 'Jane');
-analyze table dependents_n20 compute statistics for columns;
 
 create table locations_n20 (
   locationid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into locations_n20 values (10, 'San Francisco'), (20, 'San Diego');
-analyze table locations_n20 compute statistics for columns;
 
 alter table emps_n30 add constraint pk1 primary key (empid) disable novalidate rely;
 alter table depts_n20 add constraint pk2 primary key (deptno) disable novalidate rely;
@@ -51,7 +47,6 @@ alter table depts_n20 add constraint fk2 foreign key (locationid) references loc
 create materialized view mv1_n20 as
 select deptno, name, salary, commission
 from emps_n30;
-analyze table mv1_n20 compute statistics for columns;
 
 explain
 select emps_n30.name, emps_n30.salary, emps_n30.commission
@@ -69,7 +64,6 @@ create materialized view mv1_n20 as
 select empid, emps_n30.deptno, count(*) as c, sum(empid) as s
 from emps_n30 join depts_n20 using (deptno)
 group by empid, emps_n30.deptno;
-analyze table mv1_n20 compute statistics for columns;
 
 explain
 select depts_n20.deptno, count(*) as c, sum(empid) as s
@@ -88,7 +82,6 @@ select dependents_n20.empid, emps_n30.deptno, sum(salary) as s
 from emps_n30
 join dependents_n20 on (emps_n30.empid = dependents_n20.empid)
 group by dependents_n20.empid, emps_n30.deptno;
-analyze table mv1_n20 compute statistics for columns;
 
 explain
 select dependents_n20.empid, sum(salary) as s
@@ -109,7 +102,6 @@ drop materialized view mv1_n20;
 create materialized view mv1_n20 as
 select emps_n30.empid, emps_n30.deptno, emps_n30.name as name1, emps_n30.salary, emps_n30.commission, dependents_n20.name as name2
 from emps_n30 join dependents_n20 on (emps_n30.empid = dependents_n20.empid);
-analyze table mv1_n20 compute statistics for columns;
 
 explain
 select emps_n30.empid, dependents_n20.empid, emps_n30.deptno

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt_2.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt_2.q
index a137230..a5bb2c0 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt_2.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_no_join_opt_2.q
@@ -15,7 +15,6 @@ create table emps_n30 (
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into emps_n30 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500),
   (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (120, 10, 'Bill', 10000, 250);
-analyze table emps_n30 compute statistics for columns;
 
 create table depts_n20 (
   deptno int,
@@ -23,21 +22,18 @@ create table depts_n20 (
   locationid int)
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into depts_n20 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20);
-analyze table depts_n20 compute statistics for columns;
 
 create table dependents_n20 (
   empid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into dependents_n20 values (10, 'Michael'), (20, 'Jane');
-analyze table dependents_n20 compute statistics for columns;
 
 create table locations_n20 (
   locationid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into locations_n20 values (10, 'San Francisco'), (20, 'San Diego');
-analyze table locations_n20 compute statistics for columns;
 
 alter table emps_n30 add constraint pk1 primary key (empid) disable novalidate rely;
 alter table depts_n20 add constraint pk2 primary key (deptno) disable novalidate rely;
@@ -50,7 +46,6 @@ alter table depts_n20 add constraint fk2 foreign key (locationid) references loc
 -- EXAMPLE 1
 create materialized view mv1_part_n2 partitioned on (deptno) as
 select * from emps_n30 where empid < 150;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select *
@@ -67,7 +62,6 @@ drop materialized view mv1_part_n2;
 create materialized view mv1_part_n2 partitioned on (deptno) as
 select deptno, name, salary, commission
 from emps_n30;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select emps_n30.name, emps_n30.salary, emps_n30.commission
@@ -83,7 +77,6 @@ drop materialized view mv1_part_n2;
 -- EXAMPLE 4
 create materialized view mv1_part_n2 partitioned on (deptno) as
 select * from emps_n30 where empid < 200;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select * from emps_n30 where empid > 120
@@ -97,7 +90,6 @@ drop materialized view mv1_part_n2;
 -- EXAMPLE 5
 create materialized view mv1_part_n2 partitioned on (name) as
 select name, salary from emps_n30 group by name, salary;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select name, salary from emps_n30 group by name, salary;
@@ -109,7 +101,6 @@ drop materialized view mv1_part_n2;
 -- EXAMPLE 6
 create materialized view mv1_part_n2 partitioned on (name) as
 select name, salary from emps_n30 group by name, salary;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select name from emps_n30 group by name;
@@ -121,7 +112,6 @@ drop materialized view mv1_part_n2;
 -- EXAMPLE 7
 create materialized view mv1_part_n2 partitioned on (name) as
 select name, salary from emps_n30 where deptno = 10 group by name, salary;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select name from emps_n30 where deptno = 10 group by name;
@@ -134,7 +124,6 @@ drop materialized view mv1_part_n2;
 create materialized view mv1_part_n2 partitioned on (name) as
 select name, salary, count(*) as c, sum(empid) as s
 from emps_n30 group by name, salary;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select name from emps_n30 group by name;

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_1.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_1.q
index e6980c0..3fecac1 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_1.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_1.q
@@ -16,7 +16,6 @@ create table emps_n30 (
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into emps_n30 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500),
   (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (120, 10, 'Bill', 10000, 250);
-analyze table emps_n30 compute statistics for columns;
 
 create table depts_n20 (
   deptno int,
@@ -24,21 +23,18 @@ create table depts_n20 (
   locationid int)
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into depts_n20 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20);
-analyze table depts_n20 compute statistics for columns;
 
 create table dependents_n20 (
   empid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into dependents_n20 values (10, 'Michael'), (20, 'Jane');
-analyze table dependents_n20 compute statistics for columns;
 
 create table locations_n20 (
   locationid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into locations_n20 values (10, 'San Francisco'), (20, 'San Diego');
-analyze table locations_n20 compute statistics for columns;
 
 alter table emps_n30 add constraint pk1 primary key (empid) disable novalidate rely;
 alter table depts_n20 add constraint pk2 primary key (deptno) disable novalidate rely;
@@ -51,7 +47,6 @@ alter table depts_n20 add constraint fk2 foreign key (locationid) references loc
 -- EXAMPLE 1
 create materialized view mv1_part_n2 partitioned on (deptno) as
 select * from emps_n30 where empid < 150;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select *
@@ -68,7 +63,6 @@ drop materialized view mv1_part_n2;
 create materialized view mv1_part_n2 partitioned on (deptno) as
 select deptno, name, salary, commission
 from emps_n30;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select emps_n30.name, emps_n30.salary, emps_n30.commission
@@ -84,7 +78,6 @@ drop materialized view mv1_part_n2;
 -- EXAMPLE 4
 create materialized view mv1_part_n2 partitioned on (deptno) as
 select * from emps_n30 where empid < 200;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select * from emps_n30 where empid > 120
@@ -98,7 +91,6 @@ drop materialized view mv1_part_n2;
 -- EXAMPLE 5
 create materialized view mv1_part_n2 partitioned on (name) as
 select name, salary from emps_n30 group by name, salary;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select name, salary from emps_n30 group by name, salary;
@@ -110,7 +102,6 @@ drop materialized view mv1_part_n2;
 -- EXAMPLE 6
 create materialized view mv1_part_n2 partitioned on (name) as
 select name, salary from emps_n30 group by name, salary;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select name from emps_n30 group by name;
@@ -122,7 +113,6 @@ drop materialized view mv1_part_n2;
 -- EXAMPLE 7
 create materialized view mv1_part_n2 partitioned on (name) as
 select name, salary from emps_n30 where deptno = 10 group by name, salary;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select name from emps_n30 where deptno = 10 group by name;
@@ -135,7 +125,6 @@ drop materialized view mv1_part_n2;
 create materialized view mv1_part_n2 partitioned on (name) as
 select name, salary, count(*) as c, sum(empid) as s
 from emps_n30 group by name, salary;
-analyze table mv1_part_n2 compute statistics for columns;
 
 explain
 select name from emps_n30 group by name;

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q
index b2e6ebd..8d6df04 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_part_2.q
@@ -16,7 +16,6 @@ create table emps_n00 (
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into emps_n00 values (100, 10, 'Bill', 10000, 1000), (200, 20, 'Eric', 8000, 500),
   (150, 10, 'Sebastian', 7000, null), (110, 10, 'Theodore', 10000, 250), (110, 10, 'Bill', 10000, 250);
-analyze table emps_n00 compute statistics for columns;
 
 create table depts_n00 (
   deptno int,
@@ -24,21 +23,18 @@ create table depts_n00 (
   locationid int)
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into depts_n00 values (10, 'Sales', 10), (30, 'Marketing', null), (20, 'HR', 20);
-analyze table depts_n00 compute statistics for columns;
 
 create table dependents_n00 (
   empid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into dependents_n00 values (10, 'Michael'), (10, 'Jane');
-analyze table dependents_n00 compute statistics for columns;
 
 create table locations_n00 (
   locationid int,
   name varchar(256))
 stored as orc TBLPROPERTIES ('transactional'='true');
 insert into locations_n00 values (10, 'San Francisco'), (10, 'San Diego');
-analyze table locations_n00 compute statistics for columns;
 
 alter table emps_n00 add constraint pk1 primary key (empid) disable novalidate rely;
 alter table depts_n00 add constraint pk2 primary key (deptno) disable novalidate rely;
@@ -53,7 +49,6 @@ create materialized view mv1_part_n0 partitioned on (deptno) as
 select empid, depts_n00.deptno as deptno from emps_n00
 join depts_n00 using (deptno) where depts_n00.deptno > 10
 group by empid, depts_n00.deptno;
-analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select empid from emps_n00
@@ -71,7 +66,6 @@ create materialized view mv1_part_n0 partitioned on (deptno) as
 select depts_n00.deptno as deptno, empid from depts_n00
 join emps_n00 using (deptno) where depts_n00.deptno > 10
 group by empid, depts_n00.deptno;
-analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select empid from emps_n00
@@ -89,7 +83,6 @@ create materialized view mv1_part_n0 partitioned on (deptno) as
 select empid, depts_n00.deptno as deptno from emps_n00
 join depts_n00 using (deptno) where emps_n00.deptno > 10
 group by empid, depts_n00.deptno;
-analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select empid from emps_n00
@@ -107,7 +100,6 @@ create materialized view mv1_part_n0 partitioned on (deptno) as
 select depts_n00.deptno as deptno, emps_n00.empid from depts_n00
 join emps_n00 using (deptno) where emps_n00.empid > 10
 group by depts_n00.deptno, emps_n00.empid;
-analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select depts_n00.deptno from depts_n00
@@ -125,7 +117,6 @@ create materialized view mv1_part_n0 partitioned on (deptno) as
 select depts_n00.deptno as deptno, emps_n00.empid from depts_n00
 join emps_n00 using (deptno) where emps_n00.empid > 10
 group by depts_n00.deptno, emps_n00.empid;
-analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select depts_n00.deptno from depts_n00
@@ -144,7 +135,6 @@ select depts_n00.name, dependents_n00.name as name2, emps_n00.deptno, depts_n00.
 from depts_n00, dependents_n00, emps_n00
 where depts_n00.deptno > 10
 group by depts_n00.name, dependents_n00.name, emps_n00.deptno, depts_n00.deptno, dependents_n00.empid;
-analyze table mv1_part_n0 compute statistics for columns;
 
 explain
 select dependents_n00.empid

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
index aed5bdb..2bd92bd 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb.q
@@ -202,12 +202,6 @@ TBLPROPERTIES ('transactional'='true');
 INSERT INTO `lineorder_n0`
 SELECT * FROM `lineorder_ext_n0`;
 
-analyze table customer_n1 compute statistics for columns;
-analyze table dates_n0 compute statistics for columns;
-analyze table ssb_part_n0 compute statistics for columns;
-analyze table supplier_n0 compute statistics for columns;
-analyze table lineorder_n0 compute statistics for columns;
-
 CREATE MATERIALIZED VIEW `ssb_mv_n0`
 AS
 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q
index 0982b66..1f4621e 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_ssb_2.q
@@ -202,12 +202,6 @@ TBLPROPERTIES ('transactional'='true');
 INSERT INTO `lineorder`
 SELECT * FROM `lineorder_ext`;
 
-analyze table customer_n0 compute statistics for columns;
-analyze table dates compute statistics for columns;
-analyze table ssb_part compute statistics for columns;
-analyze table supplier compute statistics for columns;
-analyze table lineorder compute statistics for columns;
-
 CREATE MATERIALIZED VIEW `ssb_mv`
 AS
 SELECT

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/results/clientnegative/masking_mv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/masking_mv.q.out b/ql/src/test/results/clientnegative/masking_mv.q.out
index 54e9843..c16631a 100644
--- a/ql/src/test/results/clientnegative/masking_mv.q.out
+++ b/ql/src/test/results/clientnegative/masking_mv.q.out
@@ -57,7 +57,32 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                     serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                     name: default.masking_test_view_n_mv
-      Execution mode: vectorized
+              Select Operator
+                expressions: _col0 (type: int)
+                outputColumnNames: col1
+                Statistics: Num rows: 500 Data size: 30200 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(col1, 'hll')
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 424 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-7
     Conditional Operator
@@ -86,6 +111,10 @@ STAGE PLANS:
   Stage: Stage-2
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: key
+          Column Types: int
+          Table: default.masking_test_view_n_mv
 
   Stage: Stage-9
     Materialized View Work
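
For reference, the compute_stats branch and the "Column Stats Desc" entry under Stats Work added above are injected by the same autogather path that this commit extends to CTAS and materialized view creation. A plan of this shape can be inspected with an explain along these lines (table name plan_sketch is illustrative):

set hive.stats.column.autogather=true;
explain create table plan_sketch as select key from src;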

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/results/clientpositive/alter_table_update_status.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_update_status.q.out b/ql/src/test/results/clientpositive/alter_table_update_status.q.out
index ec8a64c..6453391 100644
--- a/ql/src/test/results/clientpositive/alter_table_update_status.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_update_status.q.out
@@ -60,7 +60,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
 PREHOOK: Input: default@src_stat_n0
@@ -87,7 +87,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat_n0 UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
 PREHOOK: Input: default@src_stat_n0
@@ -112,7 +112,7 @@ avg_col_len         	1.23
 max_col_len         	124                 	 	 	 	 	 	 	 	 	 	 
 num_trues           	                    	 	 	 	 	 	 	 	 	 	 
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
-bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ANALYZE TABLE src_stat_int_n0 COMPUTE STATISTICS for columns key
@@ -234,7 +234,7 @@ avg_col_len         	2.34
 max_col_len         	235                 	 	 	 	 	 	 	 	 	 	 
 num_trues           	                    	 	 	 	 	 	 	 	 	 	 
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
-bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: use default

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out b/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
index 1b787af..068f302 100644
--- a/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_update_status_disable_bitvector.q.out
@@ -60,7 +60,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	                    	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column key SET ('numDVs'='1111','avgColLen'='1.111')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
 PREHOOK: Input: default@src_stat
@@ -87,7 +87,7 @@ num_trues
 num_falses          	                    	 	 	 	 	 	 	 	 	 	 
 bitVector           	                    	 	 	 	 	 	 	 	 	 	 
 comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: ALTER TABLE src_stat UPDATE STATISTICS for column value SET ('numDVs'='121','numNulls'='122','avgColLen'='1.23','maxColLen'='124')
 PREHOOK: type: ALTERTABLE_UPDATETABLESTATS
 PREHOOK: Input: default@src_stat

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index 83ee0f7..bd8c5c8 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -77,6 +77,7 @@ STAGE PLANS:
                   key expressions: _col0 (type: int)
                   sort order: +
                   Statistics: Num rows: 12288 Data size: 2907994 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
                   value expressions: _col1 (type: varchar(128))
       Execution mode: vectorized
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
index 1976545..257dbd6 100644
--- a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
+++ b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
@@ -28,16 +28,6 @@ POSTHOOK: Lineage: cmv_basetable_n10.a SCRIPT []
 POSTHOOK: Lineage: cmv_basetable_n10.b SCRIPT []
 POSTHOOK: Lineage: cmv_basetable_n10.c SCRIPT []
 POSTHOOK: Lineage: cmv_basetable_n10.d SCRIPT []
-PREHOOK: query: analyze table cmv_basetable_n10 compute statistics for columns
-PREHOOK: type: ANALYZE_TABLE
-PREHOOK: Input: default@cmv_basetable_n10
-#### A masked pattern was here ####
-PREHOOK: Output: default@cmv_basetable_n10
-POSTHOOK: query: analyze table cmv_basetable_n10 compute statistics for columns
-POSTHOOK: type: ANALYZE_TABLE
-POSTHOOK: Input: default@cmv_basetable_n10
-#### A masked pattern was here ####
-POSTHOOK: Output: default@cmv_basetable_n10
 PREHOOK: query: create materialized view cmv_mat_view_n10
 as select a, b, c from cmv_basetable_n10 where a = 2
 PREHOOK: type: CREATE_MATERIALIZED_VIEW
@@ -64,7 +54,7 @@ PREHOOK: query: show tblproperties cmv_mat_view_n10
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: show tblproperties cmv_mat_view_n10
 POSTHOOK: type: SHOW_TBLPROPERTIES
-COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true","COLUMN_STATS":{"a":"true","b":"true","c":"true"}}
 bucketing_version	2
 numFiles	1
 numFilesErasureCoded	0
@@ -98,7 +88,7 @@ PREHOOK: query: show tblproperties cmv_mat_view2_n4
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: show tblproperties cmv_mat_view2_n4
 POSTHOOK: type: SHOW_TBLPROPERTIES
-COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true"}
+COLUMN_STATS_ACCURATE	{"BASIC_STATS":"true","COLUMN_STATS":{"a":"true","c":"true"}}
 bucketing_version	2
 numFiles	1
 numFilesErasureCoded	0

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
index a960b06..89c967b 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
@@ -1,31 +1,43 @@
-PREHOOK: query: create table A_n18 as
-select * from src
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
+PREHOOK: query: create table A_n18 (key string, value string)
+PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@A_n18
-POSTHOOK: query: create table A_n18 as
-select * from src
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
+POSTHOOK: query: create table A_n18 (key string, value string)
+POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A_n18
+PREHOOK: query: insert into A_n18
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@a_n18
+POSTHOOK: query: insert into A_n18
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@a_n18
 POSTHOOK: Lineage: a_n18.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: a_n18.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: create table B_n14 as
+PREHOOK: query: create table B_n14 (key string, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@B_n14
+POSTHOOK: query: create table B_n14 (key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@B_n14
+PREHOOK: query: insert into B_n14
 select * from src order by key
 limit 10
-PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@B_n14
-POSTHOOK: query: create table B_n14 as
+PREHOOK: Output: default@b_n14
+POSTHOOK: query: insert into B_n14
 select * from src order by key
 limit 10
-POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@B_n14
+POSTHOOK: Output: default@b_n14
 POSTHOOK: Lineage: b_n14.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: b_n14.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 Warning: Map Join MAPJOIN[8][bigTable=?] in task 'Stage-3:MAPRED' is a cross product

http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/results/clientpositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
index f414c68..b56d4b3 100644
--- a/ql/src/test/results/clientpositive/ctas.q.out
+++ b/ql/src/test/results/clientpositive/ctas.q.out
@@ -28,8 +28,9 @@ STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-0, Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -89,6 +90,21 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.nzhang_CTAS1
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string)
+              outputColumnNames: col1, col2
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll')
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -96,7 +112,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
+  Stage: Stage-5
       Create Table Operator:
         Create Table
           columns: k string, value string
@@ -108,6 +124,33 @@ STAGE PLANS:
   Stage: Stage-3
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: k, value
+          Column Types: string, string
+          Table: default.nzhang_CTAS1
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -156,7 +199,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"k\":\"true\",\"value\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	10                  
@@ -188,8 +231,9 @@ STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-0, Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -249,6 +293,21 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.nzhang_ctas2
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string)
+              outputColumnNames: col1, col2
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll')
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -256,7 +315,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
+  Stage: Stage-5
       Create Table Operator:
         Create Table
           columns: key string, value string
@@ -268,6 +327,33 @@ STAGE PLANS:
   Stage: Stage-3
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: string, string
+          Table: default.nzhang_ctas2
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -316,7 +402,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	10                  
@@ -348,8 +434,9 @@ STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-0, Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -409,6 +496,21 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
                   name: default.nzhang_ctas3
+            Select Operator
+              expressions: _col0 (type: double), _col1 (type: string)
+              outputColumnNames: col1, col2
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll')
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -416,7 +518,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
+  Stage: Stage-5
       Create Table Operator:
         Create Table
           columns: half_key double, conb string
@@ -428,6 +530,33 @@ STAGE PLANS:
   Stage: Stage-3
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: half_key, conb
+          Column Types: double, string
+          Table: default.nzhang_ctas3
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 864 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -476,7 +605,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"conb\":\"true\",\"half_key\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	10                  
@@ -540,7 +669,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"conb\":\"true\",\"half_key\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	10                  
@@ -572,8 +701,9 @@ STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-0, Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -633,6 +763,21 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.nzhang_ctas4
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string)
+              outputColumnNames: col1, col2
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll')
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -640,7 +785,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
+  Stage: Stage-5
       Create Table Operator:
         Create Table
           columns: key string, value string
@@ -653,6 +798,33 @@ STAGE PLANS:
   Stage: Stage-3
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: string, string
+          Table: default.nzhang_ctas4
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -701,7 +873,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	10                  
@@ -734,8 +906,9 @@ STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-0, Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -795,6 +968,21 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.nzhang_ctas5
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string)
+              outputColumnNames: col1, col2
+              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll')
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -802,7 +990,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
+  Stage: Stage-5
       Create Table Operator:
         Create Table
           columns: key string, value string
@@ -817,6 +1005,33 @@ STAGE PLANS:
   Stage: Stage-3
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value
+          Column Types: string, string
+          Table: default.nzhang_ctas5
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT

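For context on what the new operators in the plans above are doing: the CTAS file sink now also feeds a Select Operator whose map-side Group By Operator computes partial column statistics via compute_stats(..., 'hll'), and a new trailing Map Reduce stage merges those partials (mode: mergepartial) so the Stats Work stage can persist them through the Column Stats Desc. This is why the explicit ANALYZE statements could be dropped from the .q files.

A minimal HiveQL sketch of the behavior under test (assumption: the existing hive.stats.column.autogather flag, which already gated automatic column stats collection for INSERT, also governs this new CTAS path; the table name below is illustrative and not taken from the patch):

set hive.stats.column.autogather=true;

-- CTAS now gathers column statistics as a by-product of the write
create table ctas_demo stored as textfile as
select key, value from src sort by key, value limit 10;

-- Formerly needed as a separate full-table scan:
--   analyze table ctas_demo compute statistics for columns;

-- COLUMN_STATS_ACCURATE should now include
-- {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
describe formatted ctas_demo;

The trade-off is a little extra work inside the CTAS job (the hll partial aggregates plus one merge stage) in exchange for skipping a second scan of the freshly written table.
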
http://git-wip-us.apache.org/repos/asf/hive/blob/138b00ca/ql/src/test/results/clientpositive/ctas_colname.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
index 9cc82d1..7f7465c 100644
--- a/ql/src/test/results/clientpositive/ctas_colname.q.out
+++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
@@ -13,8 +13,9 @@ POSTHOOK: Output: default@summary
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-0
-  Stage-2 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0, Stage-3
+  Stage-2 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -52,6 +53,21 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.summary
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: double), _col3 (type: string)
+              outputColumnNames: col1, col2, col3, col4
+              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll'), compute_stats(col3, 'hll'), compute_stats(col4, 'hll')
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -59,7 +75,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-3
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: key string, value string, _c1 double, _c2 string
@@ -71,6 +87,33 @@ STAGE PLANS:
   Stage: Stage-2
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value, _c1, _c2
+          Column Types: string, string, double, string
+          Table: default.summary
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 1744 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 1760 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table summary as select *, key + 1, concat(value, value) from src limit 20
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -105,7 +148,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"_c1\":\"true\",\"_c2\":\"true\",\"key\":\"true\",\"value\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	20                  
@@ -166,8 +209,9 @@ POSTHOOK: Output: default@x4
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-0
-  Stage-2 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0, Stage-3
+  Stage-2 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -220,6 +264,21 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.x4
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
+                outputColumnNames: col1, col2, col3
+                Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll'), compute_stats(col3, 'hll')
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -227,7 +286,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-3
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: key string, value string, rr int
@@ -239,6 +298,33 @@ STAGE PLANS:
   Stage: Stage-2
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value, rr
+          Column Types: string, string, int
+          Table: default.x4
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table x4 as select *, rank() over(partition by key order by value) as rr from src1
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -271,7 +357,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"rr\":\"true\",\"value\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	25                  
@@ -338,8 +424,9 @@ STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
   Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-  Stage-3 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-0, Stage-4
+  Stage-3 depends on stages: Stage-5
+  Stage-4 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -420,6 +507,21 @@ STAGE PLANS:
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.x5
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+              outputColumnNames: col1, col2, col3
+              Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll'), compute_stats(col3, 'hll')
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -427,7 +529,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-4
+  Stage: Stage-5
       Create Table Operator:
         Create Table
           columns: key string, value string, lead1 string
@@ -439,6 +541,33 @@ STAGE PLANS:
   Stage: Stage-3
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value, lead1
+          Column Types: string, string, string
+          Table: default.x5
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table x5 as select *, lead(key,1) over(partition by key order by value) as lead1 from src limit 20
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -471,7 +600,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"lead1\":\"true\",\"value\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	20                  
@@ -559,7 +688,32 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.x6
-      Execution mode: vectorized
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string), _col2 (type: double)
+                outputColumnNames: col1, col2, col3
+                Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll'), compute_stats(col3, 'hll')
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:double,max:double,countnulls:bigint,bitvector:binary>)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-7
     Conditional Operator
@@ -588,6 +742,10 @@ STAGE PLANS:
   Stage: Stage-2
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: key, value, _c1
+          Column Types: string, string, double
+          Table: default.x6
 
   Stage: Stage-3
     Map Reduce
@@ -650,7 +808,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"_c1\":\"true\",\"key\":\"true\",\"value\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	25                  
@@ -716,8 +874,9 @@ POSTHOOK: Output: default@x7
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-0
-  Stage-2 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0, Stage-3
+  Stage-2 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -758,6 +917,21 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: default.x7
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+            outputColumnNames: col1, col2, col3
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+            Group By Operator
+              aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll'), compute_stats(col3, 'hll')
+              mode: hash
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -765,7 +939,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-3
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: _col0 string, _col1 string, _c1 bigint
@@ -777,6 +951,33 @@ STAGE PLANS:
   Stage: Stage-2
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: _col0, _col1, _c1
+          Column Types: string, string, bigint
+          Table: default.x7
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table x7 as select * from (select *, count(value) from src group by key, value) a
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -809,7 +1010,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"_c1\":\"true\",\"_col0\":\"true\",\"_col1\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	309                 
@@ -1159,8 +1360,9 @@ POSTHOOK: Output: default@x8
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-0
-  Stage-2 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0, Stage-3
+  Stage-2 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1201,6 +1403,21 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 name: default.x8
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
+            outputColumnNames: col1, col2, col3
+            Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+            Group By Operator
+              aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll'), compute_stats(col3, 'hll')
+              mode: hash
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -1208,7 +1425,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-3
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: _col0 string, _col1 string, _c1 bigint
@@ -1220,6 +1437,33 @@ STAGE PLANS:
   Stage: Stage-2
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: _col0, _col1, _c1
+          Column Types: string, string, bigint
+          Table: default.x8
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 1304 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 1320 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -1252,7 +1496,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"_c1\":\"true\",\"_col0\":\"true\",\"_col1\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	5                   
@@ -1298,8 +1542,9 @@ POSTHOOK: Output: default@x9
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-0
-  Stage-2 depends on stages: Stage-3
+  Stage-4 depends on stages: Stage-0, Stage-3
+  Stage-2 depends on stages: Stage-4
+  Stage-3 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1347,6 +1592,21 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.x9
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string)
+                outputColumnNames: col1, col2
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: compute_stats(col1, 'hll'), compute_stats(col2, 'hll')
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 964 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Move Operator
@@ -1354,7 +1614,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-3
+  Stage: Stage-4
       Create Table Operator:
         Create Table
           columns: _c0 string, key string
@@ -1366,6 +1626,33 @@ STAGE PLANS:
   Stage: Stage-2
     Stats Work
       Basic Stats Work:
+      Column Stats Desc:
+          Columns: _c0, key
+          Column Types: string, string
+          Table: default.x9
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 964 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 964 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 964 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
 PREHOOK: query: create table x9 as select * from (select max(value),key from src group by key having key < 9 AND max(value) IS NOT NULL) a
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -1396,7 +1683,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"_c0\":\"true\",\"key\":\"true\"}}
 	bucketing_version   	2                   
 	numFiles            	1                   
 	numRows             	5