You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by kg...@apache.org on 2020/02/17 12:18:59 UTC

[hive] 02/03: HIVE-22866: Add more testcases for scheduled queries (Zoltan Haindrich reviewed by Miklos Gergely)

This is an automated email from the ASF dual-hosted git repository.

kgyrtkirk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git

commit 0581c0061da678920411768da97e933fb9b87d50
Author: Zoltan Haindrich <ki...@rxd.hu>
AuthorDate: Mon Feb 17 12:07:05 2020 +0000

    HIVE-22866: Add more testcases for scheduled queries (Zoltan Haindrich reviewed by Miklos Gergely)
    
    Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>
---
 .../test/resources/testconfiguration.properties    |   2 +
 ql/src/test/queries/clientpositive/schq_analyze.q  |  31 +++++
 ql/src/test/queries/clientpositive/schq_ingest.q   |  45 +++++++
 .../queries/clientpositive/schq_materialized.q     |  28 +++-
 .../results/clientpositive/llap/schq_analyze.q.out | 110 ++++++++++++++++
 .../results/clientpositive/llap/schq_ingest.q.out  | 124 ++++++++++++++++++
 .../clientpositive/llap/schq_materialized.q.out    | 141 ++++++++++++++++++++-
 7 files changed, 472 insertions(+), 9 deletions(-)

diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 3108d16..1b1bf11 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -790,6 +790,8 @@ minillaplocal.query.files=\
   sysdb.q,\
   sysdb_schq.q,\
   schq_materialized.q,\
+  schq_analyze.q,\
+  schq_ingest.q,\
   table_access_keys_stats.q,\
   temp_table_llap_partitioned.q,\
   tez_bmj_schema_evolution.q,\
diff --git a/ql/src/test/queries/clientpositive/schq_analyze.q b/ql/src/test/queries/clientpositive/schq_analyze.q
new file mode 100644
index 0000000..969b47b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schq_analyze.q
@@ -0,0 +1,31 @@
+--! qt:authorizer
+--! qt:scheduledqueryservice
+--! qt:sysdb
+
+set user.name=hive_admin_user;
+set role admin;
+
+-- create external table
+create external table t (a integer);
+ 
+-- disable autogather
+set hive.stats.autogather=false;
+ 
+insert into t values (1),(2),(3);
+
+-- basic stats show that the table has "0" rows
+desc formatted t;
+
+-- create a schedule to compute stats
+create scheduled query t_analyze cron '0 */1 * * * ? *' as analyze table t compute statistics for columns;
+
+alter scheduled query t_analyze execute;
+
+!sleep 3; 
+ 
+select * from information_schema.scheduled_executions s where schedule_name='t_analyze' order by scheduled_execution_id desc limit 3;
+ 
+-- and the numRows value has been updated
+desc formatted t;
+ 
+
diff --git a/ql/src/test/queries/clientpositive/schq_ingest.q b/ql/src/test/queries/clientpositive/schq_ingest.q
new file mode 100644
index 0000000..d7283d5
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/schq_ingest.q
@@ -0,0 +1,45 @@
+--! qt:authorizer
+--! qt:scheduledqueryservice
+--! qt:sysdb
+
+set user.name=hive_admin_user;
+set role admin;
+
+drop table if exists t;
+drop table if exists s;
+ 
+-- suppose that this table is an external table or something
+-- which supports the pushdown of filter condition on the id column
+create table s(id integer, cnt integer);
+ 
+-- create an internal table and an offset table
+create table t(id integer, cnt integer);
+create table t_offset(offset integer);
+insert into t_offset values(0);
+ 
+-- pretend that data is added to s
+insert into s values(1,1);
+ 
+-- run an ingestion...
+from (select id==offset as first,* from s
+join t_offset on id>=offset) s1
+insert into t select id,cnt where not first
+insert overwrite table t_offset select max(s1.id);
+ 
+-- configure to run ingestion every 10 minutes
+create scheduled query ingest every 10 minutes defined as
+from (select id==offset as first,* from s
+join t_offset on id>=offset) s1
+insert into t select id,cnt where not first
+insert overwrite table t_offset select max(s1.id);
+ 
+-- add some new values
+insert into s values(2,2),(3,3);
+ 
+-- pretend that a timeout has happened
+alter scheduled query ingest execute;
+
+!sleep 3;
+select state,error_message from sys.scheduled_executions;
+
+select * from t order by id;
diff --git a/ql/src/test/queries/clientpositive/schq_materialized.q b/ql/src/test/queries/clientpositive/schq_materialized.q
index fae5239..6baed49 100644
--- a/ql/src/test/queries/clientpositive/schq_materialized.q
+++ b/ql/src/test/queries/clientpositive/schq_materialized.q
@@ -1,5 +1,10 @@
+--! qt:authorizer
+--! qt:scheduledqueryservice
 --! qt:sysdb
 
+set user.name=hive_admin_user;
+set role admin;
+
 drop materialized view if exists mv1;
 drop table if exists emps;
 drop table if exists depts;
@@ -42,16 +47,31 @@ CREATE MATERIALIZED VIEW mv1 AS
     JOIN depts ON (emps.deptno = depts.deptno)
     WHERE hire_date >= '2016-01-01 00:00:00';
 
+-- mv1 is used
+EXPLAIN
+SELECT empid, deptname FROM emps
+JOIN depts ON (emps.deptno = depts.deptno)
+WHERE hire_date >= '2018-01-01';
+
+-- insert a new record
+insert into emps values (1330, 10, 'Bill', 10000, '2020-01-02');
+
+-- mv1 is NOT used
 EXPLAIN
 SELECT empid, deptname FROM emps
 JOIN depts ON (emps.deptno = depts.deptno)
 WHERE hire_date >= '2018-01-01';
 
 -- create a schedule to rebuild mv
-create scheduled query d cron '0 */10 * * * ? *' defined as 
+create scheduled query d cron '0 0 * * * ? *' defined as 
   alter materialized view mv1 rebuild;
 
-
 set hive.support.quoted.identifiers=none;
--- expected result to have it created
-select `(NEXT_EXECUTION)?+.+` from sys.scheduled_queries;
+select `(NEXT_EXECUTION|SCHEDULED_QUERY_ID)?+.+` from sys.scheduled_queries;
+
+alter scheduled query d execute;
+
+!sleep 3;
+
+-- the scheduled execution will fail - because of missing TXN; but overall it works..
+select state,error_message from sys.scheduled_executions;
diff --git a/ql/src/test/results/clientpositive/llap/schq_analyze.q.out b/ql/src/test/results/clientpositive/llap/schq_analyze.q.out
new file mode 100644
index 0000000..a083479
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/schq_analyze.q.out
@@ -0,0 +1,110 @@
+PREHOOK: query: set role admin
+PREHOOK: type: SHOW_ROLES
+POSTHOOK: query: set role admin
+POSTHOOK: type: SHOW_ROLES
+PREHOOK: query: create external table t (a integer)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: create external table t (a integer)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: insert into t values (1),(2),(3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@t
+POSTHOOK: query: insert into t values (1),(2),(3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@t
+POSTHOOK: Lineage: t.a SCRIPT []
+PREHOOK: query: desc formatted t
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: desc formatted t
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@t
+# col_name            	data_type           	comment             
+a                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	EXTERNAL            	TRUE                
+	bucketing_version   	2                   
+	numFiles            	1                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	6                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: create scheduled query t_analyze cron '0 */1 * * * ? *' as analyze table t compute statistics for columns
+PREHOOK: type: CREATE SCHEDULED QUERY
+POSTHOOK: query: create scheduled query t_analyze cron '0 */1 * * * ? *' as analyze table t compute statistics for columns
+POSTHOOK: type: CREATE SCHEDULED QUERY
+PREHOOK: query: alter scheduled query t_analyze execute
+PREHOOK: type: ALTER SCHEDULED QUERY
+POSTHOOK: query: alter scheduled query t_analyze execute
+POSTHOOK: type: ALTER SCHEDULED QUERY
+PREHOOK: query: select * from information_schema.scheduled_executions s where schedule_name='t_analyze' order by scheduled_execution_id desc limit 3
+PREHOOK: type: QUERY
+PREHOOK: Input: information_schema@scheduled_executions
+PREHOOK: Input: sys@scheduled_executions
+PREHOOK: Input: sys@scheduled_queries
+#### A masked pattern was here ####
+POSTHOOK: query: select * from information_schema.scheduled_executions s where schedule_name='t_analyze' order by scheduled_execution_id desc limit 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: information_schema@scheduled_executions
+POSTHOOK: Input: sys@scheduled_executions
+POSTHOOK: Input: sys@scheduled_queries
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted t
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@t
+POSTHOOK: query: desc formatted t
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@t
+# col_name            	data_type           	comment             
+a                   	int                 	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	EXTERNAL_TABLE      	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\"}}
+	EXTERNAL            	TRUE                
+	bucketing_version   	2                   
+	numFiles            	1                   
+	numRows             	3                   
+	rawDataSize         	3                   
+	totalSize           	6                   
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
diff --git a/ql/src/test/results/clientpositive/llap/schq_ingest.q.out b/ql/src/test/results/clientpositive/llap/schq_ingest.q.out
new file mode 100644
index 0000000..19d2b11
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/schq_ingest.q.out
@@ -0,0 +1,124 @@
+PREHOOK: query: set role admin
+PREHOOK: type: SHOW_ROLES
+POSTHOOK: query: set role admin
+POSTHOOK: type: SHOW_ROLES
+PREHOOK: query: drop table if exists t
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists t
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists s
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists s
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table s(id integer, cnt integer)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s
+POSTHOOK: query: create table s(id integer, cnt integer)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s
+PREHOOK: query: create table t(id integer, cnt integer)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: create table t(id integer, cnt integer)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: create table t_offset(offset integer)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t_offset
+POSTHOOK: query: create table t_offset(offset integer)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t_offset
+PREHOOK: query: insert into t_offset values(0)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@t_offset
+POSTHOOK: query: insert into t_offset values(0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@t_offset
+POSTHOOK: Lineage: t_offset.offset SCRIPT []
+PREHOOK: query: insert into s values(1,1)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@s
+POSTHOOK: query: insert into s values(1,1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.cnt SCRIPT []
+POSTHOOK: Lineage: s.id SCRIPT []
+Warning: Shuffle Join MERGEJOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: from (select id==offset as first,* from s
+join t_offset on id>=offset) s1
+insert into t select id,cnt where not first
+insert overwrite table t_offset select max(s1.id)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@s
+PREHOOK: Input: default@t_offset
+PREHOOK: Output: default@t
+PREHOOK: Output: default@t_offset
+POSTHOOK: query: from (select id==offset as first,* from s
+join t_offset on id>=offset) s1
+insert into t select id,cnt where not first
+insert overwrite table t_offset select max(s1.id)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@s
+POSTHOOK: Input: default@t_offset
+POSTHOOK: Output: default@t
+POSTHOOK: Output: default@t_offset
+POSTHOOK: Lineage: t.cnt SIMPLE [(s)s.FieldSchema(name:cnt, type:int, comment:null), ]
+POSTHOOK: Lineage: t.id SIMPLE [(s)s.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: t_offset.offset EXPRESSION [(s)s.FieldSchema(name:id, type:int, comment:null), ]
+Warning: Shuffle Join MERGEJOIN[34][tables = [s, t_offset]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: create scheduled query ingest every 10 minutes defined as
+from (select id==offset as first,* from s
+join t_offset on id>=offset) s1
+insert into t select id,cnt where not first
+insert overwrite table t_offset select max(s1.id)
+PREHOOK: type: CREATE SCHEDULED QUERY
+POSTHOOK: query: create scheduled query ingest every 10 minutes defined as
+from (select id==offset as first,* from s
+join t_offset on id>=offset) s1
+insert into t select id,cnt where not first
+insert overwrite table t_offset select max(s1.id)
+POSTHOOK: type: CREATE SCHEDULED QUERY
+PREHOOK: query: insert into s values(2,2),(3,3)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@s
+POSTHOOK: query: insert into s values(2,2),(3,3)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.cnt SCRIPT []
+POSTHOOK: Lineage: s.id SCRIPT []
+PREHOOK: query: alter scheduled query ingest execute
+PREHOOK: type: ALTER SCHEDULED QUERY
+POSTHOOK: query: alter scheduled query ingest execute
+POSTHOOK: type: ALTER SCHEDULED QUERY
+PREHOOK: query: select state,error_message from sys.scheduled_executions
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@scheduled_executions
+#### A masked pattern was here ####
+POSTHOOK: query: select state,error_message from sys.scheduled_executions
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@scheduled_executions
+#### A masked pattern was here ####
+FINISHED	NULL
+PREHOOK: query: select * from t order by id
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t order by id
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+1	1
+2	2
+3	3
diff --git a/ql/src/test/results/clientpositive/llap/schq_materialized.q.out b/ql/src/test/results/clientpositive/llap/schq_materialized.q.out
index 7599be7..e904d46 100644
--- a/ql/src/test/results/clientpositive/llap/schq_materialized.q.out
+++ b/ql/src/test/results/clientpositive/llap/schq_materialized.q.out
@@ -1,3 +1,7 @@
+PREHOOK: query: set role admin
+PREHOOK: type: SHOW_ROLES
+POSTHOOK: query: set role admin
+POSTHOOK: type: SHOW_ROLES
 PREHOOK: query: drop materialized view if exists mv1
 PREHOOK: type: DROP_MATERIALIZED_VIEW
 POSTHOOK: query: drop materialized view if exists mv1
@@ -145,18 +149,145 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               ListSink
 
-PREHOOK: query: create scheduled query d cron '0 */10 * * * ? *' defined as 
+PREHOOK: query: insert into emps values (1330, 10, 'Bill', 10000, '2020-01-02')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@emps
+POSTHOOK: query: insert into emps values (1330, 10, 'Bill', 10000, '2020-01-02')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@emps
+POSTHOOK: Lineage: emps.deptno SCRIPT []
+POSTHOOK: Lineage: emps.empid SCRIPT []
+POSTHOOK: Lineage: emps.hire_date SCRIPT []
+POSTHOOK: Lineage: emps.name SCRIPT []
+POSTHOOK: Lineage: emps.salary SCRIPT []
+PREHOOK: query: EXPLAIN
+SELECT empid, deptname FROM emps
+JOIN depts ON (emps.deptno = depts.deptno)
+WHERE hire_date >= '2018-01-01'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@depts
+PREHOOK: Input: default@emps
+#### A masked pattern was here ####
+POSTHOOK: query: EXPLAIN
+SELECT empid, deptname FROM emps
+JOIN depts ON (emps.deptno = depts.deptno)
+WHERE hire_date >= '2018-01-01'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@depts
+POSTHOOK: Input: default@emps
+#### A masked pattern was here ####
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: emps
+                  filterExpr: ((hire_date >= TIMESTAMP'2018-01-01 00:00:00') and deptno is not null) (type: boolean)
+                  Statistics: Num rows: 8 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((hire_date >= TIMESTAMP'2018-01-01 00:00:00') and deptno is not null) (type: boolean)
+                    Statistics: Num rows: 8 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: empid (type: int), deptno (type: int)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: int)
+                        Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: int)
+            Execution mode: vectorized, llap
+            LLAP IO: may be used (ACID table)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: depts
+                  filterExpr: deptno is not null (type: boolean)
+                  Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: deptno is not null (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: deptno (type: int), deptname (type: varchar(256))
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: varchar(256))
+            Execution mode: vectorized, llap
+            LLAP IO: may be used (ACID table)
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col1 (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col3
+                Statistics: Num rows: 8 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: int), _col3 (type: varchar(256))
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 8 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 8 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: create scheduled query d cron '0 0 * * * ? *' defined as 
   alter materialized view mv1 rebuild
 PREHOOK: type: CREATE SCHEDULED QUERY
-POSTHOOK: query: create scheduled query d cron '0 */10 * * * ? *' defined as 
+POSTHOOK: query: create scheduled query d cron '0 0 * * * ? *' defined as 
   alter materialized view mv1 rebuild
 POSTHOOK: type: CREATE SCHEDULED QUERY
-PREHOOK: query: select `(NEXT_EXECUTION)?+.+` from sys.scheduled_queries
+PREHOOK: query: select `(NEXT_EXECUTION|SCHEDULED_QUERY_ID)?+.+` from sys.scheduled_queries
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@scheduled_queries
 #### A masked pattern was here ####
-POSTHOOK: query: select `(NEXT_EXECUTION)?+.+` from sys.scheduled_queries
+POSTHOOK: query: select `(NEXT_EXECUTION|SCHEDULED_QUERY_ID)?+.+` from sys.scheduled_queries
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@scheduled_queries
 #### A masked pattern was here ####
-1	d	true	hive	0 */10 * * * ? *	hive_test_user	alter materialized view `default`.`mv1` rebuild
+d	true	hive	0 0 * * * ? *	hive_admin_user	alter materialized view `default`.`mv1` rebuild
+PREHOOK: query: alter scheduled query d execute
+PREHOOK: type: ALTER SCHEDULED QUERY
+POSTHOOK: query: alter scheduled query d execute
+POSTHOOK: type: ALTER SCHEDULED QUERY
+PREHOOK: query: select state,error_message from sys.scheduled_executions
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@scheduled_executions
+#### A masked pattern was here ####
+POSTHOOK: query: select state,error_message from sys.scheduled_executions
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@scheduled_executions
+#### A masked pattern was here ####
+FAILED	FAILED: SemanticException [Error 10265]: This command is not allowed on an ACID table default.emps with a non-ACID transaction manager. Failed command: /* schedule: d */alter materialized view `default`.`mv1` rebuild